| source (stringlengths 3–86) | python (stringlengths 75–1.04M) |
|---|---|
binance_futures_websocket.py
|
# coding: UTF-8
import hashlib
import hmac
import json
import os
import threading
import time
import traceback
import urllib
import requests
import websocket
from datetime import datetime
from pytz import UTC
from src import logger, to_data_frame, notify
from src.config import config as conf
from src.exchange.binance_futures.binance_futures_api import Client
def generate_nonce():
return int(round(time.time() * 1000))
def get_listenkey(api_key, api_secret, testnet):
client = Client(api_key=api_key, api_secret=api_secret, testnet=testnet)
listenKey = client.stream_get_listen_key()
return listenKey
class BinanceFuturesWs:
def __init__(self, account, pair, test=False):
"""
constructor
"""
# Account
self.account = account
# Pair
self.pair = pair.lower()
# testnet
self.testnet = test
# domain
self.domain = None
# Use healthchecks.io
self.use_healthchecks = False
# Last Heartbeat
self.last_heartbeat = 0
# condition that the bot runs on.
self.is_running = True
# Notification destination listener
self.handlers = {}
# listen key
self.listenKey = None
# API keys
self.api_key = conf['binance_test_keys'][self.account]['API_KEY'] if self.testnet else conf['binance_keys'][self.account]['API_KEY']
self.api_secret = conf['binance_test_keys'][self.account]['SECRET_KEY'] if self.testnet else conf['binance_keys'][self.account]['SECRET_KEY']
if test:
self.domain = 'stream.binancefuture.com'
else:
self.domain = 'fstream.binance.com'
self.__get_auth_user_data_streams()
self.endpoint = 'wss://' + self.domain + '/stream?streams=' + self.listenKey + '/' + self.pair + '@ticker/' + self.pair + '@kline_1m/' \
+ self.pair + '@kline_5m/' + self.pair + '@kline_30m/' \
+ self.pair + '@kline_1h/' + self.pair + '@kline_1d/' + self.pair + '@kline_1w/' \
+ self.pair + '@depth20@100ms/' + self.pair + '@bookTicker'
self.ws = websocket.WebSocketApp(self.endpoint,
on_message=self.__on_message,
on_error=self.__on_error,
on_close=self.__on_close)
self.wst = threading.Thread(target=self.__start)
self.wst.daemon = True
self.wst.start()
self.__keep_alive_user_datastream(self.listenKey)
def __get_auth_user_data_streams(self):
"""
authenticate user data streams
"""
if len(self.api_key) > 0 and len(self.api_secret) > 0:
self.listenKey = get_listenkey(self.api_key, self.api_secret, testnet=self.testnet)
else:
logger.info("WebSocket is not able to get listenKey for user data streams")
def __start(self):
"""
start the websocket.
"""
self.ws.run_forever()
def __keep_alive_user_datastream(self, listenKey):
"""
keep the user data stream alive; the listenKey needs to be refreshed at least every 60 minutes
"""
client = Client(self.api_key, self.api_secret, self.testnet)
def loop_function():
while self.is_running:
try:
# retries 10 times over 486secs
# before raising error/exception
# check binance_futures_api.py line 113
# for implementation details
#client.stream_keepalive()
listenKey = client.stream_get_listen_key()
if self.listenKey != listenKey:
logger.info("listenKey Changed!")
notify("listenKey Changed!")
self.listenKey = listenKey
self.ws.close()
# Send a heartbeat to Healthchecks.io
if self.use_healthchecks:
try:
requests.get(conf['healthchecks.io'][self.account]['listenkey_heartbeat'])
#logger.info("Listen Key Heart Beat sent!")
except Exception as e:
pass
time.sleep(600)
except Exception as e:
logger.error(f"Keep Alive Error - {str(e)}")
#logger.error(traceback.format_exc())
notify(f"Keep Alive Error - {str(e)}")
#notify(traceback.format_exc())
timer = threading.Timer(10, loop_function)
timer.daemon = True
if listenKey is not None:
timer.start()
else:
self.__get_auth_user_data_streams()
timer.start()
def __on_error(self, ws, message):
"""
On Error listener
:param ws:
:param message:
"""
logger.error(message)
logger.error(traceback.format_exc())
notify(f"Error occurred. {message}")
notify(traceback.format_exc())
def __on_message(self, ws, message):
"""
On Message listener
:param ws:
:param message:
:return:
"""
try:
obj = json.loads(message)
if 'e' in obj['data']:
e = obj['data']['e']
action = ""
datas = obj['data']
if e.startswith("kline"):
if self.use_healthchecks:
current_minute = datetime.now().time().minute
if self.last_heartbeat != current_minute:
# Send a heartbeat to Healthchecks.io
try:
requests.get(conf['healthchecks.io'][self.account]['websocket_heartbeat'])
#logger.info("WS Heart Beat sent!")
self.last_heartbeat = current_minute
except Exception as e:
pass
data = [{
"timestamp" : datas['k']['T'],
"high" : float(datas['k']['h']),
"low" : float(datas['k']['l']),
"open" : float(datas['k']['o']),
"close" : float(datas['k']['c']),
"volume" : float(datas['k']['v'])
}]
data[0]['timestamp'] = datetime.fromtimestamp(data[0]['timestamp']/1000).astimezone(UTC)
self.__emit(obj['data']['k']['i'], obj['data']['k']['i'], to_data_frame([data[0]]))
elif e.startswith("24hrTicker"):
self.__emit(e, action, datas)
elif e.startswith("ACCOUNT_UPDATE"):
self.__emit(e, action, datas['a']['P'])
self.__emit('wallet', action, datas['a']['B'][0])
self.__emit('margin', action, datas['a']['B'][0])
# todo ORDER_TRADE_UPDATE
elif e.startswith("ORDER_TRADE_UPDATE"):
self.__emit(e, action, datas['o'])
#todo orderbook stream
# elif table.startswith(""):
# self.__emit(e, action, data)
elif e.startswith("listenKeyExpired"):
self.__emit('close', action, datas)
self.__get_auth_user_data_streams()
logger.info(f"listenKeyExpired!!!")
#self.__on_close(ws)
self.ws.close()
elif not 'e' in obj['data']:
e = 'IndividualSymbolBookTickerStreams'
action = ''
data = obj['data']
#logger.info(f"{data}")
self.__emit(e, action, data)
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
def __emit(self, key, action, value):
"""
send data
"""
if key in self.handlers:
self.handlers[key](action, value)
def __on_close(self, ws):
"""
On Close Listener
:param ws:
"""
if 'close' in self.handlers:
self.handlers['close']()
if self.is_running:
logger.info(f"Websocket On Close: Restart")
notify(f"Websocket On Close: Restart")
time.sleep(60)
self.endpoint = 'wss://' + self.domain + '/stream?streams=' + self.listenKey + '/' + self.pair + '@ticker/' + self.pair + '@kline_1m/' \
+ self.pair + '@kline_5m/' + self.pair + '@kline_30m/' \
+ self.pair + '@kline_1h/' + self.pair + '@kline_1d/' + self.pair + '@kline_1w/' \
+ self.pair + '@depth20@100ms/' + self.pair + '@bookTicker'
self.ws = websocket.WebSocketApp(self.endpoint,
on_message=self.__on_message,
on_error=self.__on_error,
on_close=self.__on_close)
self.wst = threading.Thread(target=self.__start)
self.wst.daemon = True
self.wst.start()
def on_close(self, func):
"""
on close fn
:param func:
"""
self.handlers['close'] = func
def bind(self, key, func):
"""
bind fn
:param key:
:param func:
"""
if key == '1m':
self.handlers['1m'] = func
if key == '5m':
self.handlers['5m'] = func
if key == '1h':
self.handlers['1h'] = func
if key == '1d':
self.handlers['1d'] = func
if key == 'instrument':
self.handlers['24hrTicker'] = func
if key == 'margin':
self.handlers['margin'] = func
if key == 'position':
self.handlers['ACCOUNT_UPDATE'] = func
if key == 'order':
self.handlers['ORDER_TRADE_UPDATE'] = func
if key == 'wallet':
self.handlers['wallet'] = func
if key == 'IndividualSymbolBookTickerStreams':
self.handlers['IndividualSymbolBookTickerStreams'] = func
if key == 'orderBookL2':
self.handlers['orderBookL2'] = func
def close(self):
"""
close websocket
"""
self.is_running = False
self.ws.close()
|
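A minimal usage sketch for the class above (not part of the original file); the import path and the account name are assumptions based on the imports shown in the source, and it assumes valid testnet API keys are configured for that account.

```python
# Hypothetical usage sketch for BinanceFuturesWs; import path and account name are assumptions.
import time
from src.exchange.binance_futures.binance_futures_websocket import BinanceFuturesWs

def on_kline_1m(action, df):
    # handlers are invoked as handler(action, value); for klines the value is the
    # one-row DataFrame built by to_data_frame() in __on_message
    print("1m candle:", df)

ws = BinanceFuturesWs(account="binanceaccount1", pair="BTCUSDT", test=True)
ws.bind('1m', on_kline_1m)                      # receive 1m kline updates
ws.on_close(lambda *args: print("websocket closed"))
time.sleep(120)                                 # let a couple of candles arrive
ws.close()
```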
killing_processes.py
|
import multiprocessing
import time
def foo():
print('Starting function')
for i in range(0, 10):
print('-->%d\n' % i)
time.sleep(1)
print('Finished function')
if __name__ == '__main__':
p = multiprocessing.Process(target=foo)
print('Process before execution:', p, p.is_alive())
p.start()
print('Process running:', p, p.is_alive())
p.terminate()
print('Process terminated:', p, p.is_alive())
p.join()
print('Process joined:', p, p.is_alive())
print('Process exit code:', p.exitcode)
|
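A small hedged variant of the demo above, showing the same multiprocessing API with a short grace period before terminating; the exit-code remark applies to POSIX systems, where terminate() sends SIGTERM.

```python
# Variant sketch: give the worker a chance to finish before terminating it.
import multiprocessing
import time

def worker():
    time.sleep(30)

if __name__ == '__main__':
    p = multiprocessing.Process(target=worker)
    p.start()
    p.join(timeout=1)      # wait briefly for a normal exit
    if p.is_alive():
        p.terminate()      # sends SIGTERM on POSIX systems
        p.join()
    # on POSIX a terminated child reports the negative signal number, e.g. -15 for SIGTERM
    print('Process exit code:', p.exitcode)
```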
notificator.py
|
# in case python2
from __future__ import print_function
import os
import threading
import yaml
from .notification import MailNotification, SlackNotification, TwitterNotification
class Notificator:
def __init__(self, secrets="~/.secrets/notificator_secrets.yaml", suppress_err=True):
"""
secrets: path to your secret .yaml, or dictionary which contains the secrets
for mail keys
"MAIL_PASSWORD", "MAIL_ACCOUNT", "MAIL_TO_ADDRESS", "MAIL_BCC_ADDRESS", "MAIL_SUBJECT"
for slack keys
"SLACK_USER_NAME", "SLACK_CHANNEL", "SLACK_HOOK_URL"
for twitter keys
"API_KEY", "API_SECRET", "ACCESS_TOKEN", "ACCESS_SECRET"
"""
self._notificators = []
self.suppress_err = suppress_err
self._set_mail = False
self._set_slack = False
self._set_twitter = False
if isinstance(secrets, str):
if os.path.exists(os.path.expanduser(secrets)):
with open(os.path.expanduser(secrets), "r") as f:
self.secrets = yaml.safe_load(f)
else:
self.secrets = secrets
# set the default mail notification using the credentials from the secrets file/dict
def setMail(self):
if not self._set_mail:
self._notificators.append(MailNotification(self.secrets["MAIL_PASSWORD"], self.secrets["MAIL_ACCOUNT"], self.secrets["MAIL_TO_ADDRESS"], self.secrets["MAIL_BCC_ADDRESS"], self.secrets["MAIL_SUBJECT"], self.suppress_err))
self._set_mail = True
def addMailNotify(self, passwd, account, to_addr, bcc_addr, subject, suppress_err=True):
self._notificators.append(MailNotification(passwd, account, to_addr, bcc_addr, subject, suppress_err))
# set the default Slack notification using the credentials from the secrets file/dict
def setSlack(self):
if not self._set_slack:
self._notificators.append(SlackNotification(self.secrets["SLACK_USER_NAME"], self.secrets["SLACK_CHANNEL"], self.secrets["SLACK_HOOK_URL"], self.suppress_err))
self._set_slack = True
def addSlackNotify(self, user_name, channel, hook_url, suppress_err=True):
self._notificators.append(SlackNotification(user_name, channel, hook_url, suppress_err))
# set the default Twitter notification using the credentials from the secrets file/dict
def setTwitter(self):
if not self._set_twitter:
self._notificators.append(TwitterNotification(self.secrets["API_KEY"], self.secrets["API_SECRET"], self.secrets["ACCESS_TOKEN"], self.secrets["ACCESS_SECRET"], self.suppress_err))
self._set_twitter = True
def addTwitterNotify(self, api_key, api_secret, access_token, access_secret, suppress_err=True):
self._notificators.append(TwitterNotification(api_key, api_secret, access_token, access_secret, suppress_err))
def show_list(self):
for idx, noti in enumerate(self._notificators):
print("{:3} : {}".format(idx, noti.contents()))
def del_notify(self, idx):
if idx < len(self._notificators) and idx >= 0:
_ = self._notificators.pop(idx)
return True
return False
def get_list(self):
return self._notificators
def _send_notification(self, msg):
# broadcast notification
for noti in self._notificators:
noti.send_message(msg)
def notify(self, msg, use_thread=False):
# sending can take time, so if you want low latency, set use_thread=True
if use_thread:
try:
th = threading.Thread(target=self._send_notification, args=(msg,))
th.start()
except Exception as e:
import traceback
traceback.print_exc()
print(e)
else:
self._send_notification(msg)
|
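A minimal usage sketch (not part of the original module), assuming the package exposing Notificator is importable and using an in-memory secrets dict with the Slack keys listed in the constructor docstring; the hook URL is a placeholder.

```python
# Hypothetical usage of Notificator with an in-memory secrets dict.
from notificator import Notificator  # assumes the package is importable under this name

secrets = {
    "SLACK_USER_NAME": "bot",
    "SLACK_CHANNEL": "#alerts",
    "SLACK_HOOK_URL": "https://hooks.slack.com/services/XXX/YYY/ZZZ",  # placeholder
}
notifier = Notificator(secrets=secrets, suppress_err=True)
notifier.setSlack()                                     # register the default Slack notification
notifier.show_list()                                    # list configured notificators
notifier.notify("backtest finished", use_thread=True)   # send without blocking the caller
```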
demo_enip.py
|
# -*- coding: utf-8 -*-
# @Time : 2018/9/6 2:13 PM
# @Author : shijie luan
# @Email : lsjfy0411@163.com
# @File : demo_protocol.py
# @Software: PyCharm
'''
This module is mainly intended to tie in with the simulation of the 10 protocols; the database module, server module and classification module can be reused.
Since the FANUC honeypot already implements all of these modules, refer to the modules mentioned above before changing this one.
Basic idea:
1. Select the 10 protocols: request/response pairs are needed (requests preferably taken from nmap scripts; if none exist, at least label the function).
2. Database storage: id - request - response - protocol - function; reuse connect_database.py (used to connect to the database).
Whether the database is populated manually or automatically is up to you; if automated, docker_mysql.py can be reused.
3. Classification: reuse classify.py. For now only a single class per protocol is handled for the 10 protocols, so on the template side one dict per protocol is enough.
4. Switching: at startup each protocol gets one server socket bound to a port; if several protocols map to the same port, the port is reused.
Note: a response is only sent back when both the protocol and the port match.
'''
import binascii
import socket
import threading
import time
import sys
data = [
#s7
{'request_data':'0300001611e00000001400c1020100c2020102c0010a',
'response_data':'0300001611d00005001000c0010ac1020100c2020200',
'function':'cotp','id':1},
{'request_data':'0300001611e00000000500c1020100c2020200c0010a',
'response_data':'0300001611d00005001000c0010ac1020100c2020200',
'function':'cotp','id':2},
{'request_data':'0300001902f08032010000000000080000f0000001000101e0',
'response_data':'0300001b02f080320300000000000800000000f0000001000100f0',
'function':'setup','id':3},
{'request_data': '0300002102f080320700000000000800080001120411440100ff09000400110001',
'response_data': '0300007d02f080320700000000000c0060000112081284010100000000ff09005c00110001001c0003000136455337203231352d31414734302d3058423020000000012020000636455337203231352d31414734302d3058423020000000012020000736455337203231352d31414734302d3058423020000056040000',
'function': 'userdata','id':4},
{'request_data': '0300002102f080320700000000000800080001120411440100ff090004001c0001',
'response_data': '0300002102f080320700000000000c000400011208128401010000d4010a000000',
'function': 'userdata','id':5},
#enip
{'request_data':'63000000000000000000000000000000c1debed100000000',
'response_data':'63003300000000000000000000000000c1debed10000000001000c002d0001000002af120a0101a4000000000000000001000c003a00040330008e4d52000b313735362d454e42542f4103',
'function':'listdict','id':6}
]
#63000000000000000000000000000000c1debed100000000
#63000000000000000000000000000000c1debed100000000
#70001c0001002a00000000000000000000000000000000000000000001000200a1000400224095ffb1000800e7000602208e2401
# 0300007d02f080320700000000000c0060000112081284010100000000ff09005c00110001001c0003000136455337203231352d31414734302d3058423020000000012020000636455337203231352d31414734302d3058423020000000012020000736455337203231352d31414734302d3058423020000056040000
# data = {id:1,
# 'request_data':'',
# 'response_data':'',
# 'protocol':'S7',
# 'functions':'get_info'}
def processRecv(strRecv):
all = strRecv.split('030000')
# print(all)
# drop empty fragments left by the split (removing items while iterating over a list can skip elements)
all = [i for i in all if i != '']
# if all[0] == '' and all[-1]:
# all.remove('')
if all != []:
for i in range(len(all)):
all[i] = '030000' + all[i]
# print(all[i])
else:
# put alert information here
# the packets that cause this case are usually 'a0a0a0a0' or ''
pass
# print(all)
return all
def b2a_str(data_temp):
# convert the network byte stream to an ASCII hex string
data_recv = binascii.b2a_hex(data_temp)
data_recv = data_recv.decode('utf-8')
# print(type(str(data)))
# convert the byte stream into a list
# request_list = processRecv(data)
return data_recv
def processRequest(request):
return 0
def findresponse(request):
# the request here is in ASCII hex format
for i in data:
if i['request_data'] == request:
return binascii.a2b_hex(i['response_data'])
def s7link(sock,addr):
print("Accept new connection from %s:%s" % addr)
count = 0
id = 0
while True:
if count <= 100:
try:
data_temp = sock.recv(1024)
if data_temp != b'':
# print(data_temp)
print(count)
# time.sleep(0.1)
data_recv = b2a_str(data_temp)
print(data_recv)
# if request_list[0]['function'] == 'cotp' and request_list[0]['id'] < 1:
# id += 1
# else:
try:
response_data = findresponse(data_recv)
sock.send(response_data)
count = 0
except:
print('response cannot find!%s'%data_recv)
# when no match is found, just send a default reply
sock.send(binascii.a2b_hex('0300001611d00005001000c0010ac1020100c2020200'))
else:
count += 1
except:
count += 1
print('no request!')
# sock.send(binascii.a2b_hex('0300001611d00005001000c0010ac1020100c2020200'))
else:
sock.close()
print("connection from %s:%s has been broken! " % addr)
break
time.sleep(0.2)
sock.close()
def opens7(ip,port=102):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# s.bind(('127.0.0.1', 102))
s.bind((ip, port))
# s.bind(('192.168.127.94', 44818))
# set the maximum number of queued connections
s.listen(100)
# s.setblocking(0)
# s.setblocking(0)
print('Waiting for connecting...')
'''
server loop that accepts incoming connections
'''
while True:
sock, addr = s.accept()
# set the socket to non-blocking mode
sock.setblocking(False)
t = threading.Thread(target=s7link, args=(sock, addr))
t.start()
print("ok")
def openEnip(ip, port=44818):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# s1.bind(('127.0.0.1', 102))
# s.bind(('192.168.127.94', 44818))
s.bind((ip, port))
# set the maximum number of queued connections
s.listen(100)
# s.setblocking(0)
# s.setblocking(0)
print('Waiting for connecting...')
'''
server loop that accepts incoming connections
'''
while True:
sock, addr = s.accept()
# set the socket to non-blocking mode
sock.setblocking(False)
t = threading.Thread(target=s7link, args=(sock, addr))
t.start()
print("ok")
# can be wrapped as a function so it is easy to call from Python programs
def get_host_ip():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 80))
ip = s.getsockname()[0]
finally:
s.close()
return ip
if __name__ == "__main__":
# request = binascii.a2b_hex("0300001611e00000000500c1020100c2020200c0010a")
# indata = b2a_str(request)
# for i in indata:
# response = findresponse(i)
# print(response)
# opens7()
ip_addr = get_host_ip()
if ip_addr == '':
# ip_port = sys.argv[1]
# else:
try:
ip_addr = sys.argv[1]
# ip_port = sys.argv[2]
except:
print("Error: you must provide your IP address as a command-line argument")
openEnip(ip_addr)
# print(find(data))
|
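A short offline self-check (mirroring the commented-out test in `__main__`): it walks the bundled request/response table and verifies that `findresponse` returns the stored reply for each request; it assumes the module is importable as `demo_enip`.

```python
# Hypothetical self-check; assumes demo_enip.py is importable as `demo_enip`.
import binascii
from demo_enip import data, findresponse

for entry in data:
    expected = binascii.a2b_hex(entry['response_data'])
    assert findresponse(entry['request_data']) == expected, "mismatch for id %d" % entry['id']
print("all %d request/response pairs resolve correctly" % len(data))
```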
client.py
|
import socket
import threading
from threading import Thread
from colored import fg, bg, attr
color1 = fg('green')
color2 = fg('red')
color3 = fg('yellow')
reset = attr('reset')
try:
file1 = open('client-header.txt', 'r')
print(' ')
print (color3 + file1.read() + reset)
file1.close()
except IOError:
print('\nBanner File not found!')
host = input(color1 + '\nEnter server IP: ' + reset)
print('\n')
port = int(input(color1 + 'Enter TCP port no.: ' + reset))
print('\n')
#host = '127.0.0.1'
#port = 64352
username = input(color1 + '\nEnter a username : ' + reset)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((host, port))
def receive():
while True:
try:
message = client.recv(1024).decode('utf-8')
if (message == 'USERNAME'):
client.send(username.encode('utf-8'))
else:
print(color1 + message + reset)
except:
print(color2 + 'An error occurred!!!' + reset)
client.close()
break
def write():
while True:
message = f'\n{username}: {input("")}\n'
client.send(message.encode('utf-8'))
receive_thread = threading.Thread(target=receive)
receive_thread.start()
write_thread = threading.Thread(target=write)
write_thread.start()
print('\n')
|
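The client above expects a server that first sends the literal string 'USERNAME' and then relays chat messages; a minimal companion server sketch under that assumption (it is not the original server) could look like this, using port 64352 from the commented-out default in the client.

```python
# Hypothetical companion chat server for client.py: asks for the username, then broadcasts.
import socket
import threading

clients = []

def handle(conn):
    conn.send('USERNAME'.encode('utf-8'))        # prompt the client for its username
    conn.recv(1024).decode('utf-8')              # read the username reply
    clients.append(conn)
    while True:
        try:
            message = conn.recv(1024)
            if not message:
                break
            for c in clients:                    # broadcast to every connected client
                c.send(message)
        except OSError:
            break
    clients.remove(conn)
    conn.close()

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(('0.0.0.0', 64352))                  # matches the commented default port in client.py
server.listen()
while True:
    conn, addr = server.accept()
    threading.Thread(target=handle, args=(conn,), daemon=True).start()
```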
build.py
|
## @file
# build a platform or a module
#
# Copyright (c) 2014, Hewlett-Packard Development Company, L.P.<BR>
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2018, Hewlett Packard Enterprise Development, L.P.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from __future__ import print_function
import Common.LongFilePathOs as os
import re
from io import BytesIO
import sys
import glob
import time
import platform
import traceback
import encodings.ascii
import itertools
import multiprocessing
from struct import *
from threading import *
from optparse import OptionParser
from subprocess import *
from Common import Misc as Utils
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.TargetTxtClassObject import TargetTxtClassObject
from Common.ToolDefClassObject import ToolDefClassObject
from Common.DataType import *
from Common.BuildVersion import gBUILD_VERSION
from AutoGen.AutoGen import *
from Common.BuildToolError import *
from Workspace.WorkspaceDatabase import WorkspaceDatabase
from Common.MultipleWorkspace import MultipleWorkspace as mws
from BuildReport import BuildReport
from GenPatchPcdTable.GenPatchPcdTable import *
from PatchPcdValue.PatchPcdValue import *
import Common.EdkLogger
import Common.GlobalData as GlobalData
from GenFds.GenFds import GenFds, GenFdsApi
from collections import OrderedDict, defaultdict
# Version and Copyright
VersionNumber = "0.60" + ' ' + gBUILD_VERSION
__version__ = "%prog Version " + VersionNumber
__copyright__ = "Copyright (c) 2007 - 2018, Intel Corporation All rights reserved."
## standard targets of build command
gSupportedTarget = ['all', 'genc', 'genmake', 'modules', 'libraries', 'fds', 'clean', 'cleanall', 'cleanlib', 'run']
## build configuration file
gBuildConfiguration = "target.txt"
gToolsDefinition = "tools_def.txt"
TemporaryTablePattern = re.compile(r'^_\d+_\d+_[a-fA-F0-9]+$')
TmpTableDict = {}
## Check environment PATH variable to make sure the specified tool is found
#
# If the tool is found in the PATH, then True is returned
# Otherwise, False is returned
#
def IsToolInPath(tool):
if 'PATHEXT' in os.environ:
extns = os.environ['PATHEXT'].split(os.path.pathsep)
else:
extns = ('',)
for pathDir in os.environ['PATH'].split(os.path.pathsep):
for ext in extns:
if os.path.exists(os.path.join(pathDir, tool + ext)):
return True
return False
## Check environment variables
#
# Check environment variables that must be set for build. Currently they are
#
# WORKSPACE The directory all packages/platforms start from
# EDK_TOOLS_PATH The directory contains all tools needed by the build
# PATH $(EDK_TOOLS_PATH)/Bin/<sys> must be set in PATH
#
# If any of above environment variable is not set or has error, the build
# will be broken.
#
def CheckEnvVariable():
# check WORKSPACE
if "WORKSPACE" not in os.environ:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="WORKSPACE")
WorkspaceDir = os.path.normcase(os.path.normpath(os.environ["WORKSPACE"]))
if not os.path.exists(WorkspaceDir):
EdkLogger.error("build", FILE_NOT_FOUND, "WORKSPACE doesn't exist", ExtraData=WorkspaceDir)
elif ' ' in WorkspaceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in WORKSPACE path",
ExtraData=WorkspaceDir)
os.environ["WORKSPACE"] = WorkspaceDir
# set multiple workspace
PackagesPath = os.getenv("PACKAGES_PATH")
mws.setWs(WorkspaceDir, PackagesPath)
if mws.PACKAGES_PATH:
for Path in mws.PACKAGES_PATH:
if not os.path.exists(Path):
EdkLogger.error("build", FILE_NOT_FOUND, "One Path in PACKAGES_PATH doesn't exist", ExtraData=Path)
elif ' ' in Path:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in PACKAGES_PATH", ExtraData=Path)
#
# Check EFI_SOURCE (Edk build convention). EDK_SOURCE will always point to ECP
#
if "ECP_SOURCE" not in os.environ:
os.environ["ECP_SOURCE"] = mws.join(WorkspaceDir, GlobalData.gEdkCompatibilityPkg)
if "EFI_SOURCE" not in os.environ:
os.environ["EFI_SOURCE"] = os.environ["ECP_SOURCE"]
if "EDK_SOURCE" not in os.environ:
os.environ["EDK_SOURCE"] = os.environ["ECP_SOURCE"]
#
# Unify case of characters on case-insensitive systems
#
EfiSourceDir = os.path.normcase(os.path.normpath(os.environ["EFI_SOURCE"]))
EdkSourceDir = os.path.normcase(os.path.normpath(os.environ["EDK_SOURCE"]))
EcpSourceDir = os.path.normcase(os.path.normpath(os.environ["ECP_SOURCE"]))
os.environ["EFI_SOURCE"] = EfiSourceDir
os.environ["EDK_SOURCE"] = EdkSourceDir
os.environ["ECP_SOURCE"] = EcpSourceDir
os.environ["EDK_TOOLS_PATH"] = os.path.normcase(os.environ["EDK_TOOLS_PATH"])
if not os.path.exists(EcpSourceDir):
EdkLogger.verbose("ECP_SOURCE = %s doesn't exist. Edk modules could not be built." % EcpSourceDir)
elif ' ' in EcpSourceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in ECP_SOURCE path",
ExtraData=EcpSourceDir)
if not os.path.exists(EdkSourceDir):
if EdkSourceDir == EcpSourceDir:
EdkLogger.verbose("EDK_SOURCE = %s doesn't exist. Edk modules could not be built." % EdkSourceDir)
else:
EdkLogger.error("build", PARAMETER_INVALID, "EDK_SOURCE does not exist",
ExtraData=EdkSourceDir)
elif ' ' in EdkSourceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in EDK_SOURCE path",
ExtraData=EdkSourceDir)
if not os.path.exists(EfiSourceDir):
if EfiSourceDir == EcpSourceDir:
EdkLogger.verbose("EFI_SOURCE = %s doesn't exist. Edk modules could not be built." % EfiSourceDir)
else:
EdkLogger.error("build", PARAMETER_INVALID, "EFI_SOURCE does not exist",
ExtraData=EfiSourceDir)
elif ' ' in EfiSourceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in EFI_SOURCE path",
ExtraData=EfiSourceDir)
# check those variables on single workspace case
if not PackagesPath:
# change absolute path to relative path to WORKSPACE
if EfiSourceDir.upper().find(WorkspaceDir.upper()) != 0:
EdkLogger.error("build", PARAMETER_INVALID, "EFI_SOURCE is not under WORKSPACE",
ExtraData="WORKSPACE = %s\n EFI_SOURCE = %s" % (WorkspaceDir, EfiSourceDir))
if EdkSourceDir.upper().find(WorkspaceDir.upper()) != 0:
EdkLogger.error("build", PARAMETER_INVALID, "EDK_SOURCE is not under WORKSPACE",
ExtraData="WORKSPACE = %s\n EDK_SOURCE = %s" % (WorkspaceDir, EdkSourceDir))
if EcpSourceDir.upper().find(WorkspaceDir.upper()) != 0:
EdkLogger.error("build", PARAMETER_INVALID, "ECP_SOURCE is not under WORKSPACE",
ExtraData="WORKSPACE = %s\n ECP_SOURCE = %s" % (WorkspaceDir, EcpSourceDir))
# check EDK_TOOLS_PATH
if "EDK_TOOLS_PATH" not in os.environ:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="EDK_TOOLS_PATH")
# check PATH
if "PATH" not in os.environ:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="PATH")
GlobalData.gWorkspace = WorkspaceDir
GlobalData.gEfiSource = EfiSourceDir
GlobalData.gEdkSource = EdkSourceDir
GlobalData.gEcpSource = EcpSourceDir
GlobalData.gGlobalDefines["WORKSPACE"] = WorkspaceDir
GlobalData.gGlobalDefines["EFI_SOURCE"] = EfiSourceDir
GlobalData.gGlobalDefines["EDK_SOURCE"] = EdkSourceDir
GlobalData.gGlobalDefines["ECP_SOURCE"] = EcpSourceDir
GlobalData.gGlobalDefines["EDK_TOOLS_PATH"] = os.environ["EDK_TOOLS_PATH"]
## Get normalized file path
#
# Convert the path to be local format, and remove the WORKSPACE path at the
# beginning if the file path is given in full path.
#
# @param FilePath File path to be normalized
# @param Workspace Workspace path which the FilePath will be checked against
#
# @retval string The normalized file path
#
def NormFile(FilePath, Workspace):
# check if the path is absolute or relative
if os.path.isabs(FilePath):
FileFullPath = os.path.normpath(FilePath)
else:
FileFullPath = os.path.normpath(mws.join(Workspace, FilePath))
Workspace = mws.getWs(Workspace, FilePath)
# check if the file path exists or not
if not os.path.isfile(FileFullPath):
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData="\t%s (Please give file in absolute path or relative to WORKSPACE)" % FileFullPath)
# remove workspace directory from the beginning part of the file path
if Workspace[-1] in ["\\", "/"]:
return FileFullPath[len(Workspace):]
else:
return FileFullPath[(len(Workspace) + 1):]
## Get the output of an external program
#
# This is the entrance method of thread reading output of an external program and
# putting them in STDOUT/STDERR of current program.
#
# @param From The stream message read from
# @param To The stream message put on
# @param ExitFlag The flag used to indicate stopping reading
#
def ReadMessage(From, To, ExitFlag):
while True:
# read one line a time
Line = From.readline()
# empty string means "end"
if Line is not None and Line != "":
To(Line.rstrip())
else:
break
if ExitFlag.isSet():
break
## Launch an external program
#
# This method will call subprocess.Popen to execute an external program with
# given options in specified directory. Because of the dead-lock issue during
# redirecting output of the external program, threads are used to do the
# redirection work.
#
# @param Command A list or string containing the call of the program
# @param WorkingDir The directory in which the program will be running
#
def LaunchCommand(Command, WorkingDir):
BeginTime = time.time()
# if working directory doesn't exist, Popen() will raise an exception
if not os.path.isdir(WorkingDir):
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=WorkingDir)
# Command is used as the first Argument in following Popen().
# It could be a string or sequence. We find that if command is a string in following Popen(),
# ubuntu may fail with an error message that the command is not found.
# So here we may need convert command from string to list instance.
if platform.system() != 'Windows':
if not isinstance(Command, list):
Command = Command.split()
Command = ' '.join(Command)
Proc = None
EndOfProcedure = None
try:
# launch the command
Proc = Popen(Command, stdout=PIPE, stderr=PIPE, env=os.environ, cwd=WorkingDir, bufsize=-1, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Proc.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Proc.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.setName("STDOUT-Redirector")
StdOutThread.setDaemon(False)
StdOutThread.start()
if Proc.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Proc.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
Proc.wait()
except: # in case of aborting
# terminate the threads redirecting the program output
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
if EndOfProcedure is not None:
EndOfProcedure.set()
if Proc is None:
if not isinstance(Command, type("")):
Command = " ".join(Command)
EdkLogger.error("build", COMMAND_FAILURE, "Failed to start command", ExtraData="%s [%s]" % (Command, WorkingDir))
if Proc.stdout:
StdOutThread.join()
if Proc.stderr:
StdErrThread.join()
# check the return code of the program
if Proc.returncode != 0:
if not isinstance(Command, type("")):
Command = " ".join(Command)
# print out the Response file and its content when make failure
RespFile = os.path.join(WorkingDir, 'OUTPUT', 'respfilelist.txt')
if os.path.isfile(RespFile):
f = open(RespFile)
RespContent = f.read()
f.close()
EdkLogger.info(RespContent)
EdkLogger.error("build", COMMAND_FAILURE, ExtraData="%s [%s]" % (Command, WorkingDir))
return "%dms" % (int(round((time.time() - BeginTime) * 1000)))
## The smallest unit that can be built in multi-thread build mode
#
# This is the base class of build unit. The "Obj" parameter must provide
# __str__(), __eq__() and __hash__() methods. Otherwise there could be build units
# missing build.
#
# Currently the "Obj" should be only ModuleAutoGen or PlatformAutoGen objects.
#
class BuildUnit:
## The constructor
#
# @param self The object pointer
# @param Obj The object the build is working on
# @param Target The build target name, one of gSupportedTarget
# @param Dependency The BuildUnit(s) which must be completed in advance
# @param WorkingDir The directory build command starts in
#
def __init__(self, Obj, BuildCommand, Target, Dependency, WorkingDir="."):
self.BuildObject = Obj
self.Dependency = Dependency
self.WorkingDir = WorkingDir
self.Target = Target
self.BuildCommand = BuildCommand
if not BuildCommand:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(Obj.BuildTarget, Obj.ToolChain, Obj.Arch),
ExtraData=str(Obj))
## str() method
#
# It just returns the string representation of self.BuildObject
#
# @param self The object pointer
#
def __str__(self):
return str(self.BuildObject)
## "==" operator method
#
# It just compares self.BuildObject with "Other". So self.BuildObject must
# provide its own __eq__() method.
#
# @param self The object pointer
# @param Other The other BuildUnit object compared to
#
def __eq__(self, Other):
return Other and self.BuildObject == Other.BuildObject \
and Other.BuildObject \
and self.BuildObject.Arch == Other.BuildObject.Arch
## hash() method
#
# It just returns the hash value of self.BuildObject which must be hashable.
#
# @param self The object pointer
#
def __hash__(self):
return hash(self.BuildObject) + hash(self.BuildObject.Arch)
def __repr__(self):
return repr(self.BuildObject)
## The smallest module unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for module build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise there could
# be make units missing build.
#
# Currently the "Obj" should be only ModuleAutoGen object.
#
class ModuleMakeUnit(BuildUnit):
## The constructor
#
# @param self The object pointer
# @param Obj The ModuleAutoGen object the build is working on
# @param Target The build target name, one of gSupportedTarget
#
def __init__(self, Obj, Target):
Dependency = [ModuleMakeUnit(La, Target) for La in Obj.LibraryAutoGenList]
BuildUnit.__init__(self, Obj, Obj.BuildCommand, Target, Dependency, Obj.MakeFileDir)
if Target in [None, "", "all"]:
self.Target = "tbuild"
## The smallest platform unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for platform build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise there could
# be make units missing build.
#
# Currently the "Obj" should be only PlatformAutoGen object.
#
class PlatformMakeUnit(BuildUnit):
## The constructor
#
# @param self The object pointer
# @param Obj The PlatformAutoGen object the build is working on
# @param Target The build target name, one of gSupportedTarget
#
def __init__(self, Obj, Target):
Dependency = [ModuleMakeUnit(Lib, Target) for Lib in Obj.LibraryAutoGenList]
Dependency.extend([ModuleMakeUnit(Mod, Target) for Mod in Obj.ModuleAutoGenList])
BuildUnit.__init__(self, Obj, Obj.BuildCommand, Target, Dependency, Obj.MakeFileDir)
## The class representing the task of a module build or platform build
#
# This class manages the build tasks in multi-thread build mode. Its jobs include
# scheduling thread running, catching thread error, monitor the thread status, etc.
#
class BuildTask:
# queue for tasks waiting for schedule
_PendingQueue = OrderedDict()
_PendingQueueLock = threading.Lock()
# queue for tasks ready for running
_ReadyQueue = OrderedDict()
_ReadyQueueLock = threading.Lock()
# queue for run tasks
_RunningQueue = OrderedDict()
_RunningQueueLock = threading.Lock()
# queue containing all build tasks, in case duplicate build
_TaskQueue = OrderedDict()
# flag indicating error occurs in a running thread
_ErrorFlag = threading.Event()
_ErrorFlag.clear()
_ErrorMessage = ""
# BoundedSemaphore object used to control the number of running threads
_Thread = None
# flag indicating if the scheduler is started or not
_SchedulerStopped = threading.Event()
_SchedulerStopped.set()
## Start the task scheduler thread
#
# @param MaxThreadNumber The maximum thread number
# @param ExitFlag Flag used to end the scheduler
#
@staticmethod
def StartScheduler(MaxThreadNumber, ExitFlag):
SchedulerThread = Thread(target=BuildTask.Scheduler, args=(MaxThreadNumber, ExitFlag))
SchedulerThread.setName("Build-Task-Scheduler")
SchedulerThread.setDaemon(False)
SchedulerThread.start()
# wait for the scheduler to be started, especially useful in Linux
while not BuildTask.IsOnGoing():
time.sleep(0.01)
## Scheduler method
#
# @param MaxThreadNumber The maximum thread number
# @param ExitFlag Flag used to end the scheduler
#
@staticmethod
def Scheduler(MaxThreadNumber, ExitFlag):
BuildTask._SchedulerStopped.clear()
try:
# use BoundedSemaphore to control the maximum running threads
BuildTask._Thread = BoundedSemaphore(MaxThreadNumber)
#
# scheduling loop, which exits when there is no pending/ready task and it is
# indicated to do so, or when there's an error in a running thread
#
while (len(BuildTask._PendingQueue) > 0 or len(BuildTask._ReadyQueue) > 0 \
or not ExitFlag.isSet()) and not BuildTask._ErrorFlag.isSet():
EdkLogger.debug(EdkLogger.DEBUG_8, "Pending Queue (%d), Ready Queue (%d)"
% (len(BuildTask._PendingQueue), len(BuildTask._ReadyQueue)))
# get all pending tasks
BuildTask._PendingQueueLock.acquire()
BuildObjectList = BuildTask._PendingQueue.keys()
#
# check if their dependency is resolved, and if true, move them
# into ready queue
#
for BuildObject in BuildObjectList:
Bt = BuildTask._PendingQueue[BuildObject]
if Bt.IsReady():
BuildTask._ReadyQueue[BuildObject] = BuildTask._PendingQueue.pop(BuildObject)
BuildTask._PendingQueueLock.release()
# launch build thread until the maximum number of threads is reached
while not BuildTask._ErrorFlag.isSet():
# empty ready queue, do nothing further
if len(BuildTask._ReadyQueue) == 0:
break
# wait for active thread(s) exit
BuildTask._Thread.acquire(True)
# start a new build thread
Bo, Bt = BuildTask._ReadyQueue.popitem()
# move into running queue
BuildTask._RunningQueueLock.acquire()
BuildTask._RunningQueue[Bo] = Bt
BuildTask._RunningQueueLock.release()
Bt.Start()
# avoid tense loop
time.sleep(0.01)
# avoid tense loop
time.sleep(0.01)
# wait for all running threads exit
if BuildTask._ErrorFlag.isSet():
EdkLogger.quiet("\nWaiting for all build threads exit...")
# while not BuildTask._ErrorFlag.isSet() and \
while len(BuildTask._RunningQueue) > 0:
EdkLogger.verbose("Waiting for thread ending...(%d)" % len(BuildTask._RunningQueue))
EdkLogger.debug(EdkLogger.DEBUG_8, "Threads [%s]" % ", ".join(Th.getName() for Th in threading.enumerate()))
# avoid tense loop
time.sleep(0.1)
except BaseException as X:
#
# TRICK: hide the output of threads left running, so that the user can
# catch the error message easily
#
EdkLogger.SetLevel(EdkLogger.ERROR)
BuildTask._ErrorFlag.set()
BuildTask._ErrorMessage = "build thread scheduler error\n\t%s" % str(X)
BuildTask._PendingQueue.clear()
BuildTask._ReadyQueue.clear()
BuildTask._RunningQueue.clear()
BuildTask._TaskQueue.clear()
BuildTask._SchedulerStopped.set()
## Wait for all running method exit
#
@staticmethod
def WaitForComplete():
BuildTask._SchedulerStopped.wait()
## Check if the scheduler is running or not
#
@staticmethod
def IsOnGoing():
return not BuildTask._SchedulerStopped.isSet()
## Abort the build
@staticmethod
def Abort():
if BuildTask.IsOnGoing():
BuildTask._ErrorFlag.set()
BuildTask.WaitForComplete()
## Check if there's error in running thread
#
# Since the main thread cannot catch exceptions in other thread, we have to
# use threading.Event to communicate this information to the main thread.
#
@staticmethod
def HasError():
return BuildTask._ErrorFlag.isSet()
## Get error message in running thread
#
# Since the main thread cannot catch exceptions in other thread, we have to
# use a static variable to communicate this message to main thread.
#
@staticmethod
def GetErrorMessage():
return BuildTask._ErrorMessage
## Factory method to create a BuildTask object
#
# This method will check if a module is building or has been built. And if
# true, just return the associated BuildTask object in the _TaskQueue. If
# not, create and return a new BuildTask object. The new BuildTask object
# will be appended to the _PendingQueue for scheduling later.
#
# @param BuildItem A BuildUnit object representing a build object
# @param Dependency The dependent build object of BuildItem
#
@staticmethod
def New(BuildItem, Dependency=None):
if BuildItem in BuildTask._TaskQueue:
Bt = BuildTask._TaskQueue[BuildItem]
return Bt
Bt = BuildTask()
Bt._Init(BuildItem, Dependency)
BuildTask._TaskQueue[BuildItem] = Bt
BuildTask._PendingQueueLock.acquire()
BuildTask._PendingQueue[BuildItem] = Bt
BuildTask._PendingQueueLock.release()
return Bt
## The real constructor of BuildTask
#
# @param BuildItem A BuildUnit object representing a build object
# @param Dependency The dependent build object of BuildItem
#
def _Init(self, BuildItem, Dependency=None):
self.BuildItem = BuildItem
self.DependencyList = []
if Dependency is None:
Dependency = BuildItem.Dependency
else:
Dependency.extend(BuildItem.Dependency)
self.AddDependency(Dependency)
# flag indicating build completes, used to avoid unnecessary re-build
self.CompleteFlag = False
## Check if all dependent build tasks are completed or not
#
def IsReady(self):
ReadyFlag = True
for Dep in self.DependencyList:
if Dep.CompleteFlag == True:
continue
ReadyFlag = False
break
return ReadyFlag
## Add dependent build task
#
# @param Dependency The list of dependent build objects
#
def AddDependency(self, Dependency):
for Dep in Dependency:
if not Dep.BuildObject.IsBinaryModule:
self.DependencyList.append(BuildTask.New(Dep)) # BuildTask list
## The thread wrapper of LaunchCommand function
#
# @param Command A list or string contains the call of the command
# @param WorkingDir The directory in which the program will be running
#
def _CommandThread(self, Command, WorkingDir):
try:
self.BuildItem.BuildObject.BuildTime = LaunchCommand(Command, WorkingDir)
self.CompleteFlag = True
except:
#
# TRICK: hide the output of threads left running, so that the user can
# catch the error message easily
#
if not BuildTask._ErrorFlag.isSet():
GlobalData.gBuildingModule = "%s [%s, %s, %s]" % (str(self.BuildItem.BuildObject),
self.BuildItem.BuildObject.Arch,
self.BuildItem.BuildObject.ToolChain,
self.BuildItem.BuildObject.BuildTarget
)
EdkLogger.SetLevel(EdkLogger.ERROR)
BuildTask._ErrorFlag.set()
BuildTask._ErrorMessage = "%s broken\n %s [%s]" % \
(threading.currentThread().getName(), Command, WorkingDir)
# indicate there's a thread available for another build task
BuildTask._RunningQueueLock.acquire()
BuildTask._RunningQueue.pop(self.BuildItem)
BuildTask._RunningQueueLock.release()
BuildTask._Thread.release()
## Start build task thread
#
def Start(self):
EdkLogger.quiet("Building ... %s" % repr(self.BuildItem))
Command = self.BuildItem.BuildCommand + [self.BuildItem.Target]
self.BuildTread = Thread(target=self._CommandThread, args=(Command, self.BuildItem.WorkingDir))
self.BuildTread.setName("build thread")
self.BuildTread.setDaemon(False)
self.BuildTread.start()
## The class contains the information related to EFI image
#
class PeImageInfo():
## Constructor
#
# Constructor will load all required image information.
#
# @param BaseName The full file path of image.
# @param Guid The GUID for image.
# @param Arch Arch of this image.
# @param OutputDir The output directory for image.
# @param DebugDir The debug directory for image.
# @param ImageClass PeImage Information
#
def __init__(self, BaseName, Guid, Arch, OutputDir, DebugDir, ImageClass):
self.BaseName = BaseName
self.Guid = Guid
self.Arch = Arch
self.OutputDir = OutputDir
self.DebugDir = DebugDir
self.Image = ImageClass
self.Image.Size = (self.Image.Size / 0x1000 + 1) * 0x1000
## The class implementing the EDK2 build process
#
# The build process includes:
# 1. Load configuration from target.txt and tools_def.txt in $(WORKSPACE)/Conf
# 2. Parse DSC file of active platform
# 3. Parse FDF file if any
# 4. Establish build database, including parse all other files (module, package)
# 5. Create AutoGen files (C code file, depex file, makefile) if necessary
# 6. Call build command
#
class Build():
## Constructor
#
# Constructor will load all necessary configurations, parse platform, modules
# and packages and the establish a database for AutoGen.
#
# @param Target The build command target, one of gSupportedTarget
# @param WorkspaceDir The directory of workspace
# @param BuildOptions Build options passed from command line
#
def __init__(self, Target, WorkspaceDir, BuildOptions):
self.WorkspaceDir = WorkspaceDir
self.Target = Target
self.PlatformFile = BuildOptions.PlatformFile
self.ModuleFile = BuildOptions.ModuleFile
self.ArchList = BuildOptions.TargetArch
self.ToolChainList = BuildOptions.ToolChain
self.BuildTargetList= BuildOptions.BuildTarget
self.Fdf = BuildOptions.FdfFile
self.FdList = BuildOptions.RomImage
self.FvList = BuildOptions.FvImage
self.CapList = BuildOptions.CapName
self.SilentMode = BuildOptions.SilentMode
self.ThreadNumber = BuildOptions.ThreadNumber
self.SkipAutoGen = BuildOptions.SkipAutoGen
self.Reparse = BuildOptions.Reparse
self.SkuId = BuildOptions.SkuId
if self.SkuId:
GlobalData.gSKUID_CMD = self.SkuId
self.ConfDirectory = BuildOptions.ConfDirectory
self.SpawnMode = True
self.BuildReport = BuildReport(BuildOptions.ReportFile, BuildOptions.ReportType)
self.TargetTxt = TargetTxtClassObject()
self.ToolDef = ToolDefClassObject()
self.AutoGenTime = 0
self.MakeTime = 0
self.GenFdsTime = 0
GlobalData.BuildOptionPcd = BuildOptions.OptionPcd if BuildOptions.OptionPcd else []
#Set global flag for build mode
GlobalData.gIgnoreSource = BuildOptions.IgnoreSources
GlobalData.gUseHashCache = BuildOptions.UseHashCache
GlobalData.gBinCacheDest = BuildOptions.BinCacheDest
GlobalData.gBinCacheSource = BuildOptions.BinCacheSource
GlobalData.gEnableGenfdsMultiThread = BuildOptions.GenfdsMultiThread
if GlobalData.gBinCacheDest and not GlobalData.gUseHashCache:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-destination must be used together with --hash.")
if GlobalData.gBinCacheSource and not GlobalData.gUseHashCache:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-source must be used together with --hash.")
if GlobalData.gBinCacheDest and GlobalData.gBinCacheSource:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-destination can not be used together with --binary-source.")
if GlobalData.gBinCacheSource:
BinCacheSource = os.path.normpath(GlobalData.gBinCacheSource)
if not os.path.isabs(BinCacheSource):
BinCacheSource = mws.join(self.WorkspaceDir, BinCacheSource)
GlobalData.gBinCacheSource = BinCacheSource
else:
if GlobalData.gBinCacheSource is not None:
EdkLogger.error("build", OPTION_VALUE_INVALID, ExtraData="Invalid value of option --binary-source.")
if GlobalData.gBinCacheDest:
BinCacheDest = os.path.normpath(GlobalData.gBinCacheDest)
if not os.path.isabs(BinCacheDest):
BinCacheDest = mws.join(self.WorkspaceDir, BinCacheDest)
GlobalData.gBinCacheDest = BinCacheDest
else:
if GlobalData.gBinCacheDest is not None:
EdkLogger.error("build", OPTION_VALUE_INVALID, ExtraData="Invalid value of option --binary-destination.")
if self.ConfDirectory:
# Get alternate Conf location, if it is absolute, then just use the absolute directory name
ConfDirectoryPath = os.path.normpath(self.ConfDirectory)
if not os.path.isabs(ConfDirectoryPath):
# Since alternate directory name is not absolute, the alternate directory is located within the WORKSPACE
# This also handles someone specifying the Conf directory in the workspace. Using --conf=Conf
ConfDirectoryPath = mws.join(self.WorkspaceDir, ConfDirectoryPath)
else:
if "CONF_PATH" in os.environ:
ConfDirectoryPath = os.path.normcase(os.path.normpath(os.environ["CONF_PATH"]))
else:
# Get standard WORKSPACE/Conf use the absolute path to the WORKSPACE/Conf
ConfDirectoryPath = mws.join(self.WorkspaceDir, 'Conf')
GlobalData.gConfDirectory = ConfDirectoryPath
GlobalData.gDatabasePath = os.path.normpath(os.path.join(ConfDirectoryPath, GlobalData.gDatabasePath))
self.Db = WorkspaceDatabase()
self.BuildDatabase = self.Db.BuildObject
self.Platform = None
self.ToolChainFamily = None
self.LoadFixAddress = 0
self.UniFlag = BuildOptions.Flag
self.BuildModules = []
self.HashSkipModules = []
self.Db_Flag = False
self.LaunchPrebuildFlag = False
self.PlatformBuildPath = os.path.join(GlobalData.gConfDirectory, '.cache', '.PlatformBuild')
if BuildOptions.CommandLength:
GlobalData.gCommandMaxLength = BuildOptions.CommandLength
# print dot character during doing some time-consuming work
self.Progress = Utils.Progressor()
# print current build environment and configuration
EdkLogger.quiet("%-16s = %s" % ("WORKSPACE", os.environ["WORKSPACE"]))
if "PACKAGES_PATH" in os.environ:
# WORKSPACE env has been converted before. Print the same path style with WORKSPACE env.
EdkLogger.quiet("%-16s = %s" % ("PACKAGES_PATH", os.path.normcase(os.path.normpath(os.environ["PACKAGES_PATH"]))))
EdkLogger.quiet("%-16s = %s" % ("ECP_SOURCE", os.environ["ECP_SOURCE"]))
EdkLogger.quiet("%-16s = %s" % ("EDK_SOURCE", os.environ["EDK_SOURCE"]))
EdkLogger.quiet("%-16s = %s" % ("EFI_SOURCE", os.environ["EFI_SOURCE"]))
EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_PATH", os.environ["EDK_TOOLS_PATH"]))
if "EDK_TOOLS_BIN" in os.environ:
# Print the same path style with WORKSPACE env.
EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_BIN", os.path.normcase(os.path.normpath(os.environ["EDK_TOOLS_BIN"]))))
EdkLogger.quiet("%-16s = %s" % ("CONF_PATH", GlobalData.gConfDirectory))
self.InitPreBuild()
self.InitPostBuild()
if self.Prebuild:
EdkLogger.quiet("%-16s = %s" % ("PREBUILD", self.Prebuild))
if self.Postbuild:
EdkLogger.quiet("%-16s = %s" % ("POSTBUILD", self.Postbuild))
if self.Prebuild:
self.LaunchPrebuild()
self.TargetTxt = TargetTxtClassObject()
self.ToolDef = ToolDefClassObject()
if not (self.LaunchPrebuildFlag and os.path.exists(self.PlatformBuildPath)):
self.InitBuild()
EdkLogger.info("")
os.chdir(self.WorkspaceDir)
## Load configuration
#
# This method will parse target.txt and get the build configurations.
#
def LoadConfiguration(self):
#
# Check target.txt and tools_def.txt and Init them
#
BuildConfigurationFile = os.path.normpath(os.path.join(GlobalData.gConfDirectory, gBuildConfiguration))
if os.path.isfile(BuildConfigurationFile) == True:
StatusCode = self.TargetTxt.LoadTargetTxtFile(BuildConfigurationFile)
ToolDefinitionFile = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TOOL_CHAIN_CONF]
if ToolDefinitionFile == '':
ToolDefinitionFile = gToolsDefinition
ToolDefinitionFile = os.path.normpath(mws.join(self.WorkspaceDir, 'Conf', ToolDefinitionFile))
if os.path.isfile(ToolDefinitionFile) == True:
StatusCode = self.ToolDef.LoadToolDefFile(ToolDefinitionFile)
else:
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=ToolDefinitionFile)
else:
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=BuildConfigurationFile)
# if no ARCH given in command line, get it from target.txt
if not self.ArchList:
self.ArchList = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TARGET_ARCH]
self.ArchList = tuple(self.ArchList)
# if no build target given in command line, get it from target.txt
if not self.BuildTargetList:
self.BuildTargetList = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TARGET]
# if no tool chain given in command line, get it from target.txt
if not self.ToolChainList:
self.ToolChainList = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TOOL_CHAIN_TAG]
if self.ToolChainList is None or len(self.ToolChainList) == 0:
EdkLogger.error("build", RESOURCE_NOT_AVAILABLE, ExtraData="No toolchain given. Don't know how to build.\n")
# check if the tool chains are defined or not
NewToolChainList = []
for ToolChain in self.ToolChainList:
if ToolChain not in self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG]:
EdkLogger.warn("build", "Tool chain [%s] is not defined" % ToolChain)
else:
NewToolChainList.append(ToolChain)
# if no tool chain available, break the build
if len(NewToolChainList) == 0:
EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
ExtraData="[%s] not defined. No toolchain available for build!\n" % ", ".join(self.ToolChainList))
else:
self.ToolChainList = NewToolChainList
ToolChainFamily = []
ToolDefinition = self.ToolDef.ToolsDefTxtDatabase
for Tool in self.ToolChainList:
if TAB_TOD_DEFINES_FAMILY not in ToolDefinition or Tool not in ToolDefinition[TAB_TOD_DEFINES_FAMILY] \
or not ToolDefinition[TAB_TOD_DEFINES_FAMILY][Tool]:
EdkLogger.warn("build", "No tool chain family found in configuration for %s. Default to MSFT." % Tool)
ToolChainFamily.append(TAB_COMPILER_MSFT)
else:
ToolChainFamily.append(ToolDefinition[TAB_TOD_DEFINES_FAMILY][Tool])
self.ToolChainFamily = ToolChainFamily
if self.ThreadNumber is None:
self.ThreadNumber = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER]
if self.ThreadNumber == '':
self.ThreadNumber = 0
else:
self.ThreadNumber = int(self.ThreadNumber, 0)
if self.ThreadNumber == 0:
try:
self.ThreadNumber = multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
self.ThreadNumber = 1
if not self.PlatformFile:
PlatformFile = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_ACTIVE_PLATFORM]
if not PlatformFile:
# Try to find one in current directory
WorkingDirectory = os.getcwd()
FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.dsc')))
FileNum = len(FileList)
if FileNum >= 2:
EdkLogger.error("build", OPTION_MISSING,
ExtraData="There are %d DSC files in %s. Use '-p' to specify one.\n" % (FileNum, WorkingDirectory))
elif FileNum == 1:
PlatformFile = FileList[0]
else:
EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
ExtraData="No active platform specified in target.txt or command line! Nothing can be built.\n")
self.PlatformFile = PathClass(NormFile(PlatformFile, self.WorkspaceDir), self.WorkspaceDir)
## Initialize build configuration
#
# This method will parse DSC file and merge the configurations from
# command line and target.txt, then get the final build configurations.
#
def InitBuild(self):
# parse target.txt, tools_def.txt, and platform file
self.LoadConfiguration()
# Allow case-insensitive for those from command line or configuration file
ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
def InitPreBuild(self):
self.LoadConfiguration()
ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = self.BuildTargetList[0]
if self.ArchList:
GlobalData.gGlobalDefines['ARCH'] = self.ArchList[0]
if self.ToolChainList:
GlobalData.gGlobalDefines['TOOLCHAIN'] = self.ToolChainList[0]
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = self.ToolChainList[0]
if self.ToolChainFamily:
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[0]
if 'PREBUILD' in GlobalData.gCommandLineDefines:
self.Prebuild = GlobalData.gCommandLineDefines.get('PREBUILD')
else:
self.Db_Flag = True
Platform = self.Db.MapPlatform(str(self.PlatformFile))
self.Prebuild = str(Platform.Prebuild)
if self.Prebuild:
PrebuildList = []
#
# Evaluate all arguments and convert arguments that are WORKSPACE
# relative paths to absolute paths. Filter arguments that look like
# flags or do not follow the file/dir naming rules to avoid false
# positives on this conversion.
#
for Arg in self.Prebuild.split():
#
# Do not modify Arg if it looks like a flag or an absolute file path
#
if Arg.startswith('-') or os.path.isabs(Arg):
PrebuildList.append(Arg)
continue
#
# Do not modify Arg if it does not look like a Workspace relative
# path that starts with a valid package directory name
#
if not Arg[0].isalpha() or os.path.dirname(Arg) == '':
PrebuildList.append(Arg)
continue
#
# If Arg looks like a WORKSPACE relative path, then convert to an
# absolute path and check to see if the file exists.
#
Temp = mws.join(self.WorkspaceDir, Arg)
if os.path.isfile(Temp):
Arg = Temp
PrebuildList.append(Arg)
self.Prebuild = ' '.join(PrebuildList)
self.Prebuild += self.PassCommandOption(self.BuildTargetList, self.ArchList, self.ToolChainList, self.PlatformFile, self.Target)
def InitPostBuild(self):
if 'POSTBUILD' in GlobalData.gCommandLineDefines:
self.Postbuild = GlobalData.gCommandLineDefines.get('POSTBUILD')
else:
Platform = self.Db.MapPlatform(str(self.PlatformFile))
self.Postbuild = str(Platform.Postbuild)
if self.Postbuild:
PostbuildList = []
#
# Evaluate all arguments and convert arguments that are WORKSPACE
# relative paths to absolute paths. Filter arguments that look like
# flags or do not follow the file/dir naming rules to avoid false
# positives on this conversion.
#
for Arg in self.Postbuild.split():
#
# Do not modify Arg if it looks like a flag or an absolute file path
#
if Arg.startswith('-') or os.path.isabs(Arg):
PostbuildList.append(Arg)
continue
#
# Do not modify Arg if it does not look like a Workspace relative
# path that starts with a valid package directory name
#
if not Arg[0].isalpha() or os.path.dirname(Arg) == '':
PostbuildList.append(Arg)
continue
#
# If Arg looks like a WORKSPACE relative path, then convert to an
# absolute path and check to see if the file exists.
#
Temp = mws.join(self.WorkspaceDir, Arg)
if os.path.isfile(Temp):
Arg = Temp
PostbuildList.append(Arg)
self.Postbuild = ' '.join(PostbuildList)
self.Postbuild += self.PassCommandOption(self.BuildTargetList, self.ArchList, self.ToolChainList, self.PlatformFile, self.Target)
def PassCommandOption(self, BuildTarget, TargetArch, ToolChain, PlatformFile, Target):
BuildStr = ''
if GlobalData.gCommand and isinstance(GlobalData.gCommand, list):
BuildStr += ' ' + ' '.join(GlobalData.gCommand)
TargetFlag = False
ArchFlag = False
ToolChainFlag = False
PlatformFileFlag = False
if GlobalData.gOptions and not GlobalData.gOptions.BuildTarget:
TargetFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.TargetArch:
ArchFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.ToolChain:
ToolChainFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.PlatformFile:
PlatformFileFlag = True
if TargetFlag and BuildTarget:
if isinstance(BuildTarget, list) or isinstance(BuildTarget, tuple):
BuildStr += ' -b ' + ' -b '.join(BuildTarget)
elif isinstance(BuildTarget, str):
BuildStr += ' -b ' + BuildTarget
if ArchFlag and TargetArch:
if isinstance(TargetArch, list) or isinstance(TargetArch, tuple):
BuildStr += ' -a ' + ' -a '.join(TargetArch)
elif isinstance(TargetArch, str):
BuildStr += ' -a ' + TargetArch
if ToolChainFlag and ToolChain:
if isinstance(ToolChain, list) or isinstance(ToolChain, tuple):
BuildStr += ' -t ' + ' -t '.join(ToolChain)
elif isinstance(ToolChain, str):
BuildStr += ' -t ' + ToolChain
if PlatformFileFlag and PlatformFile:
if isinstance(PlatformFile, list) or isinstance(PlatformFile, tuple):
BuildStr += ' -p ' + ' -p '.join(PlatformFile)
elif isinstance(PlatformFile, str):
                BuildStr += ' -p ' + PlatformFile
BuildStr += ' --conf=' + GlobalData.gConfDirectory
if Target:
BuildStr += ' ' + Target
return BuildStr
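    # Illustrative example (added for clarity; the file names are hypothetical): if the
    # original command line did not already specify -b/-a/-t/-p, then
    #     PassCommandOption(['DEBUG'], ['X64'], ['VS2017'], 'Platform.dsc', 'all')
    # appends something like
    #     ' -b DEBUG -a X64 -t VS2017 -p Platform.dsc --conf=<Conf directory> all'
    # so that the pre/post build scripts are launched with the same options as the build itself.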
def LaunchPrebuild(self):
if self.Prebuild:
EdkLogger.info("\n- Prebuild Start -\n")
self.LaunchPrebuildFlag = True
#
            # The purpose of the .PrebuildEnv file is to capture the environment variable settings made by the
            # prebuild script and preserve them for the rest of the main build step, because the child process
            # environment evaporates as soon as the process exits and cannot be read from the build step.
#
PrebuildEnvFile = os.path.join(GlobalData.gConfDirectory, '.cache', '.PrebuildEnv')
if os.path.isfile(PrebuildEnvFile):
os.remove(PrebuildEnvFile)
if os.path.isfile(self.PlatformBuildPath):
os.remove(self.PlatformBuildPath)
if sys.platform == "win32":
args = ' && '.join((self.Prebuild, 'set > ' + PrebuildEnvFile))
Process = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
else:
args = ' && '.join((self.Prebuild, 'env > ' + PrebuildEnvFile))
Process = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Process.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Process.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.setName("STDOUT-Redirector")
StdOutThread.setDaemon(False)
StdOutThread.start()
if Process.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Process.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
Process.wait()
if Process.stdout:
StdOutThread.join()
if Process.stderr:
StdErrThread.join()
            if Process.returncode != 0:
                EdkLogger.error("Prebuild", PREBUILD_ERROR, 'Prebuild process was not successful!')
if os.path.exists(PrebuildEnvFile):
f = open(PrebuildEnvFile)
envs = f.readlines()
f.close()
envs = itertools.imap(lambda l: l.split('=', 1), envs)
envs = itertools.ifilter(lambda l: len(l) == 2, envs)
envs = itertools.imap(lambda l: [i.strip() for i in l], envs)
os.environ.update(dict(envs))
EdkLogger.info("\n- Prebuild Done -\n")
def LaunchPostbuild(self):
if self.Postbuild:
EdkLogger.info("\n- Postbuild Start -\n")
if sys.platform == "win32":
Process = Popen(self.Postbuild, stdout=PIPE, stderr=PIPE, shell=True)
else:
Process = Popen(self.Postbuild, stdout=PIPE, stderr=PIPE, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Process.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Process.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.setName("STDOUT-Redirector")
StdOutThread.setDaemon(False)
StdOutThread.start()
if Process.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Process.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
Process.wait()
if Process.stdout:
StdOutThread.join()
if Process.stderr:
StdErrThread.join()
            if Process.returncode != 0:
                EdkLogger.error("Postbuild", POSTBUILD_ERROR, 'Postbuild process was not successful!')
EdkLogger.info("\n- Postbuild Done -\n")
    ## Build a module or platform
    #
    # Create AutoGen code and makefile for a module or platform, and then launch
    # the "make" command to build it
    #
    #   @param  Target                      The target of the build command
    #   @param  AutoGenObject               The AutoGen object of the module or platform to build
    #   @param  CreateDepsCodeFile          Flag used to indicate creating code
    #                                       for dependent modules/Libraries
    #   @param  CreateDepsMakeFile          Flag used to indicate creating makefile
    #                                       for dependent modules/Libraries
    #   @param  BuildModule                 Flag used to indicate building a single module
    #   @param  FfsCommand                  FFS make commands, keyed by (module INF file, arch)
    #
def _BuildPa(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False, FfsCommand={}):
if AutoGenObject is None:
return False
# skip file generation for cleanxxx targets, run and fds target
if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or Target == 'genc':
self.Progress.Start("Generating code")
AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
self.Progress.Stop("done!")
if Target == "genc":
return True
if not self.SkipAutoGen or Target == 'genmake':
self.Progress.Start("Generating makefile")
AutoGenObject.CreateMakeFile(CreateDepsMakeFile, FfsCommand)
self.Progress.Stop("done!")
if Target == "genmake":
return True
else:
# always recreate top/platform makefile when clean, just in case of inconsistency
AutoGenObject.CreateCodeFile(False)
AutoGenObject.CreateMakeFile(False)
if EdkLogger.GetLevel() == EdkLogger.QUIET:
EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))
BuildCommand = AutoGenObject.BuildCommand
if BuildCommand is None or len(BuildCommand) == 0:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
ExtraData=str(AutoGenObject))
makefile = GenMake.BuildFile(AutoGenObject)._FILE_NAME_[GenMake.gMakeType]
# run
if Target == 'run':
RunDir = os.path.normpath(os.path.join(AutoGenObject.BuildDir, GlobalData.gGlobalDefines['ARCH']))
            Command = '.\\SecMain'
os.chdir(RunDir)
LaunchCommand(Command, RunDir)
return True
# build modules
if BuildModule:
BuildCommand = BuildCommand + [Target]
LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
return True
# build library
if Target == 'libraries':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# build module
if Target == 'modules':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
for Mod in AutoGenObject.ModuleBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Mod, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
return True
# cleanlib
if Target == 'cleanlib':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
LibMakefile = os.path.normpath(os.path.join(Lib, makefile))
if os.path.exists(LibMakefile):
NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# clean
if Target == 'clean':
for Mod in AutoGenObject.ModuleBuildDirectoryList:
ModMakefile = os.path.normpath(os.path.join(Mod, makefile))
if os.path.exists(ModMakefile):
NewBuildCommand = BuildCommand + ['-f', ModMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
for Lib in AutoGenObject.LibraryBuildDirectoryList:
LibMakefile = os.path.normpath(os.path.join(Lib, makefile))
if os.path.exists(LibMakefile):
NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# cleanall
if Target == 'cleanall':
try:
#os.rmdir(AutoGenObject.BuildDir)
RemoveDirectory(AutoGenObject.BuildDir, True)
except WindowsError as X:
EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
return True
    ## Build a module or platform
    #
    # Create AutoGen code and makefile for a module or platform, and then launch
    # the "make" command to build it
    #
    #   @param  Target                      The target of the build command
    #   @param  AutoGenObject               The AutoGen object of the module or platform to build
    #   @param  CreateDepsCodeFile          Flag used to indicate creating code
    #                                       for dependent modules/Libraries
    #   @param  CreateDepsMakeFile          Flag used to indicate creating makefile
    #                                       for dependent modules/Libraries
    #   @param  BuildModule                 Flag used to indicate building a single module
    #
def _Build(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False):
if AutoGenObject is None:
return False
# skip file generation for cleanxxx targets, run and fds target
if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or Target == 'genc':
self.Progress.Start("Generating code")
AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
self.Progress.Stop("done!")
if Target == "genc":
return True
if not self.SkipAutoGen or Target == 'genmake':
self.Progress.Start("Generating makefile")
AutoGenObject.CreateMakeFile(CreateDepsMakeFile)
#AutoGenObject.CreateAsBuiltInf()
self.Progress.Stop("done!")
if Target == "genmake":
return True
else:
# always recreate top/platform makefile when clean, just in case of inconsistency
AutoGenObject.CreateCodeFile(False)
AutoGenObject.CreateMakeFile(False)
if EdkLogger.GetLevel() == EdkLogger.QUIET:
EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))
BuildCommand = AutoGenObject.BuildCommand
if BuildCommand is None or len(BuildCommand) == 0:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
ExtraData=str(AutoGenObject))
# build modules
if BuildModule:
if Target != 'fds':
BuildCommand = BuildCommand + [Target]
AutoGenObject.BuildTime = LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
return True
# genfds
if Target == 'fds':
if GenFdsApi(AutoGenObject.GenFdsCommandDict, self.Db):
EdkLogger.error("build", COMMAND_FAILURE)
return True
# run
if Target == 'run':
RunDir = os.path.normpath(os.path.join(AutoGenObject.BuildDir, GlobalData.gGlobalDefines['ARCH']))
            Command = '.\\SecMain'
os.chdir(RunDir)
LaunchCommand(Command, RunDir)
return True
# build library
if Target == 'libraries':
pass
# not build modules
# cleanall
if Target == 'cleanall':
try:
#os.rmdir(AutoGenObject.BuildDir)
RemoveDirectory(AutoGenObject.BuildDir, True)
except WindowsError as X:
EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
return True
    ## Rebase module images and get function addresses for the input module list.
#
def _RebaseModule (self, MapBuffer, BaseAddress, ModuleList, AddrIsOffset = True, ModeIsSmm = False):
if ModeIsSmm:
AddrIsOffset = False
for InfFile in ModuleList:
sys.stdout.write (".")
sys.stdout.flush()
ModuleInfo = ModuleList[InfFile]
ModuleName = ModuleInfo.BaseName
ModuleOutputImage = ModuleInfo.Image.FileName
ModuleDebugImage = os.path.join(ModuleInfo.DebugDir, ModuleInfo.BaseName + '.efi')
## for SMM module in SMRAM, the SMRAM will be allocated from base to top.
if not ModeIsSmm:
BaseAddress = BaseAddress - ModuleInfo.Image.Size
#
# Update Image to new BaseAddress by GenFw tool
#
LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
else:
#
# Set new address to the section header only for SMM driver.
#
LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
#
            # Collect function addresses from the Map file
#
ImageMapTable = ModuleOutputImage.replace('.efi', '.map')
FunctionList = []
if os.path.exists(ImageMapTable):
OrigImageBaseAddress = 0
ImageMap = open(ImageMapTable, 'r')
for LinStr in ImageMap:
if len (LinStr.strip()) == 0:
continue
#
                    # Get the preferred address set at link time.
#
if LinStr.find ('Preferred load address is') != -1:
StrList = LinStr.split()
OrigImageBaseAddress = int (StrList[len(StrList) - 1], 16)
StrList = LinStr.split()
if len (StrList) > 4:
if StrList[3] == 'f' or StrList[3] == 'F':
Name = StrList[1]
RelativeAddress = int (StrList[2], 16) - OrigImageBaseAddress
FunctionList.append ((Name, RelativeAddress))
if ModuleInfo.Arch == 'IPF' and Name.endswith('_ModuleEntryPoint'):
#
# Get the real entry point address for IPF image.
#
ModuleInfo.Image.EntryPoint = RelativeAddress
ImageMap.close()
#
# Add general information.
#
if ModeIsSmm:
MapBuffer.write('\n\n%s (Fixed SMRAM Offset, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
elif AddrIsOffset:
MapBuffer.write('\n\n%s (Fixed Memory Offset, BaseAddress=-0x%010X, EntryPoint=-0x%010X)\n' % (ModuleName, 0 - BaseAddress, 0 - (BaseAddress + ModuleInfo.Image.EntryPoint)))
else:
MapBuffer.write('\n\n%s (Fixed Memory Address, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
#
            # Add GUID and general section information.
#
TextSectionAddress = 0
DataSectionAddress = 0
for SectionHeader in ModuleInfo.Image.SectionHeaderList:
if SectionHeader[0] == '.text':
TextSectionAddress = SectionHeader[1]
elif SectionHeader[0] in ['.data', '.sdata']:
DataSectionAddress = SectionHeader[1]
if AddrIsOffset:
MapBuffer.write('(GUID=%s, .textbaseaddress=-0x%010X, .databaseaddress=-0x%010X)\n' % (ModuleInfo.Guid, 0 - (BaseAddress + TextSectionAddress), 0 - (BaseAddress + DataSectionAddress)))
else:
MapBuffer.write('(GUID=%s, .textbaseaddress=0x%010X, .databaseaddress=0x%010X)\n' % (ModuleInfo.Guid, BaseAddress + TextSectionAddress, BaseAddress + DataSectionAddress))
#
# Add debug image full path.
#
MapBuffer.write('(IMAGE=%s)\n\n' % (ModuleDebugImage))
#
            # Add function addresses
#
for Function in FunctionList:
if AddrIsOffset:
MapBuffer.write(' -0x%010X %s\n' % (0 - (BaseAddress + Function[1]), Function[0]))
else:
MapBuffer.write(' 0x%010X %s\n' % (BaseAddress + Function[1], Function[0]))
ImageMap.close()
#
# for SMM module in SMRAM, the SMRAM will be allocated from base to top.
#
if ModeIsSmm:
BaseAddress = BaseAddress + ModuleInfo.Image.Size
## Collect MAP information of all FVs
#
def _CollectFvMapBuffer (self, MapBuffer, Wa, ModuleList):
if self.Fdf:
# First get the XIP base address for FV map file.
GuidPattern = re.compile("[-a-fA-F0-9]+")
GuidName = re.compile("\(GUID=[-a-fA-F0-9]+")
for FvName in Wa.FdfProfile.FvDict:
FvMapBuffer = os.path.join(Wa.FvDir, FvName + '.Fv.map')
if not os.path.exists(FvMapBuffer):
continue
FvMap = open(FvMapBuffer, 'r')
#skip FV size information
FvMap.readline()
FvMap.readline()
FvMap.readline()
FvMap.readline()
for Line in FvMap:
MatchGuid = GuidPattern.match(Line)
if MatchGuid is not None:
#
# Replace GUID with module name
#
GuidString = MatchGuid.group()
if GuidString.upper() in ModuleList:
Line = Line.replace(GuidString, ModuleList[GuidString.upper()].Name)
MapBuffer.write(Line)
#
# Add the debug image full path.
#
MatchGuid = GuidName.match(Line)
if MatchGuid is not None:
GuidString = MatchGuid.group().split("=")[1]
if GuidString.upper() in ModuleList:
MapBuffer.write('(IMAGE=%s)\n' % (os.path.join(ModuleList[GuidString.upper()].DebugDir, ModuleList[GuidString.upper()].Name + '.efi')))
FvMap.close()
## Collect MAP information of all modules
#
def _CollectModuleMapBuffer (self, MapBuffer, ModuleList):
sys.stdout.write ("Generate Load Module At Fix Address Map")
sys.stdout.flush()
PatchEfiImageList = []
PeiModuleList = {}
BtModuleList = {}
RtModuleList = {}
SmmModuleList = {}
PeiSize = 0
BtSize = 0
RtSize = 0
        # Reserve 4K in SMRAM so that SMM module addresses do not start at 0.
SmmSize = 0x1000
IsIpfPlatform = False
if 'IPF' in self.ArchList:
IsIpfPlatform = True
for ModuleGuid in ModuleList:
Module = ModuleList[ModuleGuid]
GlobalData.gProcessingFile = "%s [%s, %s, %s]" % (Module.MetaFile, Module.Arch, Module.ToolChain, Module.BuildTarget)
OutputImageFile = ''
for ResultFile in Module.CodaTargetList:
if str(ResultFile.Target).endswith('.efi'):
#
# module list for PEI, DXE, RUNTIME and SMM
#
OutputImageFile = os.path.join(Module.OutputDir, Module.Name + '.efi')
ImageClass = PeImageClass (OutputImageFile)
if not ImageClass.IsValid:
EdkLogger.error("build", FILE_PARSE_FAILURE, ExtraData=ImageClass.ErrorInfo)
ImageInfo = PeImageInfo(Module.Name, Module.Guid, Module.Arch, Module.OutputDir, Module.DebugDir, ImageClass)
if Module.ModuleType in [SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM, EDK_COMPONENT_TYPE_COMBINED_PEIM_DRIVER, EDK_COMPONENT_TYPE_PIC_PEIM, EDK_COMPONENT_TYPE_RELOCATABLE_PEIM, SUP_MODULE_DXE_CORE]:
PeiModuleList[Module.MetaFile] = ImageInfo
PeiSize += ImageInfo.Image.Size
elif Module.ModuleType in [EDK_COMPONENT_TYPE_BS_DRIVER, SUP_MODULE_DXE_DRIVER, SUP_MODULE_UEFI_DRIVER]:
BtModuleList[Module.MetaFile] = ImageInfo
BtSize += ImageInfo.Image.Size
elif Module.ModuleType in [SUP_MODULE_DXE_RUNTIME_DRIVER, EDK_COMPONENT_TYPE_RT_DRIVER, SUP_MODULE_DXE_SAL_DRIVER, EDK_COMPONENT_TYPE_SAL_RT_DRIVER]:
RtModuleList[Module.MetaFile] = ImageInfo
#IPF runtime driver needs to be at 2 page alignment.
if IsIpfPlatform and ImageInfo.Image.Size % 0x2000 != 0:
ImageInfo.Image.Size = (ImageInfo.Image.Size / 0x2000 + 1) * 0x2000
RtSize += ImageInfo.Image.Size
elif Module.ModuleType in [SUP_MODULE_SMM_CORE, SUP_MODULE_DXE_SMM_DRIVER, SUP_MODULE_MM_STANDALONE, SUP_MODULE_MM_CORE_STANDALONE]:
SmmModuleList[Module.MetaFile] = ImageInfo
SmmSize += ImageInfo.Image.Size
if Module.ModuleType == SUP_MODULE_DXE_SMM_DRIVER:
PiSpecVersion = Module.Module.Specification.get('PI_SPECIFICATION_VERSION', '0x00000000')
# for PI specification < PI1.1, DXE_SMM_DRIVER also runs as BOOT time driver.
if int(PiSpecVersion, 16) < 0x0001000A:
BtModuleList[Module.MetaFile] = ImageInfo
BtSize += ImageInfo.Image.Size
break
#
# EFI image is final target.
# Check EFI image contains patchable FixAddress related PCDs.
#
if OutputImageFile != '':
ModuleIsPatch = False
for Pcd in Module.ModulePcdList:
if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SET:
ModuleIsPatch = True
break
if not ModuleIsPatch:
for Pcd in Module.LibraryPcdList:
if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SET:
ModuleIsPatch = True
break
if not ModuleIsPatch:
continue
#
# Module includes the patchable load fix address PCDs.
# It will be fixed up later.
#
PatchEfiImageList.append (OutputImageFile)
#
# Get Top Memory address
#
ReservedRuntimeMemorySize = 0
TopMemoryAddress = 0
if self.LoadFixAddress == 0xFFFFFFFFFFFFFFFF:
TopMemoryAddress = 0
else:
TopMemoryAddress = self.LoadFixAddress
if TopMemoryAddress < RtSize + BtSize + PeiSize:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS is too low to load driver")
# Make IPF runtime driver at 2 page alignment.
if IsIpfPlatform:
ReservedRuntimeMemorySize = TopMemoryAddress % 0x2000
RtSize = RtSize + ReservedRuntimeMemorySize
#
# Patch FixAddress related PCDs into EFI image
#
for EfiImage in PatchEfiImageList:
EfiImageMap = EfiImage.replace('.efi', '.map')
if not os.path.exists(EfiImageMap):
continue
#
# Get PCD offset in EFI image by GenPatchPcdTable function
#
PcdTable = parsePcdInfoFromMapFile(EfiImageMap, EfiImage)
#
# Patch real PCD value by PatchPcdValue tool
#
for PcdInfo in PcdTable:
ReturnValue = 0
if PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE_DATA_TYPE, str (PeiSize / 0x1000))
elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE_DATA_TYPE, str (BtSize / 0x1000))
elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE_DATA_TYPE, str (RtSize / 0x1000))
elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE and len (SmmModuleList) > 0:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE_DATA_TYPE, str (SmmSize / 0x1000))
if ReturnValue != 0:
EdkLogger.error("build", PARAMETER_INVALID, "Patch PCD value failed", ExtraData=ErrorInfo)
MapBuffer.write('PEI_CODE_PAGE_NUMBER = 0x%x\n' % (PeiSize / 0x1000))
MapBuffer.write('BOOT_CODE_PAGE_NUMBER = 0x%x\n' % (BtSize / 0x1000))
MapBuffer.write('RUNTIME_CODE_PAGE_NUMBER = 0x%x\n' % (RtSize / 0x1000))
if len (SmmModuleList) > 0:
MapBuffer.write('SMM_CODE_PAGE_NUMBER = 0x%x\n' % (SmmSize / 0x1000))
PeiBaseAddr = TopMemoryAddress - RtSize - BtSize
BtBaseAddr = TopMemoryAddress - RtSize
RtBaseAddr = TopMemoryAddress - ReservedRuntimeMemorySize
self._RebaseModule (MapBuffer, PeiBaseAddr, PeiModuleList, TopMemoryAddress == 0)
self._RebaseModule (MapBuffer, BtBaseAddr, BtModuleList, TopMemoryAddress == 0)
self._RebaseModule (MapBuffer, RtBaseAddr, RtModuleList, TopMemoryAddress == 0)
self._RebaseModule (MapBuffer, 0x1000, SmmModuleList, AddrIsOffset=False, ModeIsSmm=True)
MapBuffer.write('\n\n')
sys.stdout.write ("\n")
sys.stdout.flush()
## Save platform Map file
#
def _SaveMapFile (self, MapBuffer, Wa):
#
        # Get the Map file path.
#
MapFilePath = os.path.join(Wa.BuildDir, Wa.Name + '.map')
#
# Save address map into MAP file.
#
SaveFileOnChange(MapFilePath, MapBuffer.getvalue(), False)
MapBuffer.close()
if self.LoadFixAddress != 0:
sys.stdout.write ("\nLoad Module At Fix Address Map file can be found at %s\n" % (MapFilePath))
sys.stdout.flush()
## Build active platform for different build targets and different tool chains
#
def _BuildPlatform(self):
SaveFileOnChange(self.PlatformBuildPath, '# DO NOT EDIT \n# FILE auto-generated\n', False)
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
index = 0
for ToolChain in self.ToolChainList:
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
index += 1
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
self.BuildReport.AddPlatformReport(Wa)
self.Progress.Stop("done!")
# Add ffs build to makefile
CmdListDict = {}
if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
CmdListDict = self._GenFfsCmd()
for Arch in Wa.ArchList:
GlobalData.gGlobalDefines['ARCH'] = Arch
Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
for Module in Pa.Platform.Modules:
# Get ModuleAutoGen object to generate C code file and makefile
Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
if Ma is None:
continue
self.BuildModules.append(Ma)
self._BuildPa(self.Target, Pa, FfsCommand=CmdListDict)
# Create MAP file when Load Fix Address is enabled.
if self.Target in ["", "all", "fds"]:
for Arch in Wa.ArchList:
GlobalData.gGlobalDefines['ARCH'] = Arch
#
# Check whether the set fix address is above 4G for 32bit image.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platform with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma is None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
MapBuffer = BytesIO('')
if self.LoadFixAddress != 0:
#
# Rebase module to the preferred memory address before GenFds
#
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
if self.Fdf:
#
# create FDS again for the updated EFI image
#
self._Build("fds", Wa)
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile (MapBuffer, Wa)
## Build active module for different build targets, different tool chains and different archs
#
def _BuildModule(self):
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
index = 0
for ToolChain in self.ToolChainList:
WorkspaceAutoGenTime = time.time()
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
index += 1
#
# module build needs platform build information, so get platform
# AutoGen first
#
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress,
self.ModuleFile
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
Wa.CreateMakeFile(False)
# Add ffs build to makefile
CmdListDict = None
if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
CmdListDict = self._GenFfsCmd()
self.Progress.Stop("done!")
MaList = []
ExitFlag = threading.Event()
ExitFlag.clear()
self.AutoGenTime += int(round((time.time() - WorkspaceAutoGenTime)))
for Arch in Wa.ArchList:
AutoGenStart = time.time()
GlobalData.gGlobalDefines['ARCH'] = Arch
Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
for Module in Pa.Platform.Modules:
if self.ModuleFile.Dir == Module.Dir and self.ModuleFile.Name == Module.Name:
Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
if Ma is None: continue
MaList.append(Ma)
if Ma.CanSkipbyHash():
self.HashSkipModules.append(Ma)
continue
                            # Do not run AutoGen for the 'clean', 'cleanlib', 'cleanall', 'run' and 'fds' targets
if self.Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or self.Target == 'genc':
self.Progress.Start("Generating code")
Ma.CreateCodeFile(True)
self.Progress.Stop("done!")
if self.Target == "genc":
return True
if not self.SkipAutoGen or self.Target == 'genmake':
self.Progress.Start("Generating makefile")
if CmdListDict and self.Fdf and (Module.File, Arch) in CmdListDict:
Ma.CreateMakeFile(True, CmdListDict[Module.File, Arch])
del CmdListDict[Module.File, Arch]
else:
Ma.CreateMakeFile(True)
self.Progress.Stop("done!")
if self.Target == "genmake":
return True
self.BuildModules.append(Ma)
self.AutoGenTime += int(round((time.time() - AutoGenStart)))
MakeStart = time.time()
for Ma in self.BuildModules:
if not Ma.IsBinaryModule:
Bt = BuildTask.New(ModuleMakeUnit(Ma, self.Target))
# Break build if any build thread has error
if BuildTask.HasError():
# we need a full version of makefile for platform
ExitFlag.set()
BuildTask.WaitForComplete()
Pa.CreateMakeFile(False)
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
# Start task scheduler
if not BuildTask.IsOnGoing():
BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)
                # In case there's an interruption, we need a full version of the makefile for the platform
Pa.CreateMakeFile(False)
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
self.MakeTime += int(round((time.time() - MakeStart)))
MakeContiue = time.time()
ExitFlag.set()
BuildTask.WaitForComplete()
self.CreateAsBuiltInf()
self.MakeTime += int(round((time.time() - MakeContiue)))
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
self.BuildReport.AddPlatformReport(Wa, MaList)
if MaList == []:
                    EdkLogger.error(
                        'build',
                        BUILD_ERROR,
                        "Module for [%s] is not a component of the active platform."\
                        " Please make sure that the ARCH and the INF file path"\
                        " match those given in [%s]" % \
                        (', '.join(Wa.ArchList), self.PlatformFile),
                        ExtraData=self.ModuleFile
                    )
# Create MAP file when Load Fix Address is enabled.
if self.Target == "fds" and self.Fdf:
for Arch in Wa.ArchList:
#
# Check whether the set fix address is above 4G for 32bit image.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platorm with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma is None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
MapBuffer = BytesIO('')
if self.LoadFixAddress != 0:
#
# Rebase module to the preferred memory address before GenFds
#
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
#
# create FDS again for the updated EFI image
#
GenFdsStart = time.time()
self._Build("fds", Wa)
self.GenFdsTime += int(round((time.time() - GenFdsStart)))
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile (MapBuffer, Wa)
def _GenFfsCmd(self):
# convert dictionary of Cmd:(Inf,Arch)
# to a new dictionary of (Inf,Arch):Cmd,Cmd,Cmd...
CmdSetDict = defaultdict(set)
GenFfsDict = GenFds.GenFfsMakefile('', GlobalData.gFdfParser, self, self.ArchList, GlobalData)
for Cmd in GenFfsDict:
tmpInf, tmpArch = GenFfsDict[Cmd]
CmdSetDict[tmpInf, tmpArch].add(Cmd)
return CmdSetDict
## Build a platform in multi-thread mode
#
def _MultiThreadBuildPlatform(self):
SaveFileOnChange(self.PlatformBuildPath, '# DO NOT EDIT \n# FILE auto-generated\n', False)
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
index = 0
for ToolChain in self.ToolChainList:
WorkspaceAutoGenTime = time.time()
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
index += 1
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
self.BuildReport.AddPlatformReport(Wa)
Wa.CreateMakeFile(False)
# Add ffs build to makefile
CmdListDict = None
if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
CmdListDict = self._GenFfsCmd()
# multi-thread exit flag
ExitFlag = threading.Event()
ExitFlag.clear()
self.AutoGenTime += int(round((time.time() - WorkspaceAutoGenTime)))
for Arch in Wa.ArchList:
AutoGenStart = time.time()
GlobalData.gGlobalDefines['ARCH'] = Arch
Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
if Pa is None:
continue
ModuleList = []
for Inf in Pa.Platform.Modules:
ModuleList.append(Inf)
# Add the INF only list in FDF
if GlobalData.gFdfParser is not None:
for InfName in GlobalData.gFdfParser.Profile.InfList:
Inf = PathClass(NormPath(InfName), self.WorkspaceDir, Arch)
if Inf in Pa.Platform.Modules:
continue
ModuleList.append(Inf)
for Module in ModuleList:
# Get ModuleAutoGen object to generate C code file and makefile
Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
if Ma is None:
continue
if Ma.CanSkipbyHash():
self.HashSkipModules.append(Ma)
continue
                        # Do not run AutoGen for the 'clean', 'cleanlib', 'cleanall', 'run' and 'fds' targets
if self.Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or self.Target == 'genc':
Ma.CreateCodeFile(True)
if self.Target == "genc":
continue
if not self.SkipAutoGen or self.Target == 'genmake':
if CmdListDict and self.Fdf and (Module.File, Arch) in CmdListDict:
Ma.CreateMakeFile(True, CmdListDict[Module.File, Arch])
del CmdListDict[Module.File, Arch]
else:
Ma.CreateMakeFile(True)
if self.Target == "genmake":
continue
self.BuildModules.append(Ma)
self.Progress.Stop("done!")
self.AutoGenTime += int(round((time.time() - AutoGenStart)))
MakeStart = time.time()
for Ma in self.BuildModules:
# Generate build task for the module
if not Ma.IsBinaryModule:
Bt = BuildTask.New(ModuleMakeUnit(Ma, self.Target))
# Break build if any build thread has error
if BuildTask.HasError():
# we need a full version of makefile for platform
ExitFlag.set()
BuildTask.WaitForComplete()
Pa.CreateMakeFile(False)
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
# Start task scheduler
if not BuildTask.IsOnGoing():
BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)
                # In case there's an interruption, we need a full version of the makefile for the platform
Pa.CreateMakeFile(False)
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
self.MakeTime += int(round((time.time() - MakeStart)))
MakeContiue = time.time()
                #
                # All modules have been put in the build task queue. Tell the task
                # scheduler to exit once all tasks are completed.
                #
ExitFlag.set()
BuildTask.WaitForComplete()
self.CreateAsBuiltInf()
self.MakeTime += int(round((time.time() - MakeContiue)))
#
# Check for build error, and raise exception if one
# has been signaled.
#
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
# Create MAP file when Load Fix Address is enabled.
if self.Target in ["", "all", "fds"]:
for Arch in Wa.ArchList:
#
# Check whether the set fix address is above 4G for 32bit image.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platorm with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma is None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
#
# Rebase module to the preferred memory address before GenFds
#
MapBuffer = BytesIO('')
if self.LoadFixAddress != 0:
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
if self.Fdf:
#
# Generate FD image if there's a FDF file found
#
GenFdsStart = time.time()
if GenFdsApi(Wa.GenFdsCommandDict, self.Db):
EdkLogger.error("build", COMMAND_FAILURE)
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
self.GenFdsTime += int(round((time.time() - GenFdsStart)))
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile(MapBuffer, Wa)
## Generate GuidedSectionTools.txt in the FV directories.
#
def CreateGuidedSectionToolsFile(self):
for BuildTarget in self.BuildTargetList:
for ToolChain in self.ToolChainList:
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag
)
FvDir = Wa.FvDir
if not os.path.exists(FvDir):
continue
for Arch in self.ArchList:
# Build up the list of supported architectures for this build
prefix = '%s_%s_%s_' % (BuildTarget, ToolChain, Arch)
# Look through the tool definitions for GUIDed tools
guidAttribs = []
for (attrib, value) in self.ToolDef.ToolsDefTxtDictionary.iteritems():
if attrib.upper().endswith('_GUID'):
split = attrib.split('_')
thisPrefix = '_'.join(split[0:3]) + '_'
if thisPrefix == prefix:
guid = self.ToolDef.ToolsDefTxtDictionary[attrib]
guid = guid.lower()
toolName = split[3]
path = '_'.join(split[0:4]) + '_PATH'
path = self.ToolDef.ToolsDefTxtDictionary[path]
path = self.GetFullPathOfTool(path)
guidAttribs.append((guid, toolName, path))
                    # Write out GuidedSectionTools.txt
toolsFile = os.path.join(FvDir, 'GuidedSectionTools.txt')
toolsFile = open(toolsFile, 'wt')
for guidedSectionTool in guidAttribs:
print(' '.join(guidedSectionTool), file=toolsFile)
toolsFile.close()
## Returns the full path of the tool.
#
def GetFullPathOfTool (self, tool):
if os.path.exists(tool):
return os.path.realpath(tool)
else:
# We need to search for the tool using the
# PATH environment variable.
for dirInPath in os.environ['PATH'].split(os.pathsep):
foundPath = os.path.join(dirInPath, tool)
if os.path.exists(foundPath):
return os.path.realpath(foundPath)
# If the tool was not found in the path then we just return
# the input tool.
return tool
## Launch the module or platform build
#
def Launch(self):
if not self.ModuleFile:
if not self.SpawnMode or self.Target not in ["", "all"]:
self.SpawnMode = False
self._BuildPlatform()
else:
self._MultiThreadBuildPlatform()
self.CreateGuidedSectionToolsFile()
else:
self.SpawnMode = False
self._BuildModule()
if self.Target == 'cleanall':
RemoveDirectory(os.path.dirname(GlobalData.gDatabasePath), True)
def CreateAsBuiltInf(self):
for Module in self.BuildModules:
Module.CreateAsBuiltInf()
for Module in self.HashSkipModules:
Module.CreateAsBuiltInf(True)
self.BuildModules = []
self.HashSkipModules = []
## Do some clean-up works when error occurred
def Relinquish(self):
OldLogLevel = EdkLogger.GetLevel()
EdkLogger.SetLevel(EdkLogger.ERROR)
#self.DumpBuildData()
Utils.Progressor.Abort()
if self.SpawnMode == True:
BuildTask.Abort()
EdkLogger.SetLevel(OldLogLevel)
def DumpBuildData(self):
CacheDirectory = os.path.dirname(GlobalData.gDatabasePath)
Utils.CreateDirectory(CacheDirectory)
Utils.DataDump(Utils.gFileTimeStampCache, os.path.join(CacheDirectory, "gFileTimeStampCache"))
Utils.DataDump(Utils.gDependencyDatabase, os.path.join(CacheDirectory, "gDependencyDatabase"))
def RestoreBuildData(self):
FilePath = os.path.join(os.path.dirname(GlobalData.gDatabasePath), "gFileTimeStampCache")
if Utils.gFileTimeStampCache == {} and os.path.isfile(FilePath):
Utils.gFileTimeStampCache = Utils.DataRestore(FilePath)
if Utils.gFileTimeStampCache is None:
Utils.gFileTimeStampCache = {}
FilePath = os.path.join(os.path.dirname(GlobalData.gDatabasePath), "gDependencyDatabase")
if Utils.gDependencyDatabase == {} and os.path.isfile(FilePath):
Utils.gDependencyDatabase = Utils.DataRestore(FilePath)
if Utils.gDependencyDatabase is None:
Utils.gDependencyDatabase = {}
def ParseDefines(DefineList=[]):
DefineDict = {}
if DefineList is not None:
for Define in DefineList:
DefineTokenList = Define.split("=", 1)
if not GlobalData.gMacroNamePattern.match(DefineTokenList[0]):
EdkLogger.error('build', FORMAT_INVALID,
"The macro name must be in the pattern [A-Z][A-Z0-9_]*",
ExtraData=DefineTokenList[0])
if len(DefineTokenList) == 1:
DefineDict[DefineTokenList[0]] = "TRUE"
else:
DefineDict[DefineTokenList[0]] = DefineTokenList[1].strip()
return DefineDict
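# Illustrative example (added for clarity): ParseDefines(["FOO=bar baz", "SECURE_BOOT"])
# returns {'FOO': 'bar baz', 'SECURE_BOOT': 'TRUE'}, while a macro name that does not
# match gMacroNamePattern (for instance "2NDSTAGE") aborts the build with FORMAT_INVALID.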
gParamCheck = []
def SingleCheckCallback(option, opt_str, value, parser):
if option not in gParamCheck:
setattr(parser.values, option.dest, value)
gParamCheck.append(option)
else:
parser.error("Option %s only allows one instance in command line!" % option)
def LogBuildTime(Time):
if Time:
TimeDurStr = ''
TimeDur = time.gmtime(Time)
if TimeDur.tm_yday > 1:
TimeDurStr = time.strftime("%H:%M:%S", TimeDur) + ", %d day(s)" % (TimeDur.tm_yday - 1)
else:
TimeDurStr = time.strftime("%H:%M:%S", TimeDur)
return TimeDurStr
else:
return None
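# Illustrative example (added for clarity): LogBuildTime(3725) returns '01:02:05',
# LogBuildTime(90000) returns '01:00:00, 1 day(s)', and LogBuildTime(0) returns None.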
## Parse command line options
#
# Using standard Python module optparse to parse command line option of this tool.
#
# @retval Opt A optparse.Values object containing the parsed options
# @retval Args Target of build command
#
def MyOptionParser():
Parser = OptionParser(description=__copyright__, version=__version__, prog="build.exe", usage="%prog [options] [all|fds|genc|genmake|clean|cleanall|cleanlib|modules|libraries|run]")
Parser.add_option("-a", "--arch", action="append", type="choice", choices=['IA32', 'X64', 'IPF', 'EBC', 'ARM', 'AARCH64'], dest="TargetArch",
help="ARCHS is one of list: IA32, X64, IPF, ARM, AARCH64 or EBC, which overrides target.txt's TARGET_ARCH definition. To specify more archs, please repeat this option.")
Parser.add_option("-p", "--platform", action="callback", type="string", dest="PlatformFile", callback=SingleCheckCallback,
help="Build the platform specified by the DSC file name argument, overriding target.txt's ACTIVE_PLATFORM definition.")
Parser.add_option("-m", "--module", action="callback", type="string", dest="ModuleFile", callback=SingleCheckCallback,
help="Build the module specified by the INF file name argument.")
Parser.add_option("-b", "--buildtarget", type="string", dest="BuildTarget", help="Using the TARGET to build the platform, overriding target.txt's TARGET definition.",
action="append")
Parser.add_option("-t", "--tagname", action="append", type="string", dest="ToolChain",
help="Using the Tool Chain Tagname to build the platform, overriding target.txt's TOOL_CHAIN_TAG definition.")
Parser.add_option("-x", "--sku-id", action="callback", type="string", dest="SkuId", callback=SingleCheckCallback,
help="Using this name of SKU ID to build the platform, overriding SKUID_IDENTIFIER in DSC file.")
Parser.add_option("-n", action="callback", type="int", dest="ThreadNumber", callback=SingleCheckCallback,
help="Build the platform using multi-threaded compiler. The value overrides target.txt's MAX_CONCURRENT_THREAD_NUMBER. When value is set to 0, tool automatically detect number of "\
"processor threads, set value to 1 means disable multi-thread build, and set value to more than 1 means user specify the threads number to build.")
Parser.add_option("-f", "--fdf", action="callback", type="string", dest="FdfFile", callback=SingleCheckCallback,
help="The name of the FDF file to use, which overrides the setting in the DSC file.")
Parser.add_option("-r", "--rom-image", action="append", type="string", dest="RomImage", default=[],
help="The name of FD to be generated. The name must be from [FD] section in FDF file.")
Parser.add_option("-i", "--fv-image", action="append", type="string", dest="FvImage", default=[],
help="The name of FV to be generated. The name must be from [FV] section in FDF file.")
Parser.add_option("-C", "--capsule-image", action="append", type="string", dest="CapName", default=[],
help="The name of Capsule to be generated. The name must be from [Capsule] section in FDF file.")
Parser.add_option("-u", "--skip-autogen", action="store_true", dest="SkipAutoGen", help="Skip AutoGen step.")
Parser.add_option("-e", "--re-parse", action="store_true", dest="Reparse", help="Re-parse all meta-data files.")
Parser.add_option("-c", "--case-insensitive", action="store_true", dest="CaseInsensitive", default=False, help="Don't check case of file name.")
Parser.add_option("-w", "--warning-as-error", action="store_true", dest="WarningAsError", help="Treat warning in tools as error.")
Parser.add_option("-j", "--log", action="store", dest="LogFile", help="Put log in specified file as well as on console.")
Parser.add_option("-s", "--silent", action="store_true", type=None, dest="SilentMode",
help="Make use of silent mode of (n)make.")
Parser.add_option("-q", "--quiet", action="store_true", type=None, help="Disable all messages except FATAL ERRORS.")
Parser.add_option("-v", "--verbose", action="store_true", type=None, help="Turn on verbose output with informational messages printed, "\
"including library instances selected, final dependency expression, "\
"and warning messages, etc.")
Parser.add_option("-d", "--debug", action="store", type="int", help="Enable debug messages at specified level.")
Parser.add_option("-D", "--define", action="append", type="string", dest="Macros", help="Macro: \"Name [= Value]\".")
Parser.add_option("-y", "--report-file", action="store", dest="ReportFile", help="Create/overwrite the report to the specified filename.")
Parser.add_option("-Y", "--report-type", action="append", type="choice", choices=['PCD', 'LIBRARY', 'FLASH', 'DEPEX', 'BUILD_FLAGS', 'FIXED_ADDRESS', 'HASH', 'EXECUTION_ORDER'], dest="ReportType", default=[],
help="Flags that control the type of build report to generate. Must be one of: [PCD, LIBRARY, FLASH, DEPEX, BUILD_FLAGS, FIXED_ADDRESS, HASH, EXECUTION_ORDER]. "\
"To specify more than one flag, repeat this option on the command line and the default flag set is [PCD, LIBRARY, FLASH, DEPEX, HASH, BUILD_FLAGS, FIXED_ADDRESS]")
Parser.add_option("-F", "--flag", action="store", type="string", dest="Flag",
help="Specify the specific option to parse EDK UNI file. Must be one of: [-c, -s]. -c is for EDK framework UNI file, and -s is for EDK UEFI UNI file. "\
"This option can also be specified by setting *_*_*_BUILD_FLAGS in [BuildOptions] section of platform DSC. If they are both specified, this value "\
"will override the setting in [BuildOptions] section of platform DSC.")
Parser.add_option("-N", "--no-cache", action="store_true", dest="DisableCache", default=False, help="Disable build cache mechanism")
Parser.add_option("--conf", action="store", type="string", dest="ConfDirectory", help="Specify the customized Conf directory.")
Parser.add_option("--check-usage", action="store_true", dest="CheckUsage", default=False, help="Check usage content of entries listed in INF file.")
Parser.add_option("--ignore-sources", action="store_true", dest="IgnoreSources", default=False, help="Focus to a binary build and ignore all source files")
Parser.add_option("--pcd", action="append", dest="OptionPcd", help="Set PCD value by command line. Format: \"PcdName=Value\" ")
Parser.add_option("-l", "--cmd-len", action="store", type="int", dest="CommandLength", help="Specify the maximum line length of build command. Default is 4096.")
Parser.add_option("--hash", action="store_true", dest="UseHashCache", default=False, help="Enable hash-based caching during build process.")
Parser.add_option("--binary-destination", action="store", type="string", dest="BinCacheDest", help="Generate a cache of binary files in the specified directory.")
Parser.add_option("--binary-source", action="store", type="string", dest="BinCacheSource", help="Consume a cache of binary files from the specified directory.")
Parser.add_option("--genfds-multi-thread", action="store_true", dest="GenfdsMultiThread", default=False, help="Enable GenFds multi thread to generate ffs file.")
(Opt, Args) = Parser.parse_args()
return (Opt, Args)
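# Illustrative example (added for clarity; the package and tool chain names are just
# placeholders): a typical command line handled by the options above would be
#     build -p MdeModulePkg/MdeModulePkg.dsc -a X64 -t GCC5 -b DEBUG -n 4
# i.e. build the given DSC platform for X64 with the GCC5 tool chain tag, the DEBUG
# build target and four build threads.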
## Tool entrance method
#
# This method mainly dispatch specific methods per the command line options.
# If no error found, return zero value so the caller of this tool can know
# if it's executed successfully or not.
#
# @retval 0 Tool was successful
# @retval 1 Tool failed
#
def Main():
StartTime = time.time()
# Initialize log system
EdkLogger.Initialize()
GlobalData.gCommand = sys.argv[1:]
#
# Parse the options and args
#
(Option, Target) = MyOptionParser()
GlobalData.gOptions = Option
GlobalData.gCaseInsensitive = Option.CaseInsensitive
# Set log level
if Option.verbose is not None:
EdkLogger.SetLevel(EdkLogger.VERBOSE)
elif Option.quiet is not None:
EdkLogger.SetLevel(EdkLogger.QUIET)
elif Option.debug is not None:
EdkLogger.SetLevel(Option.debug + 1)
else:
EdkLogger.SetLevel(EdkLogger.INFO)
if Option.LogFile is not None:
EdkLogger.SetLogFile(Option.LogFile)
if Option.WarningAsError == True:
EdkLogger.SetWarningAsError()
if platform.platform().find("Windows") >= 0:
GlobalData.gIsWindows = True
else:
GlobalData.gIsWindows = False
EdkLogger.quiet("Build environment: %s" % platform.platform())
EdkLogger.quiet(time.strftime("Build start time: %H:%M:%S, %b.%d %Y\n", time.localtime()));
ReturnCode = 0
MyBuild = None
BuildError = True
try:
if len(Target) == 0:
Target = "all"
elif len(Target) >= 2:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "More than one targets are not supported.",
ExtraData="Please select one of: %s" % (' '.join(gSupportedTarget)))
else:
Target = Target[0].lower()
if Target not in gSupportedTarget:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "Not supported target [%s]." % Target,
ExtraData="Please select one of: %s" % (' '.join(gSupportedTarget)))
#
# Check environment variable: EDK_TOOLS_PATH, WORKSPACE, PATH
#
CheckEnvVariable()
GlobalData.gCommandLineDefines.update(ParseDefines(Option.Macros))
Workspace = os.getenv("WORKSPACE")
#
        # Get files' real names in the workspace dir
#
GlobalData.gAllFiles = Utils.DirCache(Workspace)
WorkingDirectory = os.getcwd()
if not Option.ModuleFile:
FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.inf')))
FileNum = len(FileList)
if FileNum >= 2:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "There are %d INF files in %s." % (FileNum, WorkingDirectory),
ExtraData="Please use '-m <INF_FILE_PATH>' switch to choose one.")
elif FileNum == 1:
Option.ModuleFile = NormFile(FileList[0], Workspace)
if Option.ModuleFile:
if os.path.isabs (Option.ModuleFile):
if os.path.normcase (os.path.normpath(Option.ModuleFile)).find (Workspace) == 0:
Option.ModuleFile = NormFile(os.path.normpath(Option.ModuleFile), Workspace)
Option.ModuleFile = PathClass(Option.ModuleFile, Workspace)
ErrorCode, ErrorInfo = Option.ModuleFile.Validate(".inf", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if Option.PlatformFile is not None:
if os.path.isabs (Option.PlatformFile):
if os.path.normcase (os.path.normpath(Option.PlatformFile)).find (Workspace) == 0:
Option.PlatformFile = NormFile(os.path.normpath(Option.PlatformFile), Workspace)
Option.PlatformFile = PathClass(Option.PlatformFile, Workspace)
if Option.FdfFile is not None:
if os.path.isabs (Option.FdfFile):
if os.path.normcase (os.path.normpath(Option.FdfFile)).find (Workspace) == 0:
Option.FdfFile = NormFile(os.path.normpath(Option.FdfFile), Workspace)
Option.FdfFile = PathClass(Option.FdfFile, Workspace)
ErrorCode, ErrorInfo = Option.FdfFile.Validate(".fdf", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if Option.Flag is not None and Option.Flag not in ['-c', '-s']:
EdkLogger.error("build", OPTION_VALUE_INVALID, "UNI flag must be one of -c or -s")
MyBuild = Build(Target, Workspace, Option)
GlobalData.gCommandLineDefines['ARCH'] = ' '.join(MyBuild.ArchList)
if not (MyBuild.LaunchPrebuildFlag and os.path.exists(MyBuild.PlatformBuildPath)):
MyBuild.Launch()
#MyBuild.DumpBuildData()
#
        # All jobs done, no error found and no exception raised
#
BuildError = False
except FatalError as X:
if MyBuild is not None:
# for multi-thread build exits safely
MyBuild.Relinquish()
if Option is not None and Option.debug is not None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
ReturnCode = X.args[0]
except Warning as X:
# error from Fdf parser
if MyBuild is not None:
# for multi-thread build exits safely
MyBuild.Relinquish()
if Option is not None and Option.debug is not None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
else:
EdkLogger.error(X.ToolName, FORMAT_INVALID, File=X.FileName, Line=X.LineNumber, ExtraData=X.Message, RaiseError=False)
ReturnCode = FORMAT_INVALID
except KeyboardInterrupt:
ReturnCode = ABORT_ERROR
if Option is not None and Option.debug is not None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
except:
if MyBuild is not None:
# for multi-thread build exits safely
MyBuild.Relinquish()
# try to get the meta-file from the object causing exception
Tb = sys.exc_info()[-1]
MetaFile = GlobalData.gProcessingFile
while Tb is not None:
if 'self' in Tb.tb_frame.f_locals and hasattr(Tb.tb_frame.f_locals['self'], 'MetaFile'):
MetaFile = Tb.tb_frame.f_locals['self'].MetaFile
Tb = Tb.tb_next
EdkLogger.error(
"\nbuild",
CODE_ERROR,
"Unknown fatal error when processing [%s]" % MetaFile,
ExtraData="\n(Please send email to edk2-devel@lists.01.org for help, attaching following call stack trace!)\n",
RaiseError=False
)
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
ReturnCode = CODE_ERROR
finally:
Utils.Progressor.Abort()
Utils.ClearDuplicatedInf()
if ReturnCode == 0:
try:
MyBuild.LaunchPostbuild()
Conclusion = "Done"
except:
Conclusion = "Failed"
elif ReturnCode == ABORT_ERROR:
Conclusion = "Aborted"
else:
Conclusion = "Failed"
FinishTime = time.time()
BuildDuration = time.gmtime(int(round(FinishTime - StartTime)))
BuildDurationStr = ""
if BuildDuration.tm_yday > 1:
BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration) + ", %d day(s)" % (BuildDuration.tm_yday - 1)
else:
BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration)
if MyBuild is not None:
if not BuildError:
MyBuild.BuildReport.GenerateReport(BuildDurationStr, LogBuildTime(MyBuild.AutoGenTime), LogBuildTime(MyBuild.MakeTime), LogBuildTime(MyBuild.GenFdsTime))
EdkLogger.SetLevel(EdkLogger.QUIET)
EdkLogger.quiet("\n- %s -" % Conclusion)
EdkLogger.quiet(time.strftime("Build end time: %H:%M:%S, %b.%d %Y", time.localtime()))
EdkLogger.quiet("Build total time: %s\n" % BuildDurationStr)
return ReturnCode
if __name__ == '__main__':
r = Main()
## 0-127 is a safe return range, and 1 is a standard default error
if r < 0 or r > 127: r = 1
sys.exit(r)
|
process.py
|
import multiprocessing as mp
import resource
import time
from ctypes import c_longlong
__all__ = ['Manager']
class Manager:
def __init__(self, tab, fun):
self.name = self
self.tab = tab
self.fun = fun
self.n = mp.cpu_count()//2
    def Mes(self, q):
        # Monitoring loop: run until a sentinel value shows up on the queue, sampling
        # this process's peak RSS every 100 ms (ru_maxrss is reported in kB on Linux).
        start = time.time()
        running = True
        max_usage = 0
        while running:
            if not q.empty():
                running = False
            max_usage = max(
                max_usage,
                resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
            )
            time.sleep(0.1)
        end = time.time()
        print("Elapsed time: {}, memory usage: {} kb".format(end - start, max_usage))
    def __call__(self, measure=False):
        def pom(tab, v, fun):
            # Worker: apply fun to every element of its slice and add the partial
            # sum to the shared counter under its lock.
            wyn = 0
            for a in tab:
                wyn += fun(a)
            with v.get_lock():
                v.value += wyn
        v = mp.Value(c_longlong, 0)
        processes = []
        for c in range(0, self.n):
            # Round-robin split of the input across self.n worker processes.
            temp = [self.tab[i] for i in range(c, len(self.tab), self.n)]
            p = mp.Process(target=pom, args=(temp, v, self.fun))
            processes.append(p)
        if measure:
            q = mp.Queue()
            monitor = mp.Process(target=self.Mes, args=(q,))
            monitor.start()
        for p in processes:
            p.start()
        for p in processes:
            p.join()
        if measure:
            # Signal the monitor that the workers are done, then wait for its report.
            q.put(False)
            monitor.join()
        return v.value
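
# Minimal usage sketch (not part of the original module; the workload below is
# illustrative only): sum f(x) over a list using half of the logical CPUs.
# Assumes a fork-capable platform (the module already depends on the Unix-only
# 'resource' module), since the worker target is a nested function.
if __name__ == '__main__':
    def square(x):
        return x * x

    manager = Manager(list(range(100000)), square)
    total = manager(measure=True)   # measure=True also spawns the monitor process
    print(total)                    # sum of squares of 0..99999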
|
watcher.py
|
import redis
import time
import json
import threading
import logging
from pprint import pprint
class PlayQueueWatcher:
def __init__(self, host, port, password=None, debug=False, wait=5):
"""
:param host:
:param port:
:param password:
:param debug:
:param wait:
"""
if debug:
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
self._stop_event = threading.Event()
self._worker = None
self._conn = self._connect(host, port, password)
self._wait = wait
def get_redis_conn(self):
return self._conn
def _connect(self, host, port, password):
"""
Returns redis connection object
"""
pool = redis.ConnectionPool(host=host, port=port, db=0, password=password)
r = redis.StrictRedis(connection_pool=pool)
r.client_list() # Health check
logging.debug("Redis connection established.")
return r
def start(self, callback, with_thread=False):
"""
Start worker
:param callback: callback function (having arguments "data")
:param with_thread: start worker with thread or not
:return:
"""
if with_thread:
logging.debug("Worker status: thread mode")
self._worker = threading.Thread(target=self._start_worker_with_thread, args=(callback,))
self._worker.start()
else:
logging.debug("Worker status: normal mode")
self._start_worker(callback)
def stop(self):
if self._worker:
self._stop_event.set() # set stop flag
            self._worker.join()  # wait until the thread stops
def _start_worker(self, callback):
"""
Start worker
:param callback: callback function
:return:
"""
while True:
for data in self._dequeue():
if data:
logging.debug("Received data.")
logging.debug(data)
callback(data) # call callback function
time.sleep(int(data["song"]["time"]) + self._wait) # Wait for time saved in queue
else:
logging.debug("No data in queue.")
def _start_worker_with_thread(self, callback):
"""
Start worker
:param callback: callback function
:return:
"""
        while not self._stop_event.is_set():
            for data in self._dequeue():
                if self._stop_event.is_set():
                    # _dequeue() is an endless generator, so the stop flag has to be
                    # honoured here as well; otherwise stop() would block forever.
                    break
                if data:
                    logging.debug("Received data.")
                    callback(data)  # call callback function
                    time.sleep(int(data["song"]["time"]) + self._wait)  # Wait for time saved in queue
                else:
                    logging.debug("No data in queue.")
    def _dequeue(self):  # generator that yields dicts, or None when the queue is empty
"""
Dequeue from queue in redis after waiting for a while.
"""
while True:
keys = self._conn.keys()
keys.sort()
if len(keys) == 0:
yield None
continue
key = keys[0] # pick up early timestamp data from queue
data = self._conn.get(key)
logging.debug("Next key: {0}".format(key))
logging.debug("Data: {0}".format(data))
logging.debug("Remaining: {0}".format(keys[1::]))
data = json.loads(data)
yield data
logging.debug("Delete key: {0}".format(key))
self._conn.delete(key)
def __del__(self):
self.stop()
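# --- Hypothetical usage sketch (added for illustration; not part of the original module) ---
# The watcher polls db 0, pops the lexicographically smallest key, JSON-decodes its value and,
# after the callback, sleeps for data["song"]["time"] + wait seconds, so a producer is expected
# to write timestamp-like keys whose values are JSON documents of that shape. Host, port and
# the payload below are placeholders.
if __name__ == '__main__':
    watcher = PlayQueueWatcher(host='localhost', port=6379, debug=True, wait=2)
    watcher.get_redis_conn().set(
        '20240101120000', json.dumps({'song': {'title': 'demo', 'time': 3}}))

    def on_play(data):
        print('now playing:', data['song'])

    watcher.start(on_play, with_thread=True)
    time.sleep(10)
    watcher.stop()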
|
monitor.py
|
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import datetime
import json
import os
import platform
import time
import threading
import uuid
import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.utils.fileutil as fileutil
import azurelinuxagent.common.logger as logger
from azurelinuxagent.common.event import add_event, WALAEventOperation
from azurelinuxagent.common.exception import EventError, ProtocolError, OSUtilError
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.osutil import get_osutil
from azurelinuxagent.common.protocol import get_protocol_util
from azurelinuxagent.common.protocol.restapi import TelemetryEventParam, \
TelemetryEventList, \
TelemetryEvent, \
set_properties
from azurelinuxagent.common.utils.restutil import IOErrorCounter
from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, getattrib
from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \
DISTRO_CODE_NAME, AGENT_LONG_VERSION, \
AGENT_NAME, CURRENT_AGENT, CURRENT_VERSION
def parse_event(data_str):
try:
return parse_json_event(data_str)
except ValueError:
return parse_xml_event(data_str)
def parse_xml_param(param_node):
name = getattrib(param_node, "Name")
value_str = getattrib(param_node, "Value")
attr_type = getattrib(param_node, "T")
value = value_str
if attr_type == 'mt:uint64':
value = int(value_str)
elif attr_type == 'mt:bool':
value = bool(value_str)
elif attr_type == 'mt:float64':
value = float(value_str)
return TelemetryEventParam(name, value)
def parse_xml_event(data_str):
try:
xml_doc = parse_doc(data_str)
event_id = getattrib(find(xml_doc, "Event"), 'id')
provider_id = getattrib(find(xml_doc, "Provider"), 'id')
event = TelemetryEvent(event_id, provider_id)
param_nodes = findall(xml_doc, 'Param')
for param_node in param_nodes:
event.parameters.append(parse_xml_param(param_node))
return event
except Exception as e:
raise ValueError(ustr(e))
def parse_json_event(data_str):
data = json.loads(data_str)
event = TelemetryEvent()
set_properties("TelemetryEvent", event, data)
return event
def get_monitor_handler():
return MonitorHandler()
class MonitorHandler(object):
def __init__(self):
self.osutil = get_osutil()
self.protocol_util = get_protocol_util()
self.sysinfo = []
self.event_thread = None
def run(self):
self.init_sysinfo()
self.start()
def is_alive(self):
return self.event_thread.is_alive()
def start(self):
self.event_thread = threading.Thread(target=self.daemon)
self.event_thread.setDaemon(True)
self.event_thread.start()
def init_sysinfo(self):
osversion = "{0}:{1}-{2}-{3}:{4}".format(platform.system(),
DISTRO_NAME,
DISTRO_VERSION,
DISTRO_CODE_NAME,
platform.release())
self.sysinfo.append(TelemetryEventParam("OSVersion", osversion))
self.sysinfo.append(
TelemetryEventParam("GAVersion", CURRENT_AGENT))
try:
ram = self.osutil.get_total_mem()
processors = self.osutil.get_processor_cores()
self.sysinfo.append(TelemetryEventParam("RAM", ram))
self.sysinfo.append(TelemetryEventParam("Processors", processors))
except OSUtilError as e:
logger.warn("Failed to get system info: {0}", e)
try:
protocol = self.protocol_util.get_protocol()
vminfo = protocol.get_vminfo()
self.sysinfo.append(TelemetryEventParam("VMName",
vminfo.vmName))
self.sysinfo.append(TelemetryEventParam("TenantName",
vminfo.tenantName))
self.sysinfo.append(TelemetryEventParam("RoleName",
vminfo.roleName))
self.sysinfo.append(TelemetryEventParam("RoleInstanceName",
vminfo.roleInstanceName))
self.sysinfo.append(TelemetryEventParam("ContainerId",
vminfo.containerId))
except ProtocolError as e:
logger.warn("Failed to get system info: {0}", e)
def collect_event(self, evt_file_name):
try:
logger.verbose("Found event file: {0}", evt_file_name)
with open(evt_file_name, "rb") as evt_file:
                # if we fail to open or delete the file, raise an exception
data_str = evt_file.read().decode("utf-8", 'ignore')
logger.verbose("Processed event file: {0}", evt_file_name)
os.remove(evt_file_name)
return data_str
except IOError as e:
msg = "Failed to process {0}, {1}".format(evt_file_name, e)
raise EventError(msg)
def collect_and_send_events(self):
event_list = TelemetryEventList()
event_dir = os.path.join(conf.get_lib_dir(), "events")
event_files = os.listdir(event_dir)
for event_file in event_files:
if not event_file.endswith(".tld"):
continue
event_file_path = os.path.join(event_dir, event_file)
try:
data_str = self.collect_event(event_file_path)
except EventError as e:
logger.error("{0}", e)
continue
try:
event = parse_event(data_str)
self.add_sysinfo(event)
event_list.events.append(event)
except (ValueError, ProtocolError) as e:
logger.warn("Failed to decode event file: {0}", e)
continue
if len(event_list.events) == 0:
return
try:
protocol = self.protocol_util.get_protocol()
protocol.report_event(event_list)
except ProtocolError as e:
logger.error("{0}", e)
def daemon(self):
period = datetime.timedelta(minutes=30)
protocol = self.protocol_util.get_protocol()
last_heartbeat = datetime.datetime.utcnow() - period
# Create a new identifier on each restart and reset the counter
heartbeat_id = str(uuid.uuid4()).upper()
counter = 0
while True:
if datetime.datetime.utcnow() >= (last_heartbeat + period):
last_heartbeat = datetime.datetime.utcnow()
incarnation = protocol.get_incarnation()
dropped_packets = self.osutil.get_firewall_dropped_packets(
protocol.endpoint)
msg = "{0};{1};{2};{3}".format(
incarnation, counter, heartbeat_id, dropped_packets)
add_event(
name=AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.HeartBeat,
is_success=True,
message=msg,
log_event=False)
counter += 1
io_errors = IOErrorCounter.get_and_reset()
hostplugin_errors = io_errors.get("hostplugin")
protocol_errors = io_errors.get("protocol")
other_errors = io_errors.get("other")
if hostplugin_errors > 0 \
or protocol_errors > 0 \
or other_errors > 0:
msg = "hostplugin:{0};protocol:{1};other:{2}"\
.format(hostplugin_errors,
protocol_errors,
other_errors)
add_event(
name=AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.HttpErrors,
is_success=True,
message=msg,
log_event=False)
try:
self.collect_and_send_events()
except Exception as e:
logger.warn("Failed to send events: {0}", e)
time.sleep(60)
def add_sysinfo(self, event):
sysinfo_names = [v.name for v in self.sysinfo]
for param in event.parameters:
if param.name in sysinfo_names:
logger.verbose("Remove existing event parameter: [{0}:{1}]",
param.name,
param.value)
event.parameters.remove(param)
event.parameters.extend(self.sysinfo)
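# --- Hypothetical example payload (added for illustration; not part of the original module) ---
# parse_xml_event() above expects event payloads containing an <Event id=...> element, a
# <Provider id=...> element and <Param> nodes whose "T" attribute selects the value type
# (mt:uint64, mt:bool, mt:float64; anything else is kept as a string). A file picked up by
# collect_and_send_events() could therefore look roughly like this (ids and names invented):
#
#   <Data>
#     <Event id="1"/>
#     <Provider id="00000000-0000-0000-0000-000000000000"/>
#     <Param Name="Name" Value="ExampleExtension"/>
#     <Param Name="Duration" Value="1200" T="mt:uint64"/>
#     <Param Name="IsSuccess" Value="True" T="mt:bool"/>
#   </Data>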
|
dodge.py
|
# By Al Sweigart al@inventwithpython.com
# This program is a demo for the Pygcurse module.
# Simplified BSD License, Copyright 2011 Al Sweigart
import pygame, random, sys, time, pygcurse
from pygame.locals import *
import LED_display as TLD
import HC_SR04 as RS
import random
import threading
import keyboard
#import time
import os
import copy
GREEN = (0, 255, 0)
BLACK = (0,0,0)
WHITE = (255,255,255)
RED = (255,0,0)
WINWIDTH = 32
WINHEIGHT = 16
TEXTCOLOR = WHITE
BACKGROUNDCOLOR = (0, 0, 0)
FPS = 20
BADDIEMINSIZE = 1
BADDIEMAXSIZE = 5
BADDIEMINSPEED = 4  # slowest baddies: more frames between moves
BADDIEMAXSPEED = 1  # fastest baddies: fewer frames between moves
ADDNEWBADDIERATE = 50
isfullscreen = False
# Mode
mode_list = ['mouse', 'keyboard', 'sensor']
mode = mode_list[2]
if mode == 'mouse':
isfullscreen = True
# Make board
win = pygcurse.PygcurseWindow(WINWIDTH, WINHEIGHT, fullscreen=isfullscreen)
pygame.display.set_caption('Pygcurse Dodger')
win.autoupdate = False
# Modified to play in LED Matrix
t=threading.Thread(target=TLD.main, args=())
t.setDaemon(True)
t.start()
# Screen
iScreen = [[0 for i in range(WINWIDTH)] for j in range(WINHEIGHT)]
moveLeft = False
moveRight = False
counter = 0
def main():
global moveLeft, moveRight, counter
showStartScreen()
pygame.mouse.set_visible(False)
mainClock = pygame.time.Clock()
gameOver = False
newGame = True
while True:
if gameOver and time.time() - 4 > gameOverTime:
newGame = True
# First setting
if newGame:
newGame = False
# Set first position of character
pygame.mouse.set_pos(win.centerx * win.cellwidth, (win.bottom) * win.cellheight)
mousex, mousey = pygame.mouse.get_pos()
cellx, celly = WINWIDTH//2, WINHEIGHT-1
before_cellx_arr = [WINWIDTH//2] * 3
print(before_cellx_arr)
baddies = []
baddieAddCounter = 0
gameOver = False
score = 0
win.fill(bgcolor=BLACK)
if mode == 'sensor':
if not gameOver:
cellx = RS.get_distance()-10
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
terminate()
# Character's move
# mouse mode
if mode == 'mouse':
if event.type == MOUSEMOTION and not gameOver:
mousex, mousey = event.pos
cellx, celly = win.getcoordinatesatpixel(mousex, mousey)
# keyboard mode
elif mode == 'keyboard':
if event.type == KEYDOWN:
if event.key == K_LEFT or event.key == ord('a'):
if not gameOver:
cellx -= 1
counter += 1
moveRight = False
moveLeft = True
if event.key == K_RIGHT or event.key == ord('d'):
if not gameOver:
cellx += 1
counter += 1
                            moveLeft = False
moveRight = True
if event.type == KEYUP:
if event.key == K_LEFT or event.key == ord('a'):
counter = 0
moveLeft = False
if event.key == K_RIGHT or event.key == ord('d'):
counter = 0
moveRight = False
# add new baddies if needed
        # ADDNEWBADDIERATE controls how often a new baddie is added (a lower value adds them more often)
if baddieAddCounter == ADDNEWBADDIERATE:
speed = random.randint(BADDIEMAXSPEED, BADDIEMINSPEED)
baddies.append({'size': random.randint(BADDIEMINSIZE, BADDIEMAXSIZE),
'speed': speed,
'x': random.randint(0, win.width),
'y': -BADDIEMAXSIZE,
'movecounter': speed})
baddieAddCounter = 0
else:
baddieAddCounter += 1
# move baddies down, remove if needed
for i in range(len(baddies)-1, -1, -1):
if baddies[i]['movecounter'] == 0:
baddies[i]['y'] += 1
baddies[i]['movecounter'] = baddies[i]['speed']
else:
baddies[i]['movecounter'] -= 1
# print(i, baddies[i]['y'])
if baddies[i]['y'] > win.height:
del baddies[i]
# check if hit
if not gameOver:
for baddie in baddies:
if cellx >= baddie['x'] and WINHEIGHT-1 >= baddie['y'] and cellx < baddie['x']+baddie['size'] and WINHEIGHT-1 < baddie['y']+baddie['size']:
gameOver = True
gameOverTime = time.time()
break
score += 1
# draw screen
oScreen = copy.deepcopy(iScreen)
# draw baddies to screen (Mouse Mode)
for baddie in baddies:
win.fill('#', GREEN, BLACK, (baddie['x'], baddie['y'], baddie['size'], baddie['size']))
fillMatrix(baddie['x'], baddie['y'], baddie['size'], oScreen, WINWIDTH, WINHEIGHT)
#for i in oScreen:
# print(i)
if mode == 'keyboard':
# todo - Boost
if not gameOver:
if counter > 3:
if moveRight: cellx += 1
if moveLeft: cellx -= 1
elif moveRight or moveLeft:
counter += 1
if cellx < 0:
cellx = 0
if cellx > WINWIDTH -1:
cellx = WINWIDTH-1
elif mode == 'sensor':
if not gameOver:
if cellx < 0 or cellx > WINWIDTH -1:
cellx = before_cellx_arr[-1]
if abs(cellx - before_cellx_arr[-1]) > 10:
cellx = before_cellx_arr[-1]
else:
cellx = sum(before_cellx_arr[1:]+[cellx],1)//len(before_cellx_arr)
before_cellx_arr.pop(0)
before_cellx_arr.append(cellx)
cellx = before_cellx_arr[-1]
# draw character to screen
if not gameOver:
playercolor = WHITE
oScreen[WINHEIGHT-1][cellx] = 2
else:
playercolor = RED
oScreen[WINHEIGHT-1][cellx] = 3
win.putchars('GAME OVER', win.centerx-4, win.centery, fgcolor=RED, bgcolor=BLACK)
win.putchar('@', cellx, WINHEIGHT-1, playercolor)
win.putchars('Score: %s' % (score), win.width - 14, 1, fgcolor=WHITE)
win.update()
drawMatrix(oScreen)
mainClock.tick(FPS)
def showStartScreen():
while checkForKeyPress() is None:
win.fill(bgcolor=BLACK)
win.putchars('Pygcurse Dodger', win.centerx-8, win.centery, fgcolor=TEXTCOLOR)
if int(time.time() * 2) % 2 == 0: # flashing
win.putchars('Press a key to start!', win.centerx-11, win.centery+1, fgcolor=TEXTCOLOR)
win.update()
def checkForKeyPress():
# Go through event queue looking for a KEYUP event.
# Grab KEYDOWN events to remove them from the event queue.
for event in pygame.event.get([KEYDOWN, KEYUP]):
if event.type == KEYDOWN:
continue
if event.key == K_ESCAPE:
terminate()
return event.key
return None
# Game display
def fillMatrix(bx, by, size, screen, w, h):
for i in range(by, by+size):
for j in range(bx, bx+size):
if i < 0 or i >= h or j < 0 or j >= w:
continue
#print(i, j)
screen[i][j] = 1
def fillCharacter(cx, cy, screen):
    # bounds-checked write of the player cell (unused here; main() writes to oScreen directly)
    if cy < 0 or cy >= len(screen) or cx < 0 or cx >= len(screen[0]):
        return
    screen[cy][cx] = 2
def drawMatrix(array):
for x in range(len(array[0])):
for y in range(len(array)):
if array[y][x] == 0:
TLD.set_pixel(x, y, 0)
elif array[y][x] == 1:
TLD.set_pixel(x, y, 2)
elif array[y][x] == 2:
TLD.set_pixel(x, y, 7)
elif array[y][x] == 3:
TLD.set_pixel(x, y, 1)
else:
continue
# Game mode
def terminate():
pygame.quit()
sys.exit()
if __name__ == '__main__':
main()
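# --- Hypothetical usage notes (added for illustration; not part of the original script) ---
# The script expects the hardware helper modules LED_display (imported as TLD) and HC_SR04
# (imported as RS) to be importable. The control scheme is chosen by indexing mode_list near
# the top of the file, e.g.:
#
#   mode = mode_list[0]   # 'mouse'    - fullscreen, steer with the mouse
#   mode = mode_list[1]   # 'keyboard' - steer with arrow keys or 'a'/'d'
#   mode = mode_list[2]   # 'sensor'   - position taken from the HC-SR04 distance sensor (current default)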
|
PlainTasks.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sublime, sublime_plugin
import os
import re
import webbrowser
import itertools
import threading
from datetime import datetime, tzinfo, timedelta
import time
import logging, sys # [HKC] For debugging
logging.basicConfig(format='\n\n----\n%(levelname)-8s| %(message)s', level=logging.DEBUG)
platform = sublime.platform()
ST3 = int(sublime.version()) >= 3000
if ST3:
from .APlainTasksCommon import PlainTasksBase, PlainTasksFold, get_all_projects_and_separators
else:
from APlainTasksCommon import PlainTasksBase, PlainTasksFold, get_all_projects_and_separators
sublime_plugin.ViewEventListener = object
# io is not operable in ST2 on Linux, but in all other cases io is better
# https://github.com/SublimeTextIssues/Core/issues/254
if not ST3 and platform == 'linux':
import codecs as io
else:
import io
NT = platform == 'windows'
if NT:
import subprocess
if ST3:
from datetime import timezone
else:
class timezone(tzinfo):
__slots__ = ("_offset", "_name")
def __init__(self, offset, name=None):
if not isinstance(offset, timedelta):
raise TypeError("offset must be a timedelta")
self._offset = offset
self._name = name
def utcoffset(self, dt):
return self._offset
def tzname(self, dt):
return self._name
def dst(self, dt):
return timedelta(0)
def tznow():
t = time.time()
d = datetime.fromtimestamp(t)
u = datetime.utcfromtimestamp(t)
return d.replace(tzinfo=timezone(d - u))
def check_parentheses(date_format, regex_group, is_date=False):
if is_date:
try:
parentheses = regex_group if datetime.strptime(regex_group.strip(), date_format) else ''
except ValueError:
parentheses = ''
else:
try:
parentheses = '' if datetime.strptime(regex_group.strip(), date_format) else regex_group
except ValueError:
parentheses = regex_group
return parentheses
class PlainTasksNewCommand(PlainTasksBase):
def runCommand(self, edit, is_above = False):
# list for ST3 support;
# reversed because with multiple selections regions would be messed up after first iteration
# [HKC] if not reversed, first iteration will mess up the initial region measures
regions = itertools.chain(*(reversed(self.view.lines(region)) for region in reversed(list(self.view.sel()))))
header_to_task = self.view.settings().get('header_to_task', False)
# ST3 (3080) moves sel when call view.replace only by delta between original and
# new regions, so if sel is not in eol and we replace line with two lines,
# then cursor won’t be on next line as it should
sels = self.view.sel()
item_size = len(self.before_tasks_bullet_spaces + self.open_tasks_bullet + self.tasks_bullet_space)
for region in sels:
_myfolds = self.view.folded_regions()
logging.debug("[for] Region in sels:{} region_size: {} total_lines: {} sel_size: {} is_above: {} item_size: {} folded_regions:{}".
format(region, region.size(), len(self.view.lines(region)), len(sels), is_above, item_size, _myfolds))
eol = None
for i, line in enumerate(regions):
line_contents = self.view.substr(line).rstrip()
not_empty_line = re.match('^(\s*)(\S.*)$', self.view.substr(line))
empty_line = re.match('^(\s+)$', self.view.substr(line))
current_scope = self.view.scope_name(line.a)
eol = line.b # need for ST3 when new content has line break
print("\n\t[Enumerating regions] i:", i, "line:", line, "current_scope:", current_scope)
if 'item' in current_scope:
# [HKC]'item' means an existing task
grps = not_empty_line.groups() if not_empty_line != None else None
print("\t[item] grps:(", grps[0],")")
if is_above:
line_contents = grps[0] + self.open_tasks_bullet + self.tasks_bullet_space + '\n' + self.view.substr(line)
else:
line_contents = self.view.substr(line) + '\n' + grps[0] + self.open_tasks_bullet + self.tasks_bullet_space
elif 'header' in current_scope and line_contents and not header_to_task:
# [HKC]'header' refers to a project: line
grps = not_empty_line.groups()
print("\t[header] grps:(", grps[0],")")
if is_above:
line_contents = grps[0] + self.before_tasks_bullet_spaces + self.open_tasks_bullet + self.tasks_bullet_space + '\n' + self.view.substr(line)
else:
line_contents = self.view.substr(line) + '\n' + grps[0] + self.before_tasks_bullet_spaces + self.open_tasks_bullet + self.tasks_bullet_space
elif 'separator' in current_scope:
# [HKC] a separator line
grps = not_empty_line.groups()
print("\t[separator] grps:(", grps[0],")")
if is_above:
line_contents = grps[0] + self.before_tasks_bullet_spaces + self.open_tasks_bullet + self.tasks_bullet_space + '\n' + self.view.substr(line)
else:
line_contents = self.view.substr(line) + '\n' + grps[0] + self.before_tasks_bullet_spaces + self.open_tasks_bullet + self.tasks_bullet_space
elif not ('header' and 'separator') in current_scope or header_to_task:
print("\t[Regular line]")
eol = None
if not_empty_line:
grps = not_empty_line.groups()
line_contents = (grps[0] if len(grps[0]) > 0 else self.before_tasks_bullet_spaces) + self.open_tasks_bullet + self.tasks_bullet_space + grps[1]
elif empty_line: # only whitespaces
grps = empty_line.groups()
line_contents = grps[0] + self.open_tasks_bullet + self.tasks_bullet_space
else: # completely empty, no whitespaces
line_contents = self.before_tasks_bullet_spaces + self.open_tasks_bullet + self.tasks_bullet_space
else:
print('oops, need to improve PlainTasksNewCommand')
if eol:
# move cursor to eol of original line, workaround for ST3
print("\t[eol]", "begin:", line.begin(), "end:", line.end(), "i:", i, "~i:", ~i)
sels.add(sublime.Region(eol, eol))
self.view.replace(edit, line, line_contents)
#print("\t[End of for] Updated", "begin:", line.begin(), "end:", line.end(), "i:", i, "~i:", ~i)
# convert each selection to single cursor, ready to type
new_selections = []
for sel in list(self.view.sel()):
print("\t[sel dump]", sel)
if is_above:
begin = self.view.line(sel).begin() - 1 # [HKC] line.begin() - 1 will move to prev line
new_selections.append(sublime.Region(begin, begin))
else:
eol = self.view.line(sel).b
new_selections.append(sublime.Region(eol, eol))
# [HKC] Clear the selection shadow
self.view.sel().clear()
for sel in new_selections:
self.view.sel().add(sel)
PlainTasksStatsStatus.set_stats(self.view)
self.view.run_command('plain_tasks_toggle_highlight_past_due')
print("End of PlainTasksNewCommand")
class PlainTasksNewWithDateCommand(PlainTasksBase):
def runCommand(self, edit):
#print("Invoking PlainTasksNewWithDateCommand")
self.view.run_command('plain_tasks_new')
sels = list(self.view.sel())
suffix = ' @created%s' % tznow().strftime(self.date_format)
points = []
for s in reversed(sels):
if self.view.substr(sublime.Region(s.b - 2, s.b)) == ' ':
point = s.b - 2 # keep double whitespace at eol
else:
point = s.b
self.view.insert(edit, point, suffix)
points.append(point)
self.view.sel().clear()
offset = len(suffix)
for i, sel in enumerate(sels):
self.view.sel().add(sublime.Region(points[~i] + i*offset, points[~i] + i*offset))
class PlainTasksCompleteCommand(PlainTasksBase):
def runCommand(self, edit):
original = [r for r in self.view.sel()]
done_line_end, now = self.format_line_end(self.done_tag, tznow())
offset = len(done_line_end)
rom = r'^(\s*)(\[\s\]|.)(\s*.*)$'
rdm = r'''
(?x)^(\s*)(\[x\]|.) # 0,1 indent & bullet
(\s*[^\b]*?(?:[^\@]|(?<!\s)\@|\@(?=\s))*?\s*) # 2 very task
(?=
((?:\s@done|@project|@[wl]asted|$).*) # 3 ending either w/ done or w/o it & no date
| # or
(?:[ \t](\([^()]*\))\s*([^@]*|(?:@project|@[wl]asted).*))?$ # 4 date & possible project tag after
)
''' # rcm is the same, except bullet & ending
rcm = r'^(\s*)(\[\-\]|.)(\s*[^\b]*?(?:[^\@]|(?<!\s)\@|\@(?=\s))*?\s*)(?=((?:\s@cancelled|@project|@[wl]asted|$).*)|(?:[ \t](\([^()]*\))\s*([^@]*|(?:@project|@[wl]asted).*))?$)'
started = r'^\s*[^\b]*?\s*@started(\([\d\w,\.:\-\/ @]*\)).*$'
toggle = r'@toggle(\([\d\w,\.:\-\/ @]*\))'
regions = itertools.chain(*(reversed(self.view.lines(region)) for region in reversed(list(self.view.sel()))))
for line in regions:
line_contents = self.view.substr(line)
open_matches = re.match(rom, line_contents, re.U)
done_matches = re.match(rdm, line_contents, re.U)
canc_matches = re.match(rcm, line_contents, re.U)
started_matches = re.findall(started, line_contents, re.U)
toggle_matches = re.findall(toggle, line_contents, re.U)
done_line_end = done_line_end.rstrip()
if line_contents.endswith(' '):
done_line_end += ' ' # keep double whitespace at eol
dblspc = ' '
else:
dblspc = ''
current_scope = self.view.scope_name(line.a)
if 'pending' in current_scope:
grps = open_matches.groups()
len_dle = self.view.insert(edit, line.end(), done_line_end)
replacement = u'%s%s%s' % (grps[0], self.done_tasks_bullet, grps[2].rstrip())
self.view.replace(edit, line, replacement)
self.view.run_command(
'plain_tasks_calculate_time_for_task', {
'started_matches': started_matches,
'toggle_matches': toggle_matches,
'now': now,
'eol': line.a + len(replacement) + len_dle}
)
elif 'header' in current_scope:
eol = self.view.insert(edit, line.end(), done_line_end)
self.view.run_command(
'plain_tasks_calculate_time_for_task', {
'started_matches': started_matches,
'toggle_matches': toggle_matches,
'now': now,
'eol': line.end() + eol}
)
indent = re.match('^(\s*)\S', line_contents, re.U)
self.view.insert(edit, line.begin() + len(indent.group(1)), '%s ' % self.done_tasks_bullet)
self.view.run_command('plain_tasks_calculate_total_time_for_project', {'start': line.a})
elif 'completed' in current_scope:
grps = done_matches.groups()
parentheses = check_parentheses(self.date_format, grps[4] or '')
replacement = u'%s%s%s%s' % (grps[0], self.open_tasks_bullet, grps[2], parentheses)
self.view.replace(edit, line, replacement.rstrip() + dblspc)
offset = -offset
elif 'cancelled' in current_scope:
grps = canc_matches.groups()
len_dle = self.view.insert(edit, line.end(), done_line_end)
parentheses = check_parentheses(self.date_format, grps[4] or '')
replacement = u'%s%s%s%s' % (grps[0], self.done_tasks_bullet, grps[2], parentheses)
self.view.replace(edit, line, replacement.rstrip())
offset = -offset
self.view.run_command(
'plain_tasks_calculate_time_for_task', {
'started_matches': started_matches,
'toggle_matches': toggle_matches,
'now': now,
'eol': line.a + len(replacement) + len_dle}
)
self.view.sel().clear()
for ind, pt in enumerate(original):
ofs = ind * offset
new_pt = sublime.Region(pt.a + ofs, pt.b + ofs)
self.view.sel().add(new_pt)
PlainTasksStatsStatus.set_stats(self.view)
self.view.run_command('plain_tasks_toggle_highlight_past_due')
class PlainTasksCancelCommand(PlainTasksBase):
def runCommand(self, edit):
original = [r for r in self.view.sel()]
canc_line_end, now = self.format_line_end(self.canc_tag, tznow())
offset = len(canc_line_end)
rom = r'^(\s*)(\[\s\]|.)(\s*.*)$'
rdm = r'^(\s*)(\[x\]|.)(\s*[^\b]*?(?:[^\@]|(?<!\s)\@|\@(?=\s))*?\s*)(?=((?:\s@done|@project|@[wl]asted|$).*)|(?:[ \t](\([^()]*\))\s*([^@]*|(?:@project|@[wl]asted).*))?$)'
rcm = r'^(\s*)(\[\-\]|.)(\s*[^\b]*?(?:[^\@]|(?<!\s)\@|\@(?=\s))*?\s*)(?=((?:\s@cancelled|@project|@[wl]asted|$).*)|(?:[ \t](\([^()]*\))\s*([^@]*|(?:@project|@[wl]asted).*))?$)'
started = r'^\s*[^\b]*?\s*@started(\([\d\w,\.:\-\/ @]*\)).*$'
toggle = r'@toggle(\([\d\w,\.:\-\/ @]*\))'
regions = itertools.chain(*(reversed(self.view.lines(region)) for region in reversed(list(self.view.sel()))))
for line in regions:
line_contents = self.view.substr(line)
open_matches = re.match(rom, line_contents, re.U)
done_matches = re.match(rdm, line_contents, re.U)
canc_matches = re.match(rcm, line_contents, re.U)
started_matches = re.findall(started, line_contents, re.U)
toggle_matches = re.findall(toggle, line_contents, re.U)
canc_line_end = canc_line_end.rstrip()
if line_contents.endswith(' '):
canc_line_end += ' ' # keep double whitespace at eol
dblspc = ' '
else:
dblspc = ''
current_scope = self.view.scope_name(line.a)
if 'pending' in current_scope:
grps = open_matches.groups()
len_cle = self.view.insert(edit, line.end(), canc_line_end)
replacement = u'%s%s%s' % (grps[0], self.canc_tasks_bullet, grps[2].rstrip())
self.view.replace(edit, line, replacement)
self.view.run_command(
'plain_tasks_calculate_time_for_task', {
'started_matches': started_matches,
'toggle_matches': toggle_matches,
'now': now,
'eol': line.a + len(replacement) + len_cle,
'tag': 'wasted'}
)
elif 'header' in current_scope:
eol = self.view.insert(edit, line.end(), canc_line_end)
self.view.run_command(
'plain_tasks_calculate_time_for_task', {
'started_matches': started_matches,
'toggle_matches': toggle_matches,
'now': now,
'eol': line.end() + eol,
'tag': 'wasted'}
)
indent = re.match('^(\s*)\S', line_contents, re.U)
self.view.insert(edit, line.begin() + len(indent.group(1)), '%s ' % self.canc_tasks_bullet)
self.view.run_command('plain_tasks_calculate_total_time_for_project', {'start': line.a})
elif 'completed' in current_scope:
                sublime.status_message('You cannot cancel what has been done, can you?')
# grps = done_matches.groups()
# parentheses = check_parentheses(self.date_format, grps[4] or '')
# replacement = u'%s%s%s%s' % (grps[0], self.canc_tasks_bullet, grps[2], parentheses)
# self.view.replace(edit, line, replacement.rstrip())
# offset = -offset
elif 'cancelled' in current_scope:
grps = canc_matches.groups()
parentheses = check_parentheses(self.date_format, grps[4] or '')
replacement = u'%s%s%s%s' % (grps[0], self.open_tasks_bullet, grps[2], parentheses)
self.view.replace(edit, line, replacement.rstrip() + dblspc)
offset = -offset
self.view.sel().clear()
for ind, pt in enumerate(original):
ofs = ind * offset
new_pt = sublime.Region(pt.a + ofs, pt.b + ofs)
self.view.sel().add(new_pt)
PlainTasksStatsStatus.set_stats(self.view)
self.view.run_command('plain_tasks_toggle_highlight_past_due')
class PlainTasksArchiveCommand(PlainTasksBase):
def runCommand(self, edit, partial=False):
rds = 'meta.item.todo.completed'
rcs = 'meta.item.todo.cancelled'
# finding archive section
archive_pos = self.view.find(self.archive_name, 0, sublime.LITERAL)
if partial:
all_tasks = self.get_archivable_tasks_within_selections()
else:
all_tasks = self.get_all_archivable_tasks(archive_pos, rds, rcs)
if not all_tasks:
sublime.status_message('Nothing to archive')
else:
if archive_pos and archive_pos.a > 0:
line = self.view.full_line(archive_pos).end()
else:
create_archive = u'\n\n___________________\n%s\n' % self.archive_name
self.view.insert(edit, self.view.size(), create_archive)
line = self.view.size()
projects = get_all_projects_and_separators(self.view)
# adding tasks to archive section
for task in all_tasks:
line_content = self.view.substr(task)
match_task = re.match(r'^\s*(\[[x-]\]|.)(\s+.*$)', line_content, re.U)
current_scope = self.view.scope_name(task.a)
if rds in current_scope or rcs in current_scope:
pr = self.get_task_project(task, projects)
if self.project_postfix:
eol = u'{0}{1}{2}{3}\n'.format(
self.before_tasks_bullet_spaces,
line_content.strip(),
(u' @project(%s)' % pr) if pr else '',
' ' if line_content.endswith(' ') else '')
else:
eol = u'{0}{1}{2}{3}\n'.format(
self.before_tasks_bullet_spaces,
match_task.group(1), # bullet
(u'%s%s:' % (self.tasks_bullet_space, pr)) if pr else '',
match_task.group(2)) # very task
else:
eol = u'{0}{1}\n'.format(self.before_tasks_bullet_spaces * 2, line_content.lstrip())
line += self.view.insert(edit, line, eol)
            # remove moved tasks (starting from the last one, otherwise it screws up regions after the first delete)
for task in reversed(all_tasks):
self.view.erase(edit, self.view.full_line(task))
self.view.run_command('plain_tasks_sort_by_date')
def get_task_project(self, task, projects):
index = -1
for ind, pr in enumerate(projects):
if task < pr:
if ind > 0:
index = ind-1
break
        # if there are no projects for the task, return an empty string
if index == -1:
return ''
prog = re.compile(r'^\n*(\s*)(.+):(?=\s|$)\s*(\@[^\s]+(\(.*?\))?\s*)*')
hierarhProject = ''
if index >= 0:
depth = re.match(r"\s*", self.view.substr(self.view.line(task))).group()
while index >= 0:
strProject = self.view.substr(projects[index])
if prog.match(strProject):
spaces = prog.match(strProject).group(1)
if len(spaces) < len(depth):
hierarhProject = prog.match(strProject).group(2) + ((" / " + hierarhProject) if hierarhProject else '')
depth = spaces
if len(depth) == 0:
break
else:
sep = re.compile(r'(^\s*)---.{3,5}---+$')
spaces = sep.match(strProject).group(1)
if len(spaces) < len(depth):
depth = spaces
if len(depth) == 0:
break
index -= 1
if not hierarhProject:
return ''
else:
return hierarhProject
def get_task_note(self, task, tasks):
note_line = task.end() + 1
while self.view.scope_name(note_line) == 'text.todo notes.todo ':
note = self.view.line(note_line)
if note not in tasks:
tasks.append(note)
note_line = self.view.line(note_line).end() + 1
def get_all_archivable_tasks(self, archive_pos, rds, rcs):
done_tasks = [i for i in self.view.find_by_selector(rds) if i.a < (archive_pos.a if archive_pos and archive_pos.a > 0 else self.view.size())]
for i in done_tasks:
self.get_task_note(i, done_tasks)
canc_tasks = [i for i in self.view.find_by_selector(rcs) if i.a < (archive_pos.a if archive_pos and archive_pos.a > 0 else self.view.size())]
for i in canc_tasks:
self.get_task_note(i, canc_tasks)
all_tasks = done_tasks + canc_tasks
all_tasks.sort()
return all_tasks
def get_archivable_tasks_within_selections(self):
all_tasks = []
for region in self.view.sel():
for l in self.view.lines(region):
line = self.view.line(l)
if ('completed' in self.view.scope_name(line.a)) or ('cancelled' in self.view.scope_name(line.a)):
all_tasks.append(line)
self.get_task_note(line, all_tasks)
return all_tasks
class PlainTasksNewTaskDocCommand(sublime_plugin.WindowCommand):
def run(self):
view = self.window.new_file()
view.settings().add_on_change('color_scheme', lambda: self.set_proper_scheme(view))
view.set_syntax_file('Packages/PlainTasks/PlainTasks.sublime-syntax' if ST3 else
'Packages/PlainTasks/PlainTasks.tmLanguage')
def set_proper_scheme(self, view):
if view.id() != sublime.active_window().active_view().id():
return
pts = sublime.load_settings('PlainTasks.sublime-settings')
if view.settings().get('color_scheme') == pts.get('color_scheme'):
return
        # Since we cannot create a file with a syntax preset, there is a moment when the view
        # has no settings but is already activated, so some plugins (e.g. Color Highlighter) set the wrong color scheme
view.settings().set('color_scheme', pts.get('color_scheme'))
class PlainTasksOpenUrlCommand(sublime_plugin.TextCommand):
    # It is a horrible regex, but it works perfectly
URL_REGEX = r"""(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))
+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’]))"""
def run(self, edit):
s = self.view.sel()[0]
start, end = s.a, s.b
if 'url' in self.view.scope_name(start):
while self.view.substr(start) != '<': start -= 1
while self.view.substr(end) != '>': end += 1
rgn = sublime.Region(start + 1, end)
# optional select URL
self.view.sel().add(rgn)
url = self.view.substr(rgn)
if NT and all([ST3, ':' in url]):
# webbrowser uses os.startfile() under the hood, and it is not reliable in py3;
# thus call start command for url with scheme (eg skype:nick) and full path (eg c:\b)
subprocess.Popen(['start', url], shell=True)
else:
webbrowser.open_new_tab(url)
else:
self.search_bare_weblink_and_open(start, end)
def search_bare_weblink_and_open(self, start, end):
# expand selection to nearest stopSymbols
view_size = self.view.size()
stopSymbols = ['\t', ' ', '\"', '\'', '>', '<', ',']
# move the selection back to the start of the url
while (start > 0
and not self.view.substr(start - 1) in stopSymbols
and self.view.classify(start) & sublime.CLASS_LINE_START == 0):
start -= 1
# move end of selection forward to the end of the url
while (end < view_size
and not self.view.substr(end) in stopSymbols
and self.view.classify(end) & sublime.CLASS_LINE_END == 0):
end += 1
# grab the URL
url = self.view.substr(sublime.Region(start, end))
# optional select URL
self.view.sel().add(sublime.Region(start, end))
exp = re.search(self.URL_REGEX, url, re.X)
if exp and exp.group(0):
strUrl = exp.group(0)
if strUrl.find("://") == -1:
strUrl = "http://" + strUrl
webbrowser.open_new_tab(strUrl)
else:
sublime.status_message("Looks like there is nothing to open")
class PlainTasksOpenLinkCommand(sublime_plugin.TextCommand):
LINK_PATTERN = re.compile( # simple ./path/
r'''(?ixu)(?:^|[ \t])\.[\\/]
(?P<fn>
(?:[a-z]\:[\\/])? # special case for Windows full path
(?:[^\\/:">]+[\\/]?)+) # the very path (single filename/relative/full)
(?=[\\/:">]) # stop matching path
# options:
(>(?P<sym>\w+))?(\:(?P<line>\d+))?(\:(?P<col>\d+))?(\"(?P<text>[^\n]*)\")?
''')
MD_LINK = re.compile( # markdown [](path)
r'''(?ixu)\][ \t]*\(\<?(?:file\:///?)?
(?P<fn>.*?((\\\))?.*?)*)
(?:\>?[ \t]*
\"((\:(?P<line>\d+))?(\:(?P<col>\d+))?|(\>(?P<sym>\w+))?|(?P<text>[^\n]*))
\")?
\)
''')
WIKI_LINK = re.compile( # ORGMODE, NV, and all similar formats [[link][opt-desc]]
r'''(?ixu)\[\[(?:file(?:\+(?:sys|emacs))?\:)?(?:\.[\\/])?
(?P<fn>.*?((\\\])?.*?)*)
(?# options for orgmode link [[path::option]])
(?:\:\:(((?P<line>\d+))?(\:(?P<col>\d+))?|(\*(?P<sym>\w+))?|(?P<text>.*?((\\\])?.*?)*)))?
\](?:\[(.*?)\])?
\]
        (?# options for NV [[path]] "option" - NV does not support it, but PT should, so it won't break NV)
(?:[ \t]*
\"((\:(?P<linen>\d+))?(\:(?P<coln>\d+))?|(\>(?P<symn>\w+))?|(?P<textn>[^\n]*))
\")?
''')
def _format_res(self, res):
if res[3] == 'f':
return [res[0], "line: %d column: %d" % (int(res[1]), int(res[2]))]
elif res[3] == 'd':
return [res[0], 'Add folder to project' if ST3 else 'Folders are supported only in Sublime 3']
else:
return [res[0], res[1]]
def _on_panel_selection(self, selection, text=None, line=0):
if selection < 0:
self.panel_hidden = True
return
self.stop_thread = True
self.thread.join()
win = sublime.active_window()
win.run_command('hide_overlay')
res = self._current_res[selection]
if not res[3]:
return # user chose to stop search
if not ST3 and res[3] == "d":
return sublime.status_message('Folders are supported only in Sublime 3')
elif res[3] == "d":
data = win.project_data()
if not data:
data = {}
if "folders" not in data:
data["folders"] = []
data["folders"].append({'follow_symlinks': True,
'path': res[0]})
win.set_project_data(data)
else:
self.opened_file = win.open_file('%s:%s:%s' % res[:3],
sublime.ENCODED_POSITION)
if text:
sublime.set_timeout(lambda: self.find_text(self.opened_file, text, line), 300)
def search_files(self, all_folders, fn, sym, line, col, text):
'''run in separate thread; worker'''
fn = fn.replace('/', os.sep)
if os.path.isfile(fn): # check for full path
self._current_res.append((fn, line, col, "f"))
elif os.path.isdir(fn):
self._current_res.append((fn, 0, 0, "d"))
seen_folders = []
for folder in sorted(set(all_folders)):
for root, subdirs, _ in os.walk(folder):
if self.stop_thread:
return
if root in seen_folders:
continue
else:
seen_folders.append(root)
subdirs = [f for f in subdirs if os.path.join(root, f) not in seen_folders]
tname = '%s at %s' % (fn, root)
self.thread.name = tname if ST3 else tname.encode('utf8')
name = os.path.normpath(os.path.abspath(os.path.join(root, fn)))
if os.path.isfile(name):
item = (name, line, col, "f")
if item not in self._current_res:
self._current_res.append(item)
if os.path.isdir(name):
item = (name, 0, 0, "d")
if item not in self._current_res:
self._current_res.append(item)
self._current_res = self._current_res[1:] # remove 'Stop search' item
if not self._current_res:
return sublime.error_message('File was not found\n\n\t%s' % fn)
if len(self._current_res) == 1:
sublime.set_timeout(lambda: self._on_panel_selection(0), 1)
else:
entries = [self._format_res(res) for res in self._current_res]
sublime.set_timeout(lambda: self.window.show_quick_panel(entries, lambda i: self._on_panel_selection(i, text=text, line=line)), 1)
def run(self, edit):
if hasattr(self, 'thread'):
            if self.thread.is_alive():
self.stop_thread = True
self.thread.join()
point = self.view.sel()[0].begin()
line = self.view.substr(self.view.line(point))
fn, sym, line, col, text = self.parse_link(line)
if not fn:
sublime.status_message('Line does not contain a valid link to file')
return
self.window = win = sublime.active_window()
self._current_res = [('Stop search', '', '', '')]
# init values to update quick panel
self.items = 0
self.panel_hidden = True
if sym:
for name, _, pos in win.lookup_symbol_in_index(sym):
if name.endswith(fn):
line, col = pos
self._current_res.append((name, line, col, "f"))
all_folders = win.folders() + [os.path.dirname(v.file_name()) for v in win.views() if v.file_name()]
self.stop_thread = False
self.thread = threading.Thread(target=self.search_files, args=(all_folders, fn, sym, line, col, text))
self.thread.setName('is starting')
self.thread.start()
self.progress_bar()
def find_text(self, view, text, line):
result = view.find(text, view.sel()[0].a if line else 0, sublime.LITERAL)
view.sel().clear()
view.sel().add(result.a)
view.set_viewport_position(view.text_to_layout(view.size()), False)
view.show_at_center(result)
def progress_bar(self, i=0, dir=1):
if not self.thread.is_alive():
PlainTasksStatsStatus.set_stats(self.view)
return
if self._current_res and sublime.active_window().active_view().id() == self.view.id():
items = len(self._current_res)
if items != self.items:
self.window.run_command('hide_overlay')
self.items = items
if self.panel_hidden:
entries = [self._format_res(res) for res in self._current_res]
self.window.show_quick_panel(entries, self._on_panel_selection)
self.panel_hidden = False
# This animates a little activity indicator in the status area
before = i % 8
after = (7) - before
if not after: dir = -1
if not before: dir = 1
i += dir
self.view.set_status('PlainTasks', u'Please wait%s…%ssearching %s' %
(' ' * before, ' ' * after, self.thread.name if ST3 else self.thread.name.decode('utf8')))
sublime.set_timeout(lambda: self.progress_bar(i, dir), 100)
return
def parse_link(self, line):
match_link = self.LINK_PATTERN.search(line)
match_md = self.MD_LINK.search(line)
match_wiki = self.WIKI_LINK.search(line)
if match_link:
fn, sym, line, col, text = match_link.group('fn', 'sym', 'line', 'col', 'text')
elif match_md:
fn, sym, line, col, text = match_md.group('fn', 'sym', 'line', 'col', 'text')
# unescape some chars
fn = (fn.replace('\\(', '(').replace('\\)', ')'))
elif match_wiki:
fn = match_wiki.group('fn')
sym = match_wiki.group('sym') or match_wiki.group('symn')
line = match_wiki.group('line') or match_wiki.group('linen')
col = match_wiki.group('col') or match_wiki.group('coln')
text = match_wiki.group('text') or match_wiki.group('textn')
# unescape some chars
fn = (fn.replace('\\[', '[').replace('\\]', ']'))
if text:
text = (text.replace('\\[', '[').replace('\\]', ']'))
return fn, sym, line or 0, col or 0, text
class PlainTasksSortByDate(PlainTasksBase):
def runCommand(self, edit):
if not re.search(r'(?su)%[Yy][-./ ]*%m[-./ ]*%d\s*%H.*%M', self.date_format):
# TODO: sort with dateutil so we wont depend on specific date_format
return
archive_pos = self.view.find(self.archive_name, 0, sublime.LITERAL)
if archive_pos:
have_date = r'(^\s*[^\n]*?\s\@(?:done|cancelled)\s*(\([\d\w,\.:\-\/ ]*\))[^\n]*$)'
all_tasks_prefixed_date = []
all_tasks = self.view.find_all(have_date, 0, u"\\2\\1", all_tasks_prefixed_date)
tasks_prefixed_date = []
tasks = []
for ind, task in enumerate(all_tasks):
if task.a > archive_pos.b:
tasks.append(task)
tasks_prefixed_date.append(all_tasks_prefixed_date[ind])
notes = []
for ind, task in enumerate(tasks):
note_line = task.end() + 1
while self.view.scope_name(note_line) == 'text.todo notes.todo ':
note = self.view.line(note_line)
notes.append(note)
tasks_prefixed_date[ind] += u'\n' + self.view.substr(note)
note_line = note.end() + 1
to_remove = tasks+notes
to_remove.sort()
for i in reversed(to_remove):
self.view.erase(edit, self.view.full_line(i))
tasks_prefixed_date.sort(reverse=self.view.settings().get('new_on_top', True))
eol = archive_pos.end()
for a in tasks_prefixed_date:
eol += self.view.insert(edit, eol, u'\n' + re.sub(r'^\([\d\w,\.:\-\/ ]*\)([^\b]*$)', u'\\1', a))
else:
sublime.status_message("Nothing to sort")
class PlainTasksRemoveBold(sublime_plugin.TextCommand):
def run(self, edit):
for s in reversed(list(self.view.sel())):
a, b = s.begin(), s.end()
for r in sublime.Region(b + 2, b), sublime.Region(a - 2, a):
self.view.erase(edit, r)
class PlainTasksStatsStatus(sublime_plugin.EventListener):
def on_activated(self, view):
if not view.score_selector(0, "text.todo") > 0:
return
self.set_stats(view)
def on_post_save(self, view):
self.on_activated(view)
@staticmethod
def set_stats(view):
view.set_status('PlainTasks', PlainTasksStatsStatus.get_stats(view))
@staticmethod
def get_stats(view):
msgf = view.settings().get('stats_format', '$n/$a done ($percent%) $progress Last task @done $last')
special_interest = re.findall(r'{{.*?}}', msgf)
for i in special_interest:
matches = view.find_all(i.strip('{}'))
pend, done, canc = [], [], []
for t in matches:
                # one task may contain the same tag/word several times; we count the number of tasks, not tags
t = view.line(t).a
scope = view.scope_name(t)
if 'pending' in scope and t not in pend:
pend.append(t)
elif 'completed' in scope and t not in done:
done.append(t)
elif 'cancelled' in scope and t not in canc:
canc.append(t)
msgf = msgf.replace(i, '%d/%d/%d'%(len(pend), len(done), len(canc)))
ignore_archive = view.settings().get('stats_ignore_archive', False)
if ignore_archive:
archive_pos = view.find(view.settings().get('archive_name', 'Archive:'), 0, sublime.LITERAL)
pend = len([i for i in view.find_by_selector('meta.item.todo.pending') if i.a < (archive_pos.a if archive_pos and archive_pos.a > 0 else view.size())])
done = len([i for i in view.find_by_selector('meta.item.todo.completed') if i.a < (archive_pos.a if archive_pos and archive_pos.a > 0 else view.size())])
canc = len([i for i in view.find_by_selector('meta.item.todo.cancelled') if i.a < (archive_pos.a if archive_pos and archive_pos.a > 0 else view.size())])
else:
pend = len(view.find_by_selector('meta.item.todo.pending'))
done = len(view.find_by_selector('meta.item.todo.completed'))
canc = len(view.find_by_selector('meta.item.todo.cancelled'))
allt = pend + done + canc
percent = ((done+canc)/float(allt))*100 if allt else 0
factor = int(round(percent/10)) if percent<90 else int(percent/10)
barfull = view.settings().get('bar_full', u'■')
barempty = view.settings().get('bar_empty', u'□')
progress = '%s%s' % (barfull*factor, barempty*(10-factor)) if factor else ''
tasks_dates = []
view.find_all('(^\s*[^\n]*?\s\@(?:done)\s*(\([\d\w,\.:\-\/ ]*\))[^\n]*$)', 0, "\\2", tasks_dates)
date_format = view.settings().get('date_format', '(%y-%m-%d %H:%M)')
tasks_dates = [check_parentheses(date_format, t, is_date=True) for t in tasks_dates]
tasks_dates.sort(reverse=True)
last = tasks_dates[0] if tasks_dates else '(UNKNOWN)'
msg = (msgf.replace('$o', str(pend))
.replace('$d', str(done))
.replace('$c', str(canc))
.replace('$n', str(done+canc))
.replace('$a', str(allt))
.replace('$percent', str(int(percent)))
.replace('$progress', progress)
.replace('$last', last)
)
return msg
class PlainTasksCopyStats(sublime_plugin.TextCommand):
def is_enabled(self):
return self.view.score_selector(0, "text.todo") > 0
def run(self, edit):
msg = self.view.get_status('PlainTasks')
replacements = self.view.settings().get('replace_stats_chars', [])
if replacements:
for o, r in replacements:
msg = msg.replace(o, r)
sublime.set_clipboard(msg)
class PlainTasksArchiveOrgCommand(PlainTasksBase):
def runCommand(self, edit):
        # Archive the current subtree to our archive file, not just completed tasks.
# For now, it's mapped to ctrl-shift-o or super-shift-o
# TODO: Mark any tasks found as complete, or maybe warn.
# Get our archive filename
archive_filename = self.__createArchiveFilename()
# Figure out our subtree
region = self.__findCurrentSubtree()
if region.empty():
# How can we get here?
sublime.error_message("Error:\n\nCould not find a tree to archive.")
return
# Write our region or our archive file
success = self.__writeArchive(archive_filename, region)
# only erase our region if the write was successful
if success:
self.view.erase(edit,region)
return
def __writeArchive(self, filename, region):
# Write out the given region
sublime.status_message(u'Archiving tree to {0}'.format(filename))
try:
# Have to use io.open because windows doesn't like writing
# utf8 to regular filehandles
with io.open(filename, 'a', encoding='utf8') as fh:
data = self.view.substr(region)
# Is there a way to read this in?
fh.write(u"--- ✄ -----------------------\n")
fh.write(u"Archived {0}:\n".format(tznow().strftime(
self.date_format)))
# And, finally, write our data
fh.write(u"{0}\n".format(data))
return True
except Exception as e:
sublime.error_message(u"Error:\n\nUnable to append to {0}\n{1}".format(
filename, str(e)))
return False
def __createArchiveFilename(self):
# Create our archive filename, from the mask in our settings.
        # Split filename into dir, base, and extension, then apply our mask
path_base, extension = os.path.splitext(self.view.file_name())
dir = os.path.dirname(path_base)
base = os.path.basename(path_base)
sep = os.sep
# Now build our new filename
try:
# This could fail, if someone messed up the mask in the
# settings. So, if it did fail, use our default.
archive_filename = self.archive_org_filemask.format(
dir=dir, base=base, ext=extension, sep=sep)
except:
# Use our default mask
archive_filename = self.archive_org_default_filemask.format(
dir=dir, base=base, ext=extension, sep=sep)
# Display error, letting the user know
sublime.error_message(u"Error:\n\nInvalid filemask:{0}\nUsing default: {1}".format(
self.archive_org_filemask, self.archive_org_default_filemask))
return archive_filename
def __findCurrentSubtree(self):
# Return the region that starts at the cursor, or starts at
# the beginning of the selection
line = self.view.line(self.view.sel()[0].begin())
# Start finding the region at the beginning of the next line
region = self.view.indented_region(line.b + 2)
if region.contains(line.b):
# there is no subtree
return sublime.Region(-1, -1)
if not region.empty():
region = sublime.Region(line.a, region.b)
return region
class PlainTasksFoldToTags(PlainTasksFold):
TAG = r'(?u)@\w+'
def run(self, edit):
tag_sels = [s for s in list(self.view.sel()) if 'tag.todo' in self.view.scope_name(s.a)]
if not tag_sels:
sublime.status_message('Cursor(s) must be placed on tag(s)')
return
tags = self.extract_tags(tag_sels)
tasks = [self.view.line(f) for f in self.view.find_all(r'[ \t](%s)' % '|'.join(tags)) if 'pending' in self.view.scope_name(f.a)]
if not tasks:
sublime.status_message('Pending tasks with given tags are not found')
print(tags, tag_sels)
return
self.exec_folding(self.add_projects_and_notes(tasks))
def extract_tags(self, tag_sels):
tags = []
for s in tag_sels:
start = end = s.a
limit = self.view.size()
while all(self.view.substr(start) != c for c in '@ \n'):
start -= 1
if start == 0:
break
while all(self.view.substr(end) != c for c in '( @\n'):
end += 1
if end == limit:
break
match = re.match(self.TAG, self.view.substr(sublime.Region(start, end)))
tag = match.group(0) if match else False
if tag and tag not in tags:
tags.append(tag)
return tags
class PlainTasksAddGutterIconsForTags(sublime_plugin.EventListener):
def on_activated(self, view):
if not view.score_selector(0, "text.todo") > 0:
return
view.erase_regions('critical')
view.erase_regions('high')
view.erase_regions('low')
view.erase_regions('today')
icon_critical = view.settings().get('icon_critical', '')
icon_high = view.settings().get('icon_high', '')
icon_low = view.settings().get('icon_low', '')
icon_today = view.settings().get('icon_today', '')
if not any((icon_critical, icon_high, icon_low, icon_today)):
return
critical = 'string.other.tag.todo.critical'
high = 'string.other.tag.todo.high'
low = 'string.other.tag.todo.low'
today = 'string.other.tag.todo.today'
r_critical = view.find_by_selector(critical)
r_high = view.find_by_selector(high)
r_low = view.find_by_selector(low)
r_today = view.find_by_selector(today)
if not any((r_critical, r_high, r_low, r_today)):
return
view.add_regions('critical', r_critical, critical, icon_critical, sublime.HIDDEN)
view.add_regions('high', r_high, high, icon_high, sublime.HIDDEN)
view.add_regions('low', r_low, low, icon_low, sublime.HIDDEN)
view.add_regions('today', r_today, today, icon_today, sublime.HIDDEN)
def on_post_save(self, view):
self.on_activated(view)
def on_load(self, view):
self.on_activated(view)
class PlainTasksHover(sublime_plugin.ViewEventListener):
'''Show popup with actions when hover over bullet'''
msg = ('<style>' # four curly braces because it will be modified with format method twice
'html {{{{background-color: color(var(--background) blenda(white 75%))}}}}'
'body {{{{margin: .1em .3em}}}}'
'p {{{{margin: .5em 0}}}}'
'a {{{{text-decoration: none}}}}'
'span.icon {{{{font-weight: bold; font-size: 1.3em}}}}'
'#icon-done {{{{color: var(--greenish)}}}}'
'#icon-cancel {{{{color: var(--redish)}}}}'
'#icon-archive {{{{color: var(--bluish)}}}}'
'#icon-outside {{{{color: var(--purplish)}}}}'
'#done {{{{color: var(--greenish)}}}}'
'#cancel {{{{color: var(--redish)}}}}'
'#archive {{{{color: var(--bluish)}}}}'
'#outside {{{{color: var(--purplish)}}}}'
'</style><body>'
'{actions}'
)
complete = '<a href="complete\v{point}"><span class="icon" id="icon-done">✔</span> <span id="done">Toggle complete</span></a>'
cancel = '<a href="cancel\v{point}"><span class="icon" id="icon-cancel">✘</span> <span id="cancel">Toggle cancel</span></a>'
archive = '<a href="archive\v{point}"><span class="icon" id="icon-archive">📚</span> <span id="archive">Archive</span></a>'
archivetofile = '<a href="tofile\v{point}"><span class="icon" id="icon-outside">📤</span> <span id="outside">Archive to file</span></a>'
actions = {
'text.todo meta.item.todo.pending': '<p>{complete}</p><p>{cancel}</p>'.format(complete=complete, cancel=cancel),
'text.todo meta.item.todo.completed': '<p>{archive}</p><p>{archivetofile}</p><p>{complete}</p>'.format(archive=archive, archivetofile=archivetofile, complete=complete),
'text.todo meta.item.todo.cancelled': '<p>{archive}</p><p>{archivetofile}</p><p>{complete}</p><p>{cancel}</p>'.format(archive=archive, archivetofile=archivetofile, complete=complete, cancel=cancel)
}
@classmethod
def is_applicable(cls, settings):
return settings.get('syntax') == 'Packages/PlainTasks/PlainTasks.sublime-syntax'
def on_hover(self, point, hover_zone):
self.view.hide_popup()
if hover_zone != sublime.HOVER_TEXT:
return
line = self.view.line(point)
line_scope_name = self.view.scope_name(line.a).strip()
if 'meta.item.todo' not in line_scope_name:
return
bullet = any(('bullet' in self.view.scope_name(p) for p in (point, point - 1)))
if not bullet:
return
width, height = self.view.viewport_extent()
self.view.show_popup(self.msg.format(actions=self.actions.get(line_scope_name)).format(point=point), 0, point or self.view.sel()[0].begin() or 1, width, height / 2, self.exec_action)
def exec_action(self, msg):
action, at = msg.split('\v')
case = {
'complete': lambda: self.view.run_command('plain_tasks_complete'),
'cancel': lambda: self.view.run_command('plain_tasks_cancel'),
'archive': lambda: self.view.run_command("plain_tasks_archive", {"partial": True}),
'tofile': lambda: self.view.run_command('plain_tasks_org_archive'),
}
self.view.sel().clear()
self.view.sel().add(sublime.Region(int(at)))
case[action]()
self.view.hide_popup()
class PlainTasksGotoTag(sublime_plugin.TextCommand):
def run(self, edit):
self.initial_viewport = self.view.viewport_position()
self.initial_sels = list(self.view.sel())
self.tags = sorted(
[r for r in self.view.find_by_selector('meta.tag.todo')
if not any(s in self.view.scope_name(r.a) for s in ('completed', 'cancelled'))
] +
self.view.find_by_selector('string.other.tag.todo.critical') +
self.view.find_by_selector('string.other.tag.todo.high') +
self.view.find_by_selector('string.other.tag.todo.low') +
self.view.find_by_selector('string.other.tag.todo.today')
)
window = self.view.window() or sublime.active_window()
items = [[self.view.substr(t), u'{0}: {1}'.format(self.view.rowcol(t.a)[0], self.view.substr(self.view.line(t)).strip())] for t in self.tags]
if ST3:
from bisect import bisect_left
# find the closest tag after current position of viewport, to avoid scrolling
closest_index = bisect_left([r.a for r in self.tags], self.view.layout_to_text(self.initial_viewport))
llen = len(self.tags)
selected_index = closest_index if closest_index < llen else llen - 1
window.show_quick_panel(items, self.on_done, 0, selected_index, self.on_highlighted)
else:
window.show_quick_panel(items, self.on_done)
def on_done(self, index):
if index < 0:
self.view.sel().clear()
self.view.sel().add_all(self.initial_sels)
self.view.set_viewport_position(self.initial_viewport)
return
self.view.sel().clear()
self.view.sel().add(sublime.Region(self.tags[index].a))
self.view.show_at_center(self.tags[index])
def on_highlighted(self, index):
self.view.sel().clear()
self.view.sel().add(self.tags[index])
self.view.show(self.tags[index], True)
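# --- Hypothetical usage notes (added for illustration; not part of the plugin) ---
# The classes above register Sublime Text commands under the snake_case forms of their class
# names, so from the console or a key binding one could run, for example:
#
#   view.run_command('plain_tasks_new')                          # PlainTasksNewCommand
#   view.run_command('plain_tasks_new_with_date')                # PlainTasksNewWithDateCommand
#   view.run_command('plain_tasks_complete')                     # PlainTasksCompleteCommand
#   view.run_command('plain_tasks_cancel')                       # PlainTasksCancelCommand
#   view.run_command('plain_tasks_archive', {'partial': True})   # archive only selected tasks
#   view.run_command('plain_tasks_org_archive')                  # archive current subtree to a file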
|
local.py
|
import threading
import time
import types
import typing
from concurrent.futures.thread import ThreadPoolExecutor
from . import base
from ..exceptions import LovageRemoteException
class LocalBackend(base.Backend):
def __init__(self):
self._executor = LocalExecutor()
def new_task(self, serializer: base.Serializer, func: types.FunctionType, options: typing.Mapping) -> base.Task:
return base.Task(func, self._executor, serializer)
def deploy(self, *, requirements: typing.List[str], root: str, exclude=None):
print("Nothing to deploy when running locally")
class LocalExecutor(base.Executor):
def __init__(self):
self._executor = ThreadPoolExecutor(max_workers=1)
def invoke(self, serializer: base.Serializer, func: types.FunctionType, packed_args):
return self._invoke(serializer, func, packed_args)
def invoke_async(self, serializer: base.Serializer, func: types.FunctionType, packed_args):
threading.Thread(target=self._invoke, args=(serializer, func, packed_args)).start()
def queue(self, serializer: base.Serializer, func: types.FunctionType, packed_args):
self._executor.submit(self._invoke, serializer, func, packed_args)
def delay(self, serializer: base.Serializer, func: types.FunctionType, packed_args, timeout):
def delayer():
time.sleep(timeout)
self._invoke(serializer, func, packed_args)
threading.Thread(target=delayer).start()
@staticmethod
def _invoke(serializer: base.Serializer, func: types.FunctionType, packed_args):
# TODO handle exceptions so we can test serializers
try:
unpacked_args, unpacked_kwargs = serializer.unpack_args(packed_args)
result = func(*unpacked_args, **unpacked_kwargs)
return serializer.pack_result(result)
except Exception as e:
# exception_handler(e) -- TODO AWS only for now
if serializer.objects_supported:
packed_e = serializer.pack_result(e)
unpacked_e = serializer.unpack_result(packed_e)
raise unpacked_e
else:
raise LovageRemoteException.from_exception_object(LovageRemoteException.exception_object(e))
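# --- Hypothetical usage sketch (added for illustration; not part of the original module) ---
# Because of the relative imports above, this would normally live in a separate script that
# imports the package. It drives LocalExecutor with a minimal pass-through serializer that
# implements only the members used in this file (unpack_args, pack_result, unpack_result,
# objects_supported); the real base.Serializer API may differ.
#
#   class PassThroughSerializer:
#       objects_supported = True
#       def unpack_args(self, packed):
#           return packed            # packed is assumed to be (args, kwargs)
#       def pack_result(self, result):
#           return result
#       def unpack_result(self, packed):
#           return packed
#
#   def add(a, b=0):
#       return a + b
#
#   executor = LocalExecutor()
#   print(executor.invoke(PassThroughSerializer(), add, ((2,), {'b': 3})))  # -> 5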
|
dagucar.py
|
# Wrapping the Adafruit API to talk to DC motors with a simpler interface
#
# date: 11/17/2015
#
# authors: Valerio Varricchio <valerio@mit.edu>
# Luca Carlone <lcarlone@mit.edu>
#
# ~~~~~ IMPORTANT !!! ~~~~~
#
# Make sure that the front motor is connected in such a way that a positive
# speed causes an increase in the potentiometer reading!
from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor
import time
import atexit
import numpy
import warnings
from threading import Thread
from dagu_car import pot
import math
class Controller:
def __init__(self):
# First eye-metric tuning. At least the signs are ok!
self.P = 1.2
self.I = 4
self.D = 50
self.reference = 0
self.integral = 0
self.oldValue = 0
self.dt = 0.02 # the higher this frequency the more often the potentiometer fails
self.numIt = 0
self.totalTime = 0
class DAGU_Car:
    __driveDeadZone = 10     # Minimum speed needed to overcome static friction
    __driveMaxSpeed = 255    # Maximum speed that can be commanded
    __steerDeadZone = 10     # Minimum steer DC motor speed needed to overcome static friction
    __steerMaxSpeed = 255
    __steerTolerance = 0.1   # Positioning threshold below which we release the DC motor
def __init__(self, verbose=False):
self.mh = Adafruit_MotorHAT(addr=0x60)
self.rearMotor = self.mh.getMotor(1)
self.frontMotor = self.mh.getMotor(2)
self.rearMotorMode = Adafruit_MotorHAT.RELEASE
# create Potentiometer object
self.potentiometer = pot.Pot()
self.referenceSteerAngle = 0
self.verbose = verbose
#
self.steerController = Controller()
        # Run the steering servo loop in a separate thread
self.steerThread = Thread(target = self.steerServo)
self.steerThreadStop = False;
self.steerThread.start()
# Speed has to be in [-1, 1], sign determines bwd/fwd
def setSpeed(self, speed):
# Warning the user
if abs(speed)>1:
warnings.warn("Input speed has to be in [-1, 1]. Clamping it.")
# Clamping speed value to [-1, 1]
speed = numpy.clip(speed, -1, 1)
if abs(speed) < 1e-4:
self.rearMotorMode = Adafruit_MotorHAT.RELEASE
elif speed > 0:
self.rearMotorMode = Adafruit_MotorHAT.FORWARD
elif speed < 0:
self.rearMotorMode = Adafruit_MotorHAT.BACKWARD
self.rearMotor.setSpeed(int(round(\
abs(speed)*(DAGU_Car.__driveMaxSpeed-DAGU_Car.__driveDeadZone)\
+DAGU_Car.__driveDeadZone)));
self.rearMotor.run(self.rearMotorMode);
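    # Steering control loop, run in its own thread: read the potentiometer,
    # compute a PID-style command from the position error, its integral and the
    # magnitude of its derivative, clip it to [__steerDeadZone, __steerMaxSpeed]
    # and drive the front DC motor towards the reference angle.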
def steerServo(self):
while not self.steerThreadStop:
#print("Separate Servo thread running")
self.steerController.numIt += 1
tin = time.time()
reading = self.potentiometer.getPot()
if(math.isnan(reading)):
# If the sensor turns out "not attached" (PhidgetException downstream)
# then...wait for a while. Apparently if I keep issuing requests...
# it doesn't manage to reconnect again.
if(self.verbose):
print("potentiometer not ready, waiting...")
time.sleep(5*self.steerController.dt)
continue
delta = self.steerController.reference-reading
steerMotorMode = Adafruit_MotorHAT.RELEASE
derivative = (delta-self.steerController.oldValue)/self.steerController.dt
# worst case Delta is pm 2
cmd = self.steerController.P*delta
cmd += self.steerController.I*self.steerController.integral
cmd *= DAGU_Car.__steerMaxSpeed
if(self.verbose):
print("x: "+str(delta)+" xdot: "+str(derivative)+" int: "+str(self.steerController.integral))
print("rawcmd: "+str(int(cmd)))
if(abs(delta)>DAGU_Car.__steerTolerance):
if(cmd>0):
steerMotorMode = Adafruit_MotorHAT.BACKWARD
else:
steerMotorMode = Adafruit_MotorHAT.FORWARD
cmd = abs(cmd)
cmd += self.steerController.D*abs(derivative)
cmd = numpy.clip(cmd, DAGU_Car.__steerDeadZone, DAGU_Car.__steerMaxSpeed)
else:
self.steerController.integral = 0 #resetting integral term
if(self.verbose):
#print("x: "+str(delta)+" xdot: "+str(derivative)+" int: "+str(self.steerController.integral))
print("cmd: "+str(int(cmd)))
self.frontMotor.setSpeed(int(cmd))
self.frontMotor.run(steerMotorMode)
self.steerController.oldValue = delta
self.steerController.integral += delta*self.steerController.dt
time.sleep(self.steerController.dt)
self.steerController.totalTime += time.time()-tin;
def stopSteerControl(self):
print("Trying to stop the steer control thread and release motor.")
self.steerThreadStop = True
#self.steerThread.join()
self.frontMotor.run(Adafruit_MotorHAT.RELEASE)
def startSteerControl(self):
self.steerThreadStop = False;
self.steerThread.start()
def setSteerAngle(self, angle, P=float('nan'), D=float('nan'), I=float('nan')):
# TODO this has to be implemented (maybe using a separate control thread)
# once the potentiometer data is available
        if not math.isnan(P):
            self.steerController.P = P
        if not math.isnan(D):
            self.steerController.D = D
        if not math.isnan(I):
            self.steerController.I = I
if abs(angle)>1:
warnings.warn("Input angle has to be in [-1, 1]. Clamping it.")
self.steerController.integral = 0 #resetting integral term
self.steerController.reference = numpy.clip(angle, -1, 1);
def printHz(self):
print(self.steerController.numIt/self.steerController.totalTime)
# recommended for auto-disabling motors on shutdown!
def turnOffMotors(self):
self.rearMotor.run(Adafruit_MotorHAT.RELEASE)
self.frontMotor.run(Adafruit_MotorHAT.RELEASE)
self.setSteerAngle(0)
    def __del__(self):
        self.stopSteerControl()
        self.turnOffMotors()
        del self.potentiometer
        del self.mh
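# A minimal usage sketch (assumes the Adafruit Motor HAT at 0x60 and the
# dagu_car.pot potentiometer are attached; values are illustrative only):
#
#   car = DAGU_Car(verbose=True)
#   atexit.register(car.turnOffMotors)  # recommended for auto-disabling motors on shutdown
#   car.setSpeed(0.3)         # forward, 30% of the usable speed range
#   car.setSteerAngle(-0.5)   # steer towards one side, half of the usable range
#   time.sleep(2)
#   car.setSpeed(0)
#   car.stopSteerControl()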
|
store.py
|
from os import unlink, path, mkdir
import json
import uuid as uuid_builder
from threading import Lock
from copy import deepcopy
import logging
import time
import threading
import os
# Is there an existing library to ensure some data store (JSON etc) is in sync with CRUD methods?
# Open a github issue if you know something :)
# https://stackoverflow.com/questions/6190468/how-to-trigger-function-on-value-change
class ChangeDetectionStore:
lock = Lock()
def __init__(self, datastore_path="/datastore", include_default_watches=True, version_tag="0.0.0"):
self.needs_write = False
self.datastore_path = datastore_path
self.json_store_path = "{}/url-watches.json".format(self.datastore_path)
self.stop_thread = False
self.__data = {
'note': "Hello! If you change this file manually, please be sure to restart your changedetection.io instance!",
'watching': {},
'settings': {
'headers': {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
                    'Accept-Encoding': 'gzip, deflate', # No support for brotli in python requests yet.
'Accept-Language': 'en-GB,en-US;q=0.9,en;'
},
'requests': {
'timeout': 15, # Default 15 seconds
'minutes_between_check': 3 * 60, # Default 3 hours
'workers': 10 # Number of threads, lower is better for slow connections
},
'application': {
'password': False,
'base_url' : None,
'extract_title_as_title': False,
'fetch_backend': 'html_requests',
'notification_urls': [], # Apprise URL list
# Custom notification content
'notification_title': None,
'notification_body': None,
}
}
}
# Base definition for all watchers
self.generic_definition = {
'url': None,
'tag': None,
'last_checked': 0,
'last_changed': 0,
'paused': False,
'last_viewed': 0, # history key value of the last viewed via the [diff] link
'newest_history_key': "",
'title': None,
# Re #110, so then if this is set to None, we know to use the default value instead
# Requires setting to None on submit if it's the same as the default
'minutes_between_check': None,
'previous_md5': "",
'uuid': str(uuid_builder.uuid4()),
'headers': {}, # Extra headers to send
'history': {}, # Dict of timestamp and output stripped filename
'ignore_text': [], # List of text to ignore when calculating the comparison checksum
# Custom notification content
'notification_urls': [], # List of URLs to add to the notification Queue (Usually AppRise)
'notification_title': None,
'notification_body': None,
'css_filter': "",
'trigger_text': [], # List of text or regex to wait for until a change is detected
'fetch_backend': None,
'extract_title_as_title': False
}
if path.isfile('changedetectionio/source.txt'):
with open('changedetectionio/source.txt') as f:
# Should be set in Dockerfile to look for /source.txt , this will give us the git commit #
# So when someone gives us a backup file to examine, we know exactly what code they were running.
self.__data['build_sha'] = f.read()
try:
# @todo retest with ", encoding='utf-8'"
with open(self.json_store_path) as json_file:
from_disk = json.load(json_file)
                # @todo isn't there a way to do this dict.update recursively?
                # Problem here is if the one on the disk is missing a sub-struct, it won't be present anymore.
if 'watching' in from_disk:
self.__data['watching'].update(from_disk['watching'])
if 'app_guid' in from_disk:
self.__data['app_guid'] = from_disk['app_guid']
if 'settings' in from_disk:
if 'headers' in from_disk['settings']:
self.__data['settings']['headers'].update(from_disk['settings']['headers'])
if 'requests' in from_disk['settings']:
self.__data['settings']['requests'].update(from_disk['settings']['requests'])
if 'application' in from_disk['settings']:
self.__data['settings']['application'].update(from_disk['settings']['application'])
# Reinitialise each `watching` with our generic_definition in the case that we add a new var in the future.
                # @todo pretty sure there's a Pythonic way to do this with an abstracted(?) object!
for uuid, watch in self.__data['watching'].items():
_blank = deepcopy(self.generic_definition)
_blank.update(watch)
self.__data['watching'].update({uuid: _blank})
self.__data['watching'][uuid]['newest_history_key'] = self.get_newest_history_key(uuid)
print("Watching:", uuid, self.__data['watching'][uuid]['url'])
        # First time run, the file doesn't exist yet.
except (FileNotFoundError, json.decoder.JSONDecodeError):
if include_default_watches:
print("Creating JSON store at", self.datastore_path)
self.add_watch(url='http://www.quotationspage.com/random.php', tag='test')
self.add_watch(url='https://news.ycombinator.com/', tag='Tech news')
self.add_watch(url='https://www.gov.uk/coronavirus', tag='Covid')
self.add_watch(url='https://changedetection.io', tag='Tech news')
self.__data['version_tag'] = version_tag
# Helper to remove password protection
password_reset_lockfile = "{}/removepassword.lock".format(self.datastore_path)
if path.isfile(password_reset_lockfile):
self.__data['settings']['application']['password'] = False
unlink(password_reset_lockfile)
if not 'app_guid' in self.__data:
import sys
import os
if "pytest" in sys.modules or "PYTEST_CURRENT_TEST" in os.environ:
self.__data['app_guid'] = "test-" + str(uuid_builder.uuid4())
else:
self.__data['app_guid'] = str(uuid_builder.uuid4())
self.needs_write = True
# Finally start the thread that will manage periodic data saves to JSON
        save_data_thread = threading.Thread(target=self.save_datastore)
        save_data_thread.start()
# Returns the newest key, but if theres only 1 record, then it's counted as not being new, so return 0.
def get_newest_history_key(self, uuid):
if len(self.__data['watching'][uuid]['history']) == 1:
return 0
dates = list(self.__data['watching'][uuid]['history'].keys())
# Convert to int, sort and back to str again
dates = [int(i) for i in dates]
dates.sort(reverse=True)
if len(dates):
# always keyed as str
return str(dates[0])
return 0
def set_last_viewed(self, uuid, timestamp):
self.data['watching'][uuid].update({'last_viewed': int(timestamp)})
self.needs_write = True
def update_watch(self, uuid, update_obj):
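        # Merge update_obj into the stored watch. Keys whose default in
        # generic_definition is a dict (e.g. 'headers', 'history') are merged with
        # dict.update first so nested values are not wiped out wholesale.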
# Skip if 'paused' state
if self.__data['watching'][uuid]['paused']:
return
with self.lock:
# In python 3.9 we have the |= dict operator, but that still will lose data on nested structures...
for dict_key, d in self.generic_definition.items():
if isinstance(d, dict):
if update_obj is not None and dict_key in update_obj:
self.__data['watching'][uuid][dict_key].update(update_obj[dict_key])
del (update_obj[dict_key])
self.__data['watching'][uuid].update(update_obj)
self.__data['watching'][uuid]['newest_history_key'] = self.get_newest_history_key(uuid)
self.needs_write = True
@property
def data(self):
has_unviewed = False
for uuid, v in self.__data['watching'].items():
self.__data['watching'][uuid]['newest_history_key'] = self.get_newest_history_key(uuid)
if int(v['newest_history_key']) <= int(v['last_viewed']):
self.__data['watching'][uuid]['viewed'] = True
else:
self.__data['watching'][uuid]['viewed'] = False
has_unviewed = True
# #106 - Be sure this is None on empty string, False, None, etc
# Default var for fetch_backend
if not self.__data['watching'][uuid]['fetch_backend']:
self.__data['watching'][uuid]['fetch_backend'] = self.__data['settings']['application']['fetch_backend']
            # Re #152, return env base_url if not overridden, @todo also prefer the proxy pass url
env_base_url = os.getenv('BASE_URL','')
if self.__data['settings']['application']['base_url'] is None and len(env_base_url) >0:
self.__data['settings']['application']['base_url'] = env_base_url.strip('" ')
self.__data['has_unviewed'] = has_unviewed
return self.__data
def get_all_tags(self):
tags = []
for uuid, watch in self.data['watching'].items():
# Support for comma separated list of tags.
for tag in watch['tag'].split(','):
tag = tag.strip()
if tag not in tags:
tags.append(tag)
tags.sort()
return tags
def unlink_history_file(self, path):
try:
unlink(path)
except (FileNotFoundError, IOError):
pass
# Delete a single watch by UUID
def delete(self, uuid):
with self.lock:
if uuid == 'all':
self.__data['watching'] = {}
# GitHub #30 also delete history records
for uuid in self.data['watching']:
for path in self.data['watching'][uuid]['history'].values():
self.unlink_history_file(path)
else:
for path in self.data['watching'][uuid]['history'].values():
self.unlink_history_file(path)
del self.data['watching'][uuid]
self.needs_write = True
# Clone a watch by UUID
def clone(self, uuid):
url = self.data['watching'][uuid]['url']
tag = self.data['watching'][uuid]['tag']
new_uuid = self.add_watch(url=url, tag=tag)
return new_uuid
def url_exists(self, url):
        # Probably there should be a dict...
for watch in self.data['watching'].values():
if watch['url'] == url:
return True
return False
def get_val(self, uuid, val):
        # Probably there should be a dict...
return self.data['watching'][uuid].get(val)
    # Remove a watch's data but keep the entry (URL etc)
def scrub_watch(self, uuid, limit_timestamp = False):
import hashlib
del_timestamps = []
changes_removed = 0
for timestamp, path in self.data['watching'][uuid]['history'].items():
if not limit_timestamp or (limit_timestamp is not False and int(timestamp) > limit_timestamp):
self.unlink_history_file(path)
del_timestamps.append(timestamp)
changes_removed += 1
if not limit_timestamp:
self.data['watching'][uuid]['last_checked'] = 0
self.data['watching'][uuid]['last_changed'] = 0
self.data['watching'][uuid]['previous_md5'] = 0
for timestamp in del_timestamps:
del self.data['watching'][uuid]['history'][str(timestamp)]
        # If there was a limit timestamp, we need to reset some metadata about the entry
# This has to happen after we remove the others from the list
if limit_timestamp:
newest_key = self.get_newest_history_key(uuid)
if newest_key:
self.data['watching'][uuid]['last_checked'] = int(newest_key)
# @todo should be the original value if it was less than newest key
self.data['watching'][uuid]['last_changed'] = int(newest_key)
try:
with open(self.data['watching'][uuid]['history'][str(newest_key)], "rb") as fp:
content = fp.read()
self.data['watching'][uuid]['previous_md5'] = hashlib.md5(content).hexdigest()
except (FileNotFoundError, IOError):
self.data['watching'][uuid]['previous_md5'] = False
pass
self.needs_write = True
return changes_removed
def add_watch(self, url, tag):
with self.lock:
# @todo use a common generic version of this
new_uuid = str(uuid_builder.uuid4())
_blank = deepcopy(self.generic_definition)
_blank.update({
'url': url,
'tag': tag,
'uuid': new_uuid
})
self.data['watching'][new_uuid] = _blank
# Get the directory ready
output_path = "{}/{}".format(self.datastore_path, new_uuid)
try:
mkdir(output_path)
except FileExistsError:
print(output_path, "already exists.")
self.sync_to_json()
return new_uuid
# Save some text file to the appropriate path and bump the history
# result_obj from fetch_site_status.run()
def save_history_text(self, watch_uuid, contents):
import uuid
output_path = "{}/{}".format(self.datastore_path, watch_uuid)
fname = "{}/{}.stripped.txt".format(output_path, uuid.uuid4())
        with open(fname, 'wb') as f:
            f.write(contents)
return fname
def sync_to_json(self):
print("Saving..")
data ={}
try:
data = deepcopy(self.__data)
except RuntimeError:
time.sleep(0.5)
print ("! Data changed when writing to JSON, trying again..")
self.sync_to_json()
return
else:
with open(self.json_store_path, 'w') as json_file:
json.dump(data, json_file, indent=4)
logging.info("Re-saved index")
self.needs_write = False
# Thread runner, this helps with thread/write issues when there are many operations that want to update the JSON
# by just running periodically in one thread, according to python, dict updates are threadsafe.
def save_datastore(self):
while True:
if self.stop_thread:
print("Shutting down datastore thread")
return
if self.needs_write:
self.sync_to_json()
time.sleep(3)
# Go through the datastore path and remove any snapshots that are not mentioned in the index
# This usually is not used, but can be handy.
def remove_unused_snapshots(self):
print ("Removing snapshots from datastore that are not in the index..")
index=[]
for uuid in self.data['watching']:
for id in self.data['watching'][uuid]['history']:
index.append(self.data['watching'][uuid]['history'][str(id)])
import pathlib
# Only in the sub-directories
for item in pathlib.Path(self.datastore_path).rglob("*/*txt"):
if not str(item) in index:
print ("Removing",item)
unlink(item)
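# A minimal usage sketch (assumptions: a writable datastore directory such as
# /tmp/datastore; values are illustrative only):
#
#   store = ChangeDetectionStore(datastore_path="/tmp/datastore", include_default_watches=False)
#   uuid = store.add_watch(url="https://example.com", tag="demo")
#   store.update_watch(uuid, {'title': 'Example page'})
#   print(store.get_val(uuid, 'title'))
#   store.stop_thread = True  # let the periodic save thread exit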
|
housekeeper.py
|
"""
Keeps data up to date
"""
import logging, os, time, requests
from multiprocessing import Process
from sqlalchemy.ext.automap import automap_base
import sqlalchemy as s
import pandas as pd
from sqlalchemy import MetaData
logging.basicConfig(filename='housekeeper.log')
class Housekeeper:
def __init__(self, jobs, broker, broker_host, broker_port, user, password, host, port, dbname):
self.broker_host = broker_host
self.broker_port = broker_port
self.broker = broker
DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format(
user, password, host, port, dbname
)
dbschema='augur_data'
self.db = s.create_engine(DB_STR, poolclass=s.pool.NullPool,
connect_args={'options': '-csearch_path={}'.format(dbschema)})
helper_schema = 'augur_operations'
self.helper_db = s.create_engine(DB_STR, poolclass = s.pool.NullPool,
connect_args={'options': '-csearch_path={}'.format(helper_schema)})
helper_metadata = MetaData()
helper_metadata.reflect(self.helper_db, only=['worker_job'])
HelperBase = automap_base(metadata=helper_metadata)
HelperBase.prepare()
self.job_table = HelperBase.classes.worker_job.__table__
repoUrlSQL = s.sql.text("""
SELECT repo_git FROM repo
""")
rs = pd.read_sql(repoUrlSQL, self.db, params={})
all_repos = rs['repo_git'].values.tolist()
# List of tasks that need periodic updates
self.__updatable = self.prep_jobs(jobs)
self.__processes = []
self.__updater()
@staticmethod
def updater_process(broker_host, broker_port, broker, job):
"""
        Controls a given plugin's update process
        :param broker_host: hostname of the broker that receives the generated tasks
        :param broker_port: port of the broker
        :param broker: shared object describing the connected workers and their capabilities
        :param job: job specification (model, given, repos, delay) to be maintained
"""
logging.info("HOB: {}".format(job))
repo_group_id = job['repo_group_id'] if 'repo_group_id' in job else None
if repo_group_id:
logging.info('Housekeeper spawned {} model updater process for subsection {} with PID {}'.format(job['model'], repo_group_id, os.getpid()))
else:
logging.info('Housekeeper spawned {} model updater process for repo {} with PID {}'.format(job['model'], job['repos'][0]['repo_id'], os.getpid()))
try:
compatible_worker_found = False
# Waiting for compatible worker
while True:
if not compatible_worker_found:
for worker in list(broker._getvalue().keys()):
# logging.info("{} {} {} {}".format(worker, model, broker[worker], given))
if job['model'] in broker[worker]['models'] and job['given'] in broker[worker]['given']:
compatible_worker_found = True
if compatible_worker_found:
logging.info("Housekeeper recognized that the broker has a worker that " +
"can handle the {} model... beginning to distribute maintained tasks".format(job['model']))
time.sleep(4)
while True:
logging.info('Housekeeper updating {} model with given {}...'.format(
job['model'], job['given'][0]))
if job['given'][0] == 'git_url' or job['given'][0] == 'github_url':
for repo in job['repos']:
if job['given'][0] == 'github_url' and 'github.com' not in repo['repo_git']:
continue
given_key = 'git_url' if job['given'][0] == 'git_url' else 'github_url'
task = {
"job_type": job['job_type'] if 'job_type' in job else 'MAINTAIN',
"models": [job['model']],
"display_name": "{} model for url: {}".format(job['model'], repo['repo_git']),
"given": {}
}
task['given'][given_key] = repo['repo_git']
if "focused_task" in repo:
task["focused_task"] = repo['focused_task']
try:
requests.post('http://{}:{}/api/unstable/task'.format(
broker_host,broker_port), json=task, timeout=10)
except Exception as e:
logging.info("Error encountered: {}".format(e))
time.sleep(90)
elif job['given'][0] == 'repo_group':
task = {
"job_type": job['job_type'] if 'job_type' in job else 'MAINTAIN',
"models": [job['model']],
"display_name": "{} model for repo group id: {}".format(job['model'], repo_group_id),
"given": {
"repo_group": job['repos']
}
}
try:
requests.post('http://{}:{}/api/unstable/task'.format(
broker_host,broker_port), json=task, timeout=10)
except Exception as e:
logging.info("Error encountered: {}".format(e))
logging.info("Housekeeper finished sending {} tasks to the broker for it to distribute to your worker(s)".format(len(job['repos'])))
time.sleep(job['delay'])
break
time.sleep(3)
except KeyboardInterrupt:
os.kill(os.getpid(), 9)
os._exit(0)
except:
raise
def __updater(self, jobs=None):
"""
Starts update processes
"""
logging.info("Starting update processes...")
if jobs is None:
jobs = self.__updatable
for job in jobs:
up = Process(target=self.updater_process, args=(self.broker_host, self.broker_port, self.broker, job), daemon=True)
up.start()
self.__processes.append(up)
def update_all(self):
"""
Updates all plugins
"""
for updatable in self.__updatable:
updatable['update']()
def schedule_updates(self):
"""
Schedules updates
"""
# don't use this,
logging.debug('Scheduling updates...')
self.__updater()
def join_updates(self):
"""
Join to the update processes
"""
for process in self.__processes:
process.join()
def shutdown_updates(self):
"""
Ends all running update processes
"""
for process in self.__processes:
process.terminate()
def prep_jobs(self, jobs):
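        # Build the 'repos' list for each job: repo-group jobs query the repo table
        # (ordering repos by how many issues/PRs are still missing for the 'issues'
        # and 'pull_requests' models, otherwise rotating the plain repo list so
        # collection resumes after the last recorded repo id); single-repo jobs just
        # look up that repo's git URL.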
for job in jobs:
if 'repo_group_id' in job or 'repo_ids' in job:
# If RG id is 0 then it just means to query all repos
where_and = 'AND' if job['model'] in ['issues', 'pull_requests'] else 'WHERE'
where_condition = '{} repo_group_id = {}'.format(where_and, job['repo_group_id']) if 'repo_group_id' in job and job['repo_group_id'] != 0 else ''
repoUrlSQL = s.sql.text("""
SELECT *
FROM repo
WHERE repo_id IN ({})
""".format( # Generator expression
",".join(str(x) for x in job['repo_ids']))) if 'repo_ids' in job else s.sql.text(
"""
SELECT a.repo_id, a.repo_git, b.pull_request_count, d.repo_id AS pr_repo_id, count(*) AS pr_collected_count,
(b.pull_request_count-count(*)) AS prs_missing,
abs(cast((count(*))AS DOUBLE PRECISION)/NULLIF(cast(b.pull_request_count AS DOUBLE PRECISION), 0)) AS ratio_abs,
(cast((count(*))AS DOUBLE PRECISION)/NULLIF(cast(b.pull_request_count AS DOUBLE PRECISION), 0)) AS ratio_prs
FROM repo a, pull_requests d, repo_info b, (
SELECT repo_id, max(data_collection_date) AS last_collected FROM repo_info
GROUP BY repo_id
ORDER BY repo_id) e
WHERE a.repo_id = b.repo_id AND
a.repo_id = d.repo_id AND
b.repo_id = d.repo_id AND
b.data_collection_date = e.last_collected
{0}
GROUP BY a.repo_id, d.repo_id, b.pull_request_count
UNION
SELECT repo_id,repo_git, 0 AS pull_request_count, repo_id AS pr_repo_id, 0 AS pr_collected_count,
0 AS prs_missing,
0 AS ratio_abs,
0 AS ratio_prs
FROM repo
WHERE repo_id NOT IN (
SELECT a.repo_id FROM repo a, pull_requests d, repo_info b, (
SELECT repo_id, max(data_collection_date) AS last_collected FROM repo_info
GROUP BY repo_id
ORDER BY repo_id) e
WHERE a.repo_id = b.repo_id AND
a.repo_id = d.repo_id AND
b.repo_id = d.repo_id AND
b.data_collection_date = e.last_collected
{0}
GROUP BY a.repo_id, d.repo_id, b.pull_request_count
)
ORDER BY ratio_abs
""".format(where_condition)) if job['model'] == 'pull_requests' else s.sql.text("""
SELECT a.repo_id, a.repo_git, b.issues_count, d.repo_id AS pr_repo_id, count(*) AS issues_collected_count,
(b.issues_count-count(*)) AS issues_missing,
abs(cast((count(*))AS DOUBLE PRECISION)/NULLIF(cast(b.issues_count AS DOUBLE PRECISION), 0)) AS ratio_abs,
(cast((count(*))AS DOUBLE PRECISION)/NULLIF(cast(b.issues_count AS DOUBLE PRECISION), 0)) AS ratio_issues
FROM repo a, issues d, repo_info b,
(
SELECT repo_id, max(data_collection_date) AS last_collected FROM repo_info
GROUP BY repo_id
ORDER BY repo_id) e
WHERE a.repo_id = b.repo_id AND
a.repo_id = d.repo_id AND
b.repo_id = d.repo_id AND
b.data_collection_date = e.last_collected
AND d.pull_request_id IS NULL
{0}
GROUP BY a.repo_id, d.repo_id, b.issues_count
UNION
SELECT repo_id,repo_git, 0 AS issues_count, repo_id AS pr_repo_id, 0 AS issues_collected_count,
0 AS issues_missing,
0 AS ratio_abs,
0 AS ratio_issues
FROM repo
WHERE repo_id NOT IN (
SELECT a.repo_id FROM repo a, issues d, repo_info b,
(
SELECT repo_id, max(data_collection_date) AS last_collected FROM repo_info
GROUP BY repo_id
ORDER BY repo_id) e
WHERE a.repo_id = b.repo_id AND
a.repo_id = d.repo_id AND
b.repo_id = d.repo_id AND
b.data_collection_date = e.last_collected
AND d.pull_request_id IS NULL
{0}
GROUP BY a.repo_id, d.repo_id, b.issues_count
)
ORDER BY ratio_abs
""".format(where_condition)) if job['model'] == 'issues' else s.sql.text("""
SELECT repo_git, repo_id FROM repo {} ORDER BY repo_id ASC
""".format(where_condition))
reorganized_repos = pd.read_sql(repoUrlSQL, self.db, params={})
if len(reorganized_repos) == 0:
logging.info("Trying to send tasks for repo group, but the repo group does not contain any repos")
job['repos'] = []
continue
if 'starting_repo_id' in job:
last_id = job['starting_repo_id']
else:
repoIdSQL = s.sql.text("""
SELECT since_id_str FROM worker_job
WHERE job_model = '{}'
""".format(job['model']))
job_df = pd.read_sql(repoIdSQL, self.helper_db, params={})
# If there is no job tuple found, insert one
if len(job_df) == 0:
job_tuple = {
'job_model': job['model'],
'oauth_id': 0
}
result = self.helper_db.execute(self.job_table.insert().values(job_tuple))
logging.info("No job tuple for {} model was found, so one was inserted into the job table: {}".format(job['model'], job_tuple))
# If a last id is not recorded, start from beginning of repos
# (first id is not necessarily 0)
try:
last_id = int(job_df.iloc[0]['since_id_str'])
except:
last_id = 0
jobHistorySQL = s.sql.text("""
SELECT max(history_id) AS history_id, status FROM worker_history
GROUP BY status
LIMIT 1
""")
history_df = pd.read_sql(jobHistorySQL, self.helper_db, params={})
finishing_task = False
if len(history_df.index) != 0:
if history_df.iloc[0]['status'] == 'Stopped':
self.history_id = int(history_df.iloc[0]['history_id'])
finishing_task = True
                # Rearrange repos so the one after the last one that
                # was completed will be run first (if prioritized ordering is not available/enabled)
if job['model'] not in ['issues', 'pull_requests']:
before_repos = reorganized_repos.loc[reorganized_repos['repo_id'].astype(int) < last_id]
after_repos = reorganized_repos.loc[reorganized_repos['repo_id'].astype(int) >= last_id]
reorganized_repos = after_repos.append(before_repos)
if 'all_focused' in job:
reorganized_repos['focused_task'] = job['all_focused']
reorganized_repos = reorganized_repos.to_dict('records')
if finishing_task:
reorganized_repos[0]['focused_task'] = 1
job['repos'] = reorganized_repos
elif 'repo_id' in job:
job['repo_group_id'] = None
repoUrlSQL = s.sql.text("""
SELECT repo_git, repo_id FROM repo WHERE repo_id = {}
""".format(job['repo_id']))
rs = pd.read_sql(repoUrlSQL, self.db, params={})
if 'all_focused' in job:
rs['focused_task'] = job['all_focused']
rs = rs.to_dict('records')
job['repos'] = rs
return jobs
|
keygen.py
|
# coding:utf-8
import Tkinter as tk
from ScrolledText import ScrolledText
import threading
import hashlib
from random import randint
class Application(tk.Frame):
def __init__(self, master=None):
tk.Frame.__init__(self, master)
self.pack()
self.createWidgets()
def __del__(self):
pass
def createWidgets(self):
self.contentDest = tk.StringVar()
self.contentDest.set(u'随意写个用户名')
self.entryDest = tk.Entry(self, width=60)
self.entryDest["textvariable"] = self.contentDest
self.entryDest.grid(row=0)
self.entryDest.focus()
self.entryDest.bind('<Return>', lambda event: self.start())
self.buttonStart = tk.Button(self, text='生成序列号', width=25)
self.buttonStart['command'] = self.start
self.buttonStart['fg'] = 'black'
self.buttonStart.grid(row=1)
self.text = ScrolledText(self, font=('Courier New', 13), fg='green', bg='black', width=50)
self.text.grid(row=2, columnspan=1)
def start(self):
self.running = True
self.td = threading.Thread(target=self.gen_key)
self.td.setDaemon(True)
self.td.start()
def get_md5(self, src_txt):
m = hashlib.md5()
try:
src_txt = src_txt.encode("utf-8")
except:
pass
m.update(src_txt)
return m.hexdigest()
def gen_key(self):
try:
self.text.delete(0.0, "end")
name = self.contentDest.get()
self.text.insert("end", u"注册名:【%s】\n" % name)
salt = str(randint(10000,30000))
self.text.insert("end", u"盐 值:【%s】\n" % salt)
salted_text = u"ax5{}b52w{}vb3".format(name,salt)
self.text.insert("end", u"盐混淆:【%s】\n" % salted_text)
hashed_text = self.get_md5(salted_text)
self.text.insert("end", u"哈希值:【%s】\n" % hashed_text)
result_key = u"{}{}{}".format(hashed_text[:4],salt,hashed_text[4:])
self.text.insert("end", u"序列号:【%s】\n" % result_key)
self.text.insert("end", (u"^-^成功生成^-^\n"
u"请复制注册名和序列号进行软件激活:"
u"菜单栏->HELP->Register->输入激活"))
except Exception as e:
self.text.insert("end", u'生成失败,请填写英文名重新生成')
root = tk.Tk()
root.withdraw()
app = Application(master=root)
root.title("DbSchema序列号生成器")
try:
root.iconbitmap("logo.ico")
except:
pass
screen_width = root.winfo_screenwidth()
root.resizable(False, False)
root.update_idletasks()
root.deiconify()
screen_height = root.winfo_screenheight() - 100
root.geometry('%sx%s+%s+%s' % (
root.winfo_width() + 10, root.winfo_height() + 10, (screen_width - root.winfo_width()) / 2,
(screen_height - root.winfo_height()) / 2))
root.deiconify()
app.mainloop()
|
miniterm3.py
|
#!/usr/bin/env python
# Very simple serial terminal
# (C)2002-2004 Chris Liechti <cliecht@gmx.net>
# Input characters are sent directly (only LF -> CR/LF/CRLF translation is
# done); received characters are displayed as-is (or through Python's repr,
# useful for debugging)
# Baudrate and echo configuration is done through globals
import sys, os, serial, threading, getopt
EXITCHARCTER = '\x04' #ctrl+D
#first choose a platform dependent way to read single characters from the console
if os.name == 'nt':
import msvcrt
def getkey():
while 1:
if echo:
z = msvcrt.getche()
else:
z = msvcrt.getch()
            if z == '\0' or z == '\xe0':    #function keys
msvcrt.getch()
else:
if z == '\r':
return '\n'
return z
elif os.name == 'posix':
import termios, sys, os
fd = sys.stdin.fileno()
old = termios.tcgetattr(fd)
new = termios.tcgetattr(fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(fd, termios.TCSANOW, new)
s = '' # We'll save the characters typed and add them to the pool.
def getkey():
c = os.read(fd, 1)
#~ c = sys.stdin.read(1)
if echo: sys.stdout.write(c); sys.stdout.flush()
return c
    def cleanup_console():
        termios.tcsetattr(fd, termios.TCSAFLUSH, old)
    sys.exitfunc = cleanup_console #terminal modes have to be restored on exit...
else:
    raise NotImplementedError("Sorry no implementation for your platform (%s) available." % sys.platform)
CONVERT_CRLF = 2
CONVERT_CR = 1
CONVERT_LF = 0
def reader():
"""loop forever and copy serial->console"""
while 1:
data = s.read()
if repr_mode:
sys.stdout.write(repr(data)[1:-1])
else:
sys.stdout.write(data)
sys.stdout.flush()
def writer():
"""loop and copy console->serial until EOF character is found"""
while 1:
c = getkey()
if c == EXITCHARCTER:
break #exit app
elif c == '\n':
if convert_outgoing == CONVERT_CRLF:
s.write('\r\n') #make it a CR+LF
elif convert_outgoing == CONVERT_CR:
s.write('\r') #make it a CR
elif convert_outgoing == CONVERT_LF:
s.write('\n') #make it a LF
else:
s.write(c) #send character
#print a short help message
def usage():
sys.stderr.write("""USAGE: %s [options]
Miniterm - A simple terminal program for the serial port.
options:
-p, --port=PORT: port, a number, default = 0 or a device name
-b, --baud=BAUD: baudrate, default 9600
-r, --rtscts: enable RTS/CTS flow control (default off)
-x, --xonxoff: enable software flow control (default off)
-e, --echo: enable local echo (default off)
-c, --cr: do not send CR+LF, send CR only
-n, --newline: do not send CR+LF, send LF only
-D, --debug: debug received data (escape nonprintable chars)
""" % (sys.argv[0], ))
if __name__ == '__main__':
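    # Example invocations (port names/numbers are illustrative):
    #   python miniterm3.py -p 0 -b 115200 -e
    #   python miniterm3.py --port=/dev/ttyUSB0 --baud=9600 --cr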
#initialize with defaults
port = 0
baudrate = 9600
echo = 0
convert_outgoing = CONVERT_CRLF
rtscts = 0
xonxoff = 0
repr_mode = 0
#parse command line options
try:
opts, args = getopt.getopt(sys.argv[1:],
"hp:b:rxecnD",
["help", "port=", "baud=", "rtscts", "xonxoff", "echo",
"cr", "newline", "debug"]
)
except getopt.GetoptError:
# print help information and exit:
usage()
sys.exit(2)
for o, a in opts:
if o in ("-h", "--help"): #help text
usage()
sys.exit()
elif o in ("-p", "--port"): #specified port
try:
port = int(a)
except ValueError:
port = a
elif o in ("-b", "--baud"): #specified baudrate
try:
baudrate = int(a)
except ValueError:
                raise ValueError, "Baudrate must be an integer number, not %r" % a
elif o in ("-r", "--rtscts"):
rtscts = 1
elif o in ("-x", "--xonxoff"):
xonxoff = 1
elif o in ("-e", "--echo"):
echo = 1
elif o in ("-c", "--cr"):
convert_outgoing = CONVERT_CR
elif o in ("-n", "--newline"):
convert_outgoing = CONVERT_LF
elif o in ("-D", "--debug"):
repr_mode = 1
#open the port
try:
s = serial.Serial(port, baudrate, rtscts=rtscts, xonxoff=xonxoff)
except:
sys.stderr.write("Could not open port\n")
sys.exit(1)
sys.stderr.write("--- Miniterm --- type Ctrl-D to quit\n")
#start serial->console thread
r = threading.Thread(target=reader)
r.setDaemon(1)
r.start()
#and enter console->serial loop
writer()
sys.stderr.write("\n--- exit ---\n")
|
multiprocess_detect_actions.py
|
import numpy as np
import cv2
import imageio
#import tensorflow as tf
import json
import os
import sys
import argparse
import object_detection.object_detector as obj
import action_detection.action_detector as act
from multiprocessing import Process, Queue
from queue import Empty
import socket
import struct
import time
#SHOW_CAMS = True
SHOW_CAMS = False
# Object-interaction classes (alternative; uncomment to visualize CAMs for these instead)
#CAM_CLASSES = ["read", "answer phone", "carry", "text on", "drink", "eat"]
# Person state classes (active set)
CAM_CLASSES = ["walk", "stand", "sit", "bend", "run", "talk"]
#USE_WEBCAM = True
ACTION_FREQ = 8
#OBJ_BATCH_SIZE = 16 # with ssd-mobilenet2
#OBJ_BATCH_SIZE = 4 # with ssd-mobilenet2
#OBJ_BATCH_SIZE = 1 # with NAS, otherwise memory exhausts
DELAY = 60 # ms, this limits the input around 16 fps. This makes sense as the action model was trained with similar fps videos.
#OBJ_GPU = "0"
#ACT_GPU = "2"
#ACT_GPU = "1" # if using nas and/or high res input use different GPUs for each process
T = 32 # Timesteps
HOST = ''
PORT = 8089
# separate process definitions
# frame reader
def read_frames(conn, frame_q, use_webcam):
if use_webcam:
time.sleep(15)
frame_cnt = 0
while True:
#if frame_cnt % 5 == 0:
# ret, frame = reader.read()
# cur_img = frame[:,:,::-1]
# frame_q.put(cur_img)
#else:
# ret, frame = reader.read()
data_size = struct.unpack("!I", recv_n_bytes(conn, 4))[0]
data = recv_n_bytes(conn, data_size)
np_data = np.frombuffer(data, dtype=np.uint8)
frame = cv2.imdecode(np_data, cv2.IMREAD_COLOR)
cur_img = frame[:,:,::-1] # bgr to rgb from opencv reader
if frame_q.full():
try:
frame_q.get_nowait()
except Empty as e:
pass
frame_q.put(cur_img)
# if frame_q.qsize() > 100:
# time.sleep(1)
# else:
# time.sleep(DELAY/1000.)
#print(cur_img.shape)
else:
#for cur_img in reader: # this is imageio reader, it uses rgb
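        # NOTE: this branch assumes a module-level imageio 'reader' object; the code
        # that used to create it in main() is currently commented out, so only the
        # socket-fed path above is exercised as the script stands.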
nframes = reader.get_length()
for ii in range(nframes):
while frame_q.qsize() > 500: # so that we dont use huge amounts of memory
time.sleep(1)
cur_img = reader.get_next_data()
if frame_q.full():
frame_q.get_nowait()
frame_q.put(cur_img)
#shape = cur_img.shape
#noisy_img = np.uint8(cur_img.astype(np.float) + np.random.randn(*shape) * 20)
#frame_q.put(noisy_img)
if ii % 100 == 0:
print("%i / %i frames in queue" % (ii, nframes))
print("All %i frames in queue" % (nframes))
# # object detector and tracker
# def run_obj_det_and_track(frame_q, detection_q, det_vis_q):
# import tensorflow as tf # there is a bug. if you dont import tensorflow within the process you cant use the same gpus for both processes.
# os.environ['CUDA_VISIBLE_DEVICES'] = OBJ_GPU
# main_folder = "./"
# ## Best
# # obj_detection_graph = os.path.join(main_folder, 'object_detection/weights/batched_zoo/faster_rcnn_nas_coco_2018_01_28/batched_graph/frozen_inference_graph.pb')
# ## Good and Faster
# #obj_detection_graph = os.path.join(main_folder, 'object_detection/weights/batched_zoo/faster_rcnn_nas_lowproposals_coco_2018_01_28/batched_graph/frozen_inference_graph.pb')
# print("Loading object detection model at %s" % obj_detection_graph)
# obj_detector = obj.Object_Detector(obj_detection_graph)
# tracker = obj.Tracker()
# while True:
# cur_img = frame_q.get()
# expanded_img = np.expand_dims(cur_img, axis=0)
# detection_list = obj_detector.detect_objects_in_np(expanded_img)
# detection_info = [info[0] for info in detection_list]
# tracker.update_tracker(detection_info, cur_img)
# rois_np, temporal_rois_np = tracker.generate_all_rois()
# actors_snapshot = []
# for cur_actor in tracker.active_actors:
# act_id = cur_actor['actor_id']
# act_box = cur_actor['all_boxes'][-1][:]
# act_score = cur_actor['all_scores'][-1]
# actors_snapshot.append({'actor_id':act_id, 'all_boxes':[act_box], 'all_scores':[act_score]})
# #print(len(actors_snapshot))
# #if actors_snapshot:
# # detection_q.put([cur_img, actors_snapshot, rois_np, temporal_rois_np])
# # det_vis_q.put([cur_img, actors_snapshot])
# detection_q.put([cur_img, actors_snapshot, rois_np, temporal_rois_np])
# det_vis_q.put([cur_img, actors_snapshot])
def run_obj_det_and_track_in_batches(frame_q, detection_q, det_vis_q, obj_batch_size, obj_gpu):
    import tensorflow as tf # note: TensorFlow must be imported inside each process; otherwise the processes can't share the same GPUs.
os.environ['CUDA_VISIBLE_DEVICES'] = obj_gpu
main_folder = "./"
obj_detection_graph = "./object_detection/weights/ssd_mobilenet_v2_coco_2018_03_29/frozen_inference_graph.pb"
#obj_detection_graph = "./object_detection/weights/faster_rcnn_resnet101_coco_2018_01_28/frozen_inference_graph.pb"
print("Loading object detection model at %s" % obj_detection_graph)
obj_detector = obj.Object_Detector(obj_detection_graph)
tracker = obj.Tracker(timesteps=T)
while True:
img_batch = []
for _ in range(obj_batch_size):
cur_img = frame_q.get()
img_batch.append(cur_img)
#expanded_img = np.expand_dims(cur_img, axis=0)
expanded_img = np.stack(img_batch, axis=0)
start_time = time.time()
detection_list = obj_detector.detect_objects_in_np(expanded_img)
end_time = time.time()
print("%.3f second per image" % ((end_time-start_time) / float(obj_batch_size)) )
for ii in range(obj_batch_size):
cur_img = img_batch[ii]
detection_info = [info[ii] for info in detection_list]
tracker.update_tracker(detection_info, cur_img)
rois_np, temporal_rois_np = tracker.generate_all_rois()
actors_snapshot = []
for cur_actor in tracker.active_actors:
act_id = cur_actor['actor_id']
act_box = cur_actor['all_boxes'][-1][:]
act_score = cur_actor['all_scores'][-1]
actors_snapshot.append({'actor_id':act_id, 'all_boxes':[act_box], 'all_scores':[act_score]})
#print(len(actors_snapshot))
#if actors_snapshot:
# detection_q.put([cur_img, actors_snapshot, rois_np, temporal_rois_np])
# det_vis_q.put([cur_img, actors_snapshot])
detection_q.put([cur_img, actors_snapshot, rois_np, temporal_rois_np])
det_vis_q.put([cur_img, actors_snapshot])
# Action detector
def run_act_detector(shape, detection_q, actions_q, act_gpu):
    import tensorflow as tf # note: TensorFlow must be imported inside each process; otherwise the processes can't share the same GPUs.
os.environ['CUDA_VISIBLE_DEVICES'] = act_gpu
# act_detector = act.Action_Detector('i3d_tail')
# ckpt_name = 'model_ckpt_RGB_i3d_pooled_tail-4'
act_detector = act.Action_Detector('soft_attn', timesteps=T)
#ckpt_name = 'model_ckpt_RGB_soft_attn-16'
#ckpt_name = 'model_ckpt_soft_attn_ava-23'
#ckpt_name = 'model_ckpt_soft_attn_pooled_ava-52'
ckpt_name = 'model_ckpt_soft_attn_pooled_cosine_drop_ava-130'
main_folder = "./"
ckpt_path = os.path.join(main_folder, 'action_detection', 'weights', ckpt_name)
#input_frames, temporal_rois, temporal_roi_batch_indices, cropped_frames = act_detector.crop_tubes_in_tf([T,H,W,3])
memory_size = act_detector.timesteps - ACTION_FREQ
updated_frames, temporal_rois, temporal_roi_batch_indices, cropped_frames = act_detector.crop_tubes_in_tf_with_memory(shape, memory_size)
rois, roi_batch_indices, pred_probs = act_detector.define_inference_with_placeholders_noinput(cropped_frames)
act_detector.restore_model(ckpt_path)
processed_frames_cnt = 0
while True:
images = []
for _ in range(ACTION_FREQ):
cur_img, active_actors, rois_np, temporal_rois_np = detection_q.get()
images.append(cur_img)
#print("action frame: %i" % len(images))
if not active_actors:
prob_dict = {}
if SHOW_CAMS:
prob_dict = {"cams": visualize_cams({})}
else:
# use the last active actors and rois vectors
no_actors = len(active_actors)
cur_input_sequence = np.expand_dims(np.stack(images, axis=0), axis=0)
if no_actors > 14:
no_actors = 14
rois_np = rois_np[:14]
temporal_rois_np = temporal_rois_np[:14]
active_actors = active_actors[:14]
#feed_dict = {input_frames:cur_input_sequence,
feed_dict = {updated_frames:cur_input_sequence, # only update last #action_freq frames
temporal_rois: temporal_rois_np,
temporal_roi_batch_indices: np.zeros(no_actors),
rois:rois_np,
roi_batch_indices:np.arange(no_actors)}
run_dict = {'pred_probs': pred_probs}
if SHOW_CAMS:
run_dict['cropped_frames'] = cropped_frames
#import pdb;pdb.set_trace()
run_dict['final_i3d_feats'] = act_detector.act_graph.get_collection('final_i3d_feats')[0]
#run_dict['cls_weights'] = [var for var in tf.global_variables() if var.name == "CLS_Logits/kernel:0"][0]
run_dict['cls_weights'] = act_detector.act_graph.get_collection('variables')[-2] # this is the kernel
out_dict = act_detector.session.run(run_dict, feed_dict=feed_dict)
probs = out_dict['pred_probs']
if not SHOW_CAMS:
# associate probs with actor ids
print_top_k = 5
prob_dict = {}
for bb in range(no_actors):
act_probs = probs[bb]
order = np.argsort(act_probs)[::-1]
cur_actor_id = active_actors[bb]['actor_id']
print("Person %i" % cur_actor_id)
cur_results = []
for pp in range(print_top_k):
print('\t %s: %.3f' % (act.ACTION_STRINGS[order[pp]], act_probs[order[pp]]))
cur_results.append((act.ACTION_STRINGS[order[pp]], act_probs[order[pp]]))
prob_dict[cur_actor_id] = cur_results
else:
# prob_dict = out_dict
prob_dict = {"cams": visualize_cams(out_dict)} # do it here so it doesnt slow down visualization process
processed_frames_cnt += ACTION_FREQ # each turn we process this many frames
if processed_frames_cnt >= act_detector.timesteps / 2:
# we are doing this so we can skip the initialization period
# first frame needs timesteps / 2 frames to be processed before visualizing
actions_q.put(prob_dict)
#print(prob_dict.keys())
# Visualization
def run_visualization(conn, det_vis_q, actions_q, display):
frame_cnt = 0
# prob_dict = actions_q.get() # skip the first one
durations = []
fps_message = "FPS: 0"
while True:
start_time = time.time()
cur_img, active_actors = det_vis_q.get()
#print(len(active_actors))
if frame_cnt % ACTION_FREQ == 0:
prob_dict = actions_q.get()
if not SHOW_CAMS:
out_img = visualize_detection_results(cur_img, active_actors, prob_dict)
else:
# out_img = visualize_cams(cur_img, prob_dict)
img_to_concat = prob_dict["cams"] #if "cams" in prob_dict else np.zeros((400, 400, 3), np.uint8)
image = cur_img
img_new_height = 400
img_new_width = int(image.shape[1] / float(image.shape[0]) * img_new_height)
img_to_show = cv2.resize(image.copy(), (img_new_width,img_new_height))[:,:,::-1]
out_img = np.array(np.concatenate([img_to_show, img_to_concat], axis=1)[:,:,::-1])
# if display:
# cv2.putText(out_img, fps_message, (25, 25), 0, 1, (255,0,0), 1)
# cv2.imshow('results', out_img[:,:,::-1])
# cv2.waitKey(DELAY//2)
# #cv2.waitKey(1)
# #else:
# writer.append_data(out_img)
cv2.putText(out_img, fps_message, (25, 25), 0, 1, (255,0,0), 1)
ret, jpeg_frame=cv2.imencode('.jpg', out_img[:,:,::-1])
jpeg_frame_str = jpeg_frame.tostring()
data_size = struct.pack("!I", len(jpeg_frame_str))
conn.send(data_size)
conn.sendall(jpeg_frame_str)
frame_cnt += 1
# FPS info
end_time = time.time()
duration = end_time - start_time
durations.append(duration)
if len(durations) > 32: del durations[0]
if frame_cnt % 16 == 0 :
print("avg time per frame: %.3f" % np.mean(durations))
fps_message = "FPS: %i" % int(1 / np.mean(durations))
def recv_n_bytes(conn, n):
""" Convenience method for receiving exactly n bytes from
socket (assuming it's open and connected).
"""
# based on https://docs.python.org/3.4/howto/sockets.html
chunks = []
bytes_recd = 0
while bytes_recd < n:
chunk = conn.recv(n - bytes_recd)
if chunk == b'':
raise RuntimeError("socket connection broken")
chunks.append(chunk)
bytes_recd += len(chunk)
return b''.join(chunks)
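# Framing used on the socket in both directions, as implemented by read_frames()
# and run_visualization() above: a 4-byte big-endian length prefix (struct "!I")
# followed by a JPEG-encoded frame. A client is therefore expected to send frames
# roughly like (sketch; the client-side socket name is illustrative):
#   ok, jpeg = cv2.imencode('.jpg', frame_bgr)
#   payload = jpeg.tostring()
#   sock.sendall(struct.pack("!I", len(payload)) + payload)
# and to read the annotated frames back using the same length-prefixed framing.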
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--video_path', type=str, required=False, default="", help="The path to the video and if it is not provided, webcam will be used.")
parser.add_argument('-d', '--display', type=str, required=False, default="True",help="The display flag where the results will be visualized using OpenCV.")
parser.add_argument('-b', '--obj_batch_size', type=int, required=False, default=16, help="Batch size for the object detector. Depending on the model used and gpu memory size, this should be changed.")
parser.add_argument('-o', '--obj_gpu', type=str, required=False, default="0", help="Which GPU to use for object detector. Uses CUDA_VISIBLE_DEVICES environment var. Could be the same with action detector but in that case obj batch size should be reduced.")
parser.add_argument('-a', '--act_gpu', type=str, required=False, default="0", help="Which GPU to use for action detector. Uses CUDA_VISIBLE_DEVICES environment var. Could be the same with object detector but in that case obj batch size should be reduced.")
args = parser.parse_args()
use_webcam = args.video_path == ""
display = (args.display == "True" or args.display == "true")
obj_batch_size = args.obj_batch_size
obj_gpu = args.obj_gpu
act_gpu = args.act_gpu
#actor_to_display = 6 # for cams
video_path = args.video_path
basename = os.path.basename(video_path).split('.')[0]
#out_vid_path = "./output_videos/%s_output.mp4" % (basename if not SHOW_CAMS else basename+'_cams_actor_%.2d' % actor_to_display)
out_vid_path = "./output_videos/%s_output.mp4" % basename
out_vid_path = out_vid_path if not use_webcam else './output_videos/webcam_output.mp4'
# video_path = "./tests/chase1Person1View3Point0.mp4"
# out_vid_path = 'output.mp4'
main_folder = './'
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(10)
print('Ready for client connection')
conn,addr=s.accept()
print('Client connected')
# if use_webcam:
# print("Using webcam")
# reader = cv2.VideoCapture(0)
# ## We can set the input shape from webcam, I use the default 640x480 to achieve real-time
# #reader.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
# #reader.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
# ret, frame = reader.read()
# if ret:
# H,W,C = frame.shape
# else:
# H = 480
# W = 640
# fps = 1000//DELAY
# else:
# print("Reading video file %s" % video_path)
# reader = imageio.get_reader(video_path, 'ffmpeg')
# fps = reader.get_meta_data()['fps'] #// fps_divider
# W, H = reader.get_meta_data()['size']
# #T = tracker.timesteps
data_size = struct.unpack("!I", recv_n_bytes(conn, 4))[0]
data = recv_n_bytes(conn, data_size)
np_data = np.frombuffer(data, dtype=np.uint8)
frame = cv2.imdecode(np_data, cv2.IMREAD_COLOR)
H,W,C = frame.shape
print("H: %i, W: %i" % (H, W))
#T = 32
# fps_divider = 1
print('Running actions every %i frame' % ACTION_FREQ)
# writer = imageio.get_writer(out_vid_path, fps=fps)
# print("Writing output to %s" % out_vid_path)
shape = [T,H,W,3]
frame_q = Queue(15)
detection_q = Queue()
det_vis_q = Queue()
actions_q = Queue()
frame_reader_p = Process(target=read_frames, args=(conn, frame_q, use_webcam))
#obj_detector_p = Process(target=run_obj_det_and_track, args=(frame_q, detection_q, det_vis_q))
obj_detector_p = Process(target=run_obj_det_and_track_in_batches, args=(frame_q, detection_q, det_vis_q, obj_batch_size, obj_gpu))
action_detector_p = Process(target=run_act_detector, args=(shape, detection_q, actions_q, act_gpu))
visualization_p = Process(target=run_visualization, args=(conn, det_vis_q, actions_q, display))
processes = [frame_reader_p, obj_detector_p, action_detector_p, visualization_p]
for process in processes:
process.daemon = True
process.start()
if use_webcam:
while True:
time.sleep(1)
print("frame_q: %i, obj_q: %i, act_q: %i, vis_q: %i" % (frame_q.qsize(), detection_q.qsize(), actions_q.qsize(), det_vis_q.qsize()))
else:
time.sleep(5)
while True:
time.sleep(1)
print("frame_q: %i, obj_q: %i, act_q: %i, vis_q: %i" % (frame_q.qsize(), detection_q.qsize(), actions_q.qsize(), det_vis_q.qsize()))
if frame_q.qsize() == 0 and detection_q.qsize() == 0 and actions_q.qsize() == 0: # if all the queues are empty, we are done
break
print("Done!")
np.random.seed(10)
COLORS = np.random.randint(0, 100, [1000, 3]) # get darker colors for bboxes and use white text
def visualize_detection_results(img_np, active_actors, prob_dict):
#score_th = 0.30
action_th = 0.20
# copy the original image first
disp_img = np.copy(img_np)
H, W, C = img_np.shape
#for ii in range(len(active_actors)):
for ii in range(len(active_actors)):
cur_actor = active_actors[ii]
actor_id = cur_actor['actor_id']
cur_act_results = prob_dict[actor_id] if actor_id in prob_dict else []
cur_box, cur_score, cur_class = cur_actor['all_boxes'][-1], cur_actor['all_scores'][-1], 1
#if cur_score < score_th:
# continue
top, left, bottom, right = cur_box
left = int(W * left)
right = int(W * right)
top = int(H * top)
bottom = int(H * bottom)
conf = cur_score
#label = bbox['class_str']
# label = 'Class_%i' % cur_class
label = obj.OBJECT_STRINGS[cur_class]['name']
message = '%s_%i: %% %.2f' % (label, actor_id,conf)
action_message_list = ["%s:%.3f" % (actres[0][:20], actres[1]) for actres in cur_act_results if actres[1]>action_th]
# action_message = " ".join(action_message_list)
raw_colors = COLORS[actor_id]
rect_color = tuple(int(raw_color) for raw_color in raw_colors)
text_color = tuple(255-color_value for color_value in rect_color)
cv2.rectangle(disp_img, (left,top), (right,bottom), rect_color, 3)
font_size = max(0.5,(right - left)/50.0/float(len(message)))
cv2.rectangle(disp_img, (left, top-int(font_size*40)), (right,top), rect_color, -1)
cv2.putText(disp_img, message, (left, top-12), 0, font_size, text_color, 1)
#action message writing
cv2.rectangle(disp_img, (left, top), (right,top+10*len(action_message_list)), rect_color, -1)
for aa, action_message in enumerate(action_message_list):
offset = aa*10
cv2.putText(disp_img, action_message, (left, top+5+offset), 0, 0.5, text_color, 1)
return disp_img
#def visualize_cams(image, out_dict):#, actor_idx):
def visualize_cams(out_dict):#, actor_idx):
# img_new_height = 400
# img_new_width = int(image.shape[1] / float(image.shape[0]) * img_new_height)
# img_to_show = cv2.resize(image.copy(), (img_new_width,img_new_height))[:,:,::-1]
##img_to_concat = np.zeros((400, 800, 3), np.uint8)
#img_to_concat = np.zeros((400, 400, 3), np.uint8)
if len(CAM_CLASSES) < 4:
w = 400
else:
w = 900
img_to_concat = np.zeros((400, w, 3), np.uint8)
if out_dict:
actor_idx = 0
action_classes = [cc for cc in range(60) if any([cname in act.ACTION_STRINGS[cc] for cname in CAM_CLASSES])]
feature_activations = out_dict['final_i3d_feats']
cls_weights = out_dict['cls_weights']
input_frames = out_dict['cropped_frames'].astype(np.uint8)
probs = out_dict["pred_probs"]
class_maps = np.matmul(feature_activations, cls_weights)
#min_val = np.min(class_maps[:,:, :, :, :])
#max_val = np.max(class_maps[:,:, :, :, :]) - min_val
min_val = -200.
max_val = 300.
normalized_cmaps = (class_maps-min_val)/max_val * 255.
normalized_cmaps[normalized_cmaps>255] = 255
normalized_cmaps[normalized_cmaps<0] = 0
normalized_cmaps = np.uint8(normalized_cmaps)
#normalized_cmaps = np.uint8((class_maps-min_val)/max_val * 255.)
t_feats = feature_activations.shape[1]
t_input = input_frames.shape[1]
index_diff = (t_input) // (t_feats+1)
for cc in range(len(action_classes)):
cur_cls_idx = action_classes[cc]
act_str = act.ACTION_STRINGS[action_classes[cc]]
message = "%s:%%%.2d" % (act_str[:20], 100*probs[actor_idx, cur_cls_idx])
for tt in range(t_feats):
cur_cam = normalized_cmaps[actor_idx, tt,:,:, cur_cls_idx]
cur_frame = input_frames[actor_idx, (tt+1) * index_diff, :,:,::-1]
resized_cam = cv2.resize(cur_cam, (100,100))
colored_cam = cv2.applyColorMap(resized_cam, cv2.COLORMAP_JET)
overlay = cv2.resize(cur_frame.copy(), (100,100))
overlay = cv2.addWeighted(overlay, 0.5, colored_cam, 0.5, 0)
if cc > 2:
xx = tt + 5 # 4 timesteps visualized per class + 1 empty space
yy = cc - 3 # 3 classes per column
else:
xx = tt
yy = cc
img_to_concat[yy*125:yy*125+100, xx*100:(xx+1)*100, :] = overlay
cv2.putText(img_to_concat, message, (20+int(cc>2)*500, 13+100+125*yy), 0, 0.5, (255,255,255), 1)
return img_to_concat
#final_image = np.concatenate([img_to_show, img_to_concat], axis=1)
#return np.array(final_image[:,:,::-1])
#return final_image
if __name__ == '__main__':
main()
|
bouncing_ball_v2.py
|
import pygame
import threading
from empty_display import EmptyDisplay
import lib.colors as Color
WIDTH = 0
HEIGHT = 1
class Ball(pygame.sprite.Sprite):
def __init__(self,
color,
width,
height,
initial_x_coordinate,
initial_y_coordinate):
super().__init__()
self.color = color
self.width = width
self.height = height
self.image = pygame.Surface([width, height])
self.image.fill(Color.black)
self.image.set_colorkey(Color.black)
self.rect = self.image.get_rect()
pygame.draw.rect(self.image,
color,
[self.rect.x, self.rect.y, width, height])
self.rect.x = initial_x_coordinate
self.rect.y = initial_y_coordinate
        self.x_direction_step = 2  # Move right, two pixels per update
        self.y_direction_step = 2  # Move down, two pixels per update
def horizontal_rebound(self):
self.x_direction_step = -self.x_direction_step
def vertical_rebound(self):
self.y_direction_step = -self.y_direction_step
def update(self):
display_width = 800
display_height = 600
self.rect.x += self.x_direction_step
self.rect.y += self.y_direction_step
if (self.rect.x + self.width) > display_width:
self.rect.x = display_width - self.width - 1
self.horizontal_rebound()
elif self.rect.x < 0:
self.rect.x = 0
self.horizontal_rebound()
if (self.rect.y + self.height) > display_height:
self.rect.y = display_height - self.height - 1
self.vertical_rebound()
elif self.rect.y < 0:
self.rect.y = 0
self.vertical_rebound()
class BouncingBall(EmptyDisplay):
def __init__(self,
width = 800,
height = 600,
caption = "A bouncing ball of size 16x16"):
super().__init__(width, height, caption)
self.running = True
self.ball_width = 16
self.ball_height = 16
self.initial_x_coordinate = self.display_size[WIDTH]//2 - self.ball_width//2
self.initial_y_coordinate = 3*self.display_size[HEIGHT]//4 - self.ball_height//2
self.ball_color = Color.white
self.ball = Ball(
color = self.ball_color,
width = self.ball_width,
height = self.ball_height,
initial_x_coordinate = self.initial_x_coordinate,
initial_y_coordinate = self.initial_y_coordinate)
self.all_sprites_list = pygame.sprite.Group()
self.all_sprites_list.add(self.ball)
    # To remove (unused; draw_frame below renders the sprites instead)
def update_frame(self):
#display_width = self.display_size[WIDTH]
#display_height = self.display_size[HEIGHT]
self.display.fill(Color.black)
#pygame.draw.rect(
# self.display,
# self.ball_color, (
# self.ball.rect.x,
# self.ball.rect.y,
# self.ball.width,
# self.ball.height))
#self.all_sprites_list.update()
self.all_sprites_list.draw(self.display)
pygame.display.update()
def process_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
def draw_frame(self):
clock = pygame.time.Clock()
while self.running:
self.display.fill(Color.black)
self.all_sprites_list.draw(self.display)
pygame.display.update()
self.process_events()
clock.tick(60)
print(f"UPS={self.UPS:03.2f}")
def run_model(self):
clock = pygame.time.Clock()
while self.running:
self.all_sprites_list.update()
clock.tick(1000)
self.UPS = clock.get_fps()
def run(self):
#self.ball.rect.x = self.initial_x_coordinate
#self.ball.rect.y = self.initial_y_coordinate
#self.x_direction_step = 1 # Go to right, one pixel
#self.y_direction_step = 1 # Go to bottom, one pixel
self.draw_frame__thread = threading.Thread(target = self.draw_frame)
self.draw_frame__thread.start()
#while self.running:
# self.update_frame()
# self.process_events()
self.run_model()
self.draw_frame__thread.join()
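# --- Hedged sketch (not part of the original module) ----------------------------
# empty_display.EmptyDisplay and lib.colors are imported above but are not part
# of this listing. Judging only from how they are used here (a width/height/caption
# constructor, self.display, self.display_size, and black/white color tuples), a
# minimal stand-in might look like _EmptyDisplaySketch below; the real classes may differ.
class _EmptyDisplaySketch:
    def __init__(self, width, height, caption):
        pygame.init()
        self.display_size = (width, height)                         # indexed via WIDTH/HEIGHT above
        self.display = pygame.display.set_mode(self.display_size)   # Surface used by fill()/draw()
        pygame.display.set_caption(caption)
# lib.colors would only need module-level tuples such as:
# black = (0, 0, 0)
# white = (255, 255, 255)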
if __name__ == "__main__":
display = BouncingBall()
display.run()
|
_optimize.py
|
from concurrent.futures import FIRST_COMPLETED
from concurrent.futures import Future
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import wait
import copy
import datetime
import gc
import itertools
import math
import os
import sys
from threading import Event
from threading import Thread
from typing import Any
from typing import Callable
from typing import cast
from typing import List
from typing import Optional
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Type
from typing import Union
import warnings
import optuna
from optuna import exceptions
from optuna import logging
from optuna import progress_bar as pbar_module
from optuna import storages
from optuna import trial as trial_module
from optuna.trial import FrozenTrial
from optuna.trial import TrialState
_logger = logging.get_logger(__name__)
def _optimize(
study: "optuna.Study",
func: "optuna.study.study.ObjectiveFuncType",
n_trials: Optional[int] = None,
timeout: Optional[float] = None,
n_jobs: int = 1,
catch: Tuple[Type[Exception], ...] = (),
callbacks: Optional[List[Callable[["optuna.Study", FrozenTrial], None]]] = None,
gc_after_trial: bool = False,
show_progress_bar: bool = False,
) -> None:
if not isinstance(catch, tuple):
raise TypeError(
"The catch argument is of type '{}' but must be a tuple.".format(type(catch).__name__)
)
if not study._optimize_lock.acquire(False):
raise RuntimeError("Nested invocation of `Study.optimize` method isn't allowed.")
if show_progress_bar and n_trials is None and timeout is not None and n_jobs != 1:
warnings.warn("The timeout-based progress bar is not supported with n_jobs != 1.")
show_progress_bar = False
progress_bar = pbar_module._ProgressBar(show_progress_bar, n_trials, timeout)
study._stop_flag = False
try:
if n_jobs == 1:
_optimize_sequential(
study,
func,
n_trials,
timeout,
catch,
callbacks,
gc_after_trial,
reseed_sampler_rng=False,
time_start=None,
progress_bar=progress_bar,
)
else:
if n_jobs == -1:
n_jobs = os.cpu_count() or 1
time_start = datetime.datetime.now()
futures: Set[Future] = set()
with ThreadPoolExecutor(max_workers=n_jobs) as executor:
for n_submitted_trials in itertools.count():
if study._stop_flag:
break
if (
timeout is not None
and (datetime.datetime.now() - time_start).total_seconds() > timeout
):
break
if n_trials is not None and n_submitted_trials >= n_trials:
break
if len(futures) >= n_jobs:
completed, futures = wait(futures, return_when=FIRST_COMPLETED)
# Raise if exception occurred in executing the completed futures.
for f in completed:
f.result()
futures.add(
executor.submit(
_optimize_sequential,
study,
func,
1,
timeout,
catch,
callbacks,
gc_after_trial,
True,
time_start,
progress_bar,
)
)
finally:
study._optimize_lock.release()
progress_bar.close()
def _optimize_sequential(
study: "optuna.Study",
func: "optuna.study.study.ObjectiveFuncType",
n_trials: Optional[int],
timeout: Optional[float],
catch: Tuple[Type[Exception], ...],
callbacks: Optional[List[Callable[["optuna.Study", FrozenTrial], None]]],
gc_after_trial: bool,
reseed_sampler_rng: bool,
time_start: Optional[datetime.datetime],
progress_bar: Optional[pbar_module._ProgressBar],
) -> None:
if reseed_sampler_rng:
study.sampler.reseed_rng()
i_trial = 0
if time_start is None:
time_start = datetime.datetime.now()
while True:
if study._stop_flag:
break
if n_trials is not None:
if i_trial >= n_trials:
break
i_trial += 1
if timeout is not None:
elapsed_seconds = (datetime.datetime.now() - time_start).total_seconds()
if elapsed_seconds >= timeout:
break
try:
trial = _run_trial(study, func, catch)
except Exception:
raise
finally:
            # The following line mitigates memory problems that can occur in some
            # environments (e.g., services that use computing containers such as CircleCI).
# Please refer to the following PR for further details:
# https://github.com/optuna/optuna/pull/325.
if gc_after_trial:
gc.collect()
if callbacks is not None:
frozen_trial = copy.deepcopy(study._storage.get_trial(trial._trial_id))
for callback in callbacks:
callback(study, frozen_trial)
if progress_bar is not None:
progress_bar.update((datetime.datetime.now() - time_start).total_seconds())
study._storage.remove_session()
def _run_trial(
study: "optuna.Study",
func: "optuna.study.study.ObjectiveFuncType",
catch: Tuple[Type[Exception], ...],
) -> trial_module.Trial:
if study._storage.is_heartbeat_enabled():
optuna.storages.fail_stale_trials(study)
trial = study.ask()
state: Optional[TrialState] = None
values: Optional[List[float]] = None
func_err: Optional[Exception] = None
func_err_fail_exc_info: Optional[Any] = None
# Set to a string if `func` returns correctly but the return value violates assumptions.
values_conversion_failure_message: Optional[str] = None
stop_event: Optional[Event] = None
thread: Optional[Thread] = None
if study._storage.is_heartbeat_enabled():
stop_event = Event()
thread = Thread(
target=_record_heartbeat, args=(trial._trial_id, study._storage, stop_event)
)
thread.start()
try:
value_or_values = func(trial)
except exceptions.TrialPruned as e:
# TODO(mamu): Handle multi-objective cases.
state = TrialState.PRUNED
func_err = e
except Exception as e:
state = TrialState.FAIL
func_err = e
func_err_fail_exc_info = sys.exc_info()
else:
# TODO(hvy): Avoid checking the values both here and inside `Study.tell`.
values, values_conversion_failure_message = _check_and_convert_to_values(
len(study.directions), value_or_values, trial.number
)
if values_conversion_failure_message is not None:
state = TrialState.FAIL
else:
state = TrialState.COMPLETE
if study._storage.is_heartbeat_enabled():
assert stop_event is not None
assert thread is not None
stop_event.set()
thread.join()
# `Study.tell` may raise during trial post-processing.
try:
study.tell(trial, values=values, state=state)
except Exception:
raise
finally:
if state == TrialState.COMPLETE:
study._log_completed_trial(trial, cast(List[float], values))
elif state == TrialState.PRUNED:
_logger.info("Trial {} pruned. {}".format(trial.number, str(func_err)))
elif state == TrialState.FAIL:
if func_err is not None:
_logger.warning(
"Trial {} failed because of the following error: {}".format(
trial.number, repr(func_err)
),
exc_info=func_err_fail_exc_info,
)
elif values_conversion_failure_message is not None:
_logger.warning(values_conversion_failure_message)
else:
assert False, "Should not reach."
else:
assert False, "Should not reach."
if state == TrialState.FAIL and func_err is not None and not isinstance(func_err, catch):
raise func_err
return trial
def _check_and_convert_to_values(
n_objectives: int, original_value: Union[float, Sequence[float]], trial_number: int
) -> Tuple[Optional[List[float]], Optional[str]]:
if isinstance(original_value, Sequence):
if n_objectives != len(original_value):
return (
None,
(
f"Trial {trial_number} failed, because the number of the values "
f"{len(original_value)} did not match the number of the objectives "
f"{n_objectives}."
),
)
else:
_original_values = list(original_value)
else:
_original_values = [original_value]
_checked_values = []
for v in _original_values:
checked_v, failure_message = _check_single_value(v, trial_number)
if failure_message is not None:
# TODO(Imamura): Construct error message taking into account all values and do not
# early return
# `value` is assumed to be ignored on failure so we can set it to any value.
return None, failure_message
elif isinstance(checked_v, float):
_checked_values.append(checked_v)
else:
assert False
return _checked_values, None
def _check_single_value(
original_value: float, trial_number: int
) -> Tuple[Optional[float], Optional[str]]:
value = None
failure_message = None
try:
value = float(original_value)
except (
ValueError,
TypeError,
):
failure_message = (
f"Trial {trial_number} failed, because the value {repr(original_value)} could not be "
"cast to float."
)
if value is not None and math.isnan(value):
value = None
failure_message = (
f"Trial {trial_number} failed, because the objective function returned "
f"{original_value}."
)
return value, failure_message
def _record_heartbeat(trial_id: int, storage: storages.BaseStorage, stop_event: Event) -> None:
heartbeat_interval = storage.get_heartbeat_interval()
assert heartbeat_interval is not None
while True:
storage.record_heartbeat(trial_id)
if stop_event.wait(timeout=heartbeat_interval):
return
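# --- Hedged usage example (not part of the original module) ---------------------
# _optimize() above is the internal worker behind the public Study.optimize call.
# The sketch below exercises the same n_trials / n_jobs / catch / callbacks /
# gc_after_trial arguments that are threaded through _optimize_sequential and
# _run_trial; the objective and callback are illustrative only.
def _demo_study_optimize() -> dict:
    def objective(trial: trial_module.Trial) -> float:
        x = trial.suggest_float("x", -10.0, 10.0)
        return (x - 2.0) ** 2

    def log_best(study: "optuna.Study", frozen: FrozenTrial) -> None:
        print("trial {} done; best value so far: {}".format(frozen.number, study.best_value))

    study = optuna.create_study(direction="minimize")
    study.optimize(
        objective,
        n_trials=20,          # enforced by the trial-count checks in the loops above
        n_jobs=2,             # routes through the ThreadPoolExecutor branch of _optimize
        catch=(ValueError,),  # exceptions of these types mark the trial FAIL without re-raising
        callbacks=[log_best], # each callback receives the study and a FrozenTrial copy
        gc_after_trial=False,
    )
    return study.best_params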
|
functional.py
|
import argparse
import time
import uuid
from multiprocessing import Process, Queue
from unittest import mock
from django_sql_sniffer import listener
def test_end_2_end():
query_queue = Queue()
# define dummy method which will utilize Django DB cursor in the target process
def dummy_query_executor():
from django.db import connection
cursor = connection.cursor()
while True:
query = query_queue.get()
try:
cursor.execute(query)
            except Exception:
                pass  # there are no tables, so the random UUID "queries" are expected to fail
# start the target process
tp = Process(name="target_process", target=dummy_query_executor)
tp.start()
# start the listener thread
with mock.patch('argparse.ArgumentParser.parse_args', return_value=argparse.Namespace(pid=tp.pid, tail=False, verbose=False, sum=False, count=False, number=3)):
parser = argparse.ArgumentParser()
args = parser.parse_args()
lt = listener.DjangoSQLListener(args)
lt.start()
# execute dummy queries
queries = []
for i in range(100):
query = str(uuid.uuid4())
queries.append(query)
query_queue.put(query)
# check listener configured properly
assert lt._target_pid == args.pid
assert lt._verbose == args.verbose
assert lt.analyzer._tail == args.tail
assert lt.analyzer._top == args.number
assert lt.analyzer._by_sum == args.sum
assert lt.analyzer._by_count == args.count
time.sleep(4)
tp.kill()
# check all queries captured
for query in queries:
assert query in lt.analyzer._executed_queries
# check that listener shuts down once it detects the target process is not alive
time.sleep(4)
assert not lt.is_alive()
|
dnstransfer.py
|
import os
import re
import threading
import lib.common
import lib.urlentity
MODULE_NAME = 'dnstransfer'
global dns_transfer_is_vul
def init():
global dns_transfer_is_vul
dns_transfer_is_vul = False
def transfer_try(domain, dns):
global dns_transfer_is_vul
subdomain = os.popen("dig @%s %s axfr" % (dns, domain)).read()
if subdomain.find('Transfer failed') == -1 and subdomain.find('timed out') == -1 and subdomain.find(
'not found') == -1 and subdomain.find('XFR size') > 0:
dns_transfer_is_vul = True
def dns_retrieve(domain):
dig = os.popen("dig ns %s" % domain).read()
dns_list = re.findall(r'NS\t(.*?).\n', dig)
return dns_list
def domain_retrieve(url):
url_obj = lib.urlentity.URLEntity(url)
domains = '.'.join(url_obj.get_hostname().split('.')[1:])
return domains
def run(url):
global dns_transfer_is_vul
domain = domain_retrieve(url=lib.common.SOURCE_URL)
dns_list = dns_retrieve(domain=domain)
tasks = []
for dns in dns_list:
t = threading.Thread(target=transfer_try, args=(domain, dns))
tasks.append(t)
t.setDaemon(True)
t.start()
for _t in tasks:
_t.join()
if dns_transfer_is_vul:
lib.common.RESULT_DICT[MODULE_NAME].append('Potential DNS Transfer Vulnerability Detected!')
lib.common.ALIVE_LINE[MODULE_NAME] += 1
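# --- Hedged usage sketch (not part of the original module) ----------------------
# run() depends on three globals from lib.common that are not shown in this
# listing: SOURCE_URL (the scan target; note run() reads it rather than its own
# `url` argument), plus RESULT_DICT and ALIVE_LINE, both keyed by module name.
# A minimal driver, assuming they are plain dicts:
def _demo_run():
    lib.common.SOURCE_URL = 'http://www.example.com/'
    lib.common.RESULT_DICT = {MODULE_NAME: []}
    lib.common.ALIVE_LINE = {MODULE_NAME: 0}
    init()
    run(lib.common.SOURCE_URL)
    return lib.common.RESULT_DICT[MODULE_NAME]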
if __name__ == '__main__':
    print(domain_retrieve('http://www.whu.edu.cn/'))
|
test_marathon.py
|
import contextlib
import json
import os
import re
import sys
import threading
import pytest
from six.moves.BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from dcos import constants
from .common import (app, assert_command, assert_lines,
exec_command, list_deployments, popen_tty,
show_app, update_config, watch_all_deployments,
watch_deployment)
_ZERO_INSTANCE_APP_ID = 'zero-instance-app'
_ZERO_INSTANCE_APP_INSTANCES = 100
def test_help():
with open('dcoscli/data/help/marathon.txt') as content:
assert_command(['dcos', 'marathon', '--help'],
stdout=content.read().encode('utf-8'))
def test_version():
assert_command(['dcos', 'marathon', '--version'],
stdout=b'dcos-marathon version SNAPSHOT\n')
def test_info():
assert_command(['dcos', 'marathon', '--info'],
stdout=b'Deploy and manage applications to DC/OS\n')
def test_about():
returncode, stdout, stderr = exec_command(['dcos', 'marathon', 'about'])
assert returncode == 0
assert stderr == b''
result = json.loads(stdout.decode('utf-8'))
assert result['name'] == "marathon"
@pytest.fixture
def env():
r = os.environ.copy()
r.update({
constants.PATH_ENV: os.environ[constants.PATH_ENV],
constants.DCOS_CONFIG_ENV: os.path.join("tests", "data", "dcos.toml"),
})
return r
def test_missing_config(env):
with update_config("core.dcos_url", None, env):
assert_command(
['dcos', 'marathon', 'app', 'list'],
returncode=1,
stderr=(b'Missing required config parameter: "core.dcos_url". '
b'Please run `dcos config set core.dcos_url <value>`.\n'),
env=env)
def test_empty_list():
_list_apps()
def test_add_app():
with _zero_instance_app():
_list_apps('zero-instance-app')
def test_add_app_through_http():
with _zero_instance_app_through_http():
_list_apps('zero-instance-app')
def test_add_app_bad_resource():
stderr = (b'Can\'t read from resource: bad_resource.\n'
b'Please check that it exists.\n')
assert_command(['dcos', 'marathon', 'app', 'add', 'bad_resource'],
returncode=1,
stderr=stderr)
def test_add_app_with_filename():
with _zero_instance_app():
_list_apps('zero-instance-app')
def test_remove_app():
with _zero_instance_app():
pass
_list_apps()
def test_add_bad_json_app():
with open('tests/data/marathon/apps/bad.json') as fd:
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'add'],
stdin=fd)
assert returncode == 1
assert stdout == b''
assert stderr.decode('utf-8').startswith('Error loading JSON: ')
def test_add_existing_app():
with _zero_instance_app():
app_path = 'tests/data/marathon/apps/zero_instance_sleep_v2.json'
with open(app_path) as fd:
stderr = b"Application '/zero-instance-app' already exists\n"
assert_command(['dcos', 'marathon', 'app', 'add'],
returncode=1,
stderr=stderr,
stdin=fd)
def test_show_app():
with _zero_instance_app():
show_app('zero-instance-app')
def test_show_absolute_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
result = show_app('zero-instance-app')
show_app('zero-instance-app', result['version'])
def test_show_relative_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
show_app('zero-instance-app', "-1")
def test_show_missing_relative_app_version():
app_id = _ZERO_INSTANCE_APP_ID
with _zero_instance_app():
_update_app(
app_id,
'tests/data/marathon/apps/update_zero_instance_sleep.json')
# Marathon persists app versions indefinitely by ID, so pick a large
# index here in case the history is long
cmd = ['dcos', 'marathon', 'app', 'show', '--app-version=-200', app_id]
returncode, stdout, stderr = exec_command(cmd)
assert returncode == 1
assert stdout == b''
pattern = ("Application 'zero-instance-app' only has [1-9][0-9]* "
"version\\(s\\)\\.\n")
assert re.fullmatch(pattern, stderr.decode('utf-8'), flags=re.DOTALL)
def test_show_missing_absolute_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'show',
'--app-version=2000-02-11T20:39:32.972Z', 'zero-instance-app'])
assert returncode == 1
assert stdout == b''
assert stderr.decode('utf-8').startswith(
"Error: App '/zero-instance-app' does not exist")
def test_show_bad_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'show', '--app-version=20:39:32.972Z',
'zero-instance-app'])
assert returncode == 1
assert stdout == b''
assert stderr.startswith(b'Error while fetching')
pattern = (b"""{"message":"Invalid format: """
b"""\\"20:39:32.972Z\\" is malformed"""
b""" at \\":39:32.972Z\\""}\n""")
assert stderr.endswith(pattern)
def test_show_bad_relative_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
assert_command(
['dcos', 'marathon', 'app', 'show',
'--app-version=2', 'zero-instance-app'],
returncode=1,
stderr=b"Relative versions must be negative: 2\n")
def test_start_missing_app():
assert_command(
['dcos', 'marathon', 'app', 'start', 'missing-id'],
returncode=1,
stderr=b"Error: App '/missing-id' does not exist\n")
def test_start_app():
with _zero_instance_app():
_start_app('zero-instance-app')
def test_start_already_started_app():
with _zero_instance_app():
_start_app('zero-instance-app')
stdout = (b"Application 'zero-instance-app' already "
b"started: 1 instances.\n")
assert_command(
['dcos', 'marathon', 'app', 'start', 'zero-instance-app'],
returncode=1,
stdout=stdout)
def test_stop_missing_app():
assert_command(['dcos', 'marathon', 'app', 'stop', 'missing-id'],
returncode=1,
stderr=b"Error: App '/missing-id' does not exist\n")
def test_stop_app():
with _zero_instance_app():
_start_app('zero-instance-app', 3)
watch_all_deployments()
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'stop', 'zero-instance-app'])
assert returncode == 0
assert stdout.decode().startswith('Created deployment ')
assert stderr == b''
def test_stop_already_stopped_app():
with _zero_instance_app():
stdout = (b"Application 'zero-instance-app' already "
b"stopped: 0 instances.\n")
assert_command(
['dcos', 'marathon', 'app', 'stop', 'zero-instance-app'],
returncode=1,
stdout=stdout)
def test_update_missing_app():
assert_command(['dcos', 'marathon', 'app', 'update', 'missing-id'],
stderr=b"Error: App '/missing-id' does not exist\n",
returncode=1)
def test_update_bad_type():
with _zero_instance_app():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'update',
'zero-instance-app', 'cpus="a string"'])
stderr_end = b"""{
"details": [
{
"errors": [
"error.expected.jsnumber"
],
"path": "/cpus"
}
],
"message": "Invalid JSON"
}
"""
assert returncode == 1
assert stderr_end in stderr
assert stdout == b''
def test_update_invalid_request():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'update', '{', 'instances'])
assert returncode == 1
assert stdout == b''
stderr = stderr.decode()
# TODO (tamar): this becomes 'Error: App '/{' does not exist\n"'
# in Marathon 0.11.0
assert stderr.startswith('Error on request')
assert stderr.endswith('HTTP 400: Bad Request\n')
def test_app_add_invalid_request():
path = os.path.join(
'tests', 'data', 'marathon', 'apps', 'app_add_400.json')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'add', path])
assert returncode == 1
assert stdout == b''
    assert re.match(rb"Error on request \[POST .*\]: HTTP 400: Bad Request:",
                    stderr)
stderr_end = b"""{
"details": [
{
"errors": [
"host is not a valid network type"
],
"path": "/container/docker/network"
}
],
"message": "Invalid JSON"
}
"""
assert stderr.endswith(stderr_end)
def test_update_app():
with _zero_instance_app():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'update', 'zero-instance-app',
'cpus=1', 'mem=20', "cmd='sleep 100'"])
assert returncode == 0
assert stdout.decode().startswith('Created deployment ')
assert stderr == b''
def test_update_app_from_stdin():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
def test_restarting_stopped_app():
with _zero_instance_app():
stdout = (b"Unable to perform rolling restart of application '"
b"/zero-instance-app' because it has no running tasks\n")
assert_command(
['dcos', 'marathon', 'app', 'restart', 'zero-instance-app'],
returncode=1,
stdout=stdout)
def test_restarting_missing_app():
assert_command(['dcos', 'marathon', 'app', 'restart', 'missing-id'],
returncode=1,
stderr=b"Error: App '/missing-id' does not exist\n")
def test_restarting_app():
with _zero_instance_app():
_start_app('zero-instance-app', 3)
watch_all_deployments()
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'restart', 'zero-instance-app'])
assert returncode == 0
assert stdout.decode().startswith('Created deployment ')
assert stderr == b''
def test_killing_app():
with _zero_instance_app():
_start_app('zero-instance-app', 3)
watch_all_deployments()
task_set_1 = set([task['id']
for task in _list_tasks(3, 'zero-instance-app')])
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'kill', 'zero-instance-app'])
assert returncode == 0
assert stdout.decode().startswith('Killed tasks: ')
assert stderr == b''
watch_all_deployments()
task_set_2 = set([task['id']
for task in _list_tasks(app_id='zero-instance-app')])
assert len(task_set_1.intersection(task_set_2)) == 0
def test_killing_scaling_app():
with _zero_instance_app():
_start_app('zero-instance-app', 3)
watch_all_deployments()
_list_tasks(3)
command = ['dcos', 'marathon', 'app', 'kill', '--scale',
'zero-instance-app']
returncode, stdout, stderr = exec_command(command)
assert returncode == 0
assert stdout.decode().startswith('Started deployment: ')
assert stdout.decode().find('version') > -1
assert stdout.decode().find('deploymentId') > -1
assert stderr == b''
watch_all_deployments()
_list_tasks(0)
def test_killing_with_host_app():
with _zero_instance_app():
_start_app('zero-instance-app', 3)
watch_all_deployments()
existing_tasks = _list_tasks(3, 'zero-instance-app')
task_hosts = set([task['host'] for task in existing_tasks])
if len(task_hosts) <= 1:
pytest.skip('test needs 2 or more agents to succeed, '
'only {} agents available'.format(len(task_hosts)))
assert len(task_hosts) > 1
kill_host = list(task_hosts)[0]
expected_to_be_killed = set([task['id']
for task in existing_tasks
if task['host'] == kill_host])
not_to_be_killed = set([task['id']
for task in existing_tasks
if task['host'] != kill_host])
assert len(not_to_be_killed) > 0
assert len(expected_to_be_killed) > 0
command = ['dcos', 'marathon', 'app', 'kill', '--host', kill_host,
'zero-instance-app']
returncode, stdout, stderr = exec_command(command)
assert stdout.decode().startswith('Killed tasks: ')
assert stderr == b''
new_tasks = set([task['id'] for task in _list_tasks()])
assert not_to_be_killed.intersection(new_tasks) == not_to_be_killed
assert len(expected_to_be_killed.intersection(new_tasks)) == 0
@pytest.mark.skipif(
True, reason='https://github.com/mesosphere/marathon/issues/3251')
def test_kill_stopped_app():
with _zero_instance_app():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'kill', 'zero-instance-app'])
assert returncode == 1
assert stdout.decode().startswith('Killed tasks: []')
def test_kill_missing_app():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'kill', 'app'])
assert returncode == 1
assert stdout.decode() == ''
stderr_expected = "Error: App '/app' does not exist"
assert stderr.decode().strip() == stderr_expected
def test_list_version_missing_app():
assert_command(
['dcos', 'marathon', 'app', 'version', 'list', 'missing-id'],
returncode=1,
stderr=b"Error: App '/missing-id' does not exist\n")
def test_list_version_negative_max_count():
assert_command(['dcos', 'marathon', 'app', 'version', 'list',
'missing-id', '--max-count=-1'],
returncode=1,
stderr=b'Maximum count must be a positive number: -1\n')
def test_list_version_app():
app_id = _ZERO_INSTANCE_APP_ID
with _zero_instance_app():
_list_versions(app_id, 1)
_update_app(
app_id,
'tests/data/marathon/apps/update_zero_instance_sleep.json')
_list_versions(app_id, 2)
def test_list_version_max_count():
app_id = _ZERO_INSTANCE_APP_ID
with _zero_instance_app():
_update_app(
app_id,
'tests/data/marathon/apps/update_zero_instance_sleep.json')
_list_versions(app_id, 1, 1)
_list_versions(app_id, 2, 2)
_list_versions(app_id, 2, 3)
def test_list_empty_deployment():
list_deployments(0)
def test_list_deployment():
with _zero_instance_app():
_start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
list_deployments(1)
def test_list_deployment_table():
"""Simple sanity check for listing deployments with a table output.
The more specific testing is done in unit tests.
"""
with _zero_instance_app():
_start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
assert_lines(['dcos', 'marathon', 'deployment', 'list'], 2)
def test_list_deployment_missing_app():
with _zero_instance_app():
_start_app('zero-instance-app')
list_deployments(0, 'missing-id')
def test_list_deployment_app():
with _zero_instance_app():
_start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
list_deployments(1, 'zero-instance-app')
def test_rollback_missing_deployment():
assert_command(
['dcos', 'marathon', 'deployment', 'rollback', 'missing-deployment'],
returncode=1,
stderr=b'Error: DeploymentPlan missing-deployment does not exist\n')
def test_rollback_deployment():
with _zero_instance_app():
_start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
result = list_deployments(1, 'zero-instance-app')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'deployment', 'rollback', result[0]['id']])
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
assert 'deploymentId' in result
assert 'version' in result
assert stderr == b''
watch_all_deployments()
list_deployments(0)
def test_stop_deployment():
with _zero_instance_app():
_start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
result = list_deployments(1, 'zero-instance-app')
assert_command(
['dcos', 'marathon', 'deployment', 'stop', result[0]['id']])
list_deployments(0)
def test_watching_missing_deployment():
watch_deployment('missing-deployment', 1)
def test_watching_deployment():
with _zero_instance_app():
_start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
result = list_deployments(1, 'zero-instance-app')
watch_deployment(result[0]['id'], 60)
assert_command(
['dcos', 'marathon', 'deployment', 'stop', result[0]['id']])
list_deployments(0, 'zero-instance-app')
def test_list_empty_task():
_list_tasks(0)
def test_list_empty_task_not_running_app():
with _zero_instance_app():
_list_tasks(0)
def test_list_tasks():
with _zero_instance_app():
_start_app('zero-instance-app', 3)
watch_all_deployments()
_list_tasks(3)
def test_list_tasks_table():
with _zero_instance_app():
_start_app('zero-instance-app', 3)
watch_all_deployments()
assert_lines(['dcos', 'marathon', 'task', 'list'], 4)
def test_list_app_tasks():
with _zero_instance_app():
_start_app('zero-instance-app', 3)
watch_all_deployments()
_list_tasks(3, 'zero-instance-app')
def test_list_missing_app_tasks():
with _zero_instance_app():
_start_app('zero-instance-app', 3)
watch_all_deployments()
_list_tasks(0, 'missing-id')
def test_show_missing_task():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'task', 'show', 'missing-id'])
stderr = stderr.decode('utf-8')
assert returncode == 1
assert stdout == b''
assert stderr.startswith("Task '")
assert stderr.endswith("' does not exist\n")
def test_show_task():
with _zero_instance_app():
_start_app('zero-instance-app', 3)
watch_all_deployments()
result = _list_tasks(3, 'zero-instance-app')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'task', 'show', result[0]['id']])
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
assert result['appId'] == '/zero-instance-app'
assert stderr == b''
def test_stop_task():
with _zero_instance_app():
_start_app('zero-instance-app', 1)
watch_all_deployments()
task_list = _list_tasks(1, 'zero-instance-app')
task_id = task_list[0]['id']
_stop_task(task_id)
def test_stop_task_wipe():
with _zero_instance_app():
_start_app('zero-instance-app', 1)
watch_all_deployments()
task_list = _list_tasks(1, 'zero-instance-app')
task_id = task_list[0]['id']
_stop_task(task_id, '--wipe')
def test_stop_unknown_task():
with _zero_instance_app():
_start_app('zero-instance-app')
watch_all_deployments()
task_id = 'unknown-task-id'
_stop_task(task_id, expect_success=False)
def test_stop_unknown_task_wipe():
with _zero_instance_app():
_start_app('zero-instance-app')
watch_all_deployments()
task_id = 'unknown-task-id'
_stop_task(task_id, '--wipe', expect_success=False)
def test_bad_configuration(env):
with update_config('marathon.url', 'http://localhost:88888', env):
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'about'], env=env)
assert returncode == 1
assert stdout == b''
assert stderr.startswith(
b"URL [http://localhost:88888/v2/info] is unreachable")
def test_app_locked_error():
with app('tests/data/marathon/apps/sleep_many_instances.json',
'/sleep-many-instances',
wait=False):
stderr = b'Changes blocked: deployment already in progress for app.\n'
assert_command(
['dcos', 'marathon', 'app', 'stop', 'sleep-many-instances'],
returncode=1,
stderr=stderr)
@pytest.mark.skipif(sys.platform == 'win32',
reason="No pseudo terminal on windows")
def test_app_add_no_tty():
proc, master = popen_tty('dcos marathon app add')
stdout, stderr = proc.communicate()
os.close(master)
print(stdout)
print(stderr)
assert proc.wait() == 1
assert stdout == b''
assert stderr == (b"We currently don't support reading from the TTY. "
b"Please specify an application JSON.\n"
b"E.g.: dcos marathon app add < app_resource.json\n")
def _list_apps(app_id=None):
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'list', '--json'])
result = json.loads(stdout.decode('utf-8'))
if app_id is None:
assert len(result) == 0
else:
assert len(result) == 1
assert result[0]['id'] == '/' + app_id
assert returncode == 0
assert stderr == b''
return result
def _start_app(app_id, instances=None):
cmd = ['dcos', 'marathon', 'app', 'start', app_id]
if instances is not None:
cmd.append(str(instances))
returncode, stdout, stderr = exec_command(cmd)
assert returncode == 0
assert stdout.decode().startswith('Created deployment ')
assert stderr == b''
def _update_app(app_id, file_path):
with open(file_path) as fd:
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'update', app_id],
stdin=fd)
assert returncode == 0
assert stdout.decode().startswith('Created deployment ')
assert stderr == b''
def _list_versions(app_id, expected_min_count, max_count=None):
cmd = ['dcos', 'marathon', 'app', 'version', 'list', app_id]
if max_count is not None:
cmd.append('--max-count={}'.format(max_count))
returncode, stdout, stderr = exec_command(cmd)
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
assert isinstance(result, list)
assert stderr == b''
# Marathon persists app versions indefinitely by ID, so there may be extras
assert len(result) >= expected_min_count
if max_count is not None:
assert len(result) <= max_count
def _list_tasks(expected_count=None, app_id=None):
cmd = ['dcos', 'marathon', 'task', 'list', '--json']
if app_id is not None:
cmd.append(app_id)
returncode, stdout, stderr = exec_command(cmd)
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
    if expected_count is not None:
assert len(result) == expected_count
assert stderr == b''
return result
def _stop_task(task_id, wipe=None, expect_success=True):
cmd = ['dcos', 'marathon', 'task', 'stop', task_id]
if wipe is not None:
cmd.append('--wipe')
returncode, stdout, stderr = exec_command(cmd)
if expect_success:
assert returncode == 0
assert stderr == b''
result = json.loads(stdout.decode('utf-8'))
assert result['id'] == task_id
else:
assert returncode == 1
@contextlib.contextmanager
def _zero_instance_app():
with app('tests/data/marathon/apps/zero_instance_sleep.json',
'zero-instance-app'):
yield
@contextlib.contextmanager
def _zero_instance_app_through_http():
class JSONRequestHandler (BaseHTTPRequestHandler):
def do_GET(self): # noqa: N802
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(open(
'tests/data/marathon/apps/zero_instance_sleep.json',
'rb').read())
host = 'localhost'
port = 12345
server = HTTPServer((host, port), JSONRequestHandler)
thread = threading.Thread(target=server.serve_forever)
thread.setDaemon(True)
thread.start()
with app('http://{}:{}'.format(host, port), 'zero-instance-app'):
try:
yield
finally:
server.shutdown()
|
util.py
|
import os
import tzlocal
from datetime import datetime
import logging
import time
from contextlib import contextmanager
log = logging.getLogger(__name__)
# https://stackoverflow.com/a/47087513/165783
def next_path(path_pattern):
"""
    Finds the next free path in a sequentially named list of files and
    returns it as a (path, index) tuple. E.g. path_pattern = 'file-%s.txt':
file-1.txt
file-2.txt
file-3.txt
Runs in log(n) time where n is the number of existing files in sequence
"""
i = 1
# First do an exponential search
while os.path.exists(path_pattern % i):
i = i * 2
# Result lies somewhere in the interval (i/2..i]
# We call this interval (a..b] and narrow it down until a + 1 = b
    a, b = (i // 2, i)
    while a + 1 < b:
        c = (a + b) // 2  # interval midpoint
a, b = (c, b) if os.path.exists(path_pattern % c) else (a, c)
return path_pattern % b, b
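# --- Hedged demo (hypothetical file names) ---------------------------------------
# next_path returns a (path, index) tuple; with two existing files the next free
# slot is index 3. A self-contained check using a temporary directory:
def _demo_next_path():
    import tempfile
    d = tempfile.mkdtemp()
    pattern = os.path.join(d, 'log-%s.txt')
    for n in (1, 2):
        open(pattern % n, 'w').close()   # create log-1.txt and log-2.txt
    return next_path(pattern)            # -> ('<tmpdir>/log-3.txt', 3)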
def local_iso_datetime():
"""
Returns ISO8601 formatted timestamp for the current time and timezone
"""
return tzlocal.get_localzone().localize(datetime.now()).isoformat()
# https://stackoverflow.com/a/31142078/165783
class CounterHandler(logging.Handler):
"""
Logging handler that counts logged messages by level
e.g::
handler = CounterHandler()
logger.addHandler(handler)
... code that uses logger ...
print("Warnings: %s" % handler.count(logging.WARN))
print("Errors: %s" % handler.count(logging.ERROR))
"""
counters = None
def __init__(self, *args, **kwargs):
super(CounterHandler, self).__init__(*args, **kwargs)
self.counters = {}
def emit(self, record):
l = record.levelname
if (l not in self.counters):
self.counters[l] = 0
self.counters[l] += 1
def count(self, level):
return self.counters.get(level, 0)
@contextmanager
def log_time(msg):
t1 = time.time()
yield
t2 = time.time()
log.info('%s: %0.2fs' % (msg, t2 - t1))
# Helpers to inject lists of values into SQL queries
def sql_list_placeholder(name, items):
return ', '.join([':%s%s' % (name, i) for i in range(len(items))])
def sql_list_argument(name, items):
return dict(zip(['%s%s' % (name, i) for i in range(len(items))], items))
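# --- Hedged example (table and column names are hypothetical) --------------------
# The two helpers above expand a Python list into named bind parameters for an
# SQL IN (...) clause using ':name'-style placeholders:
def _demo_sql_list_helpers():
    ids = [3, 7, 42]
    sql = "SELECT * FROM sightings WHERE id IN (%s)" % sql_list_placeholder("id", ids)
    params = sql_list_argument("id", ids)
    # sql    == "SELECT * FROM sightings WHERE id IN (:id0, :id1, :id2)"
    # params == {"id0": 3, "id1": 7, "id2": 42}
    return sql, params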
import multiprocessing
from threading import Thread
from Queue import Queue, Empty
import math
import platform
def run_parallel(target, tasks, n_workers = None, use_processes = False):
"""
Runs tasks in parallel
`target` is a function
`tasks` is a list of argument tuples passed to the function. If `target` only takes one argument, then it doesn't need to
be wrapped in a tuple.
A generator yielding the result of each task is returned, in the form of a (result, error) tuple which allows errors to be
handled. The generator must be consumed in order to ensure all tasks are processed.
Results are yielded in the order they are completed, which is generally not the same as the order in which they are supplied.
Example::
def do_hard_work(a, b):
...
tasks = [(1,2), (5,2), (3,4) ....]
for result, error in run_parallel(do_hard_work, tasks):
            print(result)
A pool of worker threads (or processes if `use_processes = True`) is used to process the tasks.
Threads may not always be able to achieve parallelism due to Python GIL.
If using processes, be careful not to use shared global resources such as database connection pools in the target function.
The number of workers defaults to the number of cpu cores as reported by `multiprocessing.cpu_count`, but can be set
using the `n_workers` parameter.
"""
if n_workers is None:
n_workers = multiprocessing.cpu_count()
# Multiprocessing has issues on Windows
if platform.system() == 'Windows':
use_processes = False
Q = multiprocessing.Queue if use_processes else Queue
# Setup queues
work_q = Q()
result_q = Q()
# Helper to get next item from queue without constantly blocking
def next(q):
while True:
try:
return q.get(True, 1) # Get with timeout so thread isn't constantly blocked
except Empty:
pass
except:
log.exception("Exception getting item from queue")
raise
# Setup worker threads
def worker(work_q, result_q):
while True:
task = next(work_q)
if task is None:
break
try:
if type(task) != tuple:
task = (task,)
result_q.put((target(*task), None))
except Exception as e:
log.exception("Exception in worker")
result_q.put((None, e))
for i in range(0, n_workers):
if use_processes:
p = multiprocessing.Process(target = worker, args = (work_q, result_q))
p.start()
else:
t = Thread(target = worker, args = (work_q, result_q))
t.daemon = True
t.start()
# Feed in tasks and yield results
i = 0
for task in tasks:
work_q.put(task)
i += 1
# Start getting results once all threads have something to do
if i > n_workers:
yield next(result_q)
i -= 1
# Signal threads to stop
for j in range(0, n_workers):
work_q.put(None)
# Finish collecting results
while i > 0:
yield next(result_q)
i -= 1
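# --- Hedged demo (not part of the original module) -------------------------------
# run_parallel yields (result, error) pairs in completion order; a trivial
# CPU-light task with thread workers (the default) looks like this:
def _demo_run_parallel():
    def square(x):
        return x * x
    results = []
    for result, error in run_parallel(square, [1, 2, 3, 4, 5]):
        if error is None:
            results.append(result)
    return sorted(results)   # [1, 4, 9, 16, 25]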
|
scripts_regression_tests.py
|
#!/usr/bin/env python
"""
Script containing CIME python regression test suite. This suite should be run
to confirm overall CIME correctness.
"""
import glob, os, re, shutil, signal, sys, tempfile, \
threading, time, logging, unittest, getpass, \
                   filecmp
from xml.etree.ElementTree import ParseError
LIB_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),"..","lib")
sys.path.append(LIB_DIR)
# Remove all pyc files to ensure we're testing the right things
import subprocess, argparse
subprocess.call('/bin/rm -f $(find . -name "*.pyc")', shell=True, cwd=LIB_DIR)
import six
from six import assertRaisesRegex
import stat as osstat
import collections
from CIME.utils import run_cmd, run_cmd_no_fail, get_lids, get_current_commit, safe_copy, CIMEError, get_cime_root
import get_tests
import CIME.test_scheduler, CIME.wait_for_tests
from CIME.test_scheduler import TestScheduler
from CIME.XML.compilers import Compilers
from CIME.XML.env_run import EnvRun
from CIME.XML.machines import Machines
from CIME.XML.files import Files
from CIME.case import Case
from CIME.code_checker import check_code, get_all_checkable_files
from CIME.test_status import *
SCRIPT_DIR = CIME.utils.get_scripts_root()
TOOLS_DIR = os.path.join(SCRIPT_DIR,"Tools")
TEST_COMPILER = None
GLOBAL_TIMEOUT = None
TEST_MPILIB = None
MACHINE = None
FAST_ONLY = False
NO_BATCH = False
NO_CMAKE = False
TEST_ROOT = None
NO_TEARDOWN = False
os.environ["CIME_GLOBAL_WALLTIME"] = "0:05:00"
# pragma pylint: disable=protected-access
###############################################################################
def run_cmd_assert_result(test_obj, cmd, from_dir=None, expected_stat=0, env=None, verbose=False):
###############################################################################
from_dir = os.getcwd() if from_dir is None else from_dir
stat, output, errput = run_cmd(cmd, from_dir=from_dir, env=env, verbose=verbose)
if expected_stat == 0:
expectation = "SHOULD HAVE WORKED, INSTEAD GOT STAT %s" % stat
else:
expectation = "EXPECTED STAT %s, INSTEAD GOT STAT %s" % (expected_stat, stat)
msg = \
"""
COMMAND: %s
FROM_DIR: %s
%s
OUTPUT: %s
ERRPUT: %s
""" % (cmd, from_dir, expectation, output, errput)
test_obj.assertEqual(stat, expected_stat, msg=msg)
return output
###############################################################################
def assert_test_status(test_obj, test_name, test_status_obj, test_phase, expected_stat):
###############################################################################
test_status = test_status_obj.get_status(test_phase)
test_obj.assertEqual(test_status, expected_stat, msg="Problem with {}: for phase '{}': has status '{}', expected '{}'".format(test_name, test_phase, test_status, expected_stat))
###############################################################################
def verify_perms(test_obj, root_dir):
###############################################################################
for root, dirs, files in os.walk(root_dir):
for filename in files:
full_path = os.path.join(root, filename)
st = os.stat(full_path)
test_obj.assertTrue(st.st_mode & osstat.S_IWGRP, msg="file {} is not group writeable".format(full_path))
test_obj.assertTrue(st.st_mode & osstat.S_IRGRP, msg="file {} is not group readable".format(full_path))
test_obj.assertTrue(st.st_mode & osstat.S_IROTH, msg="file {} is not world readable".format(full_path))
for dirname in dirs:
full_path = os.path.join(root, dirname)
st = os.stat(full_path)
test_obj.assertTrue(st.st_mode & osstat.S_IWGRP, msg="dir {} is not group writable".format(full_path))
test_obj.assertTrue(st.st_mode & osstat.S_IRGRP, msg="dir {} is not group readable".format(full_path))
test_obj.assertTrue(st.st_mode & osstat.S_IXGRP, msg="dir {} is not group executable".format(full_path))
test_obj.assertTrue(st.st_mode & osstat.S_IROTH, msg="dir {} is not world readable".format(full_path))
test_obj.assertTrue(st.st_mode & osstat.S_IXOTH, msg="dir {} is not world executable".format(full_path))
###############################################################################
class A_RunUnitTests(unittest.TestCase):
###############################################################################
def test_resolve_variable_name(self):
files = Files()
machinefile = files.get_value("MACHINES_SPEC_FILE")
self.assertTrue(os.path.isfile(machinefile),
msg="Path did not resolve to existing file %s" % machinefile)
def test_unittests(self):
# Finds all files contained in CIME/tests or its subdirectories that
# match the pattern 'test*.py', and runs the unit tests found there
# (i.e., tests defined using python's unittest module).
#
# This is analogous to running:
# python -m unittest discover -s CIME/tests -t .
# from cime/scripts/lib
#
# Yes, that means we have a bunch of unit tests run from this one unit
# test.
testsuite = unittest.defaultTestLoader.discover(
start_dir = os.path.join(LIB_DIR,"CIME","tests"),
pattern = 'test*.py',
top_level_dir = LIB_DIR)
testrunner = unittest.TextTestRunner(buffer=False)
# Disable logging; otherwise log messages written by code under test
# clutter the unit test output
log_lvl = logging.getLogger().getEffectiveLevel()
logging.disable(logging.CRITICAL)
try:
results = testrunner.run(testsuite)
finally:
logging.getLogger().setLevel(log_lvl)
self.assertTrue(results.wasSuccessful())
def test_lib_doctests(self):
# Find and run all the doctests in the lib directory tree
skip_list = ["six.py", "CIME/SystemTests/mvk.py", "CIME/SystemTests/pgn.py"]
for root, _, files in os.walk(LIB_DIR):
for file_ in files:
filepath = os.path.join(root, file_)[len(LIB_DIR)+1:]
if filepath.endswith(".py") and filepath not in skip_list:
with open(os.path.join(root, file_)) as fd:
content = fd.read()
if '>>>' in content:
print("Running doctests for {}".format(filepath))
run_cmd_assert_result(self, 'PYTHONPATH={}:$PYTHONPATH python -m doctest {} 2>&1'.format(LIB_DIR, filepath), from_dir=LIB_DIR)
else:
print("{} has no doctests".format(filepath))
###############################################################################
def make_fake_teststatus(path, testname, status, phase):
###############################################################################
expect(phase in CORE_PHASES, "Bad phase '%s'" % phase)
with TestStatus(test_dir=path, test_name=testname) as ts:
for core_phase in CORE_PHASES:
if core_phase == phase:
ts.set_status(core_phase, status, comments=("time=42" if phase == RUN_PHASE else ""))
break
else:
ts.set_status(core_phase, TEST_PASS_STATUS, comments=("time=42" if phase == RUN_PHASE else ""))
###############################################################################
def parse_test_status(line):
###############################################################################
regex = re.compile(r"Test '(\w+)' finished with status '(\w+)'")
m = regex.match(line)
return m.groups()
###############################################################################
def kill_subprocesses(name=None, sig=signal.SIGKILL, expected_num_killed=None, tester=None):
###############################################################################
# Kill all subprocesses
proc_ids = CIME.utils.find_proc_id(proc_name=name, children_only=True)
if (expected_num_killed is not None):
tester.assertEqual(len(proc_ids), expected_num_killed,
msg="Expected to find %d processes to kill, found %d" % (expected_num_killed, len(proc_ids)))
for proc_id in proc_ids:
try:
os.kill(proc_id, sig)
except OSError:
pass
###############################################################################
def kill_python_subprocesses(sig=signal.SIGKILL, expected_num_killed=None, tester=None):
###############################################################################
kill_subprocesses("[Pp]ython", sig, expected_num_killed, tester)
###########################################################################
def assert_dashboard_has_build(tester, build_name, expected_count=1):
###########################################################################
# Do not test E3SM dashboard if model is CESM
if CIME.utils.get_model() == "e3sm":
time.sleep(10) # Give chance for cdash to update
wget_file = tempfile.mktemp()
run_cmd_no_fail("wget https://my.cdash.org/api/v1/index.php?project=ACME_test --no-check-certificate -O %s" % wget_file)
raw_text = open(wget_file, "r").read()
os.remove(wget_file)
num_found = raw_text.count(build_name)
tester.assertEqual(num_found, expected_count,
                       msg="Dashboard did not have the expected number of occurrences of build name '%s'. Expected %s, found %s" % (build_name, expected_count, num_found))
###############################################################################
def setup_proxy():
###############################################################################
if ("http_proxy" not in os.environ):
proxy = MACHINE.get_value("PROXY")
if (proxy is not None):
os.environ["http_proxy"] = proxy
return True
return False
###############################################################################
class N_TestUnitTest(unittest.TestCase):
###############################################################################
@classmethod
def setUpClass(cls):
cls._do_teardown = []
cls._testroot = os.path.join(TEST_ROOT, 'TestUnitTests')
cls._testdirs = []
def _has_unit_test_support(self):
if TEST_COMPILER is None:
default_compiler = MACHINE.get_default_compiler()
compiler = Compilers(MACHINE, compiler=default_compiler)
else:
compiler = Compilers(MACHINE, compiler=TEST_COMPILER)
attrs = {'MPILIB': 'mpi-serial', 'compile_threaded': 'FALSE'}
pfunit_path = compiler.get_optional_compiler_node("PFUNIT_PATH",
attributes=attrs)
if pfunit_path is None:
return False
else:
return True
def test_a_unit_test(self):
cls = self.__class__
if not self._has_unit_test_support():
self.skipTest("Skipping TestUnitTest - PFUNIT_PATH not found for the default compiler on this machine")
test_dir = os.path.join(cls._testroot,"unit_tester_test")
cls._testdirs.append(test_dir)
os.makedirs(test_dir)
unit_test_tool = os.path.abspath(os.path.join(get_cime_root(),"scripts","fortran_unit_testing","run_tests.py"))
test_spec_dir = os.path.join(os.path.dirname(unit_test_tool),"Examples", "interpolate_1d", "tests")
args = "--build-dir {} --test-spec-dir {}".format(test_dir, test_spec_dir)
args += " --machine {}".format(MACHINE.get_machine_name())
run_cmd_no_fail("{} {}".format(unit_test_tool, args))
cls._do_teardown.append(test_dir)
def test_b_cime_f90_unit_tests(self):
cls = self.__class__
if (FAST_ONLY):
self.skipTest("Skipping slow test")
if not self._has_unit_test_support():
self.skipTest("Skipping TestUnitTest - PFUNIT_PATH not found for the default compiler on this machine")
test_dir = os.path.join(cls._testroot,"driver_f90_tests")
cls._testdirs.append(test_dir)
os.makedirs(test_dir)
test_spec_dir = get_cime_root()
unit_test_tool = os.path.abspath(os.path.join(test_spec_dir,"scripts","fortran_unit_testing","run_tests.py"))
args = "--build-dir {} --test-spec-dir {}".format(test_dir, test_spec_dir)
args += " --machine {}".format(MACHINE.get_machine_name())
run_cmd_no_fail("{} {}".format(unit_test_tool, args))
cls._do_teardown.append(test_dir)
@classmethod
def tearDownClass(cls):
do_teardown = len(cls._do_teardown) > 0 and sys.exc_info() == (None, None, None) and not NO_TEARDOWN
teardown_root = True
for tfile in cls._testdirs:
if tfile not in cls._do_teardown:
print("Detected failed test or user request no teardown")
print("Leaving case directory : %s"%tfile)
teardown_root = False
elif do_teardown:
shutil.rmtree(tfile)
if teardown_root and do_teardown:
shutil.rmtree(cls._testroot)
###############################################################################
class J_TestCreateNewcase(unittest.TestCase):
###############################################################################
@classmethod
def setUpClass(cls):
cls._testdirs = []
cls._do_teardown = []
cls._testroot = os.path.join(TEST_ROOT, 'TestCreateNewcase')
def test_a_createnewcase(self):
cls = self.__class__
testdir = os.path.join(cls._testroot, 'testcreatenewcase')
if os.path.exists(testdir):
shutil.rmtree(testdir)
args = " --case %s --compset X --output-root %s --handle-preexisting-dirs=r --debug " % (testdir, cls._testroot)
if TEST_COMPILER is not None:
args = args + " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args = args + " --mpilib %s"%TEST_MPILIB
if CIME.utils.get_cime_default_driver() == "nuopc":
args += " --res f19_g17 "
else:
args += " --res f19_g16 "
cls._testdirs.append(testdir)
run_cmd_assert_result(self, "./create_newcase %s"%(args), from_dir=SCRIPT_DIR)
self.assertTrue(os.path.exists(testdir))
self.assertTrue(os.path.exists(os.path.join(testdir, "case.setup")))
run_cmd_assert_result(self, "./case.setup", from_dir=testdir)
run_cmd_assert_result(self, "./case.build", from_dir=testdir)
with Case(testdir, read_only=False) as case:
ntasks = case.get_value("NTASKS_ATM")
case.set_value("NTASKS_ATM", ntasks+1)
# this should fail with a locked file issue
run_cmd_assert_result(self, "./case.build",
from_dir=testdir, expected_stat=1)
run_cmd_assert_result(self, "./case.setup --reset", from_dir=testdir)
run_cmd_assert_result(self, "./case.build", from_dir=testdir)
with Case(testdir, read_only=False) as case:
case.set_value("CHARGE_ACCOUNT", "fred")
# this should not fail with a locked file issue
run_cmd_assert_result(self, "./case.build", from_dir=testdir)
run_cmd_assert_result(self, "./case.st_archive --test-all", from_dir=testdir)
# Trying to set values outside of context manager should fail
case = Case(testdir, read_only=False)
with self.assertRaises(CIMEError):
case.set_value("NTASKS_ATM", 42)
# Trying to read_xml with pending changes should fail
with self.assertRaises(CIMEError):
with Case(testdir, read_only=False) as case:
case.set_value("CHARGE_ACCOUNT", "fouc")
case.read_xml()
cls._do_teardown.append(testdir)
def test_aa_no_flush_on_instantiate(self):
testdir = os.path.join(self.__class__._testroot, 'testcreatenewcase')
with Case(testdir, read_only=False) as case:
for env_file in case._files:
self.assertFalse(env_file.needsrewrite, msg="Instantiating a case should not trigger a flush call")
with Case(testdir, read_only=False) as case:
case.set_value("HIST_OPTION","nyears")
runfile = case.get_env('run')
self.assertTrue(runfile.needsrewrite, msg="Expected flush call not triggered")
for env_file in case._files:
if env_file != runfile:
self.assertFalse(env_file.needsrewrite, msg="Unexpected flush triggered for file {}"
.format(env_file.filename))
# Flush the file
runfile.write()
# set it again to the same value
case.set_value("HIST_OPTION","nyears")
# now the file should not need to be flushed
for env_file in case._files:
self.assertFalse(env_file.needsrewrite, msg="Unexpected flush triggered for file {}"
.format(env_file.filename))
# Check once more with a new instance
with Case(testdir, read_only=False) as case:
case.set_value("HIST_OPTION","nyears")
for env_file in case._files:
self.assertFalse(env_file.needsrewrite, msg="Unexpected flush triggered for file {}"
.format(env_file.filename))
def test_b_user_mods(self):
cls = self.__class__
testdir = os.path.join(cls._testroot, 'testusermods')
if os.path.exists(testdir):
shutil.rmtree(testdir)
cls._testdirs.append(testdir)
user_mods_dir = os.path.join(CIME.utils.get_python_libs_root(), "..", "tests", "user_mods_test1")
args = " --case %s --compset X --user-mods-dir %s --output-root %s --handle-preexisting-dirs=r"% (testdir, user_mods_dir, cls._testroot)
if TEST_COMPILER is not None:
args = args + " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args = args + " --mpilib %s"%TEST_MPILIB
if CIME.utils.get_cime_default_driver() == "nuopc":
args += " --res f19_g17 "
else:
args += " --res f19_g16 "
run_cmd_assert_result(self, "%s/create_newcase %s "
% (SCRIPT_DIR, args),from_dir=SCRIPT_DIR)
self.assertTrue(os.path.isfile(os.path.join(testdir,"SourceMods","src.drv","somefile.F90")), msg="User_mods SourceMod missing")
with open(os.path.join(testdir,"user_nl_cpl"),"r") as fd:
contents = fd.read()
self.assertTrue("a different cpl test option" in contents, msg="User_mods contents of user_nl_cpl missing")
self.assertTrue("a cpl namelist option" in contents, msg="User_mods contents of user_nl_cpl missing")
cls._do_teardown.append(testdir)
def test_c_create_clone_keepexe(self):
cls = self.__class__
testdir = os.path.join(cls._testroot, 'test_create_clone_keepexe')
if os.path.exists(testdir):
shutil.rmtree(testdir)
prevtestdir = cls._testdirs[0]
user_mods_dir = os.path.join(CIME.utils.get_python_libs_root(), "..", "tests", "user_mods_test3")
cmd = "%s/create_clone --clone %s --case %s --keepexe --user-mods-dir %s" \
% (SCRIPT_DIR, prevtestdir, testdir, user_mods_dir)
run_cmd_assert_result(self, cmd, from_dir=SCRIPT_DIR, expected_stat=1)
def test_d_create_clone_new_user(self):
cls = self.__class__
testdir = os.path.join(cls._testroot, 'test_create_clone_new_user')
if os.path.exists(testdir):
shutil.rmtree(testdir)
prevtestdir = cls._testdirs[0]
cls._testdirs.append(testdir)
# change the USER and CIME_OUTPUT_ROOT to nonsense values
# this is intended as a test of whether create_clone is independent of user
run_cmd_assert_result(self, "./xmlchange USER=this_is_not_a_user",
from_dir=prevtestdir)
fakeoutputroot = cls._testroot.replace(os.environ.get("USER"), "this_is_not_a_user")
run_cmd_assert_result(self, "./xmlchange CIME_OUTPUT_ROOT=%s"%fakeoutputroot,
from_dir=prevtestdir)
# this test should pass (user name is replaced)
run_cmd_assert_result(self, "%s/create_clone --clone %s --case %s " %
(SCRIPT_DIR, prevtestdir, testdir),from_dir=SCRIPT_DIR)
shutil.rmtree(testdir)
# this test should pass
run_cmd_assert_result(self, "%s/create_clone --clone %s --case %s --cime-output-root %s" %
(SCRIPT_DIR, prevtestdir, testdir, cls._testroot),from_dir=SCRIPT_DIR)
cls._do_teardown.append(testdir)
def test_dd_create_clone_not_writable(self):
cls = self.__class__
testdir = os.path.join(cls._testroot, 'test_create_clone_not_writable')
if os.path.exists(testdir):
shutil.rmtree(testdir)
prevtestdir = cls._testdirs[0]
cls._testdirs.append(testdir)
with Case(prevtestdir, read_only=False) as case1:
case2 = case1.create_clone(testdir)
with self.assertRaises(CIMEError):
case2.set_value("CHARGE_ACCOUNT", "fouc")
def test_e_xmlquery(self):
# Set script and script path
xmlquery = "./xmlquery"
cls = self.__class__
casedir = cls._testdirs[0]
# Check for environment
self.assertTrue(os.path.isdir(SCRIPT_DIR))
self.assertTrue(os.path.isdir(TOOLS_DIR))
self.assertTrue(os.path.isfile(os.path.join(casedir,xmlquery)))
# Test command line options
with Case(casedir, read_only=True) as case:
STOP_N = case.get_value("STOP_N")
COMP_CLASSES = case.get_values("COMP_CLASSES")
BUILD_COMPLETE = case.get_value("BUILD_COMPLETE")
cmd = xmlquery + " STOP_N --value"
output = run_cmd_no_fail(cmd, from_dir=casedir)
self.assertTrue(output == str(STOP_N), msg="%s != %s"%(output, STOP_N))
cmd = xmlquery + " BUILD_COMPLETE --value"
output = run_cmd_no_fail(cmd, from_dir=casedir)
self.assertTrue(output == "TRUE", msg="%s != %s"%(output, BUILD_COMPLETE))
# we expect DOCN_MODE to be undefined in this X compset
# this test assures that we do not try to resolve this as a compvar
cmd = xmlquery + " DOCN_MODE --value"
_, output, error = run_cmd(cmd, from_dir=casedir)
self.assertTrue(error == "ERROR: No results found for variable DOCN_MODE",
msg="unexpected result for DOCN_MODE, output {}, error {}".
format(output, error))
for comp in COMP_CLASSES:
caseresult = case.get_value("NTASKS_%s"%comp)
cmd = xmlquery + " NTASKS_%s --value"%comp
output = run_cmd_no_fail(cmd, from_dir=casedir)
self.assertTrue(output == str(caseresult), msg="%s != %s"%(output, caseresult))
cmd = xmlquery + " NTASKS --subgroup %s --value"%comp
output = run_cmd_no_fail(cmd, from_dir=casedir)
self.assertTrue(output == str(caseresult), msg="%s != %s"%(output, caseresult))
if MACHINE.has_batch_system():
JOB_QUEUE = case.get_value("JOB_QUEUE", subgroup="case.run")
cmd = xmlquery + " JOB_QUEUE --subgroup case.run --value"
output = run_cmd_no_fail(cmd, from_dir=casedir)
self.assertTrue(output == JOB_QUEUE, msg="%s != %s"%(output, JOB_QUEUE))
cmd = xmlquery + " --listall"
run_cmd_no_fail(cmd, from_dir=casedir)
cls._do_teardown.append(cls._testroot)
def test_f_createnewcase_with_user_compset(self):
cls = self.__class__
testdir = os.path.join(cls._testroot, 'testcreatenewcase_with_user_compset')
if os.path.exists(testdir):
shutil.rmtree(testdir)
cls._testdirs.append(testdir)
pesfile = os.path.join("..","src","drivers","mct","cime_config","config_pes.xml")
args = "--case %s --compset 2000_SATM_XLND_SICE_SOCN_XROF_XGLC_SWAV --pesfile %s --res f19_g16 --output-root %s --handle-preexisting-dirs=r" % (testdir, pesfile, cls._testroot)
if CIME.utils.get_model() == "cesm":
args += " --run-unsupported"
if TEST_COMPILER is not None:
args += " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args = args + " --mpilib %s"%TEST_MPILIB
run_cmd_assert_result(self, "%s/create_newcase %s"%(SCRIPT_DIR, args), from_dir=SCRIPT_DIR)
run_cmd_assert_result(self, "./case.setup", from_dir=testdir)
run_cmd_assert_result(self, "./case.build", from_dir=testdir)
cls._do_teardown.append(testdir)
def test_g_createnewcase_with_user_compset_and_env_mach_pes(self):
cls = self.__class__
testdir = os.path.join(cls._testroot, 'testcreatenewcase_with_user_compset_and_env_mach_pes')
if os.path.exists(testdir):
shutil.rmtree(testdir)
previous_testdir = cls._testdirs[-1]
cls._testdirs.append(testdir)
pesfile = os.path.join(previous_testdir,"env_mach_pes.xml")
args = "--case %s --compset 2000_SATM_XLND_SICE_SOCN_XROF_XGLC_SWAV --pesfile %s --res f19_g16 --output-root %s --handle-preexisting-dirs=r" % (testdir, pesfile, cls._testroot)
if CIME.utils.get_model() == "cesm":
args += " --run-unsupported"
if TEST_COMPILER is not None:
args += " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args += " --mpilib %s"%TEST_MPILIB
run_cmd_assert_result(self, "%s/create_newcase %s"%(SCRIPT_DIR, args), from_dir=SCRIPT_DIR)
run_cmd_assert_result(self, "diff env_mach_pes.xml %s"%(previous_testdir), from_dir=testdir)
# this line should cause the diff to fail (I assume no machine is going to default to 17 tasks)
run_cmd_assert_result(self, "./xmlchange NTASKS=17", from_dir=testdir)
run_cmd_assert_result(self, "diff env_mach_pes.xml %s"%(previous_testdir), from_dir=testdir,
expected_stat=1)
cls._do_teardown.append(testdir)
def test_h_primary_component(self):
cls = self.__class__
testdir = os.path.join(cls._testroot, 'testprimarycomponent')
if os.path.exists(testdir):
shutil.rmtree(testdir)
cls._testdirs.append(testdir)
args = " --case CreateNewcaseTest --script-root %s --compset X --output-root %s --handle-preexisting-dirs u" % (testdir, cls._testroot)
if TEST_COMPILER is not None:
args += " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args += " --mpilib %s"%TEST_MPILIB
if CIME.utils.get_cime_default_driver() == "nuopc":
args += " --res f19_g17 "
else:
args += " --res f19_g16 "
run_cmd_assert_result(self, "%s/create_newcase %s" % (SCRIPT_DIR, args), from_dir=SCRIPT_DIR)
self.assertTrue(os.path.exists(testdir))
self.assertTrue(os.path.exists(os.path.join(testdir, "case.setup")))
with Case(testdir, read_only=False) as case:
case._compsetname = case.get_value("COMPSET")
case.set_comp_classes(case.get_values("COMP_CLASSES"))
primary = case._find_primary_component()
self.assertEqual(primary, "drv", msg="primary component test expected drv but got %s"%primary)
# now we are going to corrupt the case so that we can do more primary_component testing
case.set_valid_values("COMP_GLC","%s,fred"%case.get_value("COMP_GLC"))
case.set_value("COMP_GLC","fred")
primary = case._find_primary_component()
self.assertEqual(primary, "fred", msg="primary component test expected fred but got %s"%primary)
case.set_valid_values("COMP_ICE","%s,wilma"%case.get_value("COMP_ICE"))
case.set_value("COMP_ICE","wilma")
primary = case._find_primary_component()
self.assertEqual(primary, "wilma", msg="primary component test expected wilma but got %s"%primary)
case.set_valid_values("COMP_OCN","%s,bambam,docn"%case.get_value("COMP_OCN"))
case.set_value("COMP_OCN","bambam")
primary = case._find_primary_component()
self.assertEqual(primary, "bambam", msg="primary component test expected bambam but got %s"%primary)
case.set_valid_values("COMP_LND","%s,barney"%case.get_value("COMP_LND"))
case.set_value("COMP_LND","barney")
primary = case._find_primary_component()
# This is a "J" compset
self.assertEqual(primary, "allactive", msg="primary component test expected allactive but got %s"%primary)
case.set_value("COMP_OCN","docn")
case.set_valid_values("COMP_LND","%s,barney"%case.get_value("COMP_LND"))
case.set_value("COMP_LND","barney")
primary = case._find_primary_component()
self.assertEqual(primary, "barney", msg="primary component test expected barney but got %s"%primary)
case.set_valid_values("COMP_ATM","%s,wilma"%case.get_value("COMP_ATM"))
case.set_value("COMP_ATM","wilma")
primary = case._find_primary_component()
self.assertEqual(primary, "wilma", msg="primary component test expected wilma but got %s"%primary)
# this is a "E" compset
case._compsetname = case._compsetname.replace("XOCN","DOCN%SOM")
primary = case._find_primary_component()
self.assertEqual(primary, "allactive", msg="primary component test expected allactive but got %s"%primary)
# finally a "B" compset
case.set_value("COMP_OCN","bambam")
primary = case._find_primary_component()
self.assertEqual(primary, "allactive", msg="primary component test expected allactive but got %s"%primary)
cls._do_teardown.append(testdir)
def test_j_createnewcase_user_compset_vs_alias(self):
"""
Create one case using the compset alias and another using the full compset long name,
and verify they are equivalent by comparing the namelist files in CaseDocs.
Ignore the modelio files and normalize the case directory names before comparing.
"""
cls = self.__class__
testdir1 = os.path.join(cls._testroot, 'testcreatenewcase_user_compset')
if os.path.exists(testdir1):
shutil.rmtree(testdir1)
cls._testdirs.append(testdir1)
args = ' --case CreateNewcaseTest --script-root {} --compset 2000_DATM%NYF_SLND_SICE_DOCN%SOMAQP_SROF_SGLC_SWAV --res f19_g16 --output-root {} --handle-preexisting-dirs u'.format(testdir1, cls._testroot)
if CIME.utils.get_model() == "cesm":
args += " --run-unsupported"
if TEST_COMPILER is not None:
args += " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args += " --mpilib %s"%TEST_MPILIB
run_cmd_assert_result(self, "{}/create_newcase {}" .format (SCRIPT_DIR, args), from_dir=SCRIPT_DIR)
run_cmd_assert_result(self, "./case.setup ", from_dir=testdir1)
run_cmd_assert_result(self, "./preview_namelists ", from_dir=testdir1)
dir1 = os.path.join(testdir1,"CaseDocs")
dir2 = os.path.join(testdir1,"CleanCaseDocs")
os.mkdir(dir2)
for _file in os.listdir(dir1):
if "modelio" in _file:
continue
with open(os.path.join(dir1,_file),"r") as fi:
file_text = fi.read()
file_text = file_text.replace(os.path.basename(testdir1),"PATH")
with open(os.path.join(dir2,_file), "w") as fo:
fo.write(file_text)
cleancasedocs1 = dir2
testdir2 = os.path.join(cls._testroot, 'testcreatenewcase_alias_compset')
if os.path.exists(testdir2):
shutil.rmtree(testdir2)
cls._testdirs.append(testdir2)
args = ' --case CreateNewcaseTest --script-root {} --compset ADSOMAQP --res f19_g16 --output-root {} --handle-preexisting-dirs u'.format(testdir2, cls._testroot)
if CIME.utils.get_model() == "cesm":
args += " --run-unsupported"
if TEST_COMPILER is not None:
args += " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args += " --mpilib %s"%TEST_MPILIB
run_cmd_assert_result(self, "{}/create_newcase {}".format(SCRIPT_DIR, args), from_dir=SCRIPT_DIR)
run_cmd_assert_result(self, "./case.setup ", from_dir=testdir2)
run_cmd_assert_result(self, "./preview_namelists ", from_dir=testdir2)
dir1 = os.path.join(testdir2,"CaseDocs")
dir2 = os.path.join(testdir2,"CleanCaseDocs")
os.mkdir(dir2)
for _file in os.listdir(dir1):
if "modelio" in _file:
continue
with open(os.path.join(dir1,_file),"r") as fi:
file_text = fi.read()
file_text = file_text.replace(os.path.basename(testdir2),"PATH")
with open(os.path.join(dir2,_file), "w") as fo:
fo.write(file_text)
cleancasedocs2 = dir2
dcmp = filecmp.dircmp(cleancasedocs1, cleancasedocs2)
self.assertTrue(len(dcmp.diff_files) == 0, "CaseDocs differ {}".format(dcmp.diff_files))
cls._do_teardown.append(testdir1)
cls._do_teardown.append(testdir2)
def test_k_append_config(self):
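# Reading an additional machines file should append to the available machine list
# rather than replace it.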
machlist_before = MACHINE.list_available_machines()
self.assertTrue(len(machlist_before) > 1, msg="Problem reading machine list")
newmachfile = os.path.join(get_cime_root(),"config",
"xml_schemas","config_machines_template.xml")
MACHINE.read(newmachfile)
machlist_after = MACHINE.list_available_machines()
self.assertEqual(len(machlist_after)-len(machlist_before), 1, msg="Not able to append config_machines.xml {} {}".format(len(machlist_after), len(machlist_before)))
self.assertEqual("mymachine" in machlist_after, True, msg="Not able to append config_machines.xml")
def test_m_createnewcase_alternate_drivers(self):
# Test that case.setup runs for nuopc and moab drivers
cls = self.__class__
model = CIME.utils.get_model()
for driver in ("nuopc", "moab"):
if not os.path.exists(os.path.join(get_cime_root(),"src","drivers",driver)):
self.skipTest("Skipping driver test for {}, driver not found".format(driver))
if ((model == 'cesm' and driver == 'moab') or
(model == 'e3sm' and driver == 'nuopc')):
continue
testdir = os.path.join(cls._testroot, 'testcreatenewcase.{}'.format( driver))
if os.path.exists(testdir):
shutil.rmtree(testdir)
args = " --driver {} --case {} --compset X --res f19_g16 --output-root {} --handle-preexisting-dirs=r".format(driver, testdir, cls._testroot)
if model == "cesm":
args += " --run-unsupported"
if TEST_COMPILER is not None:
args = args + " --compiler %s"%TEST_COMPILER
if TEST_MPILIB is not None:
args = args + " --mpilib %s"%TEST_MPILIB
cls._testdirs.append(testdir)
run_cmd_assert_result(self, "./create_newcase %s"%(args), from_dir=SCRIPT_DIR)
self.assertTrue(os.path.exists(testdir))
self.assertTrue(os.path.exists(os.path.join(testdir, "case.setup")))
run_cmd_assert_result(self, "./case.setup", from_dir=testdir)
with Case(testdir, read_only=False) as case:
comp_interface = case.get_value("COMP_INTERFACE")
self.assertTrue(driver == comp_interface, msg="%s != %s"%(driver, comp_interface))
cls._do_teardown.append(testdir)
@classmethod
def tearDownClass(cls):
do_teardown = len(cls._do_teardown) > 0 and sys.exc_info() == (None, None, None) and not NO_TEARDOWN
for tfile in cls._testdirs:
if tfile not in cls._do_teardown:
print("Detected failed test or user request no teardown")
print("Leaving case directory : %s"%tfile)
elif do_teardown:
try:
print ("Attempt to remove directory {}".format(tfile))
shutil.rmtree(tfile)
except BaseException:
print("Could not remove directory {}".format(tfile))
###############################################################################
class M_TestWaitForTests(unittest.TestCase):
###############################################################################
###########################################################################
def setUp(self):
###########################################################################
self._testroot = os.path.join(TEST_ROOT,"TestWaitForTests")
self._timestamp = CIME.utils.get_timestamp()
# basic tests
self._testdir_all_pass = os.path.join(self._testroot, 'scripts_regression_tests.testdir_all_pass')
self._testdir_with_fail = os.path.join(self._testroot, 'scripts_regression_tests.testdir_with_fail')
self._testdir_unfinished = os.path.join(self._testroot, 'scripts_regression_tests.testdir_unfinished')
self._testdir_unfinished2 = os.path.join(self._testroot, 'scripts_regression_tests.testdir_unfinished2')
# live tests
self._testdir_teststatus1 = os.path.join(self._testroot, 'scripts_regression_tests.testdir_teststatus1')
self._testdir_teststatus2 = os.path.join(self._testroot, 'scripts_regression_tests.testdir_teststatus2')
self._testdirs = [self._testdir_all_pass, self._testdir_with_fail, self._testdir_unfinished, self._testdir_unfinished2,
self._testdir_teststatus1, self._testdir_teststatus2]
basic_tests = self._testdirs[:self._testdirs.index(self._testdir_teststatus1)]
for testdir in self._testdirs:
if os.path.exists(testdir):
shutil.rmtree(testdir)
os.makedirs(testdir)
for r in range(10):
for testdir in basic_tests:
os.makedirs(os.path.join(testdir, str(r)))
make_fake_teststatus(os.path.join(testdir, str(r)), "Test_%d" % r, TEST_PASS_STATUS, RUN_PHASE)
make_fake_teststatus(os.path.join(self._testdir_with_fail, "5"), "Test_5", TEST_FAIL_STATUS, RUN_PHASE)
make_fake_teststatus(os.path.join(self._testdir_unfinished, "5"), "Test_5", TEST_PEND_STATUS, RUN_PHASE)
make_fake_teststatus(os.path.join(self._testdir_unfinished2, "5"), "Test_5", TEST_PASS_STATUS, SUBMIT_PHASE)
integration_tests = self._testdirs[len(basic_tests):]
for integration_test in integration_tests:
os.makedirs(os.path.join(integration_test, "0"))
make_fake_teststatus(os.path.join(integration_test, "0"), "Test_0", TEST_PASS_STATUS, CORE_PHASES[0])
# Set up proxy if possible
self._unset_proxy = setup_proxy()
self._thread_error = None
###########################################################################
def tearDown(self):
###########################################################################
do_teardown = sys.exc_info() == (None, None, None) and not NO_TEARDOWN
if do_teardown:
for testdir in self._testdirs:
shutil.rmtree(testdir)
kill_subprocesses()
if (self._unset_proxy):
del os.environ["http_proxy"]
###########################################################################
def simple_test(self, testdir, expected_results, extra_args="", build_name=None):
###########################################################################
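# Run wait_for_tests against the fake test directories and verify that the statuses
# it reports match expected_results, in order.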
# Need these flags to test dashboard if e3sm
if CIME.utils.get_model() == "e3sm" and build_name is not None:
extra_args += " -b %s" % build_name
expected_stat = 0 if expected_results == ["PASS"]*len(expected_results) else CIME.utils.TESTS_FAILED_ERR_CODE
output = run_cmd_assert_result(self, "%s/wait_for_tests -p ACME_test */TestStatus %s" % (TOOLS_DIR, extra_args),
from_dir=testdir, expected_stat=expected_stat)
lines = [line for line in output.splitlines() if line.startswith("Test '")]
self.assertEqual(len(lines), len(expected_results))
for idx, line in enumerate(lines):
testname, status = parse_test_status(line)
self.assertEqual(status, expected_results[idx])
self.assertEqual(testname, "Test_%d" % idx)
###########################################################################
def threaded_test(self, testdir, expected_results, extra_args="", build_name=None):
###########################################################################
try:
self.simple_test(testdir, expected_results, extra_args, build_name)
except AssertionError as e:
self._thread_error = str(e)
###########################################################################
def test_wait_for_test_all_pass(self):
###########################################################################
self.simple_test(self._testdir_all_pass, ["PASS"] * 10)
###########################################################################
def test_wait_for_test_with_fail(self):
###########################################################################
expected_results = ["FAIL" if item == 5 else "PASS" for item in range(10)]
self.simple_test(self._testdir_with_fail, expected_results)
###########################################################################
def test_wait_for_test_no_wait(self):
###########################################################################
expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)]
self.simple_test(self._testdir_unfinished, expected_results, "-n")
###########################################################################
def test_wait_for_test_timeout(self):
###########################################################################
expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)]
self.simple_test(self._testdir_unfinished, expected_results, "--timeout=3")
###########################################################################
def test_wait_for_test_wait_for_pend(self):
###########################################################################
run_thread = threading.Thread(target=self.threaded_test, args=(self._testdir_unfinished, ["PASS"] * 10))
run_thread.daemon = True
run_thread.start()
time.sleep(5) # Kinda hacky
self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited")
with TestStatus(test_dir=os.path.join(self._testdir_unfinished, "5")) as ts:
ts.set_status(RUN_PHASE, TEST_PASS_STATUS)
run_thread.join(timeout=10)
self.assertFalse(run_thread.is_alive(), msg="wait_for_tests should have finished")
self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
###########################################################################
def test_wait_for_test_wait_for_missing_run_phase(self):
###########################################################################
run_thread = threading.Thread(target=self.threaded_test, args=(self._testdir_unfinished2, ["PASS"] * 10))
run_thread.daemon = True
run_thread.start()
time.sleep(5) # Kinda hacky
self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited")
with TestStatus(test_dir=os.path.join(self._testdir_unfinished2, "5")) as ts:
ts.set_status(RUN_PHASE, TEST_PASS_STATUS)
run_thread.join(timeout=10)
self.assertFalse(run_thread.is_alive(), msg="wait_for_tests should have finished")
self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
###########################################################################
def test_wait_for_test_wait_kill(self):
###########################################################################
expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)]
run_thread = threading.Thread(target=self.threaded_test, args=(self._testdir_unfinished, expected_results))
run_thread.daemon = True
run_thread.start()
time.sleep(5)
self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited")
kill_python_subprocesses(signal.SIGTERM, expected_num_killed=1, tester=self)
run_thread.join(timeout=10)
self.assertFalse(run_thread.is_alive(), msg="wait_for_tests should have finished")
self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
###########################################################################
def test_wait_for_test_cdash_pass(self):
###########################################################################
expected_results = ["PASS"] * 10
build_name = "regression_test_pass_" + self._timestamp
run_thread = threading.Thread(target=self.threaded_test,
args=(self._testdir_all_pass, expected_results, "", build_name))
run_thread.daemon = True
run_thread.start()
run_thread.join(timeout=10)
self.assertFalse(run_thread.is_alive(), msg="wait_for_tests should have finished")
self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
assert_dashboard_has_build(self, build_name)
###########################################################################
def test_wait_for_test_cdash_kill(self):
###########################################################################
expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)]
build_name = "regression_test_kill_" + self._timestamp
run_thread = threading.Thread(target=self.threaded_test,
args=(self._testdir_unfinished, expected_results, "", build_name))
run_thread.daemon = True
run_thread.start()
time.sleep(5)
self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited")
kill_python_subprocesses(signal.SIGTERM, expected_num_killed=1, tester=self)
run_thread.join(timeout=10)
self.assertFalse(run_thread.is_alive(), msg="wait_for_tests should have finished")
self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
assert_dashboard_has_build(self, build_name)
if CIME.utils.get_model() == "e3sm":
cdash_result_dir = os.path.join(self._testdir_unfinished, "Testing")
tag_file = os.path.join(cdash_result_dir, "TAG")
self.assertTrue(os.path.isdir(cdash_result_dir))
self.assertTrue(os.path.isfile(tag_file))
tag = open(tag_file, "r").readlines()[0].strip()
xml_file = os.path.join(cdash_result_dir, tag, "Test.xml")
self.assertTrue(os.path.isfile(xml_file))
xml_contents = open(xml_file, "r").read()
self.assertTrue(r'<TestList><Test>Test_0</Test><Test>Test_1</Test><Test>Test_2</Test><Test>Test_3</Test><Test>Test_4</Test><Test>Test_5</Test><Test>Test_6</Test><Test>Test_7</Test><Test>Test_8</Test><Test>Test_9</Test></TestList>'
in xml_contents)
self.assertTrue(r'<Test Status="notrun"><Name>Test_5</Name>' in xml_contents)
# TODO: Any further checking of xml output worth doing?
###########################################################################
def live_test_impl(self, testdir, expected_results, last_phase, last_status):
###########################################################################
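# Walk a fake test through the core phases one at a time and check that wait_for_tests
# keeps waiting until last_phase is given last_status.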
run_thread = threading.Thread(target=self.threaded_test, args=(testdir, expected_results))
run_thread.daemon = True
run_thread.start()
time.sleep(5)
self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited")
for core_phase in CORE_PHASES[1:]:
with TestStatus(test_dir=os.path.join(testdir, "0")) as ts:
ts.set_status(core_phase, last_status if core_phase == last_phase else TEST_PASS_STATUS)
time.sleep(5)
if core_phase != last_phase:
self.assertTrue(run_thread.isAlive(), msg="wait_for_tests should have waited after passing phase {}".format(core_phase))
else:
run_thread.join(timeout=10)
self.assertFalse(run_thread.isAlive(), msg="wait_for_tests should have finished after phase {}".format(core_phase))
break
self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
###########################################################################
def test_wait_for_test_test_status_integration_pass(self):
###########################################################################
self.live_test_impl(self._testdir_teststatus1, ["PASS"], RUN_PHASE, TEST_PASS_STATUS)
###########################################################################
def test_wait_for_test_test_status_integration_submit_fail(self):
###########################################################################
self.live_test_impl(self._testdir_teststatus1, ["FAIL"], SUBMIT_PHASE, TEST_FAIL_STATUS)
###############################################################################
class TestCreateTestCommon(unittest.TestCase):
###############################################################################
###########################################################################
def setUp(self):
###########################################################################
self._thread_error = None
self._unset_proxy = setup_proxy()
self._machine = MACHINE.get_machine_name()
self._compiler = MACHINE.get_default_compiler() if TEST_COMPILER is None else TEST_COMPILER
self._baseline_name = "fake_testing_only_%s" % CIME.utils.get_timestamp()
self._baseline_area = os.path.join(TEST_ROOT, "baselines")
self._testroot = TEST_ROOT
self._hasbatch = MACHINE.has_batch_system() and not NO_BATCH
self._do_teardown = not NO_TEARDOWN
###########################################################################
def tearDown(self):
###########################################################################
kill_subprocesses()
if (self._unset_proxy):
del os.environ["http_proxy"]
files_to_clean = []
baselines = os.path.join(self._baseline_area, self._baseline_name)
if (os.path.isdir(baselines)):
files_to_clean.append(baselines)
for test_id in ["master", self._baseline_name]:
for leftover in glob.glob(os.path.join(self._testroot, "*%s*" % test_id)):
files_to_clean.append(leftover)
do_teardown = self._do_teardown and sys.exc_info() == (None, None, None)
if (not do_teardown):
print("Detected failed test or user request no teardown")
print("Leaving files:")
for file_to_clean in files_to_clean:
print(" " + file_to_clean)
else:
# For batch machines we need to avoid a race condition while the batch
# system finishes I/O for the case.
if self._hasbatch:
time.sleep(5)
for file_to_clean in files_to_clean:
if (os.path.isdir(file_to_clean)):
shutil.rmtree(file_to_clean)
else:
os.remove(file_to_clean)
###########################################################################
def _create_test(self, extra_args, test_id=None, pre_run_errors=False, run_errors=False, env_changes=""):
###########################################################################
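# Build a create_test command from extra_args plus the standard test-id, baseline-root,
# compiler and mpilib options, run it, and wait for the tests when doing a full run.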
test_id = CIME.utils.get_timestamp() if test_id is None else test_id
extra_args.append("-t {}".format(test_id))
extra_args.append("--baseline-root {}".format(self._baseline_area))
if NO_BATCH:
extra_args.append("--no-batch")
if TEST_COMPILER and ([extra_arg for extra_arg in extra_args if "--compiler" in extra_arg] == []):
extra_args.append("--compiler={}".format(TEST_COMPILER))
if TEST_MPILIB and ([extra_arg for extra_arg in extra_args if "--mpilib" in extra_arg] == []):
extra_args.append("--mpilib={}".format(TEST_MPILIB))
extra_args.append("--test-root={0} --output-root={0}".format(TEST_ROOT))
full_run = (set(extra_args) & set(["-n", "--namelist-only", "--no-setup", "--no-build"])) == set()
if self._hasbatch:
expected_stat = 0 if not pre_run_errors else CIME.utils.TESTS_FAILED_ERR_CODE
else:
expected_stat = 0 if not pre_run_errors and not run_errors else CIME.utils.TESTS_FAILED_ERR_CODE
run_cmd_assert_result(self, "{} {}/create_test {}".format(env_changes, SCRIPT_DIR, " ".join(extra_args)),
expected_stat=expected_stat)
if full_run:
self._wait_for_tests(test_id, expect_works=(not pre_run_errors and not run_errors))
###########################################################################
def _wait_for_tests(self, test_id, expect_works=True):
###########################################################################
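# On batch machines the submitted test jobs finish asynchronously, so block here
# until wait_for_tests reports their final status.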
if self._hasbatch:
timeout_arg = "--timeout={}".format(GLOBAL_TIMEOUT) if GLOBAL_TIMEOUT is not None else ""
expected_stat = 0 if expect_works else CIME.utils.TESTS_FAILED_ERR_CODE
run_cmd_assert_result(self, "{}/wait_for_tests {} *{}/TestStatus".format(TOOLS_DIR, timeout_arg, test_id),
from_dir=self._testroot, expected_stat=expected_stat)
###############################################################################
class O_TestTestScheduler(TestCreateTestCommon):
###############################################################################
###########################################################################
def test_a_phases(self):
###########################################################################
# exclude the MEMLEAK and other special-case tests here, leaving only the
# basic build-fail, run-fail and run-pass tests
tests = get_tests.get_full_test_names(["cime_test_only",
"^TESTMEMLEAKFAIL_P1.f09_g16.X",
"^TESTMEMLEAKPASS_P1.f09_g16.X",
"^TESTRUNSTARCFAIL_P1.f19_g16_rx1.A",
"^TESTTESTDIFF_P1.f19_g16_rx1.A",
"^TESTBUILDFAILEXC_P1.f19_g16_rx1.A",
"^TESTRUNFAILEXC_P1.f19_g16_rx1.A"],
self._machine, self._compiler)
self.assertEqual(len(tests), 3)
ct = TestScheduler(tests, test_root=TEST_ROOT, output_root=TEST_ROOT,
compiler=self._compiler, mpilib=TEST_MPILIB)
build_fail_test = [item for item in tests if "TESTBUILDFAIL" in item][0]
run_fail_test = [item for item in tests if "TESTRUNFAIL" in item][0]
pass_test = [item for item in tests if "TESTRUNPASS" in item][0]
self.assertTrue("BUILDFAIL" in build_fail_test, msg="Wrong test '%s'" % build_fail_test)
self.assertTrue("RUNFAIL" in run_fail_test, msg="Wrong test '%s'" % run_fail_test)
self.assertTrue("RUNPASS" in pass_test, msg="Wrong test '%s'" % pass_test)
for idx, phase in enumerate(ct._phases):
for test in ct._tests:
if (phase == CIME.test_scheduler.TEST_START):
continue
elif (phase == MODEL_BUILD_PHASE):
ct._update_test_status(test, phase, TEST_PEND_STATUS)
if (test == build_fail_test):
ct._update_test_status(test, phase, TEST_FAIL_STATUS)
self.assertTrue(ct._is_broken(test))
self.assertFalse(ct._work_remains(test))
else:
ct._update_test_status(test, phase, TEST_PASS_STATUS)
self.assertFalse(ct._is_broken(test))
self.assertTrue(ct._work_remains(test))
elif (phase == RUN_PHASE):
if (test == build_fail_test):
with self.assertRaises(CIMEError):
ct._update_test_status(test, phase, TEST_PEND_STATUS)
else:
ct._update_test_status(test, phase, TEST_PEND_STATUS)
self.assertFalse(ct._work_remains(test))
if (test == run_fail_test):
ct._update_test_status(test, phase, TEST_FAIL_STATUS)
self.assertTrue(ct._is_broken(test))
else:
ct._update_test_status(test, phase, TEST_PASS_STATUS)
self.assertFalse(ct._is_broken(test))
self.assertFalse(ct._work_remains(test))
else:
with self.assertRaises(CIMEError):
ct._update_test_status(test, ct._phases[idx+1], TEST_PEND_STATUS)
with self.assertRaises(CIMEError):
ct._update_test_status(test, phase, TEST_PASS_STATUS)
ct._update_test_status(test, phase, TEST_PEND_STATUS)
self.assertFalse(ct._is_broken(test))
self.assertTrue(ct._work_remains(test))
with self.assertRaises(CIMEError):
ct._update_test_status(test, phase, TEST_PEND_STATUS)
ct._update_test_status(test, phase, TEST_PASS_STATUS)
with self.assertRaises(CIMEError):
ct._update_test_status(test, phase, TEST_FAIL_STATUS)
self.assertFalse(ct._is_broken(test))
self.assertTrue(ct._work_remains(test))
###########################################################################
def test_b_full(self):
###########################################################################
tests = get_tests.get_full_test_names(["cime_test_only"], self._machine, self._compiler)
test_id="%s-%s" % (self._baseline_name, CIME.utils.get_timestamp())
ct = TestScheduler(tests, test_id=test_id, no_batch=NO_BATCH, test_root=TEST_ROOT,
output_root=TEST_ROOT,compiler=self._compiler, mpilib=TEST_MPILIB)
build_fail_test = [item for item in tests if "TESTBUILDFAIL_" in item][0]
build_fail_exc_test = [item for item in tests if "TESTBUILDFAILEXC" in item][0]
run_fail_test = [item for item in tests if "TESTRUNFAIL_" in item][0]
run_fail_exc_test = [item for item in tests if "TESTRUNFAILEXC" in item][0]
pass_test = [item for item in tests if "TESTRUNPASS" in item][0]
test_diff_test = [item for item in tests if "TESTTESTDIFF" in item][0]
mem_fail_test = [item for item in tests if "TESTMEMLEAKFAIL" in item][0]
mem_pass_test = [item for item in tests if "TESTMEMLEAKPASS" in item][0]
st_arch_fail_test = [item for item in tests if "TESTRUNSTARCFAIL" in item][0]
log_lvl = logging.getLogger().getEffectiveLevel()
logging.disable(logging.CRITICAL)
try:
ct.run_tests()
finally:
logging.getLogger().setLevel(log_lvl)
self._wait_for_tests(test_id, expect_works=False)
test_statuses = glob.glob("%s/*%s/TestStatus" % (self._testroot, test_id))
self.assertEqual(len(tests), len(test_statuses))
for test_status in test_statuses:
ts = TestStatus(test_dir=os.path.dirname(test_status))
test_name = ts.get_name()
log_files = glob.glob("%s/%s*%s/TestStatus.log" % (self._testroot, test_name, test_id))
self.assertEqual(len(log_files), 1, "Expected exactly one TestStatus.log file, found %d" % len(log_files))
log_file = log_files[0]
if (test_name == build_fail_test):
assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_FAIL_STATUS)
data = open(log_file, "r").read()
self.assertTrue("Intentional fail for testing infrastructure" in data,
"Broken test did not report build error:\n%s" % data)
elif (test_name == build_fail_exc_test):
data = open(log_file, "r").read()
assert_test_status(self, test_name, ts, SHAREDLIB_BUILD_PHASE, TEST_FAIL_STATUS)
self.assertTrue("Exception from init" in data,
"Broken test did not report build error:\n%s" % data)
elif (test_name == run_fail_test):
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_FAIL_STATUS)
elif (test_name == run_fail_exc_test):
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_FAIL_STATUS)
data = open(log_file, "r").read()
self.assertTrue("Exception from run_phase" in data,
"Broken test did not report run error:\n%s" % data)
elif (test_name == mem_fail_test):
assert_test_status(self, test_name, ts, MEMLEAK_PHASE, TEST_FAIL_STATUS)
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
elif (test_name == test_diff_test):
assert_test_status(self, test_name, ts, "COMPARE_base_rest", TEST_FAIL_STATUS)
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
elif test_name == st_arch_fail_test:
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
assert_test_status(self, test_name, ts, STARCHIVE_PHASE, TEST_FAIL_STATUS)
else:
self.assertTrue(test_name in [pass_test, mem_pass_test])
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
if (test_name == mem_pass_test):
assert_test_status(self, test_name, ts, MEMLEAK_PHASE, TEST_PASS_STATUS)
###########################################################################
def test_c_use_existing(self):
###########################################################################
tests = get_tests.get_full_test_names(["TESTBUILDFAIL_P1.f19_g16_rx1.A", "TESTRUNFAIL_P1.f19_g16_rx1.A", "TESTRUNPASS_P1.f19_g16_rx1.A"],
self._machine, self._compiler)
test_id="%s-%s" % (self._baseline_name, CIME.utils.get_timestamp())
ct = TestScheduler(tests, test_id=test_id, no_batch=NO_BATCH, test_root=TEST_ROOT,
output_root=TEST_ROOT,compiler=self._compiler, mpilib=TEST_MPILIB)
build_fail_test = [item for item in tests if "TESTBUILDFAIL" in item][0]
run_fail_test = [item for item in tests if "TESTRUNFAIL" in item][0]
pass_test = [item for item in tests if "TESTRUNPASS" in item][0]
log_lvl = logging.getLogger().getEffectiveLevel()
logging.disable(logging.CRITICAL)
try:
ct.run_tests()
finally:
logging.getLogger().setLevel(log_lvl)
test_statuses = glob.glob("%s/*%s/TestStatus" % (self._testroot, test_id))
self.assertEqual(len(tests), len(test_statuses))
self._wait_for_tests(test_id, expect_works=False)
for test_status in test_statuses:
casedir = os.path.dirname(test_status)
ts = TestStatus(test_dir=casedir)
test_name = ts.get_name()
if test_name == build_fail_test:
assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_FAIL_STATUS)
with TestStatus(test_dir=casedir) as ts:
ts.set_status(MODEL_BUILD_PHASE, TEST_PEND_STATUS)
elif test_name == run_fail_test:
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_FAIL_STATUS)
with TestStatus(test_dir=casedir) as ts:
ts.set_status(SUBMIT_PHASE, TEST_PEND_STATUS)
else:
self.assertTrue(test_name == pass_test)
assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_PASS_STATUS)
assert_test_status(self, test_name, ts, SUBMIT_PHASE, TEST_PASS_STATUS)
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
os.environ["TESTBUILDFAIL_PASS"] = "True"
os.environ["TESTRUNFAIL_PASS"] = "True"
ct2 = TestScheduler(tests, test_id=test_id, no_batch=NO_BATCH, use_existing=True,
test_root=TEST_ROOT,output_root=TEST_ROOT,compiler=self._compiler,
mpilib=TEST_MPILIB)
log_lvl = logging.getLogger().getEffectiveLevel()
logging.disable(logging.CRITICAL)
try:
ct2.run_tests()
finally:
logging.getLogger().setLevel(log_lvl)
self._wait_for_tests(test_id)
for test_status in test_statuses:
ts = TestStatus(test_dir=os.path.dirname(test_status))
test_name = ts.get_name()
assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_PASS_STATUS)
assert_test_status(self, test_name, ts, SUBMIT_PHASE, TEST_PASS_STATUS)
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
del os.environ["TESTBUILDFAIL_PASS"]
del os.environ["TESTRUNFAIL_PASS"]
# test that passed tests are not re-run
ct2 = TestScheduler(tests, test_id=test_id, no_batch=NO_BATCH, use_existing=True,
test_root=TEST_ROOT,output_root=TEST_ROOT,compiler=self._compiler,
mpilib=TEST_MPILIB)
log_lvl = logging.getLogger().getEffectiveLevel()
logging.disable(logging.CRITICAL)
try:
ct2.run_tests()
finally:
logging.getLogger().setLevel(log_lvl)
self._wait_for_tests(test_id)
for test_status in test_statuses:
ts = TestStatus(test_dir=os.path.dirname(test_status))
test_name = ts.get_name()
assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_PASS_STATUS)
assert_test_status(self, test_name, ts, SUBMIT_PHASE, TEST_PASS_STATUS)
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
###########################################################################
def test_d_retry(self):
###########################################################################
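# Exercise create_test's --retry option against a mix of deliberately failing and
# passing fake tests.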
args = ["TESTBUILDFAIL_P1.f19_g16_rx1.A", "TESTRUNFAIL_P1.f19_g16_rx1.A", "TESTRUNPASS_P1.f19_g16_rx1.A", "--retry=1"]
self._create_test(args)
###############################################################################
class P_TestJenkinsGenericJob(TestCreateTestCommon):
###############################################################################
###########################################################################
def setUp(self):
###########################################################################
if CIME.utils.get_model() != "e3sm":
self.skipTest("Skipping Jenkins tests. E3SM feature")
TestCreateTestCommon.setUp(self)
# Need to run in a subdir in order to not have CTest clash. Name it
# such that it should be cleaned up by the parent tearDown
self._testdir = os.path.join(self._testroot, "jenkins_test_%s" % self._baseline_name)
os.makedirs(self._testdir)
# Change root to avoid clashing with other jenkins_generic_jobs
self._jenkins_root = os.path.join(self._testdir, "J")
###########################################################################
def tearDown(self):
###########################################################################
TestCreateTestCommon.tearDown(self)
if "TESTRUNDIFF_ALTERNATE" in os.environ:
del os.environ["TESTRUNDIFF_ALTERNATE"]
###########################################################################
def simple_test(self, expect_works, extra_args, build_name=None):
###########################################################################
if NO_BATCH:
extra_args += " --no-batch"
# Need these flags to test dashboard if e3sm
if CIME.utils.get_model() == "e3sm" and build_name is not None:
extra_args += " -p ACME_test --submit-to-cdash --cdash-build-group=Nightly -c %s" % build_name
run_cmd_assert_result(self, "%s/jenkins_generic_job -r %s %s -B %s" % (TOOLS_DIR, self._testdir, extra_args, self._baseline_area),
from_dir=self._testdir, expected_stat=(0 if expect_works else CIME.utils.TESTS_FAILED_ERR_CODE))
###########################################################################
def threaded_test(self, expect_works, extra_args, build_name=None):
###########################################################################
try:
self.simple_test(expect_works, extra_args, build_name)
except AssertionError as e:
self._thread_error = str(e)
###########################################################################
def assert_num_leftovers(self, suite):
###########################################################################
num_tests_in_tiny = len(get_tests.get_test_suite(suite))
jenkins_dirs = glob.glob("%s/*%s*/" % (self._jenkins_root, self._baseline_name.capitalize())) # case dirs
# scratch_dirs = glob.glob("%s/*%s*/" % (self._testroot, test_id)) # blr/run dirs
self.assertEqual(num_tests_in_tiny, len(jenkins_dirs),
msg="Wrong number of leftover directories in %s, expected %d, see %s" % \
(self._jenkins_root, num_tests_in_tiny, jenkins_dirs))
# JGF: Can't test this at the moment due to root change flag given to jenkins_generic_job
# self.assertEqual(num_tests_in_tiny + 1, len(scratch_dirs),
# msg="Wrong number of leftover directories in %s, expected %d, see %s" % \
# (self._testroot, num_tests_in_tiny, scratch_dirs))
###########################################################################
def test_jenkins_generic_job(self):
###########################################################################
# Generate fresh baselines so that this test is not impacted by
# unresolved diffs
self.simple_test(True, "-t cime_test_only_pass -g -b %s" % self._baseline_name)
self.assert_num_leftovers("cime_test_only_pass")
build_name = "jenkins_generic_job_pass_%s" % CIME.utils.get_timestamp()
self.simple_test(True, "-t cime_test_only_pass -b %s" % self._baseline_name, build_name=build_name)
self.assert_num_leftovers("cime_test_only_pass") # jenkins_generic_job should have automatically cleaned up leftovers from prior run
assert_dashboard_has_build(self, build_name)
###########################################################################
def test_jenkins_generic_job_kill(self):
###########################################################################
build_name = "jenkins_generic_job_kill_%s" % CIME.utils.get_timestamp()
run_thread = threading.Thread(target=self.threaded_test, args=(False, " -t cime_test_only_slow_pass -b master --baseline-compare=no", build_name))
run_thread.daemon = True
run_thread.start()
time.sleep(120)
kill_subprocesses(sig=signal.SIGTERM)
run_thread.join(timeout=30)
self.assertFalse(run_thread.isAlive(), msg="jenkins_generic_job should have finished")
self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
assert_dashboard_has_build(self, build_name)
###########################################################################
def test_jenkins_generic_job_realistic_dash(self):
###########################################################################
# The actual quality of the cdash results for this test can only
# be inspected manually
# Generate fresh baselines so that this test is not impacted by
# unresolved diffs
self.simple_test(False, "-t cime_test_all -g -b %s" % self._baseline_name)
self.assert_num_leftovers("cime_test_all")
# Should create a diff
os.environ["TESTRUNDIFF_ALTERNATE"] = "True"
# Should create a nml diff
# Modify namelist
fake_nl = """
&fake_nml
fake_item = 'fake'
fake = .true.
/"""
baseline_glob = glob.glob(os.path.join(self._baseline_area, self._baseline_name, "TESTRUNPASS*"))
self.assertEqual(len(baseline_glob), 1, msg="Expected one match, got:\n%s" % "\n".join(baseline_glob))
for baseline_dir in baseline_glob:
nl_path = os.path.join(baseline_dir, "CaseDocs", "datm_in")
self.assertTrue(os.path.isfile(nl_path), msg="Missing file %s" % nl_path)
os.chmod(nl_path, osstat.S_IRUSR | osstat.S_IWUSR)
with open(nl_path, "a") as nl_file:
nl_file.write(fake_nl)
build_name = "jenkins_generic_job_mixed_%s" % CIME.utils.get_timestamp()
self.simple_test(False, "-t cime_test_all -b %s" % self._baseline_name, build_name=build_name)
self.assert_num_leftovers("cime_test_all") # jenkins_generic_job should have automatically cleaned up leftovers from prior run
assert_dashboard_has_build(self, build_name)
###############################################################################
class M_TestCimePerformance(TestCreateTestCommon):
###############################################################################
###########################################################################
def test_cime_case_ctrl_performance(self):
###########################################################################
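# Rough performance check: repeatedly run a --no-build create_test of the cime_tiny
# suite and report the total elapsed time.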
ts = time.time()
num_repeat = 5
for _ in range(num_repeat):
self._create_test(["cime_tiny","--no-build"])
elapsed = time.time() - ts
print("Perf test result: {:0.2f}".format(elapsed))
###############################################################################
class T_TestRunRestart(TestCreateTestCommon):
###############################################################################
###########################################################################
def test_run_restart(self):
###########################################################################
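# NODEFAIL is intended to simulate recoverable job failures; the sentinel file in
# RUNDIR records each induced failure (three expected here).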
driver = CIME.utils.get_cime_default_driver()
if driver == "mct":
walltime="00:15:00"
else:
walltime="00:30:00"
self._create_test(["--walltime "+walltime,"NODEFAIL_P1.f09_g16.X"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"{}.{}".format(CIME.utils.get_full_test_name("NODEFAIL_P1.f09_g16.X", machine=self._machine, compiler=self._compiler), self._baseline_name))
rundir = run_cmd_no_fail("./xmlquery RUNDIR --value", from_dir=casedir)
fail_sentinel = os.path.join(rundir, "FAIL_SENTINEL")
self.assertTrue(os.path.exists(fail_sentinel), msg="Missing %s" % fail_sentinel)
self.assertEqual(open(fail_sentinel, "r").read().count("FAIL"), 3)
###########################################################################
def test_run_restart_too_many_fails(self):
###########################################################################
driver = CIME.utils.get_cime_default_driver()
if driver == "mct":
walltime="00:15:00"
else:
walltime="00:30:00"
self._create_test(["--walltime "+walltime,"NODEFAIL_P1.f09_g16.X"], test_id=self._baseline_name, env_changes="NODEFAIL_NUM_FAILS=5", run_errors=True)
casedir = os.path.join(self._testroot,
"{}.{}".format(CIME.utils.get_full_test_name("NODEFAIL_P1.f09_g16.X", machine=self._machine, compiler=self._compiler), self._baseline_name))
rundir = run_cmd_no_fail("./xmlquery RUNDIR --value", from_dir=casedir)
fail_sentinel = os.path.join(rundir, "FAIL_SENTINEL")
self.assertTrue(os.path.exists(fail_sentinel), msg="Missing %s" % fail_sentinel)
self.assertEqual(open(fail_sentinel, "r").read().count("FAIL"), 4)
###############################################################################
class Q_TestBlessTestResults(TestCreateTestCommon):
###############################################################################
###########################################################################
def setUp(self):
###########################################################################
TestCreateTestCommon.setUp(self)
# Set a restrictive umask so we can test that SharedAreas used for
# recording baselines are working
restrictive_mask = 0o027
self._orig_umask = os.umask(restrictive_mask)
###########################################################################
def tearDown(self):
###########################################################################
TestCreateTestCommon.tearDown(self)
if "TESTRUNDIFF_ALTERNATE" in os.environ:
del os.environ["TESTRUNDIFF_ALTERNATE"]
os.umask(self._orig_umask)
###############################################################################
def test_bless_test_results(self):
###############################################################################
# Generate some baselines
test_name = "TESTRUNDIFF_P1.f19_g16_rx1.A"
if CIME.utils.get_model() == "e3sm":
genargs = ["-g", "-o", "-b", self._baseline_name, test_name]
compargs = ["-c", "-b", self._baseline_name, test_name]
else:
genargs = ["-g", self._baseline_name, "-o", test_name,
"--baseline-root ", self._baseline_area]
compargs = ["-c", self._baseline_name, test_name,
"--baseline-root ", self._baseline_area]
self._create_test(genargs)
# Hist compare should pass
self._create_test(compargs)
# Change behavior
os.environ["TESTRUNDIFF_ALTERNATE"] = "True"
# Hist compare should now fail
test_id = "%s-%s" % (self._baseline_name, CIME.utils.get_timestamp())
self._create_test(compargs, test_id=test_id, run_errors=True)
# compare_test_results should detect the fail
cpr_cmd = "{}/compare_test_results --test-root {} -t {} 2>&1" \
.format(TOOLS_DIR, TEST_ROOT, test_id)
output = run_cmd_assert_result(self, cpr_cmd, expected_stat=CIME.utils.TESTS_FAILED_ERR_CODE)
# use regex
expected_pattern = re.compile(r'FAIL %s[^\s]* BASELINE' % test_name)
the_match = expected_pattern.search(output)
self.assertNotEqual(the_match, None,
msg="Cmd '%s' failed to display failed test in output:\n%s" % (cpr_cmd, output))
# Bless
run_cmd_no_fail("{}/bless_test_results --test-root {} --hist-only --force -t {}"
.format(TOOLS_DIR, TEST_ROOT, test_id))
# Hist compare should now pass again
self._create_test(compargs)
verify_perms(self, self._baseline_area)
###############################################################################
def test_rebless_namelist(self):
###############################################################################
# Generate some namelist baselines
test_to_change = "TESTRUNPASS_P1.f19_g16_rx1.A"
if CIME.utils.get_model() == "e3sm":
genargs = ["-n", "-g", "-o", "-b", self._baseline_name, "cime_test_only_pass"]
compargs = ["-n", "-c", "-b", self._baseline_name, "cime_test_only_pass"]
else:
genargs = ["-n", "-g", self._baseline_name, "-o", "cime_test_only_pass"]
compargs = ["-n", "-c", self._baseline_name, "cime_test_only_pass"]
self._create_test(genargs)
# Basic namelist compare
test_id = "%s-%s" % (self._baseline_name, CIME.utils.get_timestamp())
self._create_test(compargs, test_id=test_id)
# Check standalone case.cmpgen_namelists
casedir = os.path.join(self._testroot,
"%s.C.%s" % (CIME.utils.get_full_test_name(test_to_change, machine=self._machine, compiler=self._compiler), test_id))
run_cmd_assert_result(self, "./case.cmpgen_namelists", from_dir=casedir)
# compare_test_results should pass
cpr_cmd = "{}/compare_test_results --test-root {} -n -t {} 2>&1" \
.format(TOOLS_DIR, TEST_ROOT, test_id)
output = run_cmd_assert_result(self, cpr_cmd)
# use regex
expected_pattern = re.compile(r'PASS %s[^\s]* NLCOMP' % test_to_change)
the_match = expected_pattern.search(output)
self.assertNotEqual(the_match, None,
msg="Cmd '%s' failed to display passed test in output:\n%s" % (cpr_cmd, output))
# Modify namelist
fake_nl = """
&fake_nml
fake_item = 'fake'
fake = .true.
/"""
baseline_area = self._baseline_area
baseline_glob = glob.glob(os.path.join(baseline_area, self._baseline_name, "TEST*"))
self.assertEqual(len(baseline_glob), 3, msg="Expected three matches, got:\n%s" % "\n".join(baseline_glob))
for baseline_dir in baseline_glob:
nl_path = os.path.join(baseline_dir, "CaseDocs", "datm_in")
self.assertTrue(os.path.isfile(nl_path), msg="Missing file %s" % nl_path)
os.chmod(nl_path, osstat.S_IRUSR | osstat.S_IWUSR)
with open(nl_path, "a") as nl_file:
nl_file.write(fake_nl)
# Basic namelist compare should now fail
test_id = "%s-%s" % (self._baseline_name, CIME.utils.get_timestamp())
self._create_test(compargs, test_id=test_id, pre_run_errors=True)
casedir = os.path.join(self._testroot,
"%s.C.%s" % (CIME.utils.get_full_test_name(test_to_change, machine=self._machine, compiler=self._compiler), test_id))
run_cmd_assert_result(self, "./case.cmpgen_namelists", from_dir=casedir, expected_stat=100)
# preview namelists should work
run_cmd_assert_result(self, "./preview_namelists", from_dir=casedir)
# This should still fail
run_cmd_assert_result(self, "./case.cmpgen_namelists", from_dir=casedir, expected_stat=100)
# compare_test_results should fail
cpr_cmd = "{}/compare_test_results --test-root {} -n -t {} 2>&1" \
.format(TOOLS_DIR, TEST_ROOT, test_id)
output = run_cmd_assert_result(self, cpr_cmd, expected_stat=CIME.utils.TESTS_FAILED_ERR_CODE)
# use regex
expected_pattern = re.compile(r'FAIL %s[^\s]* NLCOMP' % test_to_change)
the_match = expected_pattern.search(output)
self.assertNotEqual(the_match, None,
msg="Cmd '%s' failed to display failed test in output:\n%s" % (cpr_cmd, output))
# Bless
run_cmd_no_fail("{}/bless_test_results --test-root {} -n --force -t {}"
.format(TOOLS_DIR, TEST_ROOT, test_id))
# Basic namelist compare should now pass again
self._create_test(compargs)
verify_perms(self, self._baseline_area)
###############################################################################
class X_TestQueryConfig(unittest.TestCase):
###############################################################################
def test_query_compsets(self):
run_cmd_no_fail("{}/query_config --compsets".format(SCRIPT_DIR))
def test_query_components(self):
run_cmd_no_fail("{}/query_config --components".format(SCRIPT_DIR))
def test_query_grids(self):
run_cmd_no_fail("{}/query_config --grids".format(SCRIPT_DIR))
def test_query_machines(self):
run_cmd_no_fail("{}/query_config --machines".format(SCRIPT_DIR))
###############################################################################
class Z_FullSystemTest(TestCreateTestCommon):
###############################################################################
###########################################################################
def test_full_system(self):
###########################################################################
# Put this inside any test that's slow
if (FAST_ONLY):
self.skipTest("Skipping slow test")
self._create_test(["--walltime=0:15:00", "cime_developer"], test_id=self._baseline_name)
run_cmd_assert_result(self, "%s/cs.status.%s" % (self._testroot, self._baseline_name),
from_dir=self._testroot)
# Ensure that we can get test times
test_statuses = glob.glob(os.path.join(self._testroot, "*%s" % self._baseline_name, "TestStatus"))
for test_status in test_statuses:
test_time = CIME.wait_for_tests.get_test_time(os.path.dirname(test_status))
self.assertIs(type(test_time), int, msg="get time did not return int for %s" % test_status)
self.assertTrue(test_time > 0, msg="test time was zero for %s" % test_status)
# Test that re-running works
tests = get_tests.get_test_suite("cime_developer", machine=self._machine, compiler=self._compiler)
for test in tests:
casedir = os.path.join(TEST_ROOT, "%s.%s" % (test, self._baseline_name))
# Subtle issue: The run phases of these tests will be in the PASS state until
# the submitted case.test script is run, which could take a while if the system is
# busy. This potentially leaves a window where the wait_for_tests command below will
# not wait for the re-submitted jobs to run because it sees the original PASS.
# The code below forces things back to PEND to avoid this race condition. Note
# that we must use the MEMLEAK phase, not the RUN phase, because RUN being in a non-PEND
# state is how system tests know they are being re-run and must reset certain
# case settings.
if self._hasbatch:
with TestStatus(test_dir=casedir) as ts:
ts.set_status(MEMLEAK_PHASE, TEST_PEND_STATUS)
run_cmd_assert_result(self, "./case.submit --skip-preview-namelist", from_dir=casedir)
self._wait_for_tests(self._baseline_name)
###############################################################################
class K_TestCimeCase(TestCreateTestCommon):
###############################################################################
###########################################################################
def test_cime_case(self):
###########################################################################
self._create_test(["--no-build", "TESTRUNPASS_P1.f19_g16_rx1.A"], test_id=self._baseline_name)
self.assertEqual(type(MACHINE.get_value("MAX_TASKS_PER_NODE")), int)
self.assertTrue(type(MACHINE.get_value("PROJECT_REQUIRED")) in [type(None) , bool])
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_P1.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
with Case(casedir, read_only=False) as case:
build_complete = case.get_value("BUILD_COMPLETE")
self.assertFalse(build_complete,
msg="Build complete had wrong value '%s'" %
build_complete)
case.set_value("BUILD_COMPLETE", True)
build_complete = case.get_value("BUILD_COMPLETE")
self.assertTrue(build_complete,
msg="Build complete had wrong value '%s'" %
build_complete)
case.flush()
build_complete = run_cmd_no_fail("./xmlquery BUILD_COMPLETE --value",
from_dir=casedir)
self.assertEqual(build_complete, "TRUE",
msg="Build complete had wrong value '%s'" %
build_complete)
# Test some test properties
self.assertEqual(case.get_value("TESTCASE"), "TESTRUNPASS")
def _batch_test_fixture(self, testcase_name):
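# Create a bare X-compset case under TEST_ROOT for the batch-oriented tests below;
# skipped when the machine has no batch system or --no-batch was requested.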
if not MACHINE.has_batch_system() or NO_BATCH:
self.skipTest("Skipping testing user prerequisites without batch systems")
testdir = os.path.join(TEST_ROOT, testcase_name)
if os.path.exists(testdir):
shutil.rmtree(testdir)
run_cmd_assert_result(self, ("{}/create_newcase --case {} --script-root {} " +
"--compset X --res f19_g16 --handle-preexisting-dirs=r --output-root {}").format(
SCRIPT_DIR, testcase_name, testdir, testdir),
from_dir=SCRIPT_DIR)
return testdir
###########################################################################
def test_cime_case_prereq(self):
###########################################################################
testcase_name = 'prereq_test'
testdir = self._batch_test_fixture(testcase_name)
with Case(testdir, read_only=False) as case:
if case.get_value("depend_string") is None:
self.skipTest("Skipping prereq test, depend_string was not provided for this batch system")
job_name = "case.run"
prereq_name = 'prereq_test'
batch_commands = case.submit_jobs(prereq=prereq_name, job=job_name, skip_pnl=True, dry_run=True)
self.assertTrue(isinstance(batch_commands, collections.Sequence), "case.submit_jobs did not return a sequence for a dry run")
self.assertTrue(len(batch_commands) > 0, "case.submit_jobs did not return any job submission string")
# The first element in the internal sequence should just be the job name
# The second one (batch_cmd_index) should be the actual batch submission command
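# Illustrative shape only; the exact submit strings depend on the batch system, e.g.
#   batch_commands ~ [("case.run", "sbatch --dependency=afterok:prereq_test ... case.run"), ...]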
batch_cmd_index = 1
# The prerequisite should be applied to all jobs, though we're only expecting one
for batch_cmd in batch_commands:
self.assertTrue(isinstance(batch_cmd, collections.Sequence), "case.submit_jobs did not return a sequence of sequences")
self.assertTrue(len(batch_cmd) > batch_cmd_index, "case.submit_jobs returned internal sequences with length <= {}".format(batch_cmd_index))
self.assertTrue(isinstance(batch_cmd[1], six.string_types), "case.submit_jobs returned internal sequences without the batch command string as the second parameter: {}".format(batch_cmd[1]))
batch_cmd_args = batch_cmd[1]
jobid_ident = "jobid"
dep_str_fmt = case.get_env('batch').get_value('depend_string', subgroup=None)
self.assertTrue(jobid_ident in dep_str_fmt, "dependency string doesn't include the jobid identifier {}".format(jobid_ident))
dep_str = dep_str_fmt[:dep_str_fmt.index(jobid_ident)]
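# Illustrative: with a Slurm-style depend_string of "--dependency=afterok:jobid",
# dep_str becomes "--dependency=afterok:"; the loop below then looks for the prereq
# name immediately after that prefix in the submit arguments.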
prereq_substr = None
while dep_str in batch_cmd_args:
dep_id_pos = batch_cmd_args.find(dep_str) + len(dep_str)
batch_cmd_args = batch_cmd_args[dep_id_pos:]
prereq_substr = batch_cmd_args[:len(prereq_name)]
if prereq_substr == prereq_name:
break
self.assertTrue(prereq_name in prereq_substr, "Dependencies added, but not the user specified one")
###########################################################################
def test_cime_case_allow_failed_prereq(self):
###########################################################################
testcase_name = 'allow_failed_prereq_test'
testdir = self._batch_test_fixture(testcase_name)
with Case(testdir, read_only=False) as case:
depend_allow = case.get_value("depend_allow_string")
if depend_allow is None:
self.skipTest("Skipping allow_failed_prereq test, depend_allow_string was not provided for this batch system")
job_name = "case.run"
prereq_name = "prereq_allow_fail_test"
depend_allow = depend_allow.replace("jobid", prereq_name)
batch_commands = case.submit_jobs(prereq=prereq_name, allow_fail=True, job=job_name, skip_pnl=True, dry_run=True)
self.assertTrue(isinstance(batch_commands, collections.Sequence), "case.submit_jobs did not return a sequence for a dry run")
num_submissions = 1
if case.get_value("DOUT_S"):
num_submissions = 2
self.assertTrue(len(batch_commands) == num_submissions, "case.submit_jobs did not return any job submission strings")
self.assertTrue(depend_allow in batch_commands[0][1])
###########################################################################
def test_cime_case_resubmit_immediate(self):
###########################################################################
testcase_name = 'resubmit_immediate_test'
testdir = self._batch_test_fixture(testcase_name)
with Case(testdir, read_only=False) as case:
depend_string = case.get_value("depend_string")
if depend_string is None:
self.skipTest("Skipping resubmit_immediate test, depend_string was not provided for this batch system")
depend_string = re.sub('jobid.*$','',depend_string)
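# Illustrative: a Slurm-style depend_string "--dependency=afterok:jobid" is trimmed to
# "--dependency=afterok:" so each resubmitted job's command can be checked for it below.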
job_name = "case.run"
num_submissions = 6
case.set_value("RESUBMIT", num_submissions - 1)
batch_commands = case.submit_jobs(job=job_name, skip_pnl=True, dry_run=True, resubmit_immediate=True)
self.assertTrue(isinstance(batch_commands, collections.Sequence), "case.submit_jobs did not return a sequence for a dry run")
if case.get_value("DOUT_S"):
num_submissions = 12
self.assertTrue(len(batch_commands) == num_submissions, "case.submit_jobs did not return {} submitted jobs".format(num_submissions))
for i, cmd in enumerate(batch_commands):
if i > 0:
self.assertTrue(depend_string in cmd[1])
###########################################################################
def test_cime_case_st_archive_resubmit(self):
###########################################################################
testcase_name = "st_archive_resubmit_test"
testdir = self._batch_test_fixture(testcase_name)
with Case(testdir, read_only=False) as case:
case.case_setup(clean=False, test_mode=False, reset=True)
orig_resubmit = 2
case.set_value("RESUBMIT", orig_resubmit)
case.case_st_archive(resubmit=False)
new_resubmit = case.get_value("RESUBMIT")
self.assertTrue(orig_resubmit == new_resubmit, "st_archive resubmitted when told not to")
case.case_st_archive(resubmit=True)
new_resubmit = case.get_value("RESUBMIT")
self.assertTrue((orig_resubmit - 1) == new_resubmit, "st_archive did not resubmit when told to")
###########################################################################
def test_cime_case_build_threaded_1(self):
###########################################################################
self._create_test(["--no-build", "TESTRUNPASS_P1x1.f19_g16_rx1.A"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_P1x1.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
with Case(casedir, read_only=False) as case:
build_threaded = case.get_value("SMP_PRESENT")
self.assertFalse(build_threaded)
build_threaded = case.get_build_threaded()
self.assertFalse(build_threaded)
case.set_value("FORCE_BUILD_SMP", True)
build_threaded = case.get_build_threaded()
self.assertTrue(build_threaded)
###########################################################################
def test_cime_case_build_threaded_2(self):
###########################################################################
self._create_test(["--no-build", "TESTRUNPASS_P1x2.f19_g16_rx1.A"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_P1x2.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
with Case(casedir, read_only=False) as case:
build_threaded = case.get_value("SMP_PRESENT")
self.assertTrue(build_threaded)
build_threaded = case.get_build_threaded()
self.assertTrue(build_threaded)
###########################################################################
def test_cime_case_mpi_serial(self):
###########################################################################
self._create_test(["--no-build", "TESTRUNPASS_Mmpi-serial_P10.f19_g16_rx1.A"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_Mmpi-serial_P10.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
with Case(casedir, read_only=True) as case:
# Serial cases should not be using pnetcdf
self.assertEqual(case.get_value("CPL_PIO_TYPENAME"), "netcdf")
# Serial cases should be using 1 task
self.assertEqual(case.get_value("TOTALPES"), 1)
self.assertEqual(case.get_value("NTASKS_CPL"), 1)
###########################################################################
def test_cime_case_force_pecount(self):
###########################################################################
self._create_test(["--no-build", "--force-procs=16", "--force-threads=8", "TESTRUNPASS.f19_g16_rx1.A"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_P16x8.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
with Case(casedir, read_only=True) as case:
self.assertEqual(case.get_value("NTASKS_CPL"), 16)
self.assertEqual(case.get_value("NTHRDS_CPL"), 8)
###########################################################################
def test_cime_case_xmlchange_append(self):
###########################################################################
self._create_test(["--no-build", "TESTRUNPASS_P1x1.f19_g16_rx1.A"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_P1x1.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
run_cmd_assert_result(self, "./xmlchange --id PIO_CONFIG_OPTS --val='-opt1'", from_dir=casedir)
result = run_cmd_assert_result(self, "./xmlquery --value PIO_CONFIG_OPTS", from_dir=casedir)
self.assertEqual(result, "-opt1")
run_cmd_assert_result(self, "./xmlchange --id PIO_CONFIG_OPTS --val='-opt2' --append", from_dir=casedir)
result = run_cmd_assert_result(self, "./xmlquery --value PIO_CONFIG_OPTS", from_dir=casedir)
self.assertEqual(result, "-opt1 -opt2")
###########################################################################
def test_cime_case_test_walltime_mgmt_1(self):
###########################################################################
if CIME.utils.get_model() != "e3sm":
self.skipTest("Skipping walltime test. Depends on E3SM batch settings")
test_name = "ERS.f19_g16_rx1.A"
machine, compiler = "blues", "gnu"
self._create_test(["--no-setup", "--machine={}".format(machine), test_name], test_id=self._baseline_name,
env_changes="unset CIME_GLOBAL_WALLTIME &&")
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "0:10:00")
result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "batch")
###########################################################################
def test_cime_case_test_walltime_mgmt_2(self):
###########################################################################
if CIME.utils.get_model() != "e3sm":
self.skipTest("Skipping walltime test. Depends on E3SM batch settings")
test_name = "ERS_P64.f19_g16_rx1.A"
machine, compiler = "blues", "gnu"
self._create_test(["--no-setup", "--machine={}".format(machine), test_name], test_id=self._baseline_name,
env_changes="unset CIME_GLOBAL_WALLTIME &&")
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "03:00:00")
result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "batch")
###########################################################################
def test_cime_case_test_walltime_mgmt_3(self):
###########################################################################
if CIME.utils.get_model() != "e3sm":
self.skipTest("Skipping walltime test. Depends on E3SM batch settings")
test_name = "ERS_P64.f19_g16_rx1.A"
machine, compiler = "blues", "gnu"
self._create_test(["--no-setup", "--machine={}".format(machine), "--walltime=0:10:00", test_name], test_id=self._baseline_name,
env_changes="unset CIME_GLOBAL_WALLTIME &&")
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "0:10:00")
result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "batch") # Not smart enough to select faster queue
###########################################################################
def test_cime_case_test_walltime_mgmt_4(self):
###########################################################################
if CIME.utils.get_model() != "e3sm":
self.skipTest("Skipping walltime test. Depends on E3SM batch settings")
test_name = "ERS_P1.f19_g16_rx1.A"
machine, compiler = "blues", "gnu"
self._create_test(["--no-setup", "--machine={}".format(machine), "--walltime=2:00:00", test_name], test_id=self._baseline_name,
env_changes="unset CIME_GLOBAL_WALLTIME &&")
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "2:00:00")
result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "batch")
###########################################################################
def test_cime_case_test_walltime_mgmt_5(self):
###########################################################################
if CIME.utils.get_model() != "e3sm":
self.skipTest("Skipping walltime test. Depends on E3SM batch settings")
test_name = "ERS_P1.f19_g16_rx1.A"
machine, compiler = "blues", "gnu"
self._create_test(["--no-setup", "--machine={}".format(machine), test_name], test_id=self._baseline_name,
env_changes="unset CIME_GLOBAL_WALLTIME &&")
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
run_cmd_assert_result(self, "./xmlchange JOB_QUEUE=slartibartfast --subgroup=case.test", from_dir=casedir, expected_stat=1)
run_cmd_assert_result(self, "./xmlchange JOB_QUEUE=slartibartfast --force --subgroup=case.test", from_dir=casedir)
result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "03:00:00")
result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir)
self.assertEqual(result, "slartibartfast")
###########################################################################
def test_cime_case_test_walltime_mgmt_6(self):
###########################################################################
if not self._hasbatch:
self.skipTest("Skipping walltime test. Depends on batch system")
test_name = "ERS_P1.f19_g16_rx1.A"
self._create_test(["--no-build", test_name], test_id=self._baseline_name,
env_changes="unset CIME_GLOBAL_WALLTIME &&")
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
run_cmd_assert_result(self, "./xmlchange JOB_WALLCLOCK_TIME=421:32:11 --subgroup=case.test", from_dir=casedir)
run_cmd_assert_result(self, "./case.setup --reset", from_dir=casedir)
result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir)
with Case(casedir) as case:
walltime_format = case.get_value("walltime_format", subgroup=None)
if walltime_format is not None and walltime_format.count(":") == 1:
self.assertEqual(result, "421:32")
else:
self.assertEqual(result, "421:32:11")
###########################################################################
def test_cime_case_test_walltime_mgmt_7(self):
###########################################################################
if not self._hasbatch:
self.skipTest("Skipping walltime test. Depends on batch system")
test_name = "ERS_P1.f19_g16_rx1.A"
self._create_test(["--no-build", "--walltime=01:00:00", test_name], test_id=self._baseline_name,
env_changes="unset CIME_GLOBAL_WALLTIME &&")
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
run_cmd_assert_result(self, "./xmlchange JOB_WALLCLOCK_TIME=421:32:11 --subgroup=case.test", from_dir=casedir)
run_cmd_assert_result(self, "./case.setup --reset", from_dir=casedir)
result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir)
with Case(casedir) as case:
walltime_format = case.get_value("walltime_format", subgroup=None)
if walltime_format is not None and walltime_format.count(":") == 1:
self.assertEqual(result, "421:32")
else:
self.assertEqual(result, "421:32:11")
###########################################################################
def test_cime_case_test_custom_project(self):
###########################################################################
test_name = "ERS_P1.f19_g16_rx1.A"
machine, compiler = "melvin", "gnu" # have to use a machine both models know and one that doesn't put PROJECT in any key paths
self._create_test(["--no-setup", "--machine={}".format(machine), "--compiler={}".format(compiler), "--project=testproj", test_name],
test_id=self._baseline_name,
env_changes="unset CIME_GLOBAL_WALLTIME &&")
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
result = run_cmd_assert_result(self, "./xmlquery --value PROJECT --subgroup=case.test", from_dir=casedir)
self.assertEqual(result, "testproj")
###########################################################################
def test_create_test_longname(self):
###########################################################################
self._create_test(["SMS.f19_g16.2000_SATM_XLND_SICE_SOCN_XROF_XGLC_SWAV", "--no-build"])
###########################################################################
def test_env_loading(self):
###########################################################################
if self._machine != "melvin":
self.skipTest("Skipping env load test - Only works on melvin")
self._create_test(["--no-build", "TESTRUNPASS.f19_g16_rx1.A"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
with Case(casedir, read_only=True) as case:
env_mach = case.get_env("mach_specific")
orig_env = dict(os.environ)
env_mach.load_env(case)
module_env = dict(os.environ)
os.environ.clear()
os.environ.update(orig_env)
env_mach.load_env(case, force_method="generic")
generic_env = dict(os.environ)
os.environ.clear()
os.environ.update(orig_env)
problems = ""
for mkey, mval in module_env.items():
if mkey not in generic_env:
if not mkey.startswith("PS") and mkey != "OLDPWD":
problems += "Generic missing key: {}\n".format(mkey)
elif mval != generic_env[mkey] and mkey not in ["_", "SHLVL", "PWD"] and not mkey.endswith("()"):
problems += "Value mismatch for key {}: {} != {}\n".format(mkey, repr(mval), repr(generic_env[mkey]))
for gkey in generic_env.keys():
if gkey not in module_env:
problems += "Modules missing key: {}\n".format(gkey)
self.assertEqual(problems, "", msg=problems)
###########################################################################
def test_case_submit_interface(self):
###########################################################################
try:
import imp
except ImportError:
print("imp not found, skipping case.submit interface test")
return
sys.path.append(TOOLS_DIR)
case_submit_path = os.path.join(TOOLS_DIR, "case.submit")
submit_interface = imp.load_source("case_submit_interface", case_submit_path)
sys.argv = ["case.submit", "--batch-args", "'random_arguments_here.%j'",
"--mail-type", "fail", "--mail-user", "'random_arguments_here.%j'"]
submit_interface._main_func(None, True)
###########################################################################
def test_xml_caching(self):
###########################################################################
self._create_test(["--no-build", "TESTRUNPASS.f19_g16_rx1.A"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
active = os.path.join(casedir, "env_run.xml")
backup = os.path.join(casedir, "env_run.xml.bak")
safe_copy(active, backup)
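# Keep a pristine copy of env_run.xml; the test repeatedly restores it "behind the
# back" of open Case objects below to exercise the XML cache-invalidation logic.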
with Case(casedir, read_only=False) as case:
env_run = EnvRun(casedir, read_only=True)
self.assertEqual(case.get_value("RUN_TYPE"), "startup")
case.set_value("RUN_TYPE", "branch")
self.assertEqual(case.get_value("RUN_TYPE"), "branch")
self.assertEqual(env_run.get_value("RUN_TYPE"), "branch")
with Case(casedir) as case:
self.assertEqual(case.get_value("RUN_TYPE"), "branch")
time.sleep(0.2)
safe_copy(backup, active)
with Case(casedir, read_only=False) as case:
self.assertEqual(case.get_value("RUN_TYPE"), "startup")
case.set_value("RUN_TYPE", "branch")
with Case(casedir, read_only=False) as case:
self.assertEqual(case.get_value("RUN_TYPE"), "branch")
time.sleep(0.2)
safe_copy(backup, active)
case.read_xml() # Manual re-sync
self.assertEqual(case.get_value("RUN_TYPE"), "startup")
case.set_value("RUN_TYPE", "branch")
self.assertEqual(case.get_value("RUN_TYPE"), "branch")
with Case(casedir) as case:
self.assertEqual(case.get_value("RUN_TYPE"), "branch")
time.sleep(0.2)
safe_copy(backup, active)
env_run = EnvRun(casedir, read_only=True)
self.assertEqual(env_run.get_value("RUN_TYPE"), "startup")
with Case(casedir, read_only=False) as case:
self.assertEqual(case.get_value("RUN_TYPE"), "startup")
case.set_value("RUN_TYPE", "branch")
# behind the back detection
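# (i.e. env_run.xml is replaced on disk while a Case object still holds it; CIME is
# expected to detect the external modification and raise CIMEError)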
with self.assertRaises(CIMEError):
with Case(casedir, read_only=False) as case:
time.sleep(0.2)
safe_copy(backup, active)
with Case(casedir, read_only=False) as case:
case.set_value("RUN_TYPE", "branch")
with self.assertRaises(CIMEError):
with Case(casedir) as case:
time.sleep(0.2)
safe_copy(backup, active)
###########################################################################
def test_configure(self):
###########################################################################
self._create_test(["SMS.f09_g16.X", "--no-build"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
"{}.{}".format(CIME.utils.get_full_test_name("SMS.f09_g16.X", machine=self._machine, compiler=self._compiler), self._baseline_name))
manual_config_dir = os.path.join(casedir, "manual_config")
os.mkdir(manual_config_dir)
run_cmd_no_fail("{} --machine={} --compiler={}".format(os.path.join(get_cime_root(), "tools", "configure"), self._machine, self._compiler), from_dir=manual_config_dir)
with open(os.path.join(casedir, "env_mach_specific.xml"), "r") as fd:
case_env_contents = fd.read()
with open(os.path.join(manual_config_dir, "env_mach_specific.xml"), "r") as fd:
man_env_contents = fd.read()
self.assertEqual(case_env_contents, man_env_contents)
###############################################################################
class X_TestSingleSubmit(TestCreateTestCommon):
###############################################################################
###########################################################################
def test_single_submit(self):
###########################################################################
# Skip unless on a batch system and users did not select no-batch
if (not self._hasbatch):
self.skipTest("Skipping single submit. Not valid without batch")
if CIME.utils.get_model() != "e3sm":
self.skipTest("Skipping single submit. E3SM experimental feature")
if self._machine not in ["sandiatoss3"]:
self.skipTest("Skipping single submit. Only works on sandiatoss3")
# Keep small enough for now that we don't have to worry about load balancing
self._create_test(["--single-submit", "SMS_Ln9_P8.f45_g37_rx1.A", "SMS_Ln9_P8.f19_g16_rx1.A"],
env_changes="unset CIME_GLOBAL_WALLTIME &&")
###############################################################################
class L_TestSaveTimings(TestCreateTestCommon):
###############################################################################
###########################################################################
def simple_test(self, manual_timing=False):
###########################################################################
timing_flag = "" if manual_timing else "--save-timing"
driver = CIME.utils.get_cime_default_driver()
if driver == "mct":
walltime="00:15:00"
else:
walltime="00:30:00"
self._create_test(["SMS_Ln9_P1.f19_g16_rx1.A", timing_flag, "--walltime="+walltime], test_id=self._baseline_name)
statuses = glob.glob("%s/*%s/TestStatus" % (self._testroot, self._baseline_name))
self.assertEqual(len(statuses), 1, msg="Should have had exactly one match, found %s" % statuses)
casedir = os.path.dirname(statuses[0])
with Case(casedir, read_only=True) as case:
lids = get_lids(case)
timing_dir = case.get_value("SAVE_TIMING_DIR")
casename = case.get_value("CASE")
self.assertEqual(len(lids), 1, msg="Expected one LID, found %s" % lids)
if manual_timing:
run_cmd_assert_result(self, "cd %s && %s/save_provenance postrun" % (casedir, TOOLS_DIR))
if CIME.utils.get_model() == "e3sm":
provenance_dirs = glob.glob(os.path.join(timing_dir, "performance_archive", getpass.getuser(), casename, lids[0] + "*"))
self.assertEqual(len(provenance_dirs), 1, msg="provenance dirs were missing")
verify_perms(self, timing_dir)
###########################################################################
def test_save_timings(self):
###########################################################################
self.simple_test()
###########################################################################
def test_save_timings_manual(self):
###########################################################################
self.simple_test(manual_timing=True)
# Machinery for Macros generation tests.
class MockMachines(object):
"""A mock version of the Machines object to simplify testing."""
def __init__(self, name, os_):
"""Store the name."""
self.name = name
self.os = os_
def get_machine_name(self):
"""Return the name we were given."""
return self.name
def get_value(self, var_name):
"""Allow the operating system to be queried."""
assert var_name == "OS", "Build asked for a value not " \
"implemented in the testing infrastructure."
return self.os
def is_valid_compiler(self, _): # pylint:disable=no-self-use
"""Assume all compilers are valid."""
return True
def is_valid_MPIlib(self, _):
"""Assume all MPILIB settings are valid."""
return True
# pragma pylint: disable=unused-argument
def get_default_MPIlib(self, attributes=None):
return "mpich2"
def get_default_compiler(self):
return "intel"
def get_macros(macro_maker, build_xml, build_system):
"""Generate build system ("Macros" file) output from config_compilers XML.
Arguments:
macro_maker - The underlying Build object.
build_xml - A string containing the XML to operate on.
build_system - Either "Makefile" or "CMake", depending on desired output.
The return value is a string containing the build system output.
"""
# Build.write_macros expects file-like objects as input, so
# we need to wrap the strings in StringIO objects.
xml = six.StringIO(str(build_xml))
output = six.StringIO()
output_format = None
if build_system == "Makefile":
output_format = "make"
elif build_system == "CMake":
output_format = "cmake"
else:
output_format = build_system
macro_maker.write_macros_file(macros_file=output,
output_format=output_format, xml=xml)
return str(output.getvalue())
def _wrap_config_compilers_xml(inner_string):
"""Utility function to create a config_compilers XML string.
Pass this function a string containing <compiler> elements, and it will add
the necessary header/footer to the file.
"""
_xml_template = """<?xml version="1.0" encoding="UTF-8"?>
<config_compilers>
{}
</config_compilers>
"""
return _xml_template.format(inner_string)
class MakefileTester(object):
"""Helper class for checking Makefile output.
Public methods:
__init__
query_var
assert_variable_equals
assert_variable_matches
"""
# Note that the following is a Makefile and the echo line must begin with a tab
_makefile_template = """
include Macros
query:
\techo '$({})' > query.out
"""
def __init__(self, parent, make_string):
"""Constructor for Makefile test helper class.
Arguments:
parent - The TestCase object that is using this item.
make_string - Makefile contents to test.
"""
self.parent = parent
self.make_string = make_string
def query_var(self, var_name, env, var):
"""Request the value of a variable in the Makefile, as a string.
Arguments:
var_name - Name of the variable to query.
env - A dict containing extra environment variables to set when calling
make.
var - A dict containing extra make variables to set when calling make.
(The distinction between env and var actually matters only for
CMake, though.)
"""
if env is None:
env = dict()
if var is None:
var = dict()
# Write the Makefile strings to temporary files.
temp_dir = tempfile.mkdtemp()
macros_file_name = os.path.join(temp_dir, "Macros")
makefile_name = os.path.join(temp_dir, "Makefile")
output_name = os.path.join(temp_dir, "query.out")
with open(macros_file_name, "w") as macros_file:
macros_file.write(self.make_string)
with open(makefile_name, "w") as makefile:
makefile.write(self._makefile_template.format(var_name))
environment = os.environ.copy()
environment.update(env)
environment.update(var)
gmake_exe = MACHINE.get_value("GMAKE")
if gmake_exe is None:
gmake_exe = "gmake"
run_cmd_assert_result(self.parent, "%s query --directory=%s 2>&1" % (gmake_exe, temp_dir), env=environment)
with open(output_name, "r") as output:
query_result = output.read().strip()
# Clean up the Makefiles.
shutil.rmtree(temp_dir)
return query_result
def assert_variable_equals(self, var_name, value, env=None, var=None):
"""Assert that a variable in the Makefile has a given value.
Arguments:
var_name - Name of variable to check.
value - The string that the variable value should be equal to.
env - Optional. Dict of environment variables to set when calling make.
var - Optional. Dict of make variables to set when calling make.
"""
self.parent.assertEqual(self.query_var(var_name, env, var), value)
def assert_variable_matches(self, var_name, regex, env=None, var=None):
"""Assert that a variable in the Makefile matches a regex.
Arguments:
var_name - Name of variable to check.
regex - The regex to match.
env - Optional. Dict of environment variables to set when calling make.
var - Optional. Dict of make variables to set when calling make.
"""
self.parent.assertRegexpMatches(self.query_var(var_name, env, var), regex)
class CMakeTester(object):
"""Helper class for checking CMake output.
Public methods:
__init__
query_var
assert_variable_equals
assert_variable_matches
"""
_cmakelists_template = """
include(./Macros.cmake)
file(WRITE query.out "${{{}}}")
"""
def __init__(self, parent, cmake_string):
"""Constructor for CMake test helper class.
Arguments:
parent - The TestCase object that is using this item.
cmake_string - CMake contents to test.
"""
self.parent = parent
self.cmake_string = cmake_string
def query_var(self, var_name, env, var):
"""Request the value of a variable in Macros.cmake, as a string.
Arguments:
var_name - Name of the variable to query.
env - A dict containing extra environment variables to set when calling
cmake.
var - A dict containing extra CMake variables to set when calling cmake.
"""
if env is None:
env = dict()
if var is None:
var = dict()
# Write the CMake strings to temporary files.
temp_dir = tempfile.mkdtemp()
macros_file_name = os.path.join(temp_dir, "Macros.cmake")
cmakelists_name = os.path.join(temp_dir, "CMakeLists.txt")
output_name = os.path.join(temp_dir, "query.out")
with open(macros_file_name, "w") as macros_file:
for key in var:
macros_file.write("set({} {})\n".format(key, var[key]))
macros_file.write(self.cmake_string)
with open(cmakelists_name, "w") as cmakelists:
cmakelists.write(self._cmakelists_template.format(var_name))
environment = os.environ.copy()
environment.update(env)
os_ = MACHINE.get_value("OS")
# cmake will not work on cray systems without this flag
if os_ == "CNL":
cmake_args = "-DCMAKE_SYSTEM_NAME=Catamount"
else:
cmake_args = ""
run_cmd_assert_result(self.parent, "cmake %s . 2>&1" % cmake_args, from_dir=temp_dir, env=environment)
with open(output_name, "r") as output:
query_result = output.read().strip()
# Clean up the CMake files.
shutil.rmtree(temp_dir)
return query_result
def assert_variable_equals(self, var_name, value, env=None, var=None):
"""Assert that a variable in the CMakeLists has a given value.
Arguments:
var_name - Name of variable to check.
value - The string that the variable value should be equal to.
env - Optional. Dict of environment variables to set when calling cmake.
var - Optional. Dict of CMake variables to set when calling cmake.
"""
self.parent.assertEqual(self.query_var(var_name, env, var), value)
def assert_variable_matches(self, var_name, regex, env=None, var=None):
"""Assert that a variable in the CMkeLists matches a regex.
Arguments:
var_name - Name of variable to check.
regex - The regex to match.
env - Optional. Dict of environment variables to set when calling cmake.
var - Optional. Dict of CMake variables to set when calling cmake.
"""
self.parent.assertRegexpMatches(self.query_var(var_name, env, var), regex)
###############################################################################
class G_TestMacrosBasic(unittest.TestCase):
###############################################################################
"""Basic infrastructure tests.
This class contains tests that do not actually depend on the output of the
macro file conversion. This includes basic smoke testing and tests of
error-handling in the routine.
"""
def test_script_is_callable(self):
"""The test script can be called on valid output without dying."""
# This is really more a smoke test of this script than anything else.
maker = Compilers(MockMachines("mymachine", "SomeOS"), version=2.0)
test_xml = _wrap_config_compilers_xml("<compiler><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>")
get_macros(maker, test_xml, "Makefile")
def test_script_rejects_bad_xml(self):
"""The macro writer rejects input that's not valid XML."""
maker = Compilers(MockMachines("mymachine", "SomeOS"), version=2.0)
with self.assertRaises(ParseError):
get_macros(maker, "This is not valid XML.", "Makefile")
def test_script_rejects_bad_build_system(self):
"""The macro writer rejects a bad build system string."""
maker = Compilers(MockMachines("mymachine", "SomeOS"), version=2.0)
bad_string = "argle-bargle."
with assertRaisesRegex(self,
CIMEError,
"Unrecognized build system provided to write_macros: " + bad_string):
get_macros(maker, "This string is irrelevant.", bad_string)
###############################################################################
class H_TestMakeMacros(unittest.TestCase):
###############################################################################
"""Makefile macros tests.
This class contains tests of the Makefile output of Build.
Aside from the usual setUp and test methods, this class has a utility method
(xml_to_tester) that converts XML input directly to a MakefileTester object.
"""
def setUp(self):
self.test_os = "SomeOS"
self.test_machine = "mymachine"
self.test_compiler = MACHINE.get_default_compiler() if TEST_COMPILER is None else TEST_COMPILER
self.test_mpilib = MACHINE.get_default_MPIlib(attributes={"compiler":self.test_compiler}) if TEST_MPILIB is None else TEST_MPILIB
self._maker = Compilers(MockMachines(self.test_machine, self.test_os), version=2.0)
def xml_to_tester(self, xml_string):
"""Helper that directly converts an XML string to a MakefileTester."""
test_xml = _wrap_config_compilers_xml(xml_string)
return MakefileTester(self, get_macros(self._maker, test_xml, "Makefile"))
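# Each tester below exercises the full pipeline: config_compilers XML -> write_macros
# output -> a real gmake invocation that echoes the requested variable back.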
def test_generic_item(self):
"""The macro writer can write out a single generic item."""
xml_string = "<compiler><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>"
tester = self.xml_to_tester(xml_string)
tester.assert_variable_equals("SUPPORTS_CXX", "FALSE")
def test_machine_specific_item(self):
"""The macro writer can pick out a machine-specific item."""
xml1 = """<compiler MACH="{}"><SUPPORTS_CXX>TRUE</SUPPORTS_CXX></compiler>""".format(self.test_machine)
xml2 = """<compiler><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>"""
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")
# Do this a second time, but with elements in the reverse order, to
# ensure that the code is not "cheating" by taking the first match.
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")
def test_ignore_non_match(self):
"""The macro writer ignores an entry with the wrong machine name."""
xml1 = """<compiler MACH="bad"><SUPPORTS_CXX>TRUE</SUPPORTS_CXX></compiler>"""
xml2 = """<compiler><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>"""
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("SUPPORTS_CXX", "FALSE")
# Again, double-check that we don't just get lucky with the order.
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("SUPPORTS_CXX", "FALSE")
def test_os_specific_item(self):
"""The macro writer can pick out an OS-specific item."""
xml1 = """<compiler OS="{}"><SUPPORTS_CXX>TRUE</SUPPORTS_CXX></compiler>""".format(self.test_os)
xml2 = """<compiler><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>"""
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")
def test_mach_other_compiler(self):
"""The macro writer compiler-specific logic works as expected."""
xml1 = """<compiler COMPILER="{}"><CFLAGS><base>a b c</base></CFLAGS></compiler>""".format(self.test_compiler)
xml2 = """<compiler MACH="{}" COMPILER="other"><CFLAGS><base>x y z</base></CFLAGS></compiler>""".format(self.test_machine)
xml3 = """<compiler MACH="{}" COMPILER="{}"><CFLAGS><append>x y z</append></CFLAGS></compiler>""".format(self.test_machine,self.test_compiler)
xml4 = """<compiler MACH="{}" COMPILER="{}"><CFLAGS><base>x y z</base></CFLAGS></compiler>""".format(self.test_machine,self.test_compiler)
tester = self.xml_to_tester(xml1)
tester.assert_variable_equals("CFLAGS", "a b c",env={"COMPILER":self.test_compiler})
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("CFLAGS", "a b c",env={"COMPILER":self.test_compiler})
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("CFLAGS", "a b c",env={"COMPILER":self.test_compiler})
tester = self.xml_to_tester(xml1+xml3)
tester.assert_variable_equals("CFLAGS", "a b c x y z",env={"COMPILER":self.test_compiler})
tester = self.xml_to_tester(xml1+xml4)
tester.assert_variable_equals("CFLAGS", "x y z",env={"COMPILER":self.test_compiler})
tester = self.xml_to_tester(xml4+xml1)
tester.assert_variable_equals("CFLAGS", "x y z",env={"COMPILER":self.test_compiler})
def test_mach_beats_os(self):
"""The macro writer chooses machine-specific over os-specific matches."""
xml1 = """<compiler OS="{}"><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>""".format(self.test_os)
xml2 = """<compiler MACH="{}"><SUPPORTS_CXX>TRUE</SUPPORTS_CXX></compiler>""".format(self.test_machine)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")
def test_mach_and_os_beats_mach(self):
"""The macro writer chooses the most-specific match possible."""
xml1 = """<compiler MACH="{}"><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>""".format(self.test_machine)
xml2 = """<compiler MACH="{}" OS="{}"><SUPPORTS_CXX>TRUE</SUPPORTS_CXX></compiler>"""
xml2 = xml2.format(self.test_machine, self.test_os)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")
def test_build_time_attribute(self):
"""The macro writer writes conditionals for build-time choices."""
xml1 = """<compiler><MPI_PATH MPILIB="mpich">/path/to/mpich</MPI_PATH></compiler>"""
xml2 = """<compiler><MPI_PATH MPILIB="openmpi">/path/to/openmpi</MPI_PATH></compiler>"""
xml3 = """<compiler><MPI_PATH>/path/to/default</MPI_PATH></compiler>"""
tester = self.xml_to_tester(xml1+xml2+xml3)
tester.assert_variable_equals("MPI_PATH", "/path/to/default")
tester.assert_variable_equals("MPI_PATH", "/path/to/mpich", env={"MPILIB": "mpich"})
tester.assert_variable_equals("MPI_PATH", "/path/to/openmpi", env={"MPILIB": "openmpi"})
tester = self.xml_to_tester(xml3+xml2+xml1)
tester.assert_variable_equals("MPI_PATH", "/path/to/default")
tester.assert_variable_equals("MPI_PATH", "/path/to/mpich", env={"MPILIB": "mpich"})
tester.assert_variable_equals("MPI_PATH", "/path/to/openmpi", env={"MPILIB": "openmpi"})
def test_reject_duplicate_defaults(self):
"""The macro writer dies if given many defaults."""
xml1 = """<compiler><MPI_PATH>/path/to/default</MPI_PATH></compiler>"""
xml2 = """<compiler><MPI_PATH>/path/to/other_default</MPI_PATH></compiler>"""
with assertRaisesRegex(self,
CIMEError,
"Variable MPI_PATH is set ambiguously in config_compilers.xml."):
self.xml_to_tester(xml1+xml2)
def test_reject_duplicates(self):
"""The macro writer dies if given many matches for a given configuration."""
xml1 = """<compiler><MPI_PATH MPILIB="mpich">/path/to/mpich</MPI_PATH></compiler>"""
xml2 = """<compiler><MPI_PATH MPILIB="mpich">/path/to/mpich2</MPI_PATH></compiler>"""
with assertRaisesRegex(self,
CIMEError,
"Variable MPI_PATH is set ambiguously in config_compilers.xml."):
self.xml_to_tester(xml1+xml2)
def test_reject_ambiguous(self):
"""The macro writer dies if given an ambiguous set of matches."""
xml1 = """<compiler><MPI_PATH MPILIB="mpich">/path/to/mpich</MPI_PATH></compiler>"""
xml2 = """<compiler><MPI_PATH DEBUG="FALSE">/path/to/mpi-debug</MPI_PATH></compiler>"""
with assertRaisesRegex(self,
CIMEError,
"Variable MPI_PATH is set ambiguously in config_compilers.xml."):
self.xml_to_tester(xml1+xml2)
def test_compiler_changeable_at_build_time(self):
"""The macro writer writes information for multiple compilers."""
xml1 = """<compiler><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>"""
xml2 = """<compiler COMPILER="gnu"><SUPPORTS_CXX>TRUE</SUPPORTS_CXX></compiler>"""
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("SUPPORTS_CXX", "TRUE", env={"COMPILER": "gnu"})
tester.assert_variable_equals("SUPPORTS_CXX", "FALSE")
def test_base_flags(self):
"""Test that we get "base" compiler flags."""
xml1 = """<compiler><FFLAGS><base>-O2</base></FFLAGS></compiler>"""
tester = self.xml_to_tester(xml1)
tester.assert_variable_equals("FFLAGS", "-O2")
def test_machine_specific_base_flags(self):
"""Test selection among base compiler flag sets based on machine."""
xml1 = """<compiler><FFLAGS><base>-O2</base></FFLAGS></compiler>"""
xml2 = """<compiler MACH="{}"><FFLAGS><base>-O3</base></FFLAGS></compiler>""".format(self.test_machine)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("FFLAGS", "-O3")
def test_build_time_base_flags(self):
"""Test selection of base flags based on build-time attributes."""
xml1 = """<compiler><FFLAGS><base>-O2</base></FFLAGS></compiler>"""
xml2 = """<compiler><FFLAGS><base DEBUG="TRUE">-O3</base></FFLAGS></compiler>"""
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("FFLAGS", "-O2")
tester.assert_variable_equals("FFLAGS", "-O3", env={"DEBUG": "TRUE"})
def test_build_time_base_flags_same_parent(self):
"""Test selection of base flags in the same parent element."""
xml1 = """<base>-O2</base>"""
xml2 = """<base DEBUG="TRUE">-O3</base>"""
tester = self.xml_to_tester("<compiler><FFLAGS>"+xml1+xml2+"</FFLAGS></compiler>")
tester.assert_variable_equals("FFLAGS", "-O2")
tester.assert_variable_equals("FFLAGS", "-O3", env={"DEBUG": "TRUE"})
# Check for order independence here, too.
tester = self.xml_to_tester("<compiler><FFLAGS>"+xml2+xml1+"</FFLAGS></compiler>")
tester.assert_variable_equals("FFLAGS", "-O2")
tester.assert_variable_equals("FFLAGS", "-O3", env={"DEBUG": "TRUE"})
def test_append_flags(self):
"""Test appending flags to a list."""
xml1 = """<compiler><FFLAGS><base>-delicious</base></FFLAGS></compiler>"""
xml2 = """<compiler><FFLAGS><append>-cake</append></FFLAGS></compiler>"""
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("FFLAGS", "-delicious -cake")
# Order independence, as usual.
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("FFLAGS", "-delicious -cake")
def test_machine_specific_append_flags(self):
"""Test appending flags that are either more or less machine-specific."""
xml1 = """<compiler><FFLAGS><append>-delicious</append></FFLAGS></compiler>"""
xml2 = """<compiler MACH="{}"><FFLAGS><append>-cake</append></FFLAGS></compiler>""".format(self.test_machine)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_matches("FFLAGS", "^(-delicious -cake|-cake -delicious)$")
def test_machine_specific_base_keeps_append_flags(self):
"""Test that machine-specific base flags don't override default append flags."""
xml1 = """<compiler><FFLAGS><append>-delicious</append></FFLAGS></compiler>"""
xml2 = """<compiler MACH="{}"><FFLAGS><base>-cake</base></FFLAGS></compiler>""".format(self.test_machine)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("FFLAGS", "-cake -delicious")
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("FFLAGS", "-cake -delicious")
def test_machine_specific_base_and_append_flags(self):
"""Test that machine-specific base flags coexist with machine-specific append flags."""
xml1 = """<compiler MACH="{}"><FFLAGS><append>-delicious</append></FFLAGS></compiler>""".format(self.test_machine)
xml2 = """<compiler MACH="{}"><FFLAGS><base>-cake</base></FFLAGS></compiler>""".format(self.test_machine)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("FFLAGS", "-cake -delicious")
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("FFLAGS", "-cake -delicious")
def test_append_flags_without_base(self):
"""Test appending flags to a value set before Macros is included."""
xml1 = """<compiler><FFLAGS><append>-cake</append></FFLAGS></compiler>"""
tester = self.xml_to_tester(xml1)
tester.assert_variable_equals("FFLAGS", "-delicious -cake", var={"FFLAGS": "-delicious"})
def test_build_time_append_flags(self):
"""Test build_time selection of compiler flags."""
xml1 = """<compiler><FFLAGS><append>-cake</append></FFLAGS></compiler>"""
xml2 = """<compiler><FFLAGS><append DEBUG="TRUE">-and-pie</append></FFLAGS></compiler>"""
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("FFLAGS", "-cake")
tester.assert_variable_matches("FFLAGS", "^(-cake -and-pie|-and-pie -cake)$", env={"DEBUG": "TRUE"})
def test_environment_variable_insertion(self):
"""Test that ENV{..} inserts environment variables."""
# DO it again with $ENV{} style
xml1 = """<compiler><LDFLAGS><append>-L$ENV{NETCDF} -lnetcdf</append></LDFLAGS></compiler>"""
tester = self.xml_to_tester(xml1)
tester.assert_variable_equals("LDFLAGS", "-L/path/to/netcdf -lnetcdf",
env={"NETCDF": "/path/to/netcdf"})
def test_shell_command_insertion(self):
"""Test that $SHELL insert shell command output."""
xml1 = """<compiler><FFLAGS><base>-O$SHELL{echo 2} -fast</base></FFLAGS></compiler>"""
tester = self.xml_to_tester(xml1)
tester.assert_variable_equals("FFLAGS", "-O2 -fast")
def test_multiple_shell_commands(self):
"""Test that more than one $SHELL element can be used."""
xml1 = """<compiler><FFLAGS><base>-O$SHELL{echo 2} -$SHELL{echo fast}</base></FFLAGS></compiler>"""
tester = self.xml_to_tester(xml1)
tester.assert_variable_equals("FFLAGS", "-O2 -fast")
def test_env_and_shell_command(self):
"""Test that $ENV works inside $SHELL elements."""
xml1 = """<compiler><FFLAGS><base>-O$SHELL{echo $ENV{OPT_LEVEL}} -fast</base></FFLAGS></compiler>"""
tester = self.xml_to_tester(xml1)
tester.assert_variable_equals("FFLAGS", "-O2 -fast", env={"OPT_LEVEL": "2"})
def test_config_variable_insertion(self):
"""Test that $VAR insert variables from config_compilers."""
# Construct an absurd chain of references just to sure that we don't
# pass by accident, e.g. outputting things in the right order just due
# to good luck in a hash somewhere.
xml1 = """<MPI_LIB_NAME>stuff-${MPI_PATH}-stuff</MPI_LIB_NAME>"""
xml2 = """<MPI_PATH>${MPICC}</MPI_PATH>"""
xml3 = """<MPICC>${MPICXX}</MPICC>"""
xml4 = """<MPICXX>${MPIFC}</MPICXX>"""
xml5 = """<MPIFC>mpicc</MPIFC>"""
tester = self.xml_to_tester("<compiler>"+xml1+xml2+xml3+xml4+xml5+"</compiler>")
tester.assert_variable_equals("MPI_LIB_NAME", "stuff-mpicc-stuff")
def test_config_reject_self_references(self):
"""Test that $VAR self-references are rejected."""
# This is a special case of the next test, which also checks circular
# references.
xml1 = """<MPI_LIB_NAME>${MPI_LIB_NAME}</MPI_LIB_NAME>"""
err_msg = r".* has bad \$VAR references. Check for circular references or variables that are used in a \$VAR but not actually defined."
with assertRaisesRegex(self,CIMEError, err_msg):
self.xml_to_tester("<compiler>"+xml1+"</compiler>")
def test_config_reject_cyclical_references(self):
"""Test that cyclical $VAR references are rejected."""
xml1 = """<MPI_LIB_NAME>${MPI_PATH}</MPI_LIB_NAME>"""
xml2 = """<MPI_PATH>${MPI_LIB_NAME}</MPI_PATH>"""
err_msg = r".* has bad \$VAR references. Check for circular references or variables that are used in a \$VAR but not actually defined."
with assertRaisesRegex(self,CIMEError, err_msg):
self.xml_to_tester("<compiler>"+xml1+xml2+"</compiler>")
def test_variable_insertion_with_machine_specific_setting(self):
"""Test that machine-specific $VAR dependencies are correct."""
xml1 = """<compiler><MPI_LIB_NAME>something</MPI_LIB_NAME></compiler>"""
xml2 = """<compiler MACH="{}"><MPI_LIB_NAME>$MPI_PATH</MPI_LIB_NAME></compiler>""".format(self.test_machine)
xml3 = """<compiler><MPI_PATH>${MPI_LIB_NAME}</MPI_PATH></compiler>"""
err_msg = r".* has bad \$VAR references. Check for circular references or variables that are used in a \$VAR but not actually defined."
with assertRaisesRegex(self,CIMEError, err_msg):
self.xml_to_tester(xml1+xml2+xml3)
def test_override_with_machine_and_new_attributes(self):
"""Test that overrides with machine-specific settings with added attributes work correctly."""
xml1 = """
<compiler COMPILER="{}">
<SCC>icc</SCC>
<MPICXX>mpicxx</MPICXX>
<MPIFC>mpif90</MPIFC>
<MPICC>mpicc</MPICC>
</compiler>""".format(self.test_compiler)
xml2 = """
<compiler COMPILER="{}" MACH="{}">
<MPICXX>mpifoo</MPICXX>
<MPIFC MPILIB="{}">mpiffoo</MPIFC>
<MPICC MPILIB="NOT_MY_MPI">mpifouc</MPICC>
</compiler>
""".format(self.test_compiler, self.test_machine, self.test_mpilib)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("SCC", "icc", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
tester.assert_variable_equals("MPICXX", "mpifoo", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
tester.assert_variable_equals("MPIFC", "mpiffoo", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
tester.assert_variable_equals("MPICC", "mpicc", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("SCC", "icc", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
tester.assert_variable_equals("MPICXX", "mpifoo", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
tester.assert_variable_equals("MPIFC", "mpiffoo", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
tester.assert_variable_equals("MPICC", "mpicc", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
def test_override_with_machine_and_same_attributes(self):
"""Test that machine-specific conditional overrides with the same attribute work correctly."""
xml1 = """
<compiler COMPILER="{}">
<MPIFC MPILIB="{}">mpifc</MPIFC>
</compiler>""".format(self.test_compiler, self.test_mpilib)
xml2 = """
<compiler MACH="{}" COMPILER="{}">
<MPIFC MPILIB="{}">mpif90</MPIFC>
</compiler>
""".format(self.test_machine, self.test_compiler, self.test_mpilib)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("MPIFC", "mpif90", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("MPIFC", "mpif90", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib})
def test_appends_not_overriden(self):
"""Test that machine-specific base value changes don't interfere with appends."""
xml1="""
<compiler COMPILER="{}">
<FFLAGS>
<base>-base1</base>
<append DEBUG="FALSE">-debug1</append>
</FFLAGS>
</compiler>""".format(self.test_compiler)
xml2="""
<compiler MACH="{}" COMPILER="{}">
<FFLAGS>
<base>-base2</base>
<append DEBUG="TRUE">-debug2</append>
</FFLAGS>
</compiler>""".format(self.test_machine, self.test_compiler)
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("FFLAGS", "-base2", env={"COMPILER": self.test_compiler})
tester.assert_variable_equals("FFLAGS", "-base2 -debug2", env={"COMPILER": self.test_compiler, "DEBUG": "TRUE"})
tester.assert_variable_equals("FFLAGS", "-base2 -debug1", env={"COMPILER": self.test_compiler, "DEBUG": "FALSE"})
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("FFLAGS", "-base2", env={"COMPILER": self.test_compiler})
tester.assert_variable_equals("FFLAGS", "-base2 -debug2", env={"COMPILER": self.test_compiler, "DEBUG": "TRUE"})
tester.assert_variable_equals("FFLAGS", "-base2 -debug1", env={"COMPILER": self.test_compiler, "DEBUG": "FALSE"})
def test_multilevel_specificity(self):
"""Check that settings with multiple levels of machine-specificity can be resolved."""
xml1="""
<compiler>
<MPIFC DEBUG="FALSE">mpifc</MPIFC>
</compiler>"""
xml2="""
<compiler OS="{}">
<MPIFC MPILIB="{}">mpif03</MPIFC>
</compiler>""".format(self.test_os, self.test_mpilib)
xml3="""
<compiler MACH="{}">
<MPIFC DEBUG="TRUE">mpif90</MPIFC>
</compiler>""".format(self.test_machine)
# To verify order-independence, test every possible ordering of blocks.
testers = []
testers.append(self.xml_to_tester(xml1+xml2+xml3))
testers.append(self.xml_to_tester(xml1+xml3+xml2))
testers.append(self.xml_to_tester(xml2+xml1+xml3))
testers.append(self.xml_to_tester(xml2+xml3+xml1))
testers.append(self.xml_to_tester(xml3+xml1+xml2))
testers.append(self.xml_to_tester(xml3+xml2+xml1))
for tester in testers:
tester.assert_variable_equals("MPIFC", "mpif90", env={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib, "DEBUG": "TRUE"})
tester.assert_variable_equals("MPIFC", "mpif03", env={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib, "DEBUG": "FALSE"})
tester.assert_variable_equals("MPIFC", "mpifc", env={"COMPILER": self.test_compiler, "MPILIB": "NON_MATCHING_MPI", "DEBUG": "FALSE"})
def test_remove_dependency_issues(self):
"""Check that overridden settings don't cause inter-variable dependencies."""
xml1="""
<compiler>
<MPIFC>${SFC}</MPIFC>
</compiler>"""
xml2="""
<compiler MACH="{}">""".format(self.test_machine) + """
<SFC>${MPIFC}</SFC>
<MPIFC>mpif90</MPIFC>
</compiler>"""
tester = self.xml_to_tester(xml1+xml2)
tester.assert_variable_equals("SFC", "mpif90")
tester.assert_variable_equals("MPIFC", "mpif90")
tester = self.xml_to_tester(xml2+xml1)
tester.assert_variable_equals("SFC", "mpif90")
tester.assert_variable_equals("MPIFC", "mpif90")
###############################################################################
class I_TestCMakeMacros(H_TestMakeMacros):
###############################################################################
"""CMake macros tests.
This class contains tests of the CMake output of Build.
This class simply inherits all of the methods of H_TestMakeMacros, but changes
the definition of xml_to_tester to create a CMakeTester instead.
"""
def xml_to_tester(self, xml_string):
"""Helper that directly converts an XML string to a MakefileTester."""
test_xml = _wrap_config_compilers_xml(xml_string)
if (NO_CMAKE):
self.skipTest("Skipping cmake test")
else:
return CMakeTester(self, get_macros(self._maker, test_xml, "CMake"))
###############################################################################
class S_TestManageAndQuery(unittest.TestCase):
"""Tests various scripts to manage and query xml files"""
def _run_and_assert_query_testlist(self, extra_args=""):
"""Ensure that query_testlist runs successfully with the given extra arguments"""
files = Files()
testlist_drv = files.get_value("TESTS_SPEC_FILE", {"component":"drv"})
run_cmd_assert_result(self, "{}/query_testlists --xml-testlist {} {}".format(
SCRIPT_DIR, testlist_drv, extra_args))
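# Illustrative (paths are placeholders): with SCRIPT_DIR pointing at cime/scripts, the
# command assembled above looks roughly like
#   <SCRIPT_DIR>/query_testlists --xml-testlist <drv TESTS_SPEC_FILE> --count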
def test_query_testlists_runs(self):
"""Make sure that query_testlists runs successfully
This simply makes sure that query_testlists doesn't generate any errors
when it runs. This helps ensure that changes in other utilities don't
break query_testlists.
"""
self._run_and_assert_query_testlist(extra_args="--show-options")
def test_query_testlists_define_testtypes_runs(self):
"""Make sure that query_testlists runs successfully with the --define-testtypes argument"""
self._run_and_assert_query_testlist(extra_args="--define-testtypes")
def test_query_testlists_count_runs(self):
"""Make sure that query_testlists runs successfully with the --count argument"""
self._run_and_assert_query_testlist(extra_args="--count")
def test_query_testlists_list_runs(self):
"""Make sure that query_testlists runs successfully with the --list argument"""
self._run_and_assert_query_testlist(extra_args="--list categories")
###############################################################################
class B_CheckCode(unittest.TestCase):
###############################################################################
# Tests are generated in the main loop below
longMessage = True
all_results = None
def make_pylint_test(pyfile, all_files):
def test(self):
if B_CheckCode.all_results is None:
B_CheckCode.all_results = check_code(all_files)
#pylint: disable=unsubscriptable-object
result = B_CheckCode.all_results[pyfile]
self.assertTrue(result == "", msg=result)
return test
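# Illustrative (hypothetical path): for a checkable file such as "scripts/lib/CIME/utils.py",
# the main loop below attaches the generated test to B_CheckCode as
# test_pylint_scripts_lib_CIME_utils_py, so pylint failures are reported per file.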
def check_for_pylint():
#pylint: disable=import-error
from distutils.spawn import find_executable
pylint = find_executable("pylint")
if pylint is not None:
output = run_cmd_no_fail("pylint --version")
pylintver = re.search(r"pylint\s+(\d+)[.](\d+)[.](\d+)", output)
major = int(pylintver.group(1))
minor = int(pylintver.group(2))
if pylint is None or major < 1 or (major == 1 and minor < 5):
print("pylint version 1.5 or newer not found, pylint tests skipped")
return False
return True
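# For reference (assumed "pylint --version" output format): a line like "pylint 1.9.4, ..."
# matches the regex with groups ("1", "9", "4"), so major=1, minor=9 passes the >= 1.5
# requirement, while e.g. a 1.4.x install would cause the pylint tests to be skipped.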
def write_provenance_info():
curr_commit = get_current_commit(repo=LIB_DIR)
logging.info("\nTesting commit %s" % curr_commit)
cime_model = CIME.utils.get_model()
logging.info("Using cime_model = %s" % cime_model)
logging.info("Testing machine = %s" % MACHINE.get_machine_name())
if TEST_COMPILER is not None:
logging.info("Testing compiler = %s"% TEST_COMPILER)
if TEST_MPILIB is not None:
logging.info("Testing mpilib = %s"% TEST_MPILIB)
logging.info("Test root: %s\n" % TEST_ROOT)
def _main_func(description):
global MACHINE
global NO_CMAKE
global FAST_ONLY
global NO_BATCH
global TEST_COMPILER
global TEST_MPILIB
global TEST_ROOT
global GLOBAL_TIMEOUT
global NO_TEARDOWN
config = CIME.utils.get_cime_config()
help_str = \
"""
{0} [TEST] [TEST]
OR
{0} --help
\033[1mEXAMPLES:\033[0m
\033[1;32m# Run the full suite \033[0m
> {0}
\033[1;32m# Run all code checker tests \033[0m
> {0} B_CheckCode
\033[1;32m# Run test test_wait_for_test_all_pass from class M_TestWaitForTests \033[0m
> {0} M_TestWaitForTests.test_wait_for_test_all_pass
""".format(os.path.basename(sys.argv[0]))
parser = argparse.ArgumentParser(usage=help_str,
description=description,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--fast", action="store_true",
help="Skip full system tests, which saves a lot of time")
parser.add_argument("--no-batch", action="store_true",
help="Do not submit jobs to batch system, run locally."
" If false, will default to machine setting.")
parser.add_argument("--no-cmake", action="store_true",
help="Do not run cmake tests")
parser.add_argument("--no-teardown", action="store_true",
help="Do not delete directories left behind by testing")
parser.add_argument("--machine",
help="Select a specific machine setting for cime")
parser.add_argument("--compiler",
help="Select a specific compiler setting for cime")
parser.add_argument( "--mpilib",
help="Select a specific compiler setting for cime")
parser.add_argument( "--test-root",
help="Select a specific test root for all cases created by the testing")
parser.add_argument("--timeout", type=int,
help="Select a specific timeout for all tests")
ns, args = parser.parse_known_args()
# Now set the sys.argv to the unittest_args (leaving sys.argv[0] alone)
sys.argv[1:] = args
FAST_ONLY = ns.fast
NO_BATCH = ns.no_batch
NO_CMAKE = ns.no_cmake
GLOBAL_TIMEOUT = ns.timeout
NO_TEARDOWN = ns.no_teardown
if ns.machine is not None:
MACHINE = Machines(machine=ns.machine)
os.environ["CIME_MACHINE"] = ns.machine
elif "CIME_MACHINE" in os.environ:
mach_name = os.environ["CIME_MACHINE"]
MACHINE = Machines(machine=mach_name)
elif config.has_option("create_test", "MACHINE"):
MACHINE = Machines(machine=config.get("create_test", "MACHINE"))
elif config.has_option("main", "MACHINE"):
MACHINE = Machines(machine=config.get("main", "MACHINE"))
else:
MACHINE = Machines()
if ns.compiler is not None:
TEST_COMPILER = ns.compiler
elif config.has_option("create_test", "COMPILER"):
TEST_COMPILER = config.get("create_test", "COMPILER")
elif config.has_option("main", "COMPILER"):
TEST_COMPILER = config.get("main", "COMPILER")
if ns.mpilib is not None:
TEST_MPILIB = ns.mpilib
elif config.has_option("create_test", "MPILIB"):
TEST_MPILIB = config.get("create_test", "MPILIB")
elif config.has_option("main", "MPILIB"):
TEST_MPILIB = config.get("main", "MPILIB")
if ns.test_root is not None:
TEST_ROOT = ns.test_root
elif config.has_option("create_test", "TEST_ROOT"):
TEST_ROOT = config.get("create_test", "TEST_ROOT")
else:
TEST_ROOT = os.path.join(MACHINE.get_value("CIME_OUTPUT_ROOT"),
"scripts_regression_test.%s"% CIME.utils.get_timestamp())
args = lambda: None # just something to set attrs on
for log_param in ["debug", "silent", "verbose"]:
flag = "--%s" % log_param
if flag in sys.argv:
sys.argv.remove(flag)
setattr(args, log_param, True)
else:
setattr(args, log_param, False)
args = CIME.utils.parse_args_and_handle_standard_logging_options(args, None)
write_provenance_info()
# Find all python files in repo and create a pylint test for each
if check_for_pylint():
files_to_test = get_all_checkable_files()
for file_to_test in files_to_test:
pylint_test = make_pylint_test(file_to_test, files_to_test)
testname = "test_pylint_%s" % file_to_test.replace("/", "_").replace(".", "_")
expect(not hasattr(B_CheckCode, testname), "Repeat %s" % testname)
setattr(B_CheckCode, testname, pylint_test)
try:
unittest.main(verbosity=2, catchbreak=True)
except CIMEError as e:
if e.__str__() != "False":
print("Detected failures, leaving directory:", TEST_ROOT)
else:
print("All pass, removing directory:", TEST_ROOT)
if os.path.exists(TEST_ROOT) and not NO_TEARDOWN:
shutil.rmtree(TEST_ROOT)
raise
if (__name__ == "__main__"):
_main_func(__doc__)
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip)
import electrum
from electrum import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest)
from electrum.bitcoin import COIN, is_address
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain,
UserCancelled, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
decimal_point_to_base_unit_name,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs)
from electrum.util import PR_TYPE_ONCHAIN, PR_TYPE_LN
from electrum.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.util import PR_PAID, PR_FAILED
from electrum.util import pr_expiration_values
from electrum.lnutil import ln_dummy_address
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
#from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER, MONOSPACE_FONT)
from .util import ButtonsTextEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
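# Illustrative construction sketch (the callback name is a placeholder): the button is flat,
# 25px wide, and calls a parameterless function on click or Return, e.g.
#   StatusBarButton(read_QIcon("status_connected.png"), _("Network"), show_network_dialog_callback)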
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
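# Illustrative usage sketch (the method name below is hypothetical): decorating an
# ElectrumWindow method with @protected prompts for the wallet password before the
# body runs, e.g.
#
#   @protected
#   def do_sign_something(self, data, password):
#       self.wallet.do_something(data, password=password)  # hypothetical call
#
# 'password' is None for unencrypted wallets; on cancel the wrapper returns early and
# the decorated body never runs.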
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
assert wallet, "no wallet"
self.wallet = wallet
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.pluginsdialog = None
self.tl_windows = []
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(config.get('num_zeros', 0))
self.completions = QStringListModel()
coincontrol_sb = self.create_coincontrol_statusbar()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.channels_tab = self.create_channels_tab(wallet)
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
if self.wallet.has_lightning():
add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
central_widget = QWidget()
vbox = QVBoxLayout(central_widget)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(tabs)
vbox.addWidget(coincontrol_sb)
self.setCentralWidget(central_widget)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'invoice_status', 'request_status']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
#self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread(self)
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
elif event == 'channels_updated':
self.channels_list.update_rows.emit(*args)
elif event == 'channel':
self.channels_list.update_single_row.emit(*args)
self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
pass
elif event == 'fee_histogram':
self.history_model.on_fee_histogram()
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.daemon = True
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.lnworker and wallet.network:
wallet.network.trigger_callback('channels_updated', wallet)
self.need_update.set()
# Once the GUI has been initialized, check whether anything needs to be announced that arrived via callbacks before the GUI existed
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "LBRY Vault Testnet" if constants.net.TESTNET else "LBRY Vault"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
if self.wallet.has_lightning():
add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# "Settings" / "Preferences" are reserved menu titles on macOS; use a different label as a workaround
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
tools_menu.addAction(_("&Lightning Network"), self.gui_object.show_lightning_dialog).setEnabled(bool(self.wallet.has_lightning() and self.network))
tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog).setEnabled(bool(self.network and self.network.local_watchtower))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "LBRY Vault",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("LBRY Vault's focus is speed, with low resource usage and simplifying LBRY Credits.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the LBRY system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(self, version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
self.request_list.refresh_status()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
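# Worked example (illustrative): with decimal_point=8 and num_zeros=0, an amount of
# 1234567 sat is rendered as 0.01234567.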
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
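# Worked example (illustrative): fee_rate=100000 sat/kB is displayed as 100000/1000 = 100 sat/byte.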
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
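# Worked example (illustrative, assuming the fiat edit returns the typed decimal and the
# BTC edit returns satoshis): with rate=50000 and COIN=100_000_000, typing 25000 in the
# fiat field sets btc_e to int(25000 / 50000 * COIN) = 50_000_000 sat (0.5 BTC); editing
# btc_e instead recomputes the fiat text as amount * rate / COIN.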
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
if self.wallet.lnworker:
l = self.wallet.lnworker.get_balance()
text += u' \U0001f5f2 %s'%(self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
if self.status_button:
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.channels_list.update_rows.emit(wallet)
self.update_completions()
def create_channels_tab(self, wallet):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, *, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, parent=self, desc=tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', 3600)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding LBRY Credits addresses.'),
_('The LBRY Credits address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(True)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('On-chain'))
self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.wallet.has_lightning():
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 3, 1, 2)
self.receive_payreq_e = ButtonsTextEdit()
self.receive_payreq_e.addCopyButton(self.app)
self.receive_payreq_e.setReadOnly(True)
self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=220)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
def on_receive_address_changed():
addr = str(self.receive_address_e.text())
self.receive_address_widgets.setVisible(bool(addr))
msg = _('LBRY Credits address where the payment should be received. Note that each payment request uses a different LBRY Credits address.')
receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(on_receive_address_changed)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
self.receive_address_e.setMinimumHeight(6 * char_width_in_lineedit())
self.receive_address_e.setMaximumHeight(10 * char_width_in_lineedit())
qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
self.receive_requests_label = QLabel(_('Incoming payments'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
receive_tabbed_widgets = QTabWidget()
receive_tabbed_widgets.addTab(self.receive_qr, 'QR Code')
receive_tabbed_widgets.addTab(self.receive_payreq_e, 'Text')
vbox_receive_address = QVBoxLayout()
vbox_receive_address.setContentsMargins(0, 0, 0, 0)
vbox_receive_address.setSpacing(0)
vbox_receive_address.addWidget(receive_address_label)
vbox_receive_address.addWidget(self.receive_address_e)
self.receive_address_widgets = QWidget()
self.receive_address_widgets.setLayout(vbox_receive_address)
size_policy = self.receive_address_widgets.sizePolicy()
size_policy.setRetainSizeWhenHidden(True)
self.receive_address_widgets.setSizePolicy(size_policy)
vbox_receive = QVBoxLayout()
vbox_receive.addWidget(receive_tabbed_widgets)
vbox_receive.addWidget(self.receive_address_widgets)
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addLayout(vbox_receive)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
on_receive_address_changed()
return w
def delete_request(self, key):
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self, is_lightning):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', 3600)
if is_lightning:
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
self.address_list.update()
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
def create_bitcoin_request(self, amount, message, expiration):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
if title is None:
tooltip_text = _("Text copied to clipboard").format(title)
else:
tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
if maybe_extract_bolt11_invoice(uri):
# encode lightning invoices as uppercase so QR encoding can use
# alphanumeric mode; resulting in smaller QR codes
uri = uri.upper()
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a LBRY Credits address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a LBRY Credits address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = FreezableLineEdit()
self.message_e.setMinimumWidth(700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(100)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.send_button = EnterButton(_("Pay"), self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
self.set_onchain(False)
self.invoices_label = QLabel(_('Outgoing payments'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
outputs = self.payto_e.get_outputs(True)
if not outputs:
return
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=self.get_coins(),
outputs=outputs,
fee=fee_est,
is_sweep=False)
try:
tx = make_tx(None)
except (NotEnoughFunds, NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs) as e:
self.max_button.setChecked(False)
self.show_error(str(e))
return
self.max_button.setChecked(True)
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
@protected
def protect(self, func, args, password):
return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Bitcoin Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") + f"{err.idx+1}: {err.line_content[:40]}... ({repr(err.exc)})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
def pay_lightning_invoice(self, invoice, amount_sat=None):
attempts = LN_NUM_PAYMENT_ATTEMPTS
def task():
self.wallet.lnworker.pay(invoice, amount_sat, attempts)
self.do_clear()
self.wallet.thread.add(task)
self.invoice_list.update()
def on_request_status(self, key, status):
if key not in self.wallet.receive_requests:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
def on_invoice_status(self, key, status):
if key not in self.wallet.invoices:
return
self.invoice_list.update_item(key, status)
if status == PR_PAID:
self.show_message(_('Payment succeeded'))
self.need_update.set()
elif status == PR_FAILED:
self.show_error(_('Payment failed'))
else:
pass
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
if not self._is_onchain:
invoice = self.payto_e.lightning_invoice
if not invoice:
return
if not self.wallet.lnworker:
self.show_error(_('Lightning is disabled'))
return
invoice_dict = self.wallet.lnworker.parse_bech32_invoice(invoice)
if invoice_dict.get('amount') is None:
amount = self.amount_e.get_amount()
if amount:
invoice_dict['amount'] = amount
else:
self.show_error(_('No amount'))
return
return invoice_dict
else:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(outputs, message, self.payment_request, self.payto_URI)
def do_save_invoice(self):
invoice = self.read_invoice()
if not invoice:
return
self.wallet.save_invoice(invoice)
self.do_clear()
self.invoice_list.update()
def do_pay(self):
invoice = self.read_invoice()
if not invoice:
return
self.wallet.save_invoice(invoice)
self.invoice_list.update()
self.do_clear()
self.do_pay_invoice(invoice)
def pay_multiple_invoices(self, invoices):
outputs = []
for invoice in invoices:
outputs += invoice['outputs']
self.pay_onchain_dialog(self.get_coins(), outputs)
def do_pay_invoice(self, invoice):
if invoice['type'] == PR_TYPE_LN:
self.pay_lightning_invoice(invoice['invoice'], amount_sat=invoice['amount'])
elif invoice['type'] == PR_TYPE_ONCHAIN:
outputs = invoice['outputs']
self.pay_onchain_dialog(self.get_coins(), outputs)
else:
raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
"""Return a list of selected coins or None.
Note: None means selection is not being used,
while an empty sequence means the user specifically selected that.
"""
return self.utxo_list.get_spend_list()
def pay_onchain_dialog(self, inputs: Sequence[PartialTxInput],
outputs: List[PartialTxOutput], *,
external_keypairs=None) -> None:
# trustedcoin requires this
if run_hook('abort_send', self):
return
is_sweep = bool(external_keypairs)
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=inputs,
outputs=outputs,
fee=fee_est,
is_sweep=is_sweep)
output_values = [x.value for x in outputs]
if output_values.count('!') > 1:
self.show_error(_("More than one output set to spend max"))
return
if self.config.get('advanced_preview'):
self.preview_tx_dialog(make_tx=make_tx,
external_keypairs=external_keypairs)
return
output_value = '!' if '!' in output_values else sum(output_values)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
if d.not_enough_funds:
self.show_message(_('Not Enough Funds'))
return
cancelled, is_send, password, tx = d.run()
if cancelled:
return
if is_send:
def sign_done(success):
if success:
self.broadcast_or_show(tx)
self.sign_tx_with_password(tx, callback=sign_done, password=password,
external_keypairs=external_keypairs)
else:
self.preview_tx_dialog(make_tx=make_tx,
external_keypairs=external_keypairs)
def preview_tx_dialog(self, *, make_tx, external_keypairs=None):
d = PreviewTxDialog(make_tx=make_tx, external_keypairs=external_keypairs,
window=self)
d.show()
def broadcast_or_show(self, tx: Transaction):
if not tx.is_complete():
self.show_transaction(tx)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
self.show_transaction(tx)
return
self.broadcast_transaction(tx)
@protected
def sign_tx(self, tx, *, callback, external_keypairs, password):
self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if external_keypairs:
# can sign directly
task = partial(tx.sign, external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx: Transaction):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Invoice has expired")
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
return False, e.get_message_for_gui()
except BestEffortRequestFailed as e:
return False, repr(e)
# success
txid = tx.txid()
if pr:
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return True, txid
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
success, msg = result
if success:
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, funding_sat):
coins = self.get_coins(nonlocal_only=True)
make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(coins=coins,
funding_sat=funding_sat,
fee_est=fee_est)
return make_tx
def open_channel(self, connect_str, funding_sat, push_amt):
# use ConfirmTxDialog
# we need to know the fee before we broadcast, because the txid is required
# however, the user must not be allowed to broadcast early
make_tx = self.mktx_for_open_channel(funding_sat)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
cancelled, is_send, password, funding_tx = d.run()
if not is_send:
return
if cancelled:
return
# read funding_sat from tx; converts '!' to int value
funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
def task():
return self.wallet.lnworker.open_channel(connect_str=connect_str,
funding_tx=funding_tx,
funding_sat=funding_sat,
push_amt_sat=push_amt,
password=password)
def on_success(args):
chan, funding_tx = args
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
if not funding_tx.is_complete():
message += '\n\n' + _('Please sign and broadcast the funding transaction')
self.show_message(message)
if not funding_tx.is_complete():
self.show_transaction(funding_tx)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(e))
WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b: bool) -> None:
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and invoice['status'] == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
self.set_onchain(True)
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def parse_lightning_invoice(self, invoice):
"""Parse ln invoice, and prepare the send tab for it."""
from electrum.lnaddr import lndecode, LnDecodeException
try:
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
raise LnDecodeException(e) from e
pubkey = bh2u(lnaddr.pubkey.serialize())
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.message_e.setText(description)
if lnaddr.amount is not None:
self.amount_e.setAmount(lnaddr.amount * COIN)
#self.amount_e.textEdited.emit("")
self.set_onchain(False)
def set_onchain(self, b):
self._is_onchain = b
self.max_button.setEnabled(b)
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
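# Illustrative 'Pay to many' input for the field described above (placeholders, not real addresses):
#   <recipient address 1>, 0.01
#   <recipient address 2>, 0.25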
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
invoice = self.wallet.get_invoice(key)
if invoice is None:
self.show_error('Cannot find payment request in wallet.')
return
bip70 = invoice.get('bip70')
if bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(bip70))
pr.verify(self.contacts)
self.show_bip70_details(pr)
def show_bip70_details(self, pr: 'paymentrequest.PaymentRequest'):
key = pr.get_id()
d = WindowModalDialog(self, _("BIP70 Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x.value)+ self.base_unit() + ' @ ' + x.address, pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
data = f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
# note: "delete" disabled as invoice is saved with a different key in wallet.invoices that we do not have here
# def do_delete():
# if self.question(_('Delete invoice?')):
# self.wallet.delete_invoice(key)
# self.history_list.update()
# self.invoice_list.update()
# d.close()
# deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, CloseButton(d)))
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.db.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(config=self.config,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
if self.wallet.has_lightning() and self.network:
self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
sb.addPermanentWidget(self.lightning_button)
self.status_button = None
if self.network:
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
self.coincontrol_sb = sb = QStatusBar()
sb.setSizeGripEnabled(False)
#sb.setFixedHeight(3 * char_width_in_lineedit())
sb.setStyleSheet('QStatusBar::item {border: None;} '
+ ColorScheme.GREEN.as_stylesheet(True))
self.coincontrol_label = QLabel()
self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
sb.addWidget(self.coincontrol_label)
clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
clear_cc_button.setStyleSheet("margin-right: 5px;")
sb.addPermanentWidget(clear_cc_button)
sb.setVisible(False)
return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
def change_password_dialog(self):
from electrum.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def disable_lightning(self):
warning = _('This will delete your lightning private keys')
r = self.question(_('Disable Lightning payments?') + '\n\n' + warning)
if not r:
return
self.wallet.remove_lightning()
self.show_warning(_('Lightning keys have been removed. This wallet will be closed'))
self.close()
def enable_lightning(self):
warning1 = _("Lightning support in Electrum is experimental. Do not put large amounts in lightning channels.")
warning2 = _("Funds stored in lightning channels are not recoverable from your seed. You must backup your wallet file everytime you create a new channel.")
r = self.question(_('Enable Lightning payments?') + '\n\n' + _('WARNINGS') + ': ' + '\n\n' + warning1 + '\n\n' + warning2)
if not r:
return
self.wallet.init_lightning()
self.show_warning(_('Lightning keys have been initialized. This wallet will be closed'))
self.close()
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.db.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
# lightning
if self.wallet.has_lightning():
lightning_b = QPushButton(_('Disable'))
lightning_b.clicked.connect(dialog.close)
lightning_b.clicked.connect(self.disable_lightning)
lightning_label = QLabel(_('Enabled'))
lightning_b.setDisabled(bool(self.wallet.lnworker.channels))
else:
lightning_b = QPushButton(_('Enable'))
lightning_b.clicked.connect(dialog.close)
lightning_b.clicked.connect(self.enable_lightning)
lightning_label = QLabel(_('Disabled'))
grid.addWidget(QLabel(_('Lightning')), 5, 0)
grid.addWidget(lightning_label, 5, 1)
grid.addWidget(lightning_b, 5, 2)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
mpk_text.repaint() # macOS hack for #4777
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
# only show the combobox if multiple master keys are defined
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
btns = run_hook('wallet_info_buttons', self, dialog) or Buttons(CloseButton(dialog))
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
# if redeem_script:
# vbox.addWidget(QLabel(_("Redeem Script") + ':'))
# rds_e = ShowQRTextEdit(text=redeem_script)
# rds_e.addCopyButton(self.app)
# vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
from electrum.transaction import tx_from_any
try:
return tx_from_any(data)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(repr(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("bitcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = self.getOpenFileName(_("Select your transaction file"),
TRANSACTION_FILE_EXTENSION_FILTER)
if not fileName:
return
try:
with open(fileName, "rb") as f:
file_content = f.read() # type: Union[str, bytes]
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
self.show_message(repr(e))
return
scriptpubkey = bfh(bitcoin.address_to_script(addr))
outputs = [PartialTxOutput(scriptpubkey=scriptpubkey, value='!')]
self.warn_if_watching_only()
self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.db.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.db.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx: Transaction, new_tx: PartialTransaction) -> None:
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
fee_for_child = fee_e.get_amount()
if fee_for_child is None:
return
out_amt = max_fee - fee_for_child
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_for_child
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
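# Illustrative arithmetic (assumed numbers): with total_size = 400 vbytes, parent_fee = 200 sat
# and a target of 5 sat/vbyte (fee_per_kb = 5000), the child fee is 5000 * 400 / 1000 - 200 = 1800 sat,
# which is then clamped to at least total_size and at most max_fee by the two lines above.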
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee is None:
return # fee left empty, treat it as "cancel"
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx: Transaction):
txid = tx.txid()
assert txid
fee = self.wallet.get_tx_fee(txid)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(txid)
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
vbox.addWidget(QLabel(_('Current Fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('Current Fee rate') + ': %s' % self.format_fee_rate(1000 * old_fee_rate)))
vbox.addWidget(QLabel(_('New Fee rate') + ':'))
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
vbox.addWidget(feerate_e)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_slider.deactivate()
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, coins=self.get_coins())
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_desc=tx_label)
def save_transaction_into_wallet(self, tx: Transaction):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_db()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
|
grpc.py
|
"""
Utilities for running GRPC services: compile protobuf, patch legacy versions, etc
"""
from __future__ import annotations
import os
import threading
from typing import Any, Dict, Iterable, Iterator, NamedTuple, Optional, Tuple, Type, TypeVar, Union
import grpc
from hivemind.proto import runtime_pb2
from hivemind.utils.logging import get_logger
from hivemind.utils.networking import Endpoint
from hivemind.utils.timed_storage import TimedStorage, ValueWithExpiration, get_dht_time
logger = get_logger(__name__)
Stub = TypeVar("Stub")
GRPC_KEEPALIVE_OPTIONS = (
("grpc.keepalive_time_ms", 60 * 1000),
("grpc.keepalive_timeout_ms", 60 * 1000),
("grpc.keepalive_permit_without_calls", True),
("grpc.http2.max_pings_without_data", 0),
("grpc.http2.min_time_between_pings_ms", 30 * 1000),
("grpc.http2.min_ping_interval_without_data_ms", 10 * 1000),
)
class ChannelInfo(NamedTuple):
target: Endpoint
aio: bool
options: Tuple[Tuple[str, str], ...]
credentials: Optional[grpc.ChannelCredentials]
compression: Optional[grpc.Compression]
class ChannelCache(TimedStorage[ChannelInfo, Tuple[Union[grpc.Channel, grpc.aio.Channel], Dict]]):
"""
A process-wide cache of gRPC channels, supports both normal and aio channels, secure/insecure channels, etc
Based on grpcio internal channel cache by Richard Belleville and Lidi Zheng (thanks!)
Unlike TimedStorage, ChannelCache actively evicts stale channels even if the cache is not accessed
Unlike grpc._simple_stubs.ChannelCache, this implementation supports aio and does not forcibly close active channels
"""
MAXIMUM_CHANNELS = int(os.environ.get("GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM", 4096))
EVICTION_PERIOD_SECONDS = float(os.environ.get("GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS", 10 * 60))
logger.debug(f"Eviction period = {EVICTION_PERIOD_SECONDS}s, max channels = {MAXIMUM_CHANNELS}")
_singleton: Optional[ChannelCache] = None
_singleton_pid: int = os.getpid()
_lock: threading.RLock = threading.RLock()
_update_eviction_evt: threading.Event = threading.Event()
def __init__(self, _created_as_singleton=False):
assert _created_as_singleton, f"Please use {self.__class__.__name__}.get_singleton()"
super().__init__(maxsize=self.MAXIMUM_CHANNELS)
self._is_active = True
self._nearest_expiration_time = float("inf")
self._eviction_thread = threading.Thread(target=self._evict_stale_channels_in_background, daemon=True)
self._eviction_thread.start()
@classmethod
def get_singleton(cls):
"""Get or create the channel cache for the current process"""
with cls._lock:
if cls._singleton is None or cls._singleton_pid != os.getpid():
if cls._singleton is not None:
cls._singleton._stop_background_thread()
cls._singleton, cls._singleton_pid = cls(_created_as_singleton=True), os.getpid()
return cls._singleton
@classmethod
def get_stub(
cls,
target: Endpoint,
stub_type: Type[Stub],
*,
aio: bool,
options: Tuple[Tuple[str, Any]] = (),
channel_credentials: Optional[grpc.ChannelCredentials] = None,
compression: Optional[grpc.Compression] = None,
) -> Stub:
"""
Create a grpc channel with given options or reuse pre-existing one
:param target: the recipient's address and port
:param stub_type: a gRPC stub (client) to be instantiated
:param aio: if True, returns grpc.aio.Channel, otherwise returns grpc.Channel
:param options: see https://grpc.github.io/grpc/core/group__grpc__arg__keys.html
:param channel_credentials: if specified, create a secure channel using these credentials (default = insecure)
:param compression: see https://github.com/grpc/grpc/tree/master/examples/python/compression
"""
cache = cls.get_singleton()
with cls._lock:
key = ChannelInfo(target, aio, tuple(options), channel_credentials, compression)
entry: ValueWithExpiration = super(cls, cache).get(key)
if entry is not None:
channel, stubs = entry.value
else:
channel = cls._create_channel(*key)
stubs = {}
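# probe connectivity with try_to_connect=True so the channel begins connecting eagerly
# (note: this reaches into a private grpc attribute of the channel object)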
channel._channel.check_connectivity_state(True)
if stub_type not in stubs:
stubs[stub_type] = stub_type(channel)
# either cache channel or update expiration of an existing channel
expiration_time = get_dht_time() + cls.EVICTION_PERIOD_SECONDS
super(cls, cache).store(key, (channel, stubs), expiration_time)
if expiration_time < cache._nearest_expiration_time:
cache._nearest_expiration_time = expiration_time
cls._update_eviction_evt.set()
return stubs[stub_type]
@classmethod
def _create_channel(
cls,
target: Endpoint,
aio: bool,
extra_options: Tuple[Tuple[str, Any], ...],
channel_credentials: Optional[grpc.ChannelCredentials],
compression: Optional[grpc.Compression],
) -> Union[grpc.Channel, grpc.aio.Channel]:
namespace = grpc.aio if aio else grpc
options = extra_options + GRPC_KEEPALIVE_OPTIONS
if channel_credentials is None:
logger.debug(
f"Creating insecure {namespace} channel with options '{options}' " f"and compression '{compression}'"
)
return namespace.insecure_channel(target, options=options, compression=compression)
else:
logger.debug(
f"Creating secure {namespace} channel with credentials '{channel_credentials}', "
f"options '{options}' and compression '{compression}'"
)
return namespace.secure_channel(
target, credentials=channel_credentials, options=options, compression=compression
)
def _evict_stale_channels_in_background(self):
while self._is_active:
now = get_dht_time()
time_to_wait = max(0.0, self._nearest_expiration_time - now)
interrupted_early = self._update_eviction_evt.wait(time_to_wait if time_to_wait != float("inf") else None)
if interrupted_early:
self._update_eviction_evt.clear()
continue
with self._lock:
self._remove_outdated()
_, entry = super().top()
self._nearest_expiration_time = entry.expiration_time if entry is not None else float("inf")
def _stop_background_thread(self):
with self._lock:
self._is_active = False
self._update_eviction_evt.set()
def store(self, *args, **kwargs) -> ValueError:
raise ValueError(f"Please use {self.__class__.__name__}.get_stub to get or create stubs")
def get(self, *args, **kwargs) -> ValueError:
raise ValueError(f"Please use {self.__class__.__name__}.get_stub to get or create stubs")
def top(self) -> ValueError:
raise ValueError(f"Please use {self.__class__.__name__}.get_stub to get or create stubs")
STREAMING_CHUNK_SIZE_BYTES = 2**16
def split_for_streaming(
serialized_tensor: runtime_pb2.Tensor,
chunk_size_bytes: int = STREAMING_CHUNK_SIZE_BYTES,
) -> Iterator[runtime_pb2.Tensor]:
"""Split serialized_tensor into multiple chunks for gRPC streaming"""
buffer = memoryview(serialized_tensor.buffer)
num_chunks = len(range(0, len(buffer), chunk_size_bytes))
yield runtime_pb2.Tensor(
compression=serialized_tensor.compression,
buffer=buffer[:chunk_size_bytes].tobytes(),
chunks=num_chunks,
size=serialized_tensor.size,
dtype=serialized_tensor.dtype,
requires_grad=serialized_tensor.requires_grad,
)
for chunk_start in range(chunk_size_bytes, len(buffer), chunk_size_bytes):
yield runtime_pb2.Tensor(buffer=buffer[chunk_start : chunk_start + chunk_size_bytes].tobytes())
def combine_from_streaming(stream: Iterable[runtime_pb2.Tensor]) -> runtime_pb2.Tensor:
"""Restore a result of split_into_chunks into a single serialized tensor"""
stream = iter(stream)
first_chunk = next(stream)
serialized_tensor = runtime_pb2.Tensor()
serialized_tensor.CopyFrom(first_chunk)
buffer_chunks = [first_chunk.buffer]
for tensor_part in stream:
buffer_chunks.append(tensor_part.buffer)
serialized_tensor.buffer = b"".join(buffer_chunks)
return serialized_tensor
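# Minimal round-trip sketch (not part of the original API): split a serialized tensor into
# streaming chunks and check that combining them restores the original buffer byte-for-byte.
def _streaming_roundtrip_example(serialized_tensor: runtime_pb2.Tensor, chunk_size: int = 2 ** 10) -> bool:
    chunks = list(split_for_streaming(serialized_tensor, chunk_size_bytes=chunk_size))
    assert all(len(chunk.buffer) <= chunk_size for chunk in chunks)
    restored = combine_from_streaming(chunks)
    return restored.buffer == serialized_tensor.buffer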
|
demoprocess.py
|
# Author: 甜咖啡
# Created: 2021/4/11 22:11
from multiprocessing import Process
def demo1(name):
print('Argument passed to the custom function:', name)
if __name__ == '__main__':
p = Process(target=demo1, args=('123',))
p.start()
p.join()
print(p.name)
print('Main process finished')
|
learner.py
|
from typing import Tuple
import glob
import os
import shutil
import signal
import threading
import time
from collections import OrderedDict, deque
from os.path import join
from queue import Empty, Queue, Full
from threading import Thread
import numpy as np
import psutil
import torch
# from torch.nn.utils.rnn import PackedSequence, invert_permutation
from torch.multiprocessing import Process, Event as MultiprocessingEvent
if os.name == 'nt':
from utils.faster_fifo_stub import Queue as MpQueue
else:
from faster_fifo import Queue as MpQueue
from algorithms.utils.algo_utils import TaskType, list_of_dicts_to_dict_of_lists, memory_stats, cuda_envvars_for_policy, \
TensorBatcher, iter_dicts_recursively, copy_dict_structure, ObjectPool
from algorithms.dqn.model import create_dqn
from algorithms.utils.action_distributions import get_action_distribution, is_continuous_action_space
from algorithms.utils.algo_utils import calculate_gae, EPS
from algorithms.utils.pytorch_utils import to_scalar
from utils.decay import LinearDecay
from utils.timing import Timing
from utils.utils import log, AttrDict, experiment_dir, ensure_dir_exists, join_or_kill, safe_get
class LearnerWorker:
def __init__(
self, worker_idx, policy_id, cfg, obs_space, action_space, report_queue, policy_worker_queues, shared_buffers,
policy_lock, resume_experience_collection_cv,
):
log.info('Initializing the learner %d for policy %d', worker_idx, policy_id)
self.worker_idx = worker_idx
self.policy_id = policy_id
self.cfg = cfg
# PBT-related stuff
self.should_save_model = True # set to true if we need to save the model to disk on the next training iteration
self.load_policy_id = None # non-None when we need to replace our parameters with another policy's parameters
self.pbt_mutex = threading.Lock()
self.new_cfg = None # non-None when we need to update the learning hyperparameters
self.terminate = False
self.num_batches_processed = 0
self.obs_space = obs_space
self.action_space = action_space
self.rollout_tensors = shared_buffers.tensor_trajectories
self.traj_tensors_available = shared_buffers.is_traj_tensor_available
self.policy_versions = shared_buffers.policy_versions
self.stop_experience_collection = shared_buffers.stop_experience_collection
self.stop_experience_collection_num_msgs = self.resume_experience_collection_num_msgs = 0
self.device = None
self.dqn = None
self.optimizer = None
self.policy_lock = policy_lock
self.resume_experience_collection_cv = resume_experience_collection_cv
self.task_queue = MpQueue()
self.report_queue = report_queue
self.initialized_event = MultiprocessingEvent()
self.initialized_event.clear()
self.model_saved_event = MultiprocessingEvent()
self.model_saved_event.clear()
# queues corresponding to policy workers using the same policy
# we send weight updates via these queues
self.policy_worker_queues = policy_worker_queues
self.experience_buffer_queue = Queue()
self.tensor_batch_pool = ObjectPool()
self.tensor_batcher = TensorBatcher(self.tensor_batch_pool)
self.with_training = True # set to False for debugging no-training regime
self.train_in_background = self.cfg.train_in_background_thread # set to False for debugging
self.training_thread = Thread(target=self._train_loop) if self.train_in_background else None
self.train_thread_initialized = threading.Event()
self.is_training = False
self.train_step = self.env_steps = 0
# decay rate at which summaries are collected
# save summaries every 20 seconds in the beginning, but decay to every 4 minutes in the limit, because we
# do not need frequent summaries for longer experiments
self.summary_rate_decay_seconds = LinearDecay([(0, 20), (100000, 120), (1000000, 240)])
self.last_summary_time = 0
self.last_saved_time = self.last_milestone_time = 0
self.discarded_experience_over_time = deque([], maxlen=30)
self.discarded_experience_timer = time.time()
self.num_discarded_rollouts = 0
self.process = Process(target=self._run, daemon=True)
if is_continuous_action_space(self.action_space) and self.cfg.exploration_loss == 'symmetric_kl':
raise NotImplementedError('KL-divergence exploration loss is not supported with '
'continuous action spaces. Use entropy exploration loss')
if self.cfg.exploration_loss_coeff == 0.0:
self.exploration_loss_func = lambda action_distr: 0.0
elif self.cfg.exploration_loss == 'entropy':
self.exploration_loss_func = self.entropy_exploration_loss
elif self.cfg.exploration_loss == 'symmetric_kl':
self.exploration_loss_func = self.symmetric_kl_exploration_loss
else:
raise NotImplementedError(f'{self.cfg.exploration_loss} not supported!')
def start_process(self):
self.process.start()
def _init(self):
log.info('Waiting for the learner to initialize...')
self.train_thread_initialized.wait()
log.info('Learner %d initialized', self.worker_idx)
self.initialized_event.set()
def _terminate(self):
self.terminate = True
def _broadcast_model_weights(self):
state_dict = self.dqn.main.state_dict()
policy_version = self.train_step
log.debug('Broadcast model weights for model version %d', policy_version)
model_state = (policy_version, state_dict)
for q in self.policy_worker_queues:
q.put((TaskType.INIT_MODEL, model_state))
def _mark_rollout_buffer_free(self, rollout):
r = rollout
self.traj_tensors_available[r.worker_idx, r.split_idx][r.env_idx, r.agent_idx, r.traj_buffer_idx] = 1
def _prepare_train_buffer(self, rollouts, macro_batch_size, timing):
trajectories = [AttrDict(r['t']) for r in rollouts]
with timing.add_time('buffers'):
buffer = AttrDict()
# by the end of this loop the buffer is a dictionary containing lists of numpy arrays
for i, t in enumerate(trajectories):
for key, x in t.items():
if key not in buffer:
buffer[key] = []
buffer[key].append(x)
# convert lists of dict observations to a single dictionary of lists
for key, x in buffer.items():
if isinstance(x[0], (dict, OrderedDict)):
buffer[key] = list_of_dicts_to_dict_of_lists(x)
if not self.cfg.with_vtrace:
with timing.add_time('calc_gae'):
buffer = self._calculate_gae(buffer)
with timing.add_time('batching'):
# concatenate rollouts from different workers into a single batch efficiently
# that is, if we already have memory for the buffers allocated, we can just copy the data into
# existing cached tensors instead of creating new ones. This is a performance optimization.
use_pinned_memory = self.cfg.device == 'gpu'
buffer = self.tensor_batcher.cat(buffer, macro_batch_size, use_pinned_memory, timing)
with timing.add_time('buff_ready'):
for r in rollouts:
self._mark_rollout_buffer_free(r)
with timing.add_time('tensors_gpu_float'):
device_buffer = self._copy_train_data_to_device(buffer)
with timing.add_time('squeeze'):
# will squeeze actions only in simple categorical case
tensors_to_squeeze = [
'actions', 'log_prob_actions', 'policy_version', 'values',
'rewards', 'dones', 'rewards_cpu', 'dones_cpu',
]
for tensor_name in tensors_to_squeeze:
device_buffer[tensor_name].squeeze_()
# we no longer need the cached buffer, and can put it back into the pool
self.tensor_batch_pool.put(buffer)
return device_buffer
def _macro_batch_size(self, batch_size):
return self.cfg.num_batches_per_iteration * batch_size
def _process_macro_batch(self, rollouts, batch_size, timing):
macro_batch_size = self._macro_batch_size(batch_size)
assert macro_batch_size % self.cfg.rollout == 0
assert self.cfg.rollout % self.cfg.recurrence == 0
assert macro_batch_size % self.cfg.recurrence == 0
samples = env_steps = 0
for rollout in rollouts:
samples += rollout['length']
env_steps += rollout['env_steps']
with timing.add_time('prepare'):
buffer = self._prepare_train_buffer(rollouts, macro_batch_size, timing)
self.experience_buffer_queue.put((buffer, batch_size, samples, env_steps))
if not self.cfg.benchmark and self.cfg.train_in_background_thread:
# in PyTorch 1.4.0 there is an intense memory spike when the very first batch is being processed
# we wait here until this is over so we can continue queueing more batches onto the GPU without
# the risk of running out of GPU memory
while self.num_batches_processed < 1:
log.debug('Waiting for the first batch to be processed')
time.sleep(0.5)
def _process_rollouts(self, rollouts, timing):
# batch_size can potentially change through PBT, so we capture it once here and pass it around
# via function arguments instead of reading the global self.cfg
batch_size = self.cfg.batch_size
rollouts_in_macro_batch = self._macro_batch_size(batch_size) // self.cfg.rollout
if len(rollouts) < rollouts_in_macro_batch:
return rollouts
discard_rollouts = 0
policy_version = self.train_step
for r in rollouts:
rollout_min_version = r['t']['policy_version'].min().item()
if policy_version - rollout_min_version >= self.cfg.max_policy_lag:
discard_rollouts += 1
self._mark_rollout_buffer_free(r)
else:
break
if discard_rollouts > 0:
log.warning(
'Discarding %d old rollouts, cut by policy lag threshold %d (learner %d)',
discard_rollouts, self.cfg.max_policy_lag, self.policy_id,
)
rollouts = rollouts[discard_rollouts:]
self.num_discarded_rollouts += discard_rollouts
if len(rollouts) >= rollouts_in_macro_batch:
# process newest rollouts
rollouts_to_process = rollouts[:rollouts_in_macro_batch]
rollouts = rollouts[rollouts_in_macro_batch:]
self._process_macro_batch(rollouts_to_process, batch_size, timing)
# log.info('Unprocessed rollouts: %d (%d samples)', len(rollouts), len(rollouts) * self.cfg.rollout)
return rollouts
def _get_minibatches(self, batch_size, experience_size):
"""Generating minibatches for training."""
assert self.cfg.rollout % self.cfg.recurrence == 0
assert experience_size % batch_size == 0, f'experience size: {experience_size}, batch size: {batch_size}'
if self.cfg.num_batches_per_iteration == 1:
return [None] # single minibatch is actually the entire buffer, we don't need indices
# indices that will start the mini-trajectories from the same episode (for bptt)
indices = np.arange(0, experience_size, self.cfg.recurrence)
indices = np.random.permutation(indices)
# complete indices of mini trajectories, e.g. with recurrence==4: [4, 16] -> [4, 5, 6, 7, 16, 17, 18, 19]
indices = [np.arange(i, i + self.cfg.recurrence) for i in indices]
indices = np.concatenate(indices)
assert len(indices) == experience_size
num_minibatches = experience_size // batch_size
minibatches = np.split(indices, num_minibatches)
return minibatches
@staticmethod
def _get_minibatch(buffer, indices):
if indices is None:
# handle the case of a single batch, where the entire buffer is a minibatch
return buffer
mb = AttrDict()
for item, x in buffer.items():
if isinstance(x, (dict, OrderedDict)):
mb[item] = AttrDict()
for key, x_elem in x.items():
mb[item][key] = x_elem[indices]
else:
mb[item] = x[indices]
return mb
def _should_save_summaries(self):
summaries_every_seconds = self.summary_rate_decay_seconds.at(self.train_step)
if time.time() - self.last_summary_time < summaries_every_seconds:
return False
return True
def _after_optimizer_step(self):
"""A hook to be called after each optimizer step."""
self.train_step += 1
self._maybe_save()
def _maybe_save(self):
if time.time() - self.last_saved_time >= self.cfg.save_every_sec or self.should_save_model:
self._save()
self.model_saved_event.set()
self.should_save_model = False
self.last_saved_time = time.time()
@staticmethod
def checkpoint_dir(cfg, policy_id):
checkpoint_dir = join(experiment_dir(cfg=cfg), f'checkpoint_p{policy_id}')
return ensure_dir_exists(checkpoint_dir)
@staticmethod
def get_checkpoints(checkpoints_dir):
checkpoints = glob.glob(join(checkpoints_dir, 'checkpoint_*'))
return sorted(checkpoints)
def _get_checkpoint_dict(self):
checkpoint = {
'train_step': self.train_step,
'env_steps': self.env_steps,
'model': self.dqn.main.state_dict(),
'optimizer': self.optimizer.state_dict(),
}
return checkpoint
def _save(self):
checkpoint = self._get_checkpoint_dict()
assert checkpoint is not None
checkpoint_dir = self.checkpoint_dir(self.cfg, self.policy_id)
tmp_filepath = join(checkpoint_dir, '.temp_checkpoint')
checkpoint_name = f'checkpoint_{self.train_step:09d}_{self.env_steps}.pth'
filepath = join(checkpoint_dir, checkpoint_name)
log.info('Saving %s...', tmp_filepath)
torch.save(checkpoint, tmp_filepath)
log.info('Renaming %s to %s', tmp_filepath, filepath)
os.rename(tmp_filepath, filepath)
while len(self.get_checkpoints(checkpoint_dir)) > self.cfg.keep_checkpoints:
oldest_checkpoint = self.get_checkpoints(checkpoint_dir)[0]
if os.path.isfile(oldest_checkpoint):
log.debug('Removing %s', oldest_checkpoint)
os.remove(oldest_checkpoint)
if self.cfg.save_milestones_sec > 0:
# milestones enabled
if time.time() - self.last_milestone_time >= self.cfg.save_milestones_sec:
milestones_dir = ensure_dir_exists(join(checkpoint_dir, 'milestones'))
milestone_path = join(milestones_dir, f'{checkpoint_name}.milestone')
log.debug('Saving a milestone %s', milestone_path)
shutil.copy(filepath, milestone_path)
self.last_milestone_time = time.time()
def _prepare_observations(self, obs_tensors, gpu_buffer_obs):
for d, gpu_d, k, v, _ in iter_dicts_recursively(obs_tensors, gpu_buffer_obs):
device, dtype = self.actor_critic.device_and_type_for_input_tensor(k) #TODO
tensor = v.detach().to(device, copy=True).type(dtype)
gpu_d[k] = tensor
def _copy_train_data_to_device(self, buffer):
device_buffer = copy_dict_structure(buffer)
for key, item in buffer.items():
if key == 'obs':
self._prepare_observations(item, device_buffer['obs'])
else:
device_tensor = item.detach().to(self.device, copy=True, non_blocking=True)
device_buffer[key] = device_tensor.float()
device_buffer['dones_cpu'] = buffer.dones.to('cpu', copy=True, non_blocking=True).float()
device_buffer['rewards_cpu'] = buffer.rewards.to('cpu', copy=True, non_blocking=True).float()
return device_buffer
def _train(self, gpu_buffer, batch_size, experience_size, timing):
with torch.no_grad():
early_stopping_tolerance = 1e-6
early_stop = False
prev_epoch_actor_loss = 1e9
epoch_actor_losses = []
# V-trace parameters
# noinspection PyArgumentList
rho_hat = torch.Tensor([self.cfg.vtrace_rho])
# noinspection PyArgumentList
c_hat = torch.Tensor([self.cfg.vtrace_c])
clip_ratio_high = 1.0 + self.cfg.ppo_clip_ratio # e.g. 1.1
# this still works with e.g. clip_ratio = 2, while PPO's 1-r would give negative ratio
clip_ratio_low = 1.0 / clip_ratio_high
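# e.g. ppo_clip_ratio = 0.1 gives a clip range of [1/1.1, 1.1] ~= [0.909, 1.1]; even ppo_clip_ratio = 2
# yields a valid range [1/3, 3.0], whereas the symmetric 1 - r form would produce a negative lower bound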
clip_value = self.cfg.ppo_clip_value
gamma = self.cfg.gamma
recurrence = self.cfg.recurrence
if self.cfg.with_vtrace:
assert recurrence == self.cfg.rollout and recurrence > 1, \
'V-trace requires recurrence and rollout to be equal'
num_sgd_steps = 0
stats_and_summaries = None
if not self.with_training:
return stats_and_summaries
for epoch in range(self.cfg.ppo_epochs):
with timing.add_time('epoch_init'):
if early_stop or self.terminate:
break
summary_this_epoch = force_summaries = False
minibatches = self._get_minibatches(batch_size, experience_size)
for batch_num in range(len(minibatches)):
with timing.add_time('minibatch_init'):
indices = minibatches[batch_num]
# current minibatch consisting of short trajectory segments with length == recurrence
mb = self._get_minibatch(gpu_buffer, indices)
# calculate policy head outside of recurrent loop
with timing.add_time('forward_head'):
head_outputs = self.actor_critic.forward_head(mb.obs)
# initial rnn states
with timing.add_time('bptt_initial'):
if self.cfg.use_rnn:
head_output_seq, rnn_states, inverted_select_inds = build_rnn_inputs(
head_outputs, mb.dones_cpu, mb.rnn_states, recurrence,
)
else:
rnn_states = mb.rnn_states[::recurrence]
# calculate RNN outputs for each timestep in a loop
with timing.add_time('bptt'):
if self.cfg.use_rnn:
with timing.add_time('bptt_forward_core'):
core_output_seq, _ = self.actor_critic.forward_core(head_output_seq, rnn_states)
core_outputs = build_core_out_from_seq(core_output_seq, inverted_select_inds)
else:
core_outputs, _ = self.actor_critic.forward_core(head_outputs, rnn_states)
with timing.add_time('tail'):
assert core_outputs.shape[0] == head_outputs.shape[0]
# calculate policy tail outside of recurrent loop
result = self.actor_critic.forward_tail(core_outputs, with_action_distribution=True)
action_distribution = result.action_distribution
log_prob_actions = action_distribution.log_prob(mb.actions)
ratio = torch.exp(log_prob_actions - mb.log_prob_actions) # pi / pi_old
# super large/small values can cause numerical problems and are probably noise anyway
ratio = torch.clamp(ratio, 0.05, 20.0)
values = result.values.squeeze()
num_trajectories = head_outputs.size(0) // recurrence
with torch.no_grad(): # these computations are not the part of the computation graph
if self.cfg.with_vtrace:
ratios_cpu = ratio.cpu()
values_cpu = values.cpu()
rewards_cpu = mb.rewards_cpu
dones_cpu = mb.dones_cpu
vtrace_rho = torch.min(rho_hat, ratios_cpu)
vtrace_c = torch.min(c_hat, ratios_cpu)
vs = torch.zeros((num_trajectories * recurrence))
adv = torch.zeros((num_trajectories * recurrence))
next_values = (values_cpu[recurrence - 1::recurrence] - rewards_cpu[recurrence - 1::recurrence]) / gamma
next_vs = next_values
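# V-trace recursion (IMPALA, Espeholt et al. 2018), evaluated backwards over each trajectory;
# this is what the loop below computes, with rho_t / c_t being the clipped importance weights above:
#   delta_t = rho_t * (r_t + gamma * (1 - done_t) * V(x_{t+1}) - V(x_t))
#   vs_t    = V(x_t) + delta_t + gamma * (1 - done_t) * c_t * (vs_{t+1} - V(x_{t+1}))
#   adv_t   = rho_t * (r_t + gamma * (1 - done_t) * vs_{t+1} - V(x_t))
# the resulting vs values become the value-function targets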
with timing.add_time('vtrace'):
for i in reversed(range(self.cfg.recurrence)):
rewards = rewards_cpu[i::recurrence]
dones = dones_cpu[i::recurrence]
not_done = 1.0 - dones
not_done_times_gamma = not_done * gamma
curr_values = values_cpu[i::recurrence]
curr_vtrace_rho = vtrace_rho[i::recurrence]
curr_vtrace_c = vtrace_c[i::recurrence]
delta_s = curr_vtrace_rho * (rewards + not_done_times_gamma * next_values - curr_values)
adv[i::recurrence] = curr_vtrace_rho * (rewards + not_done_times_gamma * next_vs - curr_values)
next_vs = curr_values + delta_s + not_done_times_gamma * curr_vtrace_c * (next_vs - next_values)
vs[i::recurrence] = next_vs
next_values = curr_values
targets = vs
else:
# using regular GAE
adv = mb.advantages
targets = mb.returns
adv_mean = adv.mean()
adv_std = adv.std()
adv = (adv - adv_mean) / max(1e-3, adv_std) # normalize advantage
adv = adv.to(self.device)
with timing.add_time('losses'):
policy_loss = self._policy_loss(ratio, adv, clip_ratio_low, clip_ratio_high)
exploration_loss = self.exploration_loss_func(action_distribution)
actor_loss = policy_loss + exploration_loss
epoch_actor_losses.append(actor_loss.item())
targets = targets.to(self.device)
old_values = mb.values
value_loss = self._value_loss(values, old_values, targets, clip_value)
critic_loss = value_loss
loss = actor_loss + critic_loss
high_loss = 30.0
if abs(to_scalar(policy_loss)) > high_loss or abs(to_scalar(value_loss)) > high_loss or abs(to_scalar(exploration_loss)) > high_loss:
log.warning(
'High loss value: %.4f %.4f %.4f %.4f (recommended to adjust the --reward_scale parameter)',
to_scalar(loss), to_scalar(policy_loss), to_scalar(value_loss), to_scalar(exploration_loss),
)
force_summaries = True
with timing.add_time('update'):
# update the weights
self.optimizer.zero_grad()
loss.backward()
if self.cfg.max_grad_norm > 0.0:
with timing.add_time('clip'):
torch.nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.cfg.max_grad_norm)
curr_policy_version = self.train_step # policy version before the weight update
with self.policy_lock:
self.optimizer.step()
num_sgd_steps += 1
with torch.no_grad():
with timing.add_time('after_optimizer'):
self._after_optimizer_step()
# collect and report summaries
with_summaries = self._should_save_summaries() or force_summaries
if with_summaries and not summary_this_epoch:
stats_and_summaries = self._record_summaries(AttrDict(locals()))
summary_this_epoch = True
force_summaries = False
# end of an epoch
# this will force policy update on the inference worker (policy worker)
self.policy_versions[self.policy_id] = self.train_step
new_epoch_actor_loss = np.mean(epoch_actor_losses)
loss_delta_abs = abs(prev_epoch_actor_loss - new_epoch_actor_loss)
if loss_delta_abs < early_stopping_tolerance:
early_stop = True
log.debug(
'Early stopping after %d epochs (%d sgd steps), loss delta %.7f',
epoch + 1, num_sgd_steps, loss_delta_abs,
)
break
prev_epoch_actor_loss = new_epoch_actor_loss
epoch_actor_losses = []
return stats_and_summaries
def _record_summaries(self, train_loop_vars):
var = train_loop_vars
self.last_summary_time = time.time()
stats = AttrDict()
grad_norm = sum(
p.grad.data.norm(2).item() ** 2
for p in self.actor_critic.parameters()
if p.grad is not None
) ** 0.5
stats.grad_norm = grad_norm
stats.loss = var.loss
stats.value = var.result.values.mean()
stats.entropy = var.action_distribution.entropy().mean()
stats.policy_loss = var.policy_loss
stats.value_loss = var.value_loss
stats.exploration_loss = var.exploration_loss
stats.adv_min = var.adv.min()
stats.adv_max = var.adv.max()
stats.adv_std = var.adv_std
stats.max_abs_logprob = torch.abs(var.mb.action_logits).max()
if hasattr(var.action_distribution, 'summaries'):
stats.update(var.action_distribution.summaries())
if var.epoch == self.cfg.ppo_epochs - 1 and var.batch_num == len(var.minibatches) - 1:
# we collect these stats only for the last PPO batch, or every time if we're only doing one batch, IMPALA-style
ratio_mean = torch.abs(1.0 - var.ratio).mean().detach()
ratio_min = var.ratio.min().detach()
ratio_max = var.ratio.max().detach()
# log.debug('Learner %d ratio mean min max %.4f %.4f %.4f', self.policy_id, ratio_mean.cpu().item(), ratio_min.cpu().item(), ratio_max.cpu().item())
value_delta = torch.abs(var.values - var.old_values)
value_delta_avg, value_delta_max = value_delta.mean(), value_delta.max()
# calculate KL-divergence with the behaviour policy action distribution
old_action_distribution = get_action_distribution(
self.actor_critic.action_space, var.mb.action_logits,
)
kl_old = var.action_distribution.kl_divergence(old_action_distribution)
kl_old_mean = kl_old.mean()
stats.kl_divergence = kl_old_mean
stats.value_delta = value_delta_avg
stats.value_delta_max = value_delta_max
stats.fraction_clipped = ((var.ratio < var.clip_ratio_low).float() + (var.ratio > var.clip_ratio_high).float()).mean()
stats.ratio_mean = ratio_mean
stats.ratio_min = ratio_min
stats.ratio_max = ratio_max
stats.num_sgd_steps = var.num_sgd_steps
# this caused numerical issues on some versions of PyTorch with second moment reaching infinity
adam_max_second_moment = 0.0
for key, tensor_state in self.optimizer.state.items():
adam_max_second_moment = max(tensor_state['exp_avg_sq'].max().item(), adam_max_second_moment)
stats.adam_max_second_moment = adam_max_second_moment
version_diff = var.curr_policy_version - var.mb.policy_version
stats.version_diff_avg = version_diff.mean()
stats.version_diff_min = version_diff.min()
stats.version_diff_max = version_diff.max()
for key, value in stats.items():
stats[key] = to_scalar(value)
return stats
# def _update_pbt(self):
# """To be called from the training loop, same thread that updates the model!"""
# with self.pbt_mutex:
# if self.load_policy_id is not None:
# assert self.cfg.with_pbt
# log.debug('Learner %d loads policy from %d', self.policy_id, self.load_policy_id)
# self.load_from_checkpoint(self.load_policy_id)
# self.load_policy_id = None
# if self.new_cfg is not None:
# for key, value in self.new_cfg.items():
# if self.cfg[key] != value:
# log.debug('Learner %d replacing cfg parameter %r with new value %r', self.policy_id, key, value)
# self.cfg[key] = value
# for param_group in self.optimizer.param_groups:
# param_group['lr'] = self.cfg.learning_rate
# param_group['betas'] = (self.cfg.adam_beta1, self.cfg.adam_beta2)
# log.debug('Updated optimizer lr to value %.7f, betas: %r', param_group['lr'], param_group['betas'])
# self.new_cfg = None
@staticmethod
def load_checkpoint(checkpoints, device):
if len(checkpoints) <= 0:
log.warning('No checkpoints found')
return None
else:
latest_checkpoint = checkpoints[-1]
# extra safety mechanism to recover from spurious filesystem errors
num_attempts = 3
for attempt in range(num_attempts):
try:
log.warning('Loading state from checkpoint %s...', latest_checkpoint)
checkpoint_dict = torch.load(latest_checkpoint, map_location=device)
return checkpoint_dict
except Exception:
log.exception(f'Could not load from checkpoint, attempt {attempt}')
def _load_state(self, checkpoint_dict, load_progress=True):
if load_progress:
self.train_step = checkpoint_dict['train_step']
self.env_steps = checkpoint_dict['env_steps']
self.actor_critic.load_state_dict(checkpoint_dict['model'])
self.optimizer.load_state_dict(checkpoint_dict['optimizer'])
log.info('Loaded experiment state at training iteration %d, env step %d', self.train_step, self.env_steps)
def init_model(self, timing):
self.actor_critic = create_actor_critic(self.cfg, self.obs_space, self.action_space, timing)
self.actor_critic.model_to_device(self.device)
self.actor_critic.share_memory()
def load_from_checkpoint(self, policy_id):
checkpoints = self.get_checkpoints(self.checkpoint_dir(self.cfg, policy_id))
checkpoint_dict = self.load_checkpoint(checkpoints, self.device)
if checkpoint_dict is None:
log.debug('Did not load from checkpoint, starting from scratch!')
else:
log.debug('Loading model from checkpoint')
# if we're replacing our policy with another policy (under PBT), let's not reload the env_steps
load_progress = policy_id == self.policy_id
self._load_state(checkpoint_dict, load_progress=load_progress)
def initialize(self, timing):
with timing.timeit('init'):
# initialize the Torch modules
if self.cfg.seed is None:
log.info('Starting seed is not provided')
else:
log.info('Setting fixed seed %d', self.cfg.seed)
torch.manual_seed(self.cfg.seed)
np.random.seed(self.cfg.seed)
# this does not help with a single experiment
# but seems to do better when we're running more than one experiment in parallel
torch.set_num_threads(1)
if self.cfg.device == 'gpu':
torch.backends.cudnn.benchmark = True
# we should already see only one CUDA device, because of env vars
assert torch.cuda.device_count() == 1
self.device = torch.device('cuda', index=0)
else:
self.device = torch.device('cpu')
self.init_model(timing)
self.optimizer = torch.optim.Adam(
self.actor_critic.parameters(),
self.cfg.learning_rate,
betas=(self.cfg.adam_beta1, self.cfg.adam_beta2),
eps=self.cfg.adam_eps,
)
self.load_from_checkpoint(self.policy_id)
self._broadcast_model_weights() # sync the very first version of the weights
self.train_thread_initialized.set()
def _process_training_data(self, data, timing, wait_stats=None):
self.is_training = True
buffer, batch_size, samples, env_steps = data
assert samples == batch_size * self.cfg.num_batches_per_iteration
self.env_steps += env_steps
experience_size = buffer.rewards.shape[0]
stats = dict(learner_env_steps=self.env_steps, policy_id=self.policy_id)
with timing.add_time('train'):
discarding_rate = self._discarding_rate()
self._update_pbt()
train_stats = self._train(buffer, batch_size, experience_size, timing)
if train_stats is not None:
stats['train'] = train_stats
if wait_stats is not None:
wait_avg, wait_min, wait_max = wait_stats
stats['train']['wait_avg'] = wait_avg
stats['train']['wait_min'] = wait_min
stats['train']['wait_max'] = wait_max
stats['train']['discarded_rollouts'] = self.num_discarded_rollouts
stats['train']['discarding_rate'] = discarding_rate
stats['stats'] = memory_stats('learner', self.device)
self.is_training = False
try:
self.report_queue.put(stats)
except Full:
log.warning('Could not report training stats, the report queue is full!')
def _train_loop(self):
timing = Timing()
self.initialize(timing)
wait_times = deque([], maxlen=self.cfg.num_workers)
last_cache_cleanup = time.time()
while not self.terminate:
with timing.timeit('train_wait'):
data = safe_get(self.experience_buffer_queue)
if self.terminate:
break
wait_stats = None
wait_times.append(timing.train_wait)
if len(wait_times) >= wait_times.maxlen:
wait_times_arr = np.asarray(wait_times)
wait_avg = np.mean(wait_times_arr)
wait_min, wait_max = wait_times_arr.min(), wait_times_arr.max()
# log.debug(
# 'Training thread had to wait %.5f s for the new experience buffer (avg %.5f)',
# timing.train_wait, wait_avg,
# )
wait_stats = (wait_avg, wait_min, wait_max)
self._process_training_data(data, timing, wait_stats)
self.num_batches_processed += 1
if time.time() - last_cache_cleanup > 300.0 or (not self.cfg.benchmark and self.num_batches_processed < 50):
if self.cfg.device == 'gpu':
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
last_cache_cleanup = time.time()
time.sleep(0.3)
log.info('Train loop timing: %s', timing)
del self.actor_critic
del self.device
def _experience_collection_rate_stats(self):
now = time.time()
if now - self.discarded_experience_timer > 1.0:
self.discarded_experience_timer = now
self.discarded_experience_over_time.append((now, self.num_discarded_rollouts))
def _discarding_rate(self):
if len(self.discarded_experience_over_time) <= 1:
return 0
first, last = self.discarded_experience_over_time[0], self.discarded_experience_over_time[-1]
delta_rollouts = last[1] - first[1]
delta_time = last[0] - first[0]
discarding_rate = delta_rollouts / (delta_time + EPS)
return discarding_rate
def _extract_rollouts(self, data):
data = AttrDict(data)
worker_idx, split_idx, traj_buffer_idx = data.worker_idx, data.split_idx, data.traj_buffer_idx
rollouts = []
for rollout_data in data.rollouts:
env_idx, agent_idx = rollout_data['env_idx'], rollout_data['agent_idx']
tensors = self.rollout_tensors.index((worker_idx, split_idx, env_idx, agent_idx, traj_buffer_idx))
rollout_data['t'] = tensors
rollout_data['worker_idx'] = worker_idx
rollout_data['split_idx'] = split_idx
rollout_data['traj_buffer_idx'] = traj_buffer_idx
rollouts.append(AttrDict(rollout_data))
return rollouts
# def _process_pbt_task(self, pbt_task):
# task_type, data = pbt_task
# with self.pbt_mutex:
# if task_type == PbtTask.SAVE_MODEL:
# policy_id = data
# assert policy_id == self.policy_id
# self.should_save_model = True
# elif task_type == PbtTask.LOAD_MODEL:
# policy_id, new_policy_id = data
# assert policy_id == self.policy_id
# assert new_policy_id is not None
# self.load_policy_id = new_policy_id
# elif task_type == PbtTask.UPDATE_CFG:
# policy_id, new_cfg = data
# assert policy_id == self.policy_id
# self.new_cfg = new_cfg
def _accumulated_too_much_experience(self, rollouts):
max_minibatches_to_accumulate = self.cfg.num_minibatches_to_accumulate
if max_minibatches_to_accumulate == -1:
# default value
max_minibatches_to_accumulate = 2 * self.cfg.num_batches_per_iteration
# allow the max batches to accumulate, plus the minibatches we're currently training on
max_minibatches_on_learner = max_minibatches_to_accumulate + self.cfg.num_batches_per_iteration
minibatches_currently_training = int(self.is_training) * self.cfg.num_batches_per_iteration
rollouts_per_minibatch = self.cfg.batch_size / self.cfg.rollout
# count contribution from unprocessed rollouts
minibatches_currently_accumulated = len(rollouts) / rollouts_per_minibatch
# count minibatches ready for training
minibatches_currently_accumulated += self.experience_buffer_queue.qsize() * self.cfg.num_batches_per_iteration
total_minibatches_on_learner = minibatches_currently_training + minibatches_currently_accumulated
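# worked example with a hypothetical config: num_batches_per_iteration=2, batch_size=1024, rollout=32,
# num_minibatches_to_accumulate=-1 -> cap of 2*2 + 2 = 6 minibatches on the learner; one minibatch
# corresponds to 1024/32 = 32 rollouts, so ~192 rollouts' worth of data (unprocessed + queued + in
# training) is enough to trigger the stop-experience-collection signal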
return total_minibatches_on_learner >= max_minibatches_on_learner
def _run(self):
# workers should ignore Ctrl+C because the termination is handled in the event loop by a special msg
signal.signal(signal.SIGINT, signal.SIG_IGN)
try:
psutil.Process().nice(self.cfg.default_niceness)
except psutil.AccessDenied:
log.error('Low niceness requires sudo!')
if self.cfg.device == 'gpu':
cuda_envvars_for_policy(self.policy_id, 'learner')
torch.multiprocessing.set_sharing_strategy('file_system')
torch.set_num_threads(self.cfg.learner_main_loop_num_cores)
timing = Timing()
rollouts = []
if self.train_in_background:
self.training_thread.start()
else:
self.initialize(timing)
log.error(
'train_in_background set to False on learner %d! This is slow, use only for testing!', self.policy_id,
)
while not self.terminate:
while True:
try:
tasks = self.task_queue.get_many(timeout=0.005)
for task_type, data in tasks:
if task_type == TaskType.TRAIN:
with timing.add_time('extract'):
rollouts.extend(self._extract_rollouts(data))
# log.debug('Learner %d has %d rollouts', self.policy_id, len(rollouts))
elif task_type == TaskType.INIT:
self._init()
elif task_type == TaskType.TERMINATE:
time.sleep(0.3)
log.info('GPU learner timing: %s', timing)
self._terminate()
break
elif task_type == TaskType.PBT:
self._process_pbt_task(data)
except Empty:
break
if self._accumulated_too_much_experience(rollouts):
# if we accumulated too much experience, signal the policy workers to stop experience collection
if not self.stop_experience_collection[self.policy_id]:
self.stop_experience_collection_num_msgs += 1
# TODO: add a logger function for this
if self.stop_experience_collection_num_msgs >= 50:
log.info(
'Learner %d accumulated too much experience, stop experience collection! '
'Learner is likely a bottleneck in your experiment (%d times)',
self.policy_id, self.stop_experience_collection_num_msgs,
)
self.stop_experience_collection_num_msgs = 0
self.stop_experience_collection[self.policy_id] = True
elif self.stop_experience_collection[self.policy_id]:
# otherwise, resume the experience collection if it was stopped
self.stop_experience_collection[self.policy_id] = False
with self.resume_experience_collection_cv:
self.resume_experience_collection_num_msgs += 1
if self.resume_experience_collection_num_msgs >= 50:
log.debug('Learner %d is resuming experience collection!', self.policy_id)
self.resume_experience_collection_num_msgs = 0
self.resume_experience_collection_cv.notify_all()
with torch.no_grad():
rollouts = self._process_rollouts(rollouts, timing)
if not self.train_in_background:
while not self.experience_buffer_queue.empty():
training_data = self.experience_buffer_queue.get()
self._process_training_data(training_data, timing)
self._experience_collection_rate_stats()
if self.train_in_background:
self.experience_buffer_queue.put(None)
self.training_thread.join()
def init(self):
self.task_queue.put((TaskType.INIT, None))
self.initialized_event.wait()
def save_model(self, timeout=None):
self.model_saved_event.clear()
save_task = (PbtTask.SAVE_MODEL, self.policy_id)
self.task_queue.put((TaskType.PBT, save_task))
log.debug('Wait while learner %d saves the model...', self.policy_id)
if self.model_saved_event.wait(timeout=timeout):
log.debug('Learner %d saved the model!', self.policy_id)
else:
log.warning('Model saving request timed out!')
self.model_saved_event.clear()
def close(self):
self.task_queue.put((TaskType.TERMINATE, None))
def join(self):
join_or_kill(self.process)
|
bkup_runOnLinux.py
|
#!/usr/bin/env python
"""This script is used by ../../runOnLinux
- THIS IS A BACKUP FILE FOR THE manual sput and sput -b
This is not used anymore
"""
import threading
import warnings
import re
from test_gfe_unittest import *
class BaseGfeForLinux(BaseGfeTest):
def read_uart_out_until_stop (self,run_event,stop_event):
while (not stop_event.is_set()):
run_event.wait()
pending = self.gfe.uart_session.in_waiting
if pending:
data = self.gfe.uart_session.read(pending)
sys.stdout.write(data)
time.sleep(1)
return
def flush_uart_out (self,timeout=1):
while (True):
time.sleep (timeout)
pending = self.gfe.uart_session.in_waiting
if (not pending):
return
dump = self.gfe.uart_session.read(pending)
def interpreter_sput (self, sourceFilePath, destFilePath, isBinary=False):
### Check that the source file exists
sourceFilePath = os.path.expanduser(sourceFilePath)
if (not os.path.isfile(sourceFilePath)):
warnings.warn("%s: Cannot open or file does not exist. Press Enter to continue..." % (sourceFilePath), SyntaxWarning)
return
### Check that the destination file can be created, then delete and recreate it
self.gfe.uart_session.write('touch ' + destFilePath + '\r')
try:
self.check_uart_out(5,expected_contents=[], absent_contents="No such file or directory")
except:
warnings.warn("%s: Cannot open or file does not exist. Press Enter to continue..." % (destFilePath), SyntaxWarning)
return
self.gfe.uart_session.write('rm ' + destFilePath + '\r')
self.gfe.uart_session.write('touch ' + destFilePath + '\r')
fileToPut = sourceFilePath
fileFromPut = destFilePath
beginTime = time.time()
if (isBinary):
try:
subprocess.call("busybox uuencode -m {0} < {0} >{0}.enc.orig".format(sourceFilePath), shell=True)
#The encoded file has the wrong path which would cause issues when decoded. Also, using sed in-place will
#truncate the file because of flushing limits (binary file has a very very long single line)
#.enc.orig has the wrong path
subprocess.call ("sed \'s:{0}:{1}:w {0}.enc\' {0}.enc.orig > dump; rm dump".format(sourceFilePath, destFilePath) ,shell=True)
#.enc has the correct first line
subprocess.call ("sed -i 1d {0}.enc.orig".format(sourceFilePath) ,shell=True)
#.enc.orig now has the rest without the first line
subprocess.call ("cat {0}.enc.orig >> {0}.enc".format(sourceFilePath) ,shell=True)
#.enc now is ready. Should delete .enc.orig
subprocess.call ("rm {0}.enc.orig".format(sourceFilePath) ,shell=True)
print ("\n%s: Encoding successful. Now putting..." % (sourceFilePath))
except:
warnings.warn("%s: Failed to encode." % (sourceFilePath), RuntimeWarning)
return
fileToPut = sourceFilePath + '.enc'
fileFromPut = destFilePath + '.enc'
#Read source file
try:
time.sleep(0.1)
inFile = open(fileToPut, "r")
lines = inFile.readlines()
inFile.close()
except:
warnings.warn("%s: Cannot open or file does not exist. Press Enter to continue..." % (fileToPut), SyntaxWarning)
return
####MACROS
numLinesPerPacket = 50
waitBetweenLines = 0.1
flushTimeout = 0.2
#####
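# Worked example (hypothetical file): 120 lines with numLinesPerPacket = 50 gives ceil(120/50) = 3
# packets; within a packet, any line longer than maxLimit characters is further split into
# ceil(len(line)/maxLimit) echo chunks by the loops below.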
numPackets = ((len(lines)-1) // numLinesPerPacket ) + 1 #ceil division
print ("\n%s will be divided into %d packets."%(sourceFilePath,numPackets))
echoPrefix = "echo -n -e \""
for iPacket in range(numPackets):
iFileFromPut = fileFromPut + ".{0}".format(iPacket)
echoSuffix = "\" >> " + iFileFromPut
maxLimit = 255-len(echoPrefix)-len(echoSuffix)
iLinesLimit = min((iPacket+1)*numLinesPerPacket,len(lines))
for line in lines[iPacket*numLinesPerPacket:iLinesLimit]:
numChunks = ((len(line)-1) // maxLimit) + 1 #ceil division
for iChunk in range(0,len(line),maxLimit):
iLimit = min(iChunk+maxLimit, len(line))
self.gfe.uart_session.write(echoPrefix + line[iChunk:iLimit] + echoSuffix + '\r')
time.sleep(waitBetweenLines)
self.flush_uart_out(flushTimeout)
print (">>> Packet %d/%d done."%(iPacket+1,numPackets))
print ("\nPutting complete. Now combining the packets...")
for iPacket in range(numPackets):
self.gfe.uart_session.write( "cat {0}.{1} >> {0}".format(fileFromPut,iPacket) + '\r')
time.sleep(flushTimeout)
self.gfe.uart_session.write( "rm {0}.{1}".format(fileFromPut,iPacket) + '\r')
self.flush_uart_out(flushTimeout)
if (isBinary):
print ("\nCombining complete. Now decoding...")
subprocess.call("rm " + fileToPut ,shell=True)
self.gfe.uart_session.write('uudecode <' + fileFromPut + '\r')
time.sleep (1)
self.gfe.uart_session.write('rm ' + fileFromPut + '\r')
time.sleep (1)
print ("\nDecoding successful.")
else:
print ("\nCombining complete.")
print ("\n Transmission speed was %d bytes in %d seconds." %(os.path.getsize(sourceFilePath), time.time()-beginTime))
return
def interactive_terminal (self):
self.gfe.uart_session.write('stty -echo\r')
print ("\nStarting interactive terminal...")
stopReading = threading.Event() #event to stop the reading process in the end
runReading = threading.Event() #event to run/pause the reading process
readThread = threading.Thread(target=self.read_uart_out_until_stop, args=(runReading,stopReading))
stopReading.clear()
runReading.set()
readThread.start() #start the reading
warnings.simplefilter ("always")
formatwarning_orig = warnings.formatwarning
warnings.formatwarning = lambda message, category, filename, lineno, line=None: \
formatwarning_orig(message, category, filename, lineno, line='')
exitTerminal = False
while (not exitTerminal):
instruction = raw_input ("")
if (len(instruction)>2 and instruction[0:2]=='--'): #instruction to the interpreter
if (instruction[2:6] == 'exit'): #exit terminal and end test
exitTerminal = True
elif (instruction[2:6] == 'sput'): #copy a file from local to linux
sputbMatch = re.match(r'--sput -b (?P<sourceFilePath>[\w/.~-]+) (?P<destFilePath>[\w/.~-]+)\s*',instruction)
sputMatch = re.match(r'--sput (?P<sourceFilePath>[\w/.~-]+) (?P<destFilePath>[\w/.~-]+)\s*',instruction)
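# Example commands accepted by the two regexes above (paths are illustrative):
#   --sput ~/payload.txt /root/payload.txt
#   --sput -b ~/payload.bin /root/payload.bin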
if (sputbMatch != None):
runReading.clear() #pause reading
self.interpreter_sput(sputbMatch.group('sourceFilePath'), sputbMatch.group('destFilePath'), isBinary=True)
runReading.set()
elif (sputMatch != None):
runReading.clear() #pause reading
self.interpreter_sput(sputMatch.group('sourceFilePath'), sputMatch.group('destFilePath'))
runReading.set()
else:
warnings.warn("Please use \"--sput [-b] sourceFilePath destFilePath\". Press Enter to continue...", SyntaxWarning)
else:
warnings.warn("Interpreter command not found. Press Enter to continue...", SyntaxWarning)
else:
self.gfe.uart_session.write(instruction + '\r')
time.sleep(1)
stopReading.set()
return
class RunOnLinux (TestLinux, BaseGfeForLinux):
def test_busybox_terminal (self):
# Boot busybox
self.boot_linux()
linux_boot_timeout=60
print("Running elf with a timeout of {}s".format(linux_boot_timeout))
# Check that busybox reached activation screen
self.check_uart_out(
timeout=linux_boot_timeout,
expected_contents=["Please press Enter to activate this console"])
# Send "Enter" to activate console
self.gfe.uart_session.write(b'\r')
time.sleep(1)
#start interactive terminal
self.interactive_terminal()
time.sleep(2)
return
def test_debian_terminal(self):
# Boot Debian
self.boot_linux()
linux_boot_timeout=800
print("Running elf with a timeout of {}s".format(linux_boot_timeout))
# Check that Debian booted
self.check_uart_out(
timeout=linux_boot_timeout,
expected_contents=[ "Debian GNU/Linux 10",
"login:"
])
# Login to Debian
self.gfe.uart_session.write(b'root\r')
# Check for password prompt and enter password
self.check_uart_out(timeout=5, expected_contents=["Password"])
self.gfe.uart_session.write(b'riscv\r')
# Check for command line prompt
self.check_uart_out(
timeout=15,
expected_contents=["The programs included with the Debian GNU/Linux system are free software;",
":~#"
])
time.sleep(1)
self.interactive_terminal()
time.sleep(2)
return
if __name__ == '__main__':
unittest.main()
|
upnp.py
|
import logging
import threading
from queue import Queue
from typing import Optional
try:
import miniupnpc
except ImportError:
pass
log = logging.getLogger(__name__)
class UPnP:
thread: Optional[threading.Thread] = None
queue: Queue = Queue()
def __init__(self):
def run():
try:
self.upnp = miniupnpc.UPnP()
self.upnp.discoverdelay = 30
self.upnp.discover()
self.upnp.selectigd()
keep_going = True
while keep_going:
msg = self.queue.get()
if msg[0] == "remap":
port = msg[1]
log.info(f"Attempting to enable UPnP (open up port {port})")
try:
self.upnp.deleteportmapping(port, "TCP")
except Exception as e:
log.info(f"Removal of previous portmapping failed. This does not indicate an error: {e}")
self.upnp.addportmapping(port, "TCP", self.upnp.lanaddr, port, "chinilla", "")
log.info(
f"Port {port} opened with UPnP. lanaddr {self.upnp.lanaddr} "
f"external: {self.upnp.externalipaddress()}"
)
elif msg[0] == "release":
port = msg[1]
log.info(f"UPnP, releasing port {port}")
self.upnp.deleteportmapping(port, "TCP")
log.info(f"UPnP, Port {port} closed")
elif msg[0] == "shutdown":
keep_going = False
except Exception as e:
log.info(
"UPnP failed. This is not required to run chinilla, it allows incoming connections from other "
"peers."
)
log.info(e)
self.thread = threading.Thread(target=run)
self.thread.start()
def remap(self, port):
self.queue.put(("remap", port))
def release(self, port):
self.queue.put(("release", port))
def shutdown(self):
if not self.thread:
return
self.queue.put(("shutdown",))
log.info("UPnP, shutting down thread")
self.thread.join(5)
self.thread = None
# this is here just in case the UPnP object is destroyed non-gracefully,
# e.g. via an exception before the main thread can call shutdown()
def __del__(self):
self.shutdown()
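# Minimal usage sketch (the port number is illustrative): map a TCP port via the background
# thread's message queue, then release it and stop the thread.
#
#     upnp = UPnP()
#     upnp.remap(8444)
#     ...
#     upnp.release(8444)
#     upnp.shutdown()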
|
ex6.py
|
import os
import threading,time
l=[]
i=0
while i<3:
x=input("enter the ip address=")
l.append(x)
i=i+1
def ping(ip):
os.system("ping "+ip)
i=0
while i<len(l):
# if threading.active_count() < len(l):
t=threading.Thread(target=ping,args=(l[i],))
t.start()
i=i+1
time.sleep(20)
|
zz_reload_default_package.py
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import sublime
import sublime_plugin
import os
import sys
import shutil
import json
import time
import filecmp
import hashlib
import textwrap
import traceback
import threading
from collections import OrderedDict
skip_packing = False
_lock = threading.Lock()
try:
from PackagesManager.package_control.package_manager import PackageManager
from PackagesManager.package_control.package_disabler_iterator import IgnoredPackagesBugFixer
except ImportError:
skip_packing = True
PACKAGE_ROOT_DIRECTORY = os.path.dirname( os.path.dirname( os.path.realpath( __file__ ) ) )
SETTINGS_PACKAGE_NAME = '0_settings_loader'
SETTINGS_PACKAGE_DIRECTORY = os.path.join( PACKAGE_ROOT_DIRECTORY, SETTINGS_PACKAGE_NAME )
README_PACKAGE_FILE = os.path.join( SETTINGS_PACKAGE_DIRECTORY, 'README.md' )
DEFAULT_PACKAGE_DIRECTORY = os.path.join( PACKAGE_ROOT_DIRECTORY, 'Default' )
g_settings_files = \
[
"Default (Linux).sublime-mousemap.hide",
"Default (Linux).sublime-keymap.hide",
"Default (OSX).sublime-keymap.hide",
"Default (OSX).sublime-mousemap.hide",
"Default (Windows).sublime-mousemap.hide",
"Default (Windows).sublime-keymap.hide",
"Preferences (Linux).sublime-settings.hide",
"Preferences (OSX).sublime-settings.hide",
"Preferences (Windows).sublime-settings.hide",
"Preferences.sublime-settings.hide",
]
def compare_text_with_file(input_text, file):
"""
Return `True` when the provided text and the `file` contents are equal.
"""
if os.path.exists( file ):
with open( file, "r", encoding='utf-8' ) as file:
text = file.read()
return input_text == text
def create_reloader():
reloader_code = \
r"""
import os
import stat
import shutil
from sublime_plugin import reload_plugin
'''
Reload overridden `Default.sublime-package` files because, by default, Sublime Text does not
reload the overridden `Default` package modules in `Packages/Default` on start up.
'''
VERSION = '1.0.0'
CURRENT_FILE_NAME = os.path.basename( __file__ )
DEFAULT_PACKAGE_NAME = 'Default'
THIS_PACKAGE_ROOT = os.path.dirname( os.path.realpath( __file__ ) )
PACKAGE_ROOT_DIRECTORY = os.path.dirname( os.path.dirname( os.path.realpath( __file__ ) ) )
DEFAULT_PACKAGE_DIRECTORY = os.path.join( PACKAGE_ROOT_DIRECTORY, DEFAULT_PACKAGE_NAME )
ORIGINAL_RELOADER_PATH = os.path.join( DEFAULT_PACKAGE_DIRECTORY, CURRENT_FILE_NAME )
def safe_remove(path):
try:
os.remove( path )
except Exception as error:
print( "[zz_reload_default_package.py] Failed to remove `%s`. Error is: %s" % ( path, error) )
try:
delete_read_only_file(path)
except Exception as error:
print( "[zz_reload_default_package.py] Failed to remove `%s`. Error is: %s" % ( path, error) )
def delete_read_only_file(path):
_delete_read_only_file( None, path, None )
def _delete_read_only_file(action, name, exc):
os.chmod( name, stat.S_IWRITE )
os.remove( name )
def reload_default_package():
for file_name in os.listdir( DEFAULT_PACKAGE_DIRECTORY ):
full_path = os.path.join( DEFAULT_PACKAGE_DIRECTORY, file_name )
if not os.path.isdir( full_path ) \
and file_name != CURRENT_FILE_NAME:
if file_name.endswith( '.py' ):
plugin_name = "%s.%s" % ( DEFAULT_PACKAGE_NAME, file_name[:-3] )
reload_plugin( plugin_name )
try:
reload_default_package()
except FileNotFoundError:
pass
# Remove itself if the Default package is not found
if not os.path.exists( ORIGINAL_RELOADER_PATH ):
print("[zz_reload_default_package.py] %s Uninstalling %s... Because the %s package was not found installed at %s." % (
VERSION, THIS_PACKAGE_ROOT, DEFAULT_PACKAGE_NAME, ORIGINAL_RELOADER_PATH ) )
shutil.rmtree( THIS_PACKAGE_ROOT, onerror=_delete_read_only_file )
"""
reloader_code = textwrap.dedent(reloader_code).lstrip()
CURRENT_FILE_NAME = os.path.basename( __file__ )
RELOADER_PACKAGE_NAME = 'ZzzReloadDefaultPackage'
RELOADER_PACKAGE_DIRECTORY = os.path.join( PACKAGE_ROOT_DIRECTORY, RELOADER_PACKAGE_NAME )
RELOADER_PACKAGE_FILE = os.path.join( RELOADER_PACKAGE_DIRECTORY, CURRENT_FILE_NAME )
PYTHON_VERSION_FILE = os.path.join( RELOADER_PACKAGE_DIRECTORY, '.python-version' )
if not os.path.exists( RELOADER_PACKAGE_DIRECTORY ):
os.makedirs( RELOADER_PACKAGE_DIRECTORY )
if not compare_text_with_file(reloader_code, RELOADER_PACKAGE_FILE):
with open( PYTHON_VERSION_FILE, 'w', newline='\n', encoding='utf-8' ) as output_file:
output_file.write( '3.8' )
with open( RELOADER_PACKAGE_FILE, 'w', newline='\n', encoding='utf-8' ) as output_file:
print( "[zz_reload_default_package.py] Updating the plugin file: %s" % RELOADER_PACKAGE_FILE )
output_file.write( reloader_code )
def compute_file_hash(file_path):
""" https://stackoverflow.com/questions/22058048/hashing-a-file-in-python """
# BUF_SIZE is totally arbitrary, change for your app!
# lets read stuff in 64kb chunks!
BUF_SIZE = 65536
sha256 = hashlib.sha256()
with open( file_path, 'rb' ) as file:
while True:
data = file.read( BUF_SIZE )
if not data: break
sha256.update( data )
return sha256.hexdigest()
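# Example: compute_file_hash(file_path) returns a 64-character hexadecimal SHA-256 digest;
# check_settings_changes() below compares these digests, for every file in g_settings_files, against
# the values cached in zz_reload_default_package.json to detect which hidden setting files changed.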
def check_settings_changes():
settings_package_file = os.path.join( sublime.installed_packages_path(), "%s.sublime-package" % SETTINGS_PACKAGE_NAME )
if not os.path.exists( settings_package_file ): return True
has_changed_hashes = False
hashes_cache_path = os.path.join( sublime.cache_path(), "zz_reload_default_package.json" )
clean_file_hashes = {}
loaded_file_hashes = load_data_file( hashes_cache_path )
for file_name in g_settings_files:
file_path = os.path.join( DEFAULT_PACKAGE_DIRECTORY, file_name )
current_hash = compute_file_hash( file_path )
if file_name in loaded_file_hashes:
if current_hash != loaded_file_hashes[file_name]:
has_changed_hashes = True
print( "[zz_reload_default_package.py] Hash change for setting file: %s" % file_path )
else:
has_changed_hashes = True
clean_file_hashes[file_name] = current_hash
if clean_file_hashes != loaded_file_hashes:
write_data_file( hashes_cache_path, clean_file_hashes )
return has_changed_hashes
def create_settings_loader():
notice_code = \
"""
# 0 Settings Loader
Loads a customized version of the Sublime Text settings before all the other packages.
Then it allows other packages to override the customized Sublime Text settings.
Otherwise, if these files are loaded with the `Default` package,
they will override all the packages loaded before this package.
Thread:
1. Is possible to package override a Sublime Text Default keybinding?
https://forum.sublimetext.com/t/is-possible-to-package-override-a-sublime-text-default-keybinding/31688
"""
notice_code = textwrap.dedent(notice_code).lstrip()
if not os.path.exists( SETTINGS_PACKAGE_DIRECTORY ):
os.makedirs( SETTINGS_PACKAGE_DIRECTORY )
if not compare_text_with_file(notice_code, README_PACKAGE_FILE):
with open( os.path.join( SETTINGS_PACKAGE_DIRECTORY, '.python-version' ), 'w', newline='\n', encoding='utf-8' ) as output_file:
output_file.write( '3.8' )
with open( os.path.join( SETTINGS_PACKAGE_DIRECTORY, 'loading.py' ), 'w', newline='\n', encoding='utf-8' ) as output_file:
output_file.write( 'import sys\n' )
output_file.write( '# dummy file just to confirm when this package is loaded by Sublime Text\n' )
with open( README_PACKAGE_FILE, 'w', newline='\n', encoding='utf-8' ) as output_file:
print( "[zz_reload_default_package.py] Updating the documentation file: %s" % README_PACKAGE_FILE )
output_file.write( notice_code )
for file in g_settings_files:
full_path = os.path.join( DEFAULT_PACKAGE_DIRECTORY, file )
full_destine_path = os.path.join( SETTINGS_PACKAGE_DIRECTORY, file.rstrip('.hide') )
if os.path.exists( full_destine_path ):
# https://stackoverflow.com/questions/254350/in-python-is-there-a-concise-way-of-comparing-whether-the-contents-of-two-text
if filecmp.cmp( full_path, full_destine_path, shallow=False ):
continue
if not os.path.exists( full_path ):
print( "[zz_reload_default_package.py] Error, the source setting file `%s` does not exists!" % full_path )
continue
print( "[zz_reload_default_package.py] Updating the setting file: %s" % full_path )
shutil.copyfile( full_path, full_destine_path )
print( "" )
if skip_packing:
print( "[zz_reload_default_package.py] Warning:\n"
" Skipping packing %s because PackagesManager was not found installed..." % SETTINGS_PACKAGE_NAME )
return
manager = PackageManager()
settings_package_file = os.path.join( sublime.installed_packages_path(), "%s.sublime-package" % SETTINGS_PACKAGE_NAME )
settings_package_cache = os.path.join( sublime.cache_path(), "%s.sublime-package" % SETTINGS_PACKAGE_NAME )
if manager.create_package( SETTINGS_PACKAGE_NAME, sublime.cache_path() ):
shutil.rmtree( SETTINGS_PACKAGE_DIRECTORY )
print( "[zz_reload_default_package.py] Creating the package file %s" % settings_package_cache )
else:
print( "[zz_reload_default_package.py] Error: Could not create the package file %s" % settings_package_cache )
for package in IgnoredPackagesBugFixer( [SETTINGS_PACKAGE_NAME], "upgrade" ):
shutil.move( settings_package_cache, settings_package_file )
print( "[zz_reload_default_package.py] Finished installing the package file %s" % settings_package_file )
def run_operations():
if _lock.locked():
print( "[zz_reload_default_package.py] Cannot run because it is already running!" )
return
_lock.acquire()
try:
create_reloader()
if check_settings_changes():
create_settings_loader()
else:
print( "[zz_reload_default_package.py] No changes in any settings file!" )
except Exception:
raise
finally:
_lock.release()
class ReloadHiddenDefaultSettingsCommand(sublime_plugin.WindowCommand):
def run(self):
print( "[zz_reload_default_package.py] Running Default Package Hidden Settings Reload..." )
plugin_loaded()
def plugin_loaded():
threading.Thread( target=run_operations ).start()
def write_data_file(file_path, dictionary_data):
with open( file_path, 'w', newline='\n', encoding='utf-8' ) as output_file:
json.dump( dictionary_data, output_file, indent='\t', separators=(',', ': ') )
output_file.write('\n')
def load_data_file(file_path, wait_on_error=True):
"""
Attempt to read the file a few times when there is a value error. This can happen when the
file is currently being written by Sublime Text.
"""
dictionary_data = {}
if os.path.exists( file_path ):
maximum_attempts = 3
while maximum_attempts > 0:
try:
with open( file_path, 'r', encoding='utf-8' ) as studio_channel_data:
return json.load( studio_channel_data, object_pairs_hook=OrderedDict )
except ValueError as error:
print( "[zz_reload_default_package.py] Error, maximum_attempts %d, load_data_file: %s, %s" % (
maximum_attempts, file_path, error ) )
maximum_attempts -= 1
if wait_on_error:
time.sleep( 0.1 )
if maximum_attempts < 1:
print( "[zz_reload_default_package.py] Error: maximum_attempts exausted on file_path: %s" % file_path )
else:
print( "[zz_reload_default_package.py] Error on load_data_file(1), the file '%s' does not exists! \n%s\n" % (
file_path, "".join( traceback.format_stack() ) ) )
return dictionary_data
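# Illustrative sketch (hypothetical helper, never called by the plugin): shows the
# intended round trip between write_data_file() and load_data_file(), which preserves
# key order via OrderedDict. The file name below is only an example.
def _example_settings_round_trip():
    example_path = os.path.join( SETTINGS_PACKAGE_DIRECTORY, 'Example.sublime-settings' )
    write_data_file( example_path, OrderedDict( [('key', 'value')] ) )
    return load_data_file( example_path )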
|
get_Position_priodicaly.py
|
#!/usr/bin/python3
# C:\Work\Python\HID_Util\src\get_Position_priodicaly.py
# based on: get_FW_version.py
# date: 28-08-21
from binascii import hexlify
import sys
import argparse
import threading
from time import perf_counter as timer
from time import sleep
# NOTE: include_dll_path works around the __init__.py / DLL path error.
# You MUST keep the next import when working with the full project path structure.
import include_dll_path
import hid
import os
# VENDOR_ID = 0x24b3 # Simb
# PRODUCT_ID = 0x1005 # Simb MSP430 Controller
# USB\VID_2047&PID_0302&REV_0200
VENDOR_ID = 0x2047 # Texas Instruments
PRODUCT_ID = 0x0302 # Joystick.
PRODUCT_ID_JOYSTICK = 0x0302 # Joystick.
PRODUCT_ID_ROUTER = 0x0301 # Router
PRODUCT_ID_STATION = 0x0304
PRODUCT_ID_LAP_NEW_CAMERA = 0x2005
# 2021_01_24
# USB\VID_24B3&PID_2005&REV_0200
# 0x24B3 = 9395
# 0x2005 = 8197
# VENDOR_ID = 0x24b3 # Simb
# PRODUCT_ID = 0x2005 # LAP_NEW_CAMERA.
PRODUCT_ID_types = {
0x0302: "BOARD_TYPE: Joystick/Universal",
0x0301: "BOARD_TYPE: Router/Main",
0x0304: "BOARD_TYPE: STATION",
0x0303: "BOARD_TYPE: TOOLS_MASTER",
0x0305: "BOARD_TYPE: SUITE2PRIPH",
0x0306: "BOARD_TYPE: TOOLS_SLAVE",
0x0307: "BOARD_TYPE: GBU",
0x0308: "BOARD_TYPE: LAP camera",
0x2005: "BOARD_TYPE: PRODUCT_ID_LAP_NEW_CAMERA", #board type is enforced in FW (descriptors.h)
0x1965: "yosi"
}
FILE1_PATH = "log\hid_log.csv"
# if not os.path.exists('log'):
# os.makedirs('log')
# # file1 = None
# # open recording log file:
# # file1 = open("C:\Work\Python\HID_Util\src\log\log.csv","w")
# # file1 = open(FILE1_PATH,"w")
# file1 = open("log\get_FW_version_2021_03_11__00_42.csv","w")
hid_util_fault = 0
print_every = 0
READ_SIZE = 64 # The size of the packet
READ_TIMEOUT = 2 # 2ms
WRITE_DATA = bytes.fromhex("3f3ebb00b127ff00ff00ff00ffffffff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
DEFAULT_WRITE_DATA = WRITE_DATA
WRITE_DATA_CMD_I = bytes.fromhex("3f3ebb00b127ff00ff00ff0049ff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# start streaming command:
# 3f 04 82 00 00
WRITE_DATA_CMD_START = bytes.fromhex("3f048200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
WRITE_DATA_CMD_START_ = bytes.fromhex("3f048200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# start streaming command for station 0x304:
WRITE_DATA_CMD_START_0x304 = bytes.fromhex("3f048d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# Get Board Type command:
# 01h 00h 00h 01h
WRITE_DATA_CMD_GET_BOARD_TYPE = bytes.fromhex("3f040100000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
#.........................................................##........................................
WRITE_DATA_CMD_S = bytes.fromhex("3f3ebb00b127ff00ff00ff0053ff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# 'A' - keep Alive + fast BLE update (every 20 msec)
WRITE_DATA_CMD_A = bytes.fromhex("3f3ebb00b127ff00ff00ff0041ff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
WRITE_DATA_CMD_GET_FW_VERSION = bytes.fromhex("3f040600000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# WRITE_DATA_CMD_PRIME_KEEP_ALIVE = bytes.fromhex("3f040400000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
WRITE_DATA_CMD_PRIME_KEEP_ALIVE = bytes.fromhex("3f040400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# //sends 86 00 00 01 for 10 seconds
WRITE_DATA_CMD_PRIME_GET_MOT_1_POS = bytes.fromhex("3f048600000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# moderate BLE update rate every 50 mSec by 'M' command
WRITE_DATA_CMD_M = bytes.fromhex("3f3ebb00b127ff00ff00ff004dff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# set_BSL_mode
# WRITE_DATA_CMD_B = bytes.fromhex("3f3eaa00b127ff00ff00ff004dff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
#0xAA Run BSL
WRITE_DATA_CMD_B = bytes.fromhex("3f04aa00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
SLEEP_AMOUNT = 0.002 # Read from HID every 2 milliseconds
PRINT_TIME = 1.0 # Print every 1 second
# PRINT_TIME = 0.5 # Print every 0.5 second
#PRINT_TIME = 2 # Print every 2 second
START_INDEX = 2 + 4 # Ignore the first two bytes, then skip the version (4 bytes)
# ANALOG_INDEX_LIST = list(range(START_INDEX + 2, START_INDEX + 4 * 2 + 1, 2)) + [START_INDEX + 6 * 2,]
ANALOG_INDEX_LIST = list(range(START_INDEX + 2, START_INDEX + 8 * 2 + 1, 2))
# print("ANALOG_INDEX_LIST=",ANALOG_INDEX_LIST)
# ANALOG_INDEX_LIST= [8, 10, 12, 14, 16, 18, 20, 22]
LAP_ANALOG_INDEX_LIST = list(range(2,8 * 2 + 1, 2))
COUNTER_INDEX = 2 + 22 + 18 # Ignore the first two bytes, then skip XData1 (22 bytes) and OverSample (==XDataSlave1; 18 bytes)
CMOS_INDEX = 2 + 2 # maybe + 4???
# 0 1 2 3 4 5 6 7 8 9 1011
# Received data: b'3f26 00 00 00 00 0674fc41 3f4efc70 0033a4513c5a0101210001000000650000000000000000000000167f070dd7aee89baff63fedcfcccb763acf041b00000010'
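# Illustrative helper (hypothetical, mirroring the decode used in main_loop below):
# every analog channel and the packet counter are 16-bit little-endian values, so
# byte i is the low byte and byte i + 1 is the high byte.
def _decode_u16_le(packet, index):
    return (int(packet[index + 1]) << 8) + int(packet[index])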
# TORQUE INSERTION
# global variables
special_cmd = 0
root = None
def main_loop(device):
do_print = True
print_time = 0.0
time = timer()
handle_time = timer()
write_time_capture = timer()
skip_write = 0
prev_counter = 0
send_stream_request_command_once = 1
global special_cmd
global WRITE_DATA
loop_counter = 0
while True:
# Reset the counter
if (do_print):
print_time = timer()
# Write to the device
# if send_stream_request_command_once == 1:
# send_stream_request_command_once = 0
# if PRODUCT_ID == PRODUCT_ID_LAP_NEW_CAMERA:
# print("enforce streaming of data with command 0x82"
# if device is attached enforce streaming of data.
# device.write(WRITE_DATA_CMD_START)
if special_cmd == 'I':
if PRODUCT_ID == PRODUCT_ID_STATION:
WRITE_DATA = WRITE_DATA_CMD_START_0x304
else:
WRITE_DATA = WRITE_DATA_CMD_START
device.write(WRITE_DATA)
print("special_cmd Start")
special_cmd = 0
# elif special_cmd == 'S':
# WRITE_DATA = WRITE_DATA_CMD_GET_BOARD_TYPE
# device.write(WRITE_DATA)
# print("special_cmd CMD_GET_BOARD_TYPE")
# # print_flag = 1
# special_cmd = 0
elif special_cmd == 'A':
# if PRODUCT_ID == PRODUCT_ID_LAP_NEW_CAMERA:
if PRODUCT_ID in PRODUCT_ID_types:
# WRITE_DATA = WRITE_DATA_CMD_PRIME_KEEP_ALIVE
WRITE_DATA = WRITE_DATA_CMD_PRIME_GET_MOT_1_POS
# WRITE_DATA = WRITE_DATA_CMD_GET_FW_VERSION
# print("special_cmd A -> WRITE_DATA_CMD_GET_FW_VERSION")
print("special_cmd A -> WRITE_DATA_CMD_PRIME_GET_MOT_1_POS")
device.write(WRITE_DATA)
else:
WRITE_DATA = WRITE_DATA_CMD_A
print("special_cmd A -> keep Alive + fast BLE update (every 20 msec)")
# special_cmd = 0
# elif special_cmd == 'M':
# WRITE_DATA = WRITE_DATA_CMD_M
# print("special_cmd M -> moderate BLE update rate every 50 mSec")
# special_cmd = 0
elif special_cmd == 'B':
WRITE_DATA = WRITE_DATA_CMD_B
device.write(WRITE_DATA)
print("special_cmd B -> set_BSL_mode --- this will stop HID communication with this GUI")
special_cmd = 0
# else:
# WRITE_DATA = DEFAULT_WRITE_DATA
if WRITE_DATA == WRITE_DATA_CMD_B:
break
cycle_time = timer() - time
# print("cycle timer: %.10f" % cycle_time)
# If not enough time has passed, sleep for SLEEP_AMOUNT seconds
sleep_time = SLEEP_AMOUNT - (cycle_time)
# Measure the time
time = timer()
loop_counter = loop_counter +1
# Sleep for 4 ms (an extra 0.5 s pause is added every 10 iterations below)
sleep(0.004)
if loop_counter % 10 == 0:
sleep(0.5)
# Read the packet from the device
value = device.read(READ_SIZE, timeout=READ_TIMEOUT)
# Update the GUI
if len(value) >= READ_SIZE:
# save into file:
analog = [(int(value[i + 1]) << 8) + int(value[i]) for i in LAP_ANALOG_INDEX_LIST]
channel_0 = analog[0]
channel_1 = analog[1]
channel_2 = analog[2]
channel_3 = analog[3]
channel_4 = analog[4]
counter = (int(value[COUNTER_INDEX + 1]) << 8) + int(value[COUNTER_INDEX])
count_dif = counter - prev_counter
#global file1
#if count_dif > 1 :
# L = [ str(counter),", ", str(clicker_analog), ", " , str(count_dif), " <<<<<--- " ,"\n" ]
#else:
# L = [ str(counter),", ", str(clicker_analog), ", " , str(count_dif), "\n" ]
L = [ str(channel_0),", ", str(channel_1), ", " , str(channel_2),", " , str(channel_3),", " , str(channel_4), "\n" ]
#file1.writelines(L)
# no handler for keep alive
handler(value, do_print=do_print)
# print("Received data: %s" % hexlify(value))
Handler_Called = (timer() - handle_time)
if Handler_Called > 0.002 :
# if Handler_Called > 0.02 :
#print("handler called: %.6f" % Handler_Called)
global print_every
print_every = print_every + 1
if print_every >= 500:
print_every = 0
print("time:", time, end="")
print(" Received data: %s" % hexlify(value))
# print("time: %.6f" % time)
handle_time = timer()
prev_counter = counter
# Update the do_print flag
do_print = (timer() - print_time) >= PRINT_TIME
def date2dec(x):
# The firmware packs each date field as two decimal digits per byte (BCD-style),
# so formatting the byte as two hex digits recovers the decimal string (e.g. 0x21 -> "21").
s = "%02x" % x
return s
def handler(value, do_print=False):
if do_print:
print("Received data: %s" % hexlify(value))
# parsing FW version response :
if value[2] == 6 and value[3] == 6 and value[4] == 0 and value[5] == 1:
print("FW friendly version: %s" % hexlify(value))
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0
# b'3f0a06060001 030004060321 d6bb2c3fc2b49c3fe877fecef602fffe5787dedfcf750cfb129efe7ffd7ed60daedefca4f9fff58efc5eb47c237eb5a93dd72f55'
print("")
print("FW version: "+str(value[6])+"." +str(value[7])+"." +str(value[8]))
print("FW date : "+date2dec(value[9])+"/" +date2dec(value[10])+"/20" +date2dec(value[11]))
print(" ")
print(" Please press <Enter> to Exit")
return # do without gui
PROGRESS_BAR_LEN = 300
LONG_PROGRESS_BAR_LEN = 590
def init_parser():
parser = argparse.ArgumentParser(
description="Read the HID data from target board.\nIf no argument is given, the program exits."
)
parser.add_argument(
"-v", "--vendor",
dest="vendor_id",
metavar="VENDOR_ID",
type=int,
nargs=1,
required=False,
help="connects to the device with the vendor ID"
)
parser.add_argument(
"-p", "--product",
dest="product_id",
metavar="PRODUCT_ID",
type=int,
nargs=1,
required=False,
help="connects to the device with that product ID"
)
parser.add_argument(
"-a", "--path",
dest="path",
metavar="PATH",
type=str,
nargs=1,
required=False,
help="connects to the device with the given path"
)
return parser
def main():
global VENDOR_ID
global PRODUCT_ID
PATH = None
# Parse the command line arguments
parser = init_parser()
args = parser.parse_args(sys.argv[1:])
# Initialize the flags according from the command line arguments
avail_vid = args.vendor_id != None
avail_pid = args.product_id != None
avail_path = args.path != None
id_mode = avail_pid and avail_vid
path_mode = avail_path
default_mode = (not avail_vid) and (not avail_pid) and (not avail_path)
if (path_mode and (avail_pid or avail_vid)):
print("The path argument can't be mixed with the ID arguments")
return
if ((not avail_path) and ((avail_pid and (not avail_vid)) or ((not avail_pid) and avail_vid))):
print("Both the product ID and the vendor ID must be given as arguments")
return
if (default_mode):
print("No arguments were given, defaulting to:")
print("VENDOR_ID = %X" % VENDOR_ID)
print("PRODUCT_ID = %X" % PRODUCT_ID)
id_mode = True
elif (id_mode):
VENDOR_ID = args.vendor_id[0]
PRODUCT_ID = args.product_id[0] #run over with 772 == 0x304
elif (path_mode):
PATH = args.path[0]
else:
raise NotImplementedError
device = None
try:
if (id_mode):
try:
print("try with default device:")
print("VENDOR_ID = %X" % VENDOR_ID)
print("PRODUCT_ID = %X" % PRODUCT_ID)
device = hid.Device(vid=VENDOR_ID, pid=PRODUCT_ID)
except:
print("wrong ID")
print(" ")
# 0x24B3 = 9395
# 0x2005 = 8197
for n in range(7):
if device is None:
try:
# print("try with other device")
VENDOR_ID = 0x24b3 # Simb
PRODUCT_ID = 0x2000 + n # LAP_NEW_CAMERA. is 0x2005
# print("VID = %X PID = %X " % VENDOR_ID, PRODUCT_ID)
print("try with PID = %X " % PRODUCT_ID)
# print("PRODUCT_ID = %X" % PRODUCT_ID)
device = hid.Device(vid=VENDOR_ID, pid=PRODUCT_ID)
# device = hid.Device(vid=0x24B3, pid=0x2005)
# print("success vid=0x24B3, pid=0x2005 !!")
except:
print("wrong ID2")
# VENDOR_ID = 2047
# PRODUCT_ID = 304
# 0x2047 = 8263
# 0x304 = 772
# 0x0301 // Product ID (PID) - base for Prime products family
for n in range(len(PRODUCT_ID_types)):
if device is None:
try:
# print("try with other device")
VENDOR_ID = 0x2047 # Texas Instruments
PRODUCT_ID = 0x301 + n # BOARD_TYPE_MAIN is 0x301
print("try with PID = %X " % PRODUCT_ID)
device = hid.Device(vid=VENDOR_ID, pid=PRODUCT_ID)
except:
print("wrong ID2")
if device is None:
print("no device attached")
else:
print("VENDOR_ID = %X" % VENDOR_ID)
print("PRODUCT_ID = %X" % PRODUCT_ID)
if PRODUCT_ID in PRODUCT_ID_types:
print(PRODUCT_ID_types[PRODUCT_ID])
global special_cmd
# if PRODUCT_ID == PRODUCT_ID_LAP_NEW_CAMERA:
if PRODUCT_ID in PRODUCT_ID_types:
special_cmd = 'A'
print("set in init: special_cmd = 'A'")
elif (path_mode):
device = hid.Device(path=PATH)
else:
raise NotImplementedError
# Create thread that calls
threading.Thread(target=main_loop, args=(device,), daemon=True).start()
global WRITE_DATA
if WRITE_DATA == WRITE_DATA_CMD_B:
print("WRITE_DATA == WRITE_DATA_CMD_B")
# print(" Recording Ended !!!")
print(" ")
# print(" Please press <Enter> to Exit")
input()
finally:
# global file1
# file1.close() #to change file access modes
if device != None:
device.close()
if __name__ == "__main__":
main()
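# Illustrative invocations (assumed examples, not from the original script; argparse
# uses type=int, so vendor/product IDs must be given in decimal):
#   python get_Position_priodicaly.py                     # probe the built-in VID/PID list
#   python get_Position_priodicaly.py -v 8263 -p 772      # 0x2047 / 0x0304 in decimal
#   python get_Position_priodicaly.py -a <device path>    # open a device by its HID path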
|
benchmark_persistent_actor.py
|
#!/usr/bin/env python3
"""Benchmark for rays ownership system.
Based on https://github.com/stephanie-wang/ownership-nsdi2021-artifact/\
blob/main/recovery-microbenchmark/reconstruction.py
"""
import abc
import argparse
import csv
import numpy as np
import os
import os.path
import time
from queue import SimpleQueue as Queue
from tempfile import TemporaryDirectory
from threading import Thread
from typing import Any, Final, Generic, List, Sequence, Type, TypeVar, Union, final
import ray
from ray.cluster_utils import Cluster
from ray.node import Node
from persistent_actor import ActorState, safe_state
LARGE_ARRAY_SIZE: Final[int] = 10 * 1024 * 1024
NUM_NODES: Final[int] = 1
T = TypeVar('T')
class TestActor(abc.ABC, Generic[T]):
"""Abstract base class for actor classes used for this benchmark."""
def __init__(self, _: str = "", can_die: bool = False) -> None:
self.can_die: bool = can_die
@final
def die(self) -> None:
if not self.can_die:
return
self.can_die = False
os._exit(0)
@final
def enable_die(self) -> None:
self.can_die = True
@abc.abstractmethod
def get_data(self) -> T:
...
@abc.abstractmethod
def _process(self, arg: T) -> T:
...
@final
def process(self, delay_ms: float, arg: T) -> T:
time.sleep(delay_ms / 1000)
return self._process(arg)
@abc.abstractmethod
def to_string(self) -> str:
...
@ray.remote(max_restarts=-1, max_task_retries=-1)
class SmallActor(TestActor[int]):
def __init__(self, _: str = "", can_die: bool = False) -> None:
TestActor.__init__(self, can_die=can_die)
self.counter: int = 1
def get_data(self) -> int:
return self.counter
def _process(self, arg: int) -> int:
self.counter += arg
return self.counter
def to_string(self) -> str:
return f"{self.counter}"
@ray.remote(max_restarts=-1, max_task_retries=-1)
class SmallActorSafe(TestActor[int]):
def __init__(self, state_file_path: str, can_die: bool = False) -> None:
TestActor.__init__(self, state_file_path, can_die)
self.state: ActorState = ActorState(state_file_path)
self.counter: int = self.state.get("counter", 1)
def get_data(self) -> int:
return self.counter
@safe_state("counter")
def _process(self, arg: int) -> int:
self.counter += arg
return self.counter
def to_string(self) -> str:
return f"{self.counter}"
@ray.remote(max_restarts=-1, max_task_retries=-1)
class LargeActor(TestActor[np.ndarray]):
def __init__(self, _: str = "", can_die: bool = False) -> None:
TestActor.__init__(self, can_die=can_die)
self.array: np.ndarray = np.ones(LARGE_ARRAY_SIZE, dtype=np.uint8)
def get_data(self) -> np.ndarray:
return self.array
def _process(self, arg: np.ndarray) -> np.ndarray:
self.array += arg
return self.array
def to_string(self) -> str:
return f"{self.array}"
@ray.remote(max_restarts=-1, max_task_retries=-1)
class LargeActorSafe(TestActor[np.ndarray]):
def __init__(self, state_file_path: str, can_die: bool = False) -> None:
TestActor.__init__(self, state_file_path, can_die)
self.state: ActorState = ActorState(state_file_path)
self.array: np.ndarray = self.state.get("array", np.ones(LARGE_ARRAY_SIZE, dtype=np.uint8))
def get_data(self) -> np.ndarray:
return self.array
@safe_state("array")
def _process(self, arg: np.ndarray) -> np.ndarray:
self.array += arg
return self.array
def to_string(self) -> str:
return f"{self.array}"
def benchmark(args: argparse.Namespace, state_dir: str,) -> None:
actor_class: Type[TestActor[Any]]
nodes: List[Node] = ray.nodes()
while len(nodes) < NUM_NODES + 1:
time.sleep(1)
print(f"{len(nodes)} nodes found, waiting for nodes to join")
nodes = ray.nodes()
print("All nodes joined")
print(f"Running {args.num_rounds} rounds of {args.delay_ms} ms each")
if args.safe_state:
actor_class = LargeActorSafe if args.large else SmallActorSafe
else:
actor_class = LargeActor if args.large else SmallActor
print(f"Create {args.num_actors} actors ...")
actor_handles: Final[List["ray.ObjectRef[TestActor[Any]]"]] = [
create_actor(actor_class, state_dir, i) for i in range(args.num_actors)
]
# Force initialization of actors and print their state.
print(
"Initial state:",
' '.join(str(ray_get_single(ah.get_data.remote())) for ah in actor_handles)
)
print(f"Start running benchmark ...")
result: Queue[float] = Queue()
thread: Thread = Thread(target=run, args=(actor_handles, args.num_rounds, args.delay_ms, result))
thread.start()
if args.failure:
sleep: float = (args.num_rounds * args.num_actors * (args.delay_ms / 1000)) / 2
print(f"Failure in {sleep} seconds")
kill_actors(actor_handles, sleep, args.num_actors_kill)
thread.join()
duration: float = result.get_nowait()
print(f"Task delay {args.delay_ms} ms. Duration {duration}")
if args.output:
file_exists: bool = os.path.exists(args.output)
with open(args.output, 'a') as csvfile:
fieldnames: Sequence[str] = ['system', 'large', 'delay_ms', 'duration', 'failure']
writer: csv.DictWriter[Any] = csv.DictWriter(csvfile, fieldnames=fieldnames)
if not file_exists:
writer.writeheader()
writer.writerow({
'system': "safe_actor" if args.safe_state else "default",
'large': args.large,
'delay_ms': args.delay_ms,
'duration': duration,
'failure': args.failure,
})
if args.timeline:
ray.timeline(filename=args.timeline)
def create_actor(
actor_class: Type[TestActor[Any]],
state_dir: str,
i: int
) -> "ray.ObjectRef[TestActor[Any]]":
return actor_class.remote(f"{state_dir}{os.path.sep}{i}")
def kill_actors(
actor_handles: List["ray.ObjectRef[TestActor[Any]]"],
sleep: float,
num_actors_kill: int
) -> None:
time.sleep(sleep)
# Try to select the actors evenly distributed and rather in the middle.
# Example for num_actors_kill = 3 and num_actors = 20:
# --x------x------x---
index_step: int = len(actor_handles) // (num_actors_kill + 1)
for index in range(index_step // 2, len(actor_handles), index_step):
data: Any = ray_get_single(actor_handles[index].get_data.remote())
print(f"Killing {actor_handles[index]} ... State before killing: {data}")
# Caution: We need to ensure that an actor cannot die an infinite number
# of times! Otherwise, we get stuck in a loop, because technically, the
# die() method of the actor fails, so that it gets re-executed again and
# again.
ray_get_single(actor_handles[index].enable_die.remote())
ray_get_single(actor_handles[index].die.remote())
data = ray_get_single(actor_handles[index].get_data.remote())
print(f"Killed {actor_handles[index]}. State after killing: {data}")
def ray_get_single(ref: "ray.ObjectRef[Any]") -> Any:
result: Union[Any, List[Any]] = ray.get(ref)
return result[0] if isinstance(result, list) else result
def run(
actor_handles: List[TestActor[Any]],
num_rounds: int,
delay_ms: float,
queue: Queue[float]
) -> None:
# Actor handles need to be stored in a variable.
# see https://github.com/ray-project/ray/issues/6265
# Build intermediate results.
# see https://github.com/ray-project/ray/issues/3644
start: float = time.time()
for i in range(num_rounds):
print(f"Run round {i + 1} ...")
current: Any = ray_get_single(actor_handles[0 if i % 2 else -1].get_data.remote())
for ah in actor_handles[1:] if i % 2 == 0 else actor_handles[-2::-1]:
current = ray_get_single(ah.process.remote(delay_ms, current))
duration: float = time.time() - start
print(' '.join(map(lambda ah: str(ray_get_single(ah.get_data.remote())), actor_handles)))
queue.put(duration)
def main() -> None:
parser: argparse.ArgumentParser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--delay-ms", required=True, type=int,
help="amount of milliseconds to delay actor method execution"
)
parser.add_argument(
"--failure", action="store_true",
help="kill actors during execution"
)
parser.add_argument(
"--large", action="store_true",
help="enable large actor state"
)
parser.add_argument(
"--num-actors", type=int, default=20,
help="number of actors to run"
)
parser.add_argument(
"--num-actors-kill", type=int, default=3,
help="number of actors to kill when --failure is specified"
)
parser.add_argument(
"--num-rounds", type=int, default=20,
help="number of rounds to run the code"
)
parser.add_argument(
"--output", type=str, default=None,
help="filename for csv data"
)
parser.add_argument(
"--safe-state", action="store_true",
help="use prototype for persistent actor state"
)
parser.add_argument(
"--timeline", type=str, default=None,
help="output filename for timeline"
)
args: argparse.Namespace = parser.parse_args()
# see https://docs.ray.io/en/latest/auto_examples/testing-tips.html\
# #tip-4-create-a-mini-cluster-with-ray-cluster-utils-cluster
# Start a head-node for the cluster.
cluster: Cluster = Cluster(
initialize_head=True,
head_node_args={
"num_cpus": 4,
}
)
print(f"Started local cluster on {cluster.address}")
for _ in range(NUM_NODES):
node: Node = cluster.add_node()
print(f"Added node {node.node_ip_address}:{node.node_manager_port}")
print(ray.init(address=cluster.address))
with TemporaryDirectory() as state_dir:
benchmark(args, state_dir)
ray.shutdown()
if __name__ == "__main__":
main()
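# Illustrative invocations (assumed examples, not from the original benchmark):
#   python benchmark_persistent_actor.py --delay-ms 10
#   python benchmark_persistent_actor.py --delay-ms 10 --safe-state --failure --output results.csv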
|
stonks.py
|
import asyncio
import datetime
import io
import json
import os
from os import path
import threading
import time
import matplotlib.pyplot as plt
import numpy as np
import schedule
import discord
from discord.ext import commands
LOAD_STORE = threading.Lock()
def load(uid):
if path.exists(f"data/stonks/{uid}"):
with open(f"data/stonks/{uid}", "r") as f:
return json.load(f)
else:
return {'buy': {'price': None, 'quantity': None},
'price': {d: {'am': None, 'pm': None} for d in ['mon', 'tue', 'wed', 'thu', 'fri', 'sat']}}
def store(uid, data):
with open(f"data/stonks/{uid}", "w") as f:
json.dump(data, f)
def day_norm(val):
dmap = {'mon': ['monday'],
'tue': ['tues', 'tuesday'],
'wed': ['weds', 'wednesday', 'wednessday'],
'thu': ['thur', 'thurs', 'thursday'],
'fri': ['friday'],
'sat': ['saturday'],
'sun': ['sunday']}
# Creates the reverse day map, where each key maps to itself, and every entry in the value list maps to its key
idmap = {k:k for k in dmap.keys()}
idmap.update({v:k for k, vl in dmap.items() for v in vl})
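# e.g. idmap['tuesday'] == 'tue' and idmap['tue'] == 'tue'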
val = val.lower().strip()
if val in idmap:
return idmap[val]
else:
raise ValueError
def time_norm(val):
tmap = {'am': ['morn', 'morning', 'day'],
'pm': ['afternoon', 'evening', 'night']}
# Creates the reverse time map, where each key maps to itself, and every entry in the value list maps to its key
itmap = {k: k for k in tmap.keys()}
itmap.update({v: k for k, vl in tmap.items() for v in vl})
val = val.lower().strip()
if val in itmap:
return itmap[val]
else:
raise ValueError
def ax_config(ax):
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.set_xticks(range(12))
ax.set_xlim(-0.5, 11.5)
ax.set_xlabel('date')
ax.set_xticklabels(['mon - am',
'mon - pm',
'tue - am',
'tue - pm',
'wed - am',
'wed - pm',
'thu - am',
'thu - pm',
'fri - am',
'fri - pm',
'sat - am',
'sat - pm'])
ax.tick_params(axis='x', labelrotation=90)
ax.set_ylabel('price (bells)')
def data_to_nparr(data):
return np.array([
np.nan if data['price'][d][t] is None else data['price'][d][t]
for d in ['mon', 'tue', 'wed', 'thu', 'fri', 'sat']
for t in ['am', 'pm']
])
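# Note: the array always has 12 slots ordered mon-am, mon-pm, ..., sat-pm, with
# np.nan for prices that have not been recorded yet; the plotting code masks the
# NaN entries with np.isfinite so missing data points are simply skipped.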
def plot_single(data, f, hline=None):
with plt.xkcd():
fig = plt.figure(figsize=(10, 7))
ax = fig.add_axes((0.1, 0.2, 0.8, 0.7))
ax_config(ax)
d = data_to_nparr(data)
x = np.arange(0, len(d))
mask = np.isfinite(d)
line, = ax.plot(x[mask], d[mask], ls="--")
ax.plot(x, d, marker=".", color=line.get_color())
if data['buy']['price'] is not None:
ax.hlines(data['buy']['price'], -0.5, 11.5, linestyles='dotted')
ax.text(11.5, data['buy']['price'], str(data['buy']['price']), alpha=0.5, ha="left", va="center")
if hline is not None:
ax.hlines(hline, -0.5, 11.5, linestyles='dotted', color='r')
ax.text(11.5, hline, str(hline), alpha=0.5, ha="left", va="center")
for i, v in enumerate(d):
if np.isnan(v):
continue
ax.text(i-0.1, v, str(int(v)), alpha=0.5, ha="right", va="center")
fig.savefig(f, format="png")
plt.close(fig)
def plot_multi(data, f):
with plt.xkcd():
fig = plt.figure(figsize=(13, 7))
ax = fig.add_axes((0.1, 0.2, 0.6, 0.7))
ax_config(ax)
for name, _data in data.items():
d = data_to_nparr(_data)
x = np.arange(0, len(d))
mask = np.isfinite(d)
line, = ax.plot(x[mask], d[mask], ls="--")
ax.plot(x, d, marker=".", color=line.get_color(), label=name)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
fig.savefig(f, format="png")
plt.close(fig)
class Stonks(commands.Cog):
def __init__(self):
def stonk_rollover():
with LOAD_STORE:
with open('data/stonks/log', 'a') as lg:
for pth in os.listdir('data/stonks'):
try:
_ = int(pth)
except:
continue
with open(f'data/stonks/{pth}') as f:
data = json.load(f)
data['userid'] = pth
data['date'] = datetime.date.today().strftime('%Y-%m-%d')
json.dump(data, lg)
lg.write('\n')
os.remove(f'data/stonks/{pth}')
self.stonk_rollover = stonk_rollover
schedule.every().sunday.at("04:00").do(stonk_rollover)
def sch_runner():
while True:
schedule.run_pending()
time.sleep(60)
self.sch_thr = threading.Thread(target=sch_runner, daemon=True)
self.sch_thr.start()
@commands.command()
async def buy(self, ctx: commands.Context, price: int, quantity: int):
post = await ctx.send(
content=f"Ok lets get you setup!\n"
f"You bought {quantity} nips for {price} bells each?\n"
f"If that's right, hit the ✅ react to save!\n"
f"If I got my figures twisted, hit the 🔁 react to swap those numbers around.\n"
f"If you just want to bail hit the ❌ react.")
await post.add_reaction('✅')
await post.add_reaction('🔁')
await post.add_reaction('❌')
def chk(reaction, user):
return (str(reaction.emoji) in ['❌', '🔁', '✅'] and
user == ctx.author and
reaction.message.id == post.id)
try:
react, _ = await ctx.bot.wait_for("reaction_add", check=chk, timeout=300)
except asyncio.TimeoutError:
await post.delete()
return
if str(react.emoji) == "❌":
await post.edit(content="Ok, see you later!", delete_after=60)
elif str(react.emoji) == "🔁":
price, quantity = quantity, price
with LOAD_STORE:
data = load(ctx.author.id)
data['buy']['price'] = price
data['buy']['quantity'] = quantity
store(ctx.author.id, data)
await post.edit(
content=f"Ok awesome! "
f"Got you setup this week with a haul of {quantity} nips for {price} each. "
f"You have {quantity * price} bells riding on this week, hope it goes well!",
delete_after=600)
@commands.command()
async def price(self, ctx: commands.Context, day: str, time: str, price: int):
try:
day = day_norm(day)
if day == "sun":
await ctx.send(
content="Did you mean to say Sunday? "
"If so you probably want the +buy command instead for buying new nips.")
return
except ValueError:
await ctx.send(
content="I'm sorry, I couldn't recognise what day of the week you were saying. "
"Try saying something like mon, tue, wed, thu, fri or sat.")
return
try:
time = time_norm(time)
except ValueError:
await ctx.send(
content="I'm sorry, I couldn't recognise what time you said. Try saying something like am or pm.")
return
if price <= 0:
await ctx.send(
content="I'm sorry, you seem to be trying to set a price of 0 or less, that shouldn't be possible.")
return
with LOAD_STORE:
data = load(ctx.author.id)
data['price'][day][time] = price
store(ctx.author.id, data)
if data['buy']['price'] is not None:
diff = price - data['buy']['price']
total = diff * data['buy']['quantity']
await ctx.send(
content=f"Thanks!\n"
f"You set a price of {price} bells for nips on {day} {time}.\n\n"
f"If you sell your stalks today you will make a profit of {diff} bells per nip, "
f"for a total profit of {total} bells!")
else:
await ctx.send(content=f"Thanks!\n"
f"You set a price of {price} bells for nips on {day} {time}.")
@commands.command(hidden=True)
async def summary(self, ctx: commands.Context):
pass
@commands.command()
async def prophet(self, ctx: commands.Context):
with LOAD_STORE:
data = load(ctx.author.id)
fseq = "-" + "-".join([
"" if data['price'][d][t] is None else str(data['price'][d][t])
for d in ['mon', 'tue', 'wed', 'thu', 'fri', 'sat']
for t in ['am', 'pm']
])
embed = discord.Embed(title=f"**{ctx.author}'s** Market Predictions",
url=f"https://ac-turnip.com/share?f={fseq}")
embed.set_image(url=f"https://ac-turnip.com/p-{fseq}.png")
embed.description=f"https://ac-turnip.com/p-{fseq}.png"
await ctx.send(embed=embed)
@commands.command()
async def graph(self, ctx: commands.Context, other: discord.Member = None):
with LOAD_STORE:
data = load(ctx.author.id)
tmp = io.BytesIO()
if other is not None:
with LOAD_STORE:
odata = load(other.id)
plot_single(odata, tmp, hline=data['buy']['price'])
else:
plot_single(data, tmp)
tmp.seek(0)
await ctx.send(file=discord.File(tmp, filename="stonks.png"))
@commands.command()
@commands.guild_only()
async def graphall(self, ctx: commands.Context):
with LOAD_STORE:
data = {}
for pth in os.listdir('data/stonks'):
try:
uid = int(pth)
except:
continue
if ctx.guild.get_member(uid) is not None:
data[ctx.guild.get_member(uid).name] = load(uid)
tmp = io.BytesIO()
plot_multi(data, tmp)
tmp.seek(0)
await ctx.send(file=discord.File(tmp, filename="stonks.png"))
|
screenshot.py
|
#!/usr/bin/env python
# @license
# Copyright 2020 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for creating screenshots with Neuroglancer.
The Neuroglancer state may be specified either by a URL or by a path to a JSON
state file.
Rendering requires a web browser. By default, a headless chromedriver is
started in the background. It is also possible to use non-headless chromedriver
or a manually-opened browser.
There are several methods by which the screenshot image may be rendered:
1. The state can be rendered directly as a single frame by Neuroglancer. This
is the simplest and fastest method and works for most states.
2. If the output image size exceeds what Neuroglancer/the browser can support
(usually about 4096x4096), tiled rendering can be used. In this case,
Neuroglancer will render the image as multiple tiles which are assembled
automatically into a single image. This is enabled automatically if the
requested image size exceeds the specified tile dimensions. All normal
functionality is supported, except for the "show_slices" option whereby
cross-section panels are also shown in the 3-d view. Manually-specified
cross sections via the "cross_sections" property are supported, however.
3. If a very large number of 3-d objects are to be rendered, it may be
impossible for Neuroglancer to render them all simultaneously due to memory
limits. The `--segment-shard-size` option may be specified to enable a
special rendering mode in which subsets of the objects are rendered
independently and then combined together into a single image. Depth
information is used to combine the images together. Currently, transparent
rendering of objects is not supported, though. As the final image is
produced incrementally, the state is saved in a `.npz` file, which allows
resuming if the screenshot process is interrupted. To avoid resuming if you
change options, delete the `.npz` file.
Tips:
- The Neuroglancer UI controls are not shown, and in the case of multi-panel
layouts, there is no border between panels. In most cases it is desirable to
capture a single-panel layout.
- The layer side panel and statistics panel, if open, will be closed for the
screenshot.
- The specified image dimensions will be used, rather than the dimensions of
your browser window. This, in combination with the removal of the normal
Neuroglancer UI controls, means that the field of view may differ somewhat.
- The axis lines and volume bounding boxes will be shown if they are enabled in
the Neuroglancer state. If you don't want them in the screenshot, you should
disable them in the Neuroglancer state. You may also use the
`--hide-axis-lines` and `--hide-default-annotations` options. In most cases
it is desirable to hide the axis lines and default annotations.
- The scale bars will be shown if they are enabled in the Neuroglancer state.
If you specify a large image size, you may want to increase the size of the
scale bar, using the `--scale-bar-scale` option.
"""
import argparse
import collections
import datetime
import itertools
import copy
import os
import threading
import time
import PIL
import numpy as np
import neuroglancer
import neuroglancer.cli
import neuroglancer.webdriver
def _get_total_segments(state):
num_segments = 0
for layer in state.layers:
if not isinstance(layer.layer, neuroglancer.SegmentationLayer):
continue
num_segments += len(layer.segments)
return num_segments
def _should_shard_segments(state, segment_shard_size):
return _get_total_segments(state) > segment_shard_size
def _calculate_num_shards(state, segment_shard_size):
total_segments = _get_total_segments(state)
return -(-total_segments // segment_shard_size)
def _get_sharded_states(state, segment_shard_size, reverse_bits):
if reverse_bits:
sort_key = lambda x: int('{:064b}'.format(x)[::-1], 2)
else:
sort_key = None
num_shards = _calculate_num_shards(state, segment_shard_size)
for shard_i in range(num_shards):
new_state = copy.deepcopy(state)
cum_retained = 0
cum_skipped = segment_shard_size * shard_i
for i, layer in enumerate(new_state.layers):
if not isinstance(layer.layer, neuroglancer.SegmentationLayer):
continue
segments = sorted(layer.segments, key=sort_key)
num_to_skip = min(cum_skipped, len(segments))
segments = segments[num_to_skip:]
cum_skipped += num_to_skip
num_to_retain = min(segment_shard_size - cum_retained, len(segments))
cum_retained += num_to_retain
layer.segments = set(segments[:num_to_retain])
yield new_state
class TileGenerator:
def __init__(self, shape, tile_shape):
self.tile_shape = tuple(tile_shape)
self.shape = tuple(shape)
self.tile_grid_shape = tuple(-(-self.shape[i] // self.tile_shape[i]) for i in range(2))
self.tile_shape = tuple(-(-self.shape[i] // self.tile_grid_shape[i]) for i in range(2))
self.num_tiles = self.tile_grid_shape[0] * self.tile_grid_shape[1]
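# Note: -(-a // b) is ceiling division. The grid is first sized so that the
# requested shape is fully covered, then the tile shape is recomputed so the
# tiles split the image as evenly as possible.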
def get_tile_states(self, state):
for tile_y in range(self.tile_grid_shape[1]):
for tile_x in range(self.tile_grid_shape[0]):
x_offset = tile_x * self.tile_shape[0]
y_offset = tile_y * self.tile_shape[1]
tile_width = min(self.tile_shape[0], self.shape[0] - x_offset)
tile_height = min(self.tile_shape[1], self.shape[1] - y_offset)
new_state = copy.deepcopy(state)
new_state.partial_viewport = [
x_offset / self.shape[0], y_offset / self.shape[1],
tile_width / self.shape[0], tile_height / self.shape[1]
]
params = {
'tile_x': tile_x,
'tile_y': tile_y,
'x_offset': x_offset,
'y_offset': y_offset,
'tile_width': tile_width,
'tile_height': tile_height,
}
yield params, new_state
class ShardedTileGenerator(TileGenerator):
def __init__(self, state, segment_shard_size, reverse_bits, **kwargs):
super(ShardedTileGenerator, self).__init__(**kwargs)
self.state = state
self.reverse_bits = reverse_bits
self.total_segments = _get_total_segments(self.state)
self.segment_shard_size = segment_shard_size
self.num_shards = _calculate_num_shards(self.state, self.segment_shard_size)
self.num_tiles *= self.num_shards
def get_states(self):
for shard_i, state in enumerate(
_get_sharded_states(self.state,
self.segment_shard_size,
reverse_bits=self.reverse_bits)):
for params, state in self.get_tile_states(state):
params['segment_shard'] = shard_i
yield params, state
CaptureScreenshotRequest = collections.namedtuple(
'CaptureScreenshotRequest',
['state', 'description', 'config_callback', 'response_callback', 'include_depth'])
def buffered_iterator(base_iter, lock, buffer_size):
while True:
with lock:
buffered_items = list(itertools.islice(base_iter, buffer_size))
if not buffered_items: break
for item in buffered_items:
yield item
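# buffered_iterator lets several capture threads share one request iterator: each
# thread takes up to buffer_size requests at a time under the lock, so work is
# handed out in contiguous chunks rather than one request per lock acquisition.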
def capture_screenshots(viewer, request_iter, refresh_browser_callback, refresh_browser_timeout, num_to_prefetch=1):
prefetch_buffer = list(itertools.islice(request_iter, num_to_prefetch + 1))
while prefetch_buffer:
with viewer.config_state.txn() as s:
s.show_ui_controls = False
s.show_panel_borders = False
del s.prefetch[:]
for i, request in enumerate(prefetch_buffer[1:]):
s.prefetch.append(neuroglancer.PrefetchState(state=request.state, priority=num_to_prefetch - i))
request = prefetch_buffer[0]
request.config_callback(s)
viewer.set_state(request.state)
print('%s [%s] Requesting screenshot' % (
datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f'),
request.description,
))
last_statistics_time = time.time()
def statistics_callback(statistics):
nonlocal last_statistics_time
last_statistics_time = time.time()
total = statistics.total
print(
'%s [%s] Screenshot in progress: %6d/%6d chunks loaded (%10d bytes), %3d downloading'
% (
datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f'),
request.description,
total.visible_chunks_gpu_memory,
total.visible_chunks_total,
total.visible_gpu_memory,
total.visible_chunks_downloading,
))
event = threading.Event()
screenshot = None
def result_callback(s):
nonlocal screenshot
screenshot = s.screenshot
event.set()
viewer.async_screenshot(
result_callback,
include_depth=request.include_depth,
statistics_callback=statistics_callback,
)
def get_timeout():
return max(0, last_statistics_time + refresh_browser_timeout - time.time())
while True:
if event.wait(get_timeout()):
break
if get_timeout() > 0:
continue
last_statistics_time = time.time()
refresh_browser_callback()
request.response_callback(screenshot)
del prefetch_buffer[0]
next_request = next(request_iter, None)
if next_request is not None:
prefetch_buffer.append(next_request)
class MultiCapturer:
def __init__(self, shape, include_depth, output, config_callback, num_to_prefetch, checkpoint_interval=60):
self.include_depth = include_depth
self.checkpoint_interval = checkpoint_interval
self.config_callback = config_callback
self.num_to_prefetch = num_to_prefetch
self.output = output
self._processed = set()
self.state_file = output + '.npz'
self.temp_state_file = self.state_file + '.tmp'
self.image_array = np.zeros((shape[1], shape[0], 4), dtype=np.uint8)
if self.include_depth:
self.depth_array = np.zeros((shape[1], shape[0]), dtype=np.float32)
self._load_state()
self._add_image_lock = threading.Lock()
self._last_save_time = time.time()
self._save_state_in_progress = threading.Event()
self._save_state_in_progress.set()
self._num_states_processed = 0
self._start_time = time.time()
def _load_state(self):
if not os.path.exists(self.state_file):
return
with np.load(self.state_file, allow_pickle=True) as f:
if self.include_depth:
self.depth_array = f['depth']
self.image_array = f['image']
self._processed = set(f['processed'].ravel()[0])
def _save_state(self, save_image=False):
with self._add_image_lock:
processed = set(self._processed)
with open(self.temp_state_file, 'wb') as f:
save_arrays = {
'image': self.image_array,
'processed': processed,
}
if self.include_depth:
save_arrays['depth'] = self.depth_array
np.savez_compressed(f, **save_arrays)
os.replace(self.temp_state_file, self.state_file)
if save_image:
self._save_image()
def _save_state_async(self, save_image=False):
print('Starting checkpointing')
def func():
try:
self._save_state()
print('Done checkpointing')
finally:
self._save_state_in_progress.set()
threading.Thread(target=func, daemon=True).start()
def _save_image(self):
im = PIL.Image.fromarray(self.image_array)
im.save(self.output)
def _add_image(self, params, screenshot):
with self._add_image_lock:
tile_image = screenshot.image_pixels
tile_selector = np.s_[params['y_offset']:params['y_offset'] + params['tile_height'],
params['x_offset']:params['x_offset'] + params['tile_width']]
if self.include_depth:
tile_depth = screenshot.depth_array
depth_array_part = self.depth_array[tile_selector]
mask = np.logical_and(np.logical_or(tile_depth != 0, depth_array_part == 0),
tile_depth >= depth_array_part)
depth_array_part[mask] = tile_depth[mask]
else:
mask = Ellipsis
self.image_array[tile_selector][mask] = tile_image[mask]
self._processed.add(self._get_description(params))
self._num_states_processed += 1
elapsed = time.time() - self._start_time
print('%4d tiles rendered in %5d seconds: %.1f seconds/tile' %
(self._num_states_processed, elapsed, elapsed / self._num_states_processed))
def _maybe_save_state(self):
if not self._save_state_in_progress.is_set(): return
with self._add_image_lock:
if self._last_save_time + self.checkpoint_interval < time.time():
self._last_save_time = time.time()
self._save_state_in_progress.clear()
self._save_state_async(save_image=False)
def _get_description(self, params):
segment_shard = params.get('segment_shard')
if segment_shard is not None:
prefix = 'segment_shard=%d ' % (segment_shard, )
else:
prefix = ''
return '%stile_x=%d tile_y=%d' % (prefix, params['tile_x'], params['tile_y'])
def _make_capture_request(self, params, state):
description = self._get_description(params)
if description in self._processed: return None
def config_callback(s):
s.viewer_size = (params['tile_width'], params['tile_height'])
self.config_callback(s)
def response_callback(screenshot):
self._add_image(params, screenshot)
self._maybe_save_state()
return CaptureScreenshotRequest(state=state,
description=self._get_description(params),
config_callback=config_callback,
response_callback=response_callback,
include_depth=self.include_depth)
def _get_capture_screenshot_request_iter(self, state_iter):
for params, state in state_iter:
request = self._make_capture_request(params, state)
if request is not None: yield request
def capture(self, viewers, state_iter, refresh_browser_timeout, save_depth, buffer_size):
request_iter = self._get_capture_screenshot_request_iter(state_iter)
threads = []
buffer_lock = threading.Lock()
for viewer, refresh_browser_callback in viewers:
def capture_func(viewer, refresh_browser_callback):
viewer_request_iter = buffered_iterator(base_iter=request_iter,
lock=buffer_lock,
buffer_size=buffer_size)
capture_screenshots(
viewer=viewer,
request_iter=viewer_request_iter,
num_to_prefetch=self.num_to_prefetch,
refresh_browser_timeout=refresh_browser_timeout,
refresh_browser_callback=refresh_browser_callback,
)
t = threading.Thread(target=capture_func, args=(viewer, refresh_browser_callback))
t.start()
threads.append(t)
for t in threads:
t.join()
if not self._save_state_in_progress.is_set():
print('Waiting for previous save state to complete')
self._save_state_in_progress.wait()
if save_depth:
self._save_state()
else:
self._save_image()
if os.path.exists(self.state_file):
os.remove(self.state_file)
def capture_image(viewers, args, state):
def config_callback(s):
s.scale_bar_options.scale_factor = args.scale_bar_scale
segment_shard_size = args.segment_shard_size
tile_parameters = dict(
shape=(args.width, args.height),
tile_shape=(args.tile_width, args.tile_height),
)
if segment_shard_size is not None and _should_shard_segments(state, segment_shard_size):
gen = ShardedTileGenerator(state=state,
segment_shard_size=segment_shard_size,
reverse_bits=args.sort_segments_by_reversed_bits,
**tile_parameters)
num_states = gen.num_tiles
state_iter = gen.get_states()
include_depth = True
else:
gen = TileGenerator(**tile_parameters)
num_states = gen.num_tiles
state_iter = gen.get_tile_states(state)
include_depth = False
capturer = MultiCapturer(
shape=tile_parameters['shape'],
include_depth=include_depth,
output=args.output,
config_callback=config_callback,
num_to_prefetch=args.prefetch,
checkpoint_interval=args.checkpoint_interval,
)
num_output_shards = args.num_output_shards
tiles_per_output_shard = args.tiles_per_output_shard
output_shard = args.output_shard
if (output_shard is None) != (num_output_shards is None and tiles_per_output_shard is None):
raise ValueError(
'--output-shard must be specified in combination with --num-output-shards or --tiles-per-output-shard'
)
if output_shard is not None:
if num_output_shards is not None:
if num_output_shards < 1:
raise ValueError('Invalid --num-output-shards: %d' % (num_output_shards, ))
states_per_shard = -(-num_states // num_output_shards)
else:
if tiles_per_output_shard < 1:
raise ValueError('Invalid --tiles-per-output-shard: %d' % (tiles_per_output_shard, ))
num_output_shards = -(-num_states // tiles_per_output_shard)
states_per_shard = tiles_per_output_shard
if output_shard < 0 or output_shard >= num_output_shards:
raise ValueError('Invalid --output-shard: %d' % (output_shard, ))
print('Total states: %d, Number of output shards: %d' % (num_states, num_output_shards))
state_iter = itertools.islice(state_iter, states_per_shard * output_shard,
states_per_shard * (output_shard + 1))
else:
states_per_shard = num_states
capturer.capture(
viewers=viewers,
state_iter=state_iter,
refresh_browser_timeout=args.refresh_browser_timeout,
save_depth=output_shard is not None,
buffer_size=max(1, states_per_shard // (args.jobs * 4)),
)
def run(args):
neuroglancer.cli.handle_server_arguments(args)
state = args.state
state.selected_layer.visible = False
state.statistics.visible = False
if args.layout is not None:
state.layout = args.layout
if args.show_axis_lines is not None:
state.show_axis_lines = args.show_axis_lines
if args.show_default_annotations is not None:
state.show_default_annotations = args.show_default_annotations
if args.projection_scale_multiplier is not None:
state.projection_scale *= args.projection_scale_multiplier
state.gpu_memory_limit = args.gpu_memory_limit
state.system_memory_limit = args.system_memory_limit
state.concurrent_downloads = args.concurrent_downloads
if args.no_webdriver:
viewers = [neuroglancer.Viewer() for _ in range(args.jobs)]
print('Open the following URLs to begin rendering')
for viewer in viewers:
print(viewer)
def refresh_browser_callback():
print('Browser unresponsive, consider reloading')
capture_image([(viewer, refresh_browser_callback) for viewer in viewers], args, state)
else:
def _make_webdriver():
webdriver = neuroglancer.webdriver.Webdriver(
headless=args.headless,
docker=args.docker_chromedriver,
debug=args.debug_chromedriver,
)
def refresh_browser_callback():
print('Browser unresponsive, reloading')
webdriver.reload_browser()
return webdriver, refresh_browser_callback
webdrivers = [_make_webdriver() for _ in range(args.jobs)]
try:
capture_image([(webdriver.viewer, refresh_browser_callback)
for webdriver, refresh_browser_callback in webdrivers], args, state)
finally:
for webdriver, _ in webdrivers:
try:
webdriver.__exit__()
except:
pass
def main(args=None):
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
neuroglancer.cli.add_state_arguments(ap, required=True)
ap.add_argument('--segment-shard-size',
type=int,
help='Maximum number of segments to render simultaneously. '
'If the number of selected segments exceeds this number, '
'multiple passes will be used (transparency not supported).')
ap.add_argument('--sort-segments-by-reversed-bits',
action='store_true',
help='When --segment-shard-size is also specified, normally segment ids are ordered numerically before being partitioned into shards. If segment ids are spatially correlated, then this can lead to slower and more memory-intensive rendering. If --sort-segments-by-reversed-bits is specified, segment ids are instead ordered by their bit reversed values, which may avoid the spatial correlation.')
ap.add_argument('output', help='Output path of screenshot file in PNG format.')
ap.add_argument('--prefetch', type=int, default=1, help='Number of states to prefetch.')
ap.add_argument('--output-shard', type=int, help='Output shard to write.')
output_shard_group = ap.add_mutually_exclusive_group(required=False)
output_shard_group.add_argument('--num-output-shards',
type=int,
help='Number of output shards.')
output_shard_group.add_argument('--tiles-per-output-shard',
type=int,
help='Number of tiles per output shard.')
ap.add_argument('--checkpoint-interval',
type=float,
default=60,
help='Interval in seconds at which to save checkpoints.')
ap.add_argument(
'--refresh-browser-timeout',
type=int,
default=60,
help=
'Number of seconds without receiving statistics while capturing a screenshot before browser is considered unresponsive.'
)
ap.add_argument('--width', type=int, default=3840, help='Width in pixels of screenshot.')
ap.add_argument('--height', type=int, default=2160, help='Height in pixels of screenshot.')
ap.add_argument(
'--tile-width',
type=int,
default=4096,
help=
'Width in pixels of single tile. If total width is larger, the screenshot will be captured as multiple tiles.'
)
ap.add_argument(
'--tile-height',
type=int,
default=4096,
help=
'Height in pixels of single tile. If total height is larger, the screenshot will be captured as multiple tiles.'
)
ap.add_argument('--no-webdriver',
action='store_true',
help='Do not open browser automatically via webdriver.')
ap.add_argument('--no-headless',
dest='headless',
action='store_false',
help='Use non-headless webdriver.')
ap.add_argument(
'--docker-chromedriver',
action='store_true',
help='Run Chromedriver with options suitable for running inside docker')
ap.add_argument(
'--debug-chromedriver',
action='store_true',
help='Enable debug logging in Chromedriver')
ap.add_argument('--hide-axis-lines',
dest='show_axis_lines',
action='store_false',
help='Override showAxisLines setting in state.')
ap.add_argument('--hide-default-annotations',
action='store_false',
dest='show_default_annotations',
help='Override showDefaultAnnotations setting in state.')
ap.add_argument('--projection-scale-multiplier',
type=float,
help='Multiply projection view scale by specified factor.')
ap.add_argument('--system-memory-limit',
type=int,
default=3 * 1024 * 1024 * 1024,
help='System memory limit')
ap.add_argument('--gpu-memory-limit',
type=int,
default=3 * 1024 * 1024 * 1024,
help='GPU memory limit')
ap.add_argument('--jobs',
'-j',
type=int,
default=1,
help='Number of browsers to use concurrently. '
'This may improve performance at the cost of greater memory usage. '
'On a 64GiB 16 hyperthread machine, --jobs=6 works well.')
ap.add_argument('--concurrent-downloads', type=int, default=32, help='Concurrent downloads')
ap.add_argument('--layout', type=str, help='Override layout setting in state.')
ap.add_argument('--scale-bar-scale', type=float, help='Scale factor for scale bar', default=1)
run(ap.parse_args(args))
if __name__ == '__main__':
main()
|
_queue_writer.py
|
import multiprocessing
import queue
import threading
from typing import Generic, Optional, Tuple
import torch
from pytorch_pfn_extras.writing._writer_base import (
Writer, _TargetType, _SaveFun, _TaskFun, _Worker, _FileSystem,
)
from pytorch_pfn_extras.writing._simple_writer import SimpleWriter
_QueUnit = Optional[Tuple[
_TaskFun, str, str, _TargetType, Optional[_SaveFun], bool]]
class QueueWriter(Writer, Generic[_Worker]):
"""Base class of queue snapshot writers.
This class is a base class of snapshot writers that use a queue.
A Queue is created when this class is constructed, and every time when
``__call__`` is invoked, a snapshot task is put into the queue.
Args:
savefun: Callable object which is passed to the :meth:`create_task`
if the task is ``None``. It takes three arguments: the output file
path, the serialized dictionary object, and the optional keyword
arguments.
fs: FileSystem abstracting interface to implement all the operations.
optional, defaults to None
out_dir: str. Specifies the directory this writer will use.
It takes precedence over the one specified in ``__call__``.
optional, defaults to None
task: Callable object. Its ``__call__`` must have the same interface as
``Writer.__call__``. This object is directly put into the queue.
.. seealso::
- :meth:`pytorch_pfn_extras.training.extensions.snapshot`
"""
def __init__(
self,
savefun: _SaveFun = torch.save,
fs: _FileSystem = None,
out_dir: Optional[str] = None,
task: Optional[_TaskFun] = None,
) -> None:
super().__init__(fs=fs, out_dir=out_dir)
self._started = False
self._finalized = False
if task is None:
self._task = self.create_task(savefun)
else:
self._task = task
self._queue = self.create_queue()
self._consumer: _Worker = self.create_consumer(self._queue)
self._consumer.start()
self._started = True
def __call__(
self,
filename: str,
out_dir: str,
target: _TargetType,
*,
savefun: Optional[_SaveFun] = None,
append: bool = False
) -> None:
assert not self._finalized
self._queue.put(
(self._task, filename, out_dir, target, savefun, append))
def create_task(self, savefun: _SaveFun) -> _TaskFun:
return SimpleWriter(savefun=savefun)
def create_queue(self) -> 'queue.Queue[_QueUnit]':
raise NotImplementedError
def create_consumer(self, q: 'queue.Queue[_QueUnit]') -> _Worker:
raise NotImplementedError
def consume(self, q: 'queue.Queue[_QueUnit]') -> None:
while True:
task = q.get()
if task is None:
q.task_done()
return
else:
task[0](
task[1], task[2], task[3], savefun=task[4], append=task[5])
q.task_done()
def finalize(self) -> None:
if self._started:
if not self._finalized:
self._queue.put(None)
self._queue.join()
self._consumer.join()
self._started = False
self._finalized = True
class ThreadQueueWriter(QueueWriter[threading.Thread]):
"""Snapshot writer that uses a thread queue.
This class creates a thread and a queue by :mod:`threading` and
:mod:`queue` modules
respectively. The thread will be a consumer of the queue, and the main
thread will be a producer of the queue.
.. seealso::
- :meth:`pytorch_pfn_extras.training.extensions.snapshot`
"""
def __init__(
self,
savefun: _SaveFun = torch.save,
fs: _FileSystem = None,
out_dir: Optional[str] = None,
task: Optional[_TaskFun] = None
) -> None:
super().__init__(savefun=savefun, fs=fs, task=task, out_dir=out_dir)
def create_queue(self) -> 'queue.Queue[_QueUnit]':
return queue.Queue()
def create_consumer(self, q: 'queue.Queue[_QueUnit]') -> threading.Thread:
return threading.Thread(target=self.consume, args=(q,))
class ProcessQueueWriter(QueueWriter[multiprocessing.Process]):
"""Snapshot writer that uses process queue.
This class creates a process and a queue by :mod:`multiprocessing` module.
The process will be a consumer of this queue, and the main process will be
a producer of this queue.
.. note::
        Forking a new process from an MPI process might be dangerous. Consider using
:class:`ThreadQueueWriter` instead of ``ProcessQueueWriter`` if you are
using MPI.
.. seealso::
- :meth:`pytorch_pfn_extras.training.extensions.snapshot`
"""
def __init__(
self,
savefun: _SaveFun = torch.save,
fs: _FileSystem = None,
out_dir: Optional[str] = None,
task: Optional[_TaskFun] = None
) -> None:
super().__init__(savefun=savefun, fs=fs, out_dir=out_dir, task=task)
def create_queue(self) -> 'queue.Queue[_QueUnit]':
return multiprocessing.JoinableQueue()
def create_consumer(self, q: 'queue.Queue[_QueUnit]') -> multiprocessing.Process:
return multiprocessing.Process(target=self.consume, args=(q,))
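# A minimal usage sketch (illustrative only; the output directory, file name
# and target dict below are assumptions, not defaults of this module). A
# ThreadQueueWriter hands snapshot tasks to a background thread so the caller
# is not blocked by disk I/O; call finalize() to flush the queue on shutdown:
#
#   writer = ThreadQueueWriter()
#   writer('snapshot_iter_1000', '/tmp/out', {'model': model.state_dict()})
#   writer.finalize()  # blocks until the queued snapshot has been written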
|
server.py
|
import socket
import threading
from time import sleep
import colorgb
class Server:
"""A class that implements the server side.
-----------
Parameters :
    - ip: :class:`localhost/127.0.0.1` | You cannot change the server IP; the server always binds to localhost.
    - port: :class:`int` | The server port.
    - record_conversation: :class:`False/True` | Record all client conversations on your server (if `True`, clients receive a warning message that the server records all conversations).
"""
def __init__(self, port:int, record_conversation=False):
host = '127.0.0.1'
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((host, port))
server.listen()
print()
print("Server Information :")
print(f"Status : {colorgb.fore('Online', 'lgreen')}")
print(f"IP : {host} (use your network IP address for client connect to your server)")
print(f"PORT : {port}")
        if record_conversation:
rc = colorgb.fore(True, 'lred')
else:
rc = colorgb.fore(False, 'lgreen')
print(f"Record Conversation : {rc}")
print()
self.print_log = record_conversation
self.server = server
self.welcome_msg = None
def set_welcome_message(self, message:str):
"""
        Set the welcome message shown when a user joins (visible only to that user).
-----
Parameter :
- message: `str`
"""
message = f"\n{message}\n"
self.welcome_msg = message
print(f"Welcome message :")
print(message)
print()
def start(self):
"""Nothing, just function to start the server.
-----------
"""
clients = []
nicknames = []
server = self.server
def broadcast(message):
for client in clients:
client.send(message)
def handle(client):
while True:
try:
message = client.recv(1024)
broadcast(message)
                    if self.print_log:
                        print(message.decode('UTF-8'))
                except Exception:
index = clients.index(client)
clients.remove(client)
client.close()
nickname = nicknames[index]
broadcast(f'{nickname} left!'.encode('UTF-8'))
print(f'{nickname} left')
nicknames.remove(nickname)
break
def receive():
while True:
client, address = server.accept()
print(f"Connected with {str(address)}")
client.send('NICK'.encode('UTF-8'))
nickname = client.recv(1024).decode('UTF-8')
nicknames.append(nickname)
clients.append(client)
print("New Client : {}".format(nickname))
client.send('Connected to server!'.encode('UTF-8'))
broadcast(f"{nickname} joined!".encode('UTF-8'))
                if self.welcome_msg is not None:
                    client.send(self.welcome_msg.encode('UTF-8'))
                if self.print_log:
                    msg = colorgb.fore("Warning, the server may record your conversation.", "lred")
                    client.send(f"\n{msg}\n\n".encode('UTF-8'))
thread = threading.Thread(target=handle, args=(client,))
thread.start()
receive()
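# A minimal usage sketch (the port and welcome text are arbitrary examples,
# not defaults of this module). start() blocks while accepting clients.
if __name__ == "__main__":
    demo = Server(port=5050, record_conversation=False)
    demo.set_welcome_message("Welcome to the demo server!")
    demo.start()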
|
scheduler.py
|
import time
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime, timedelta
from multiprocessing import Queue, Process
from threading import Thread
import schedule
from scylla.config import get_config
from scylla.database import ProxyIP
from scylla.jobs import validate_proxy_ip
from scylla.loggings import logger
from scylla.providers import *
from scylla.worker import Worker
FEED_FROM_DB_INTERVAL_MINUTES = 30
def fetch_ips(q: Queue, validator_queue: Queue):
logger.debug('fetch_ips...')
worker = Worker()
while True:
try:
provider: BaseProvider = q.get()
provider_name = provider.__class__.__name__
logger.debug('Get a provider from the provider queue: ' + provider_name)
for url in provider.urls():
try:
html = worker.get_html(url, render_js=provider.should_render_js())
except Exception as e:
logger.error("worker.get_html failed: ", e)
continue
if html:
proxies = provider.parse(html)
for p in proxies:
validator_queue.put(p)
# logger.debug('Put new proxy ip into queue: {}'.format(p.__str__()))
logger.info(
' {}: feed {} potential proxies into the validator queue'.format(provider_name, len(proxies))
)
except (KeyboardInterrupt, InterruptedError, SystemExit):
worker.stop()
logger.info('worker_process exited.')
break
def validate_ips(validator_queue: Queue, validator_pool: ThreadPoolExecutor):
while True:
try:
proxy: ProxyIP = validator_queue.get()
validator_pool.submit(validate_proxy_ip, p=proxy)
except (KeyboardInterrupt, SystemExit):
break
def cron_schedule(scheduler, only_once=False):
"""
:param scheduler: the Scheduler instance
:param only_once: flag for testing
"""
def feed():
scheduler.feed_providers()
def feed_from_db():
# TODO: better query (order by attempts)
proxies = ProxyIP.select().where(ProxyIP.updated_at > datetime.now() - timedelta(days=14))
for p in proxies:
scheduler.validator_queue.put(p)
logger.debug('Feed {} proxies from the database for a second time validation'.format(len(proxies)))
# feed providers at the very beginning
scheduler.feed_providers()
schedule.every(10).minutes.do(feed)
schedule.every(FEED_FROM_DB_INTERVAL_MINUTES).minutes.do(feed_from_db)
logger.info('Start python scheduler')
flag = True
# After 1 minute, try feed_from_db() for the first time
wait_time_for_feed_from_db = 1 if only_once else 60
time.sleep(wait_time_for_feed_from_db)
feed_from_db()
while flag:
try:
schedule.run_pending()
if only_once:
flag = False
else:
time.sleep(60)
except (KeyboardInterrupt, InterruptedError):
logger.info('Stopping python scheduler')
break
class Scheduler(object):
def __init__(self):
self.worker_queue = Queue()
self.validator_queue = Queue()
self.worker_process = None
self.validator_thread = None
self.cron_thread = None
self.validator_pool = ThreadPoolExecutor(max_workers=int(get_config('validation_pool', default='31')))
def start(self):
"""
        Start the scheduler with a worker process (fetching candidate proxies from different providers)
        and validator threads that check whether the fetched proxies are usable.
"""
logger.info('Scheduler starts...')
self.cron_thread = Thread(target=cron_schedule, args=(self,), daemon=True)
self.worker_process = Process(target=fetch_ips, args=(self.worker_queue, self.validator_queue))
self.validator_thread = Thread(target=validate_ips, args=(self.validator_queue, self.validator_pool))
self.cron_thread.daemon = True
self.worker_process.daemon = True
self.validator_thread.daemon = True
self.cron_thread.start()
        self.worker_process.start()  # Python will wait for all processes to finish
logger.info('worker_process started')
self.validator_thread.start()
logger.info('validator_thread started')
def join(self):
"""
Wait for worker processes and validator threads
"""
while (self.worker_process and self.worker_process.is_alive()) or (
self.validator_thread and self.validator_thread.is_alive()):
try:
self.worker_process.join()
self.validator_thread.join()
except (KeyboardInterrupt, SystemExit):
break
def feed_providers(self):
logger.debug('feed {} providers...'.format(len(all_providers)))
for provider in all_providers:
self.worker_queue.put(provider())
def stop(self):
self.worker_queue.close()
self.worker_process.terminate()
# self.validator_thread.terminate() # TODO: 'terminate' the thread using a flag
self.validator_pool.shutdown(wait=False)
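# A minimal usage sketch (illustrative only). The typical life cycle is to
# start the worker process and validator thread, block on join(), and call
# stop() when shutting down:
#
#   scheduler = Scheduler()
#   try:
#       scheduler.start()
#       scheduler.join()
#   except KeyboardInterrupt:
#       scheduler.stop()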
|
phone_util.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Utilities for phone embedding network."""
import csv
import json
import os.path
import queue
import sys
import threading
from typing import Any, Callable, Dict, Iterable, List, TypeVar, Tuple
from absl import flags
import numpy as np
from extras.python import frontend
FLAGS = flags.FLAGS
KT = TypeVar('KT')
VT_in = TypeVar('VT_in') # pylint:disable=invalid-name
VT_out = TypeVar('VT_out') # pylint:disable=invalid-name
def _map_dict_values(fun: Callable[[VT_in], VT_out],
d: Dict[KT, VT_in]) -> Dict[KT, VT_out]:
return {k: fun(v) for k, v in d.items()}
def get_main_module_flags_dict() -> Dict[str, Any]:
"""Gets dict of flags that were defined in the main module."""
  return {f.name: f.value for f in FLAGS.flags_by_module_dict()[sys.argv[0]]}
def get_phone_label_filename(wav_file: str) -> str:
"""Gets the phone label filename associated with `wav_file`."""
return os.path.splitext(wav_file)[0] + '.phn'
def get_phone_times(phn_file: Any) -> List[Tuple[int, int, str]]:
"""Gets endpoint times for each phone in a recording.
Reads phone endpoint times from .phn file. The .phn file has a simple text
format as used in TIMIT. Each row gives start and end sample indices and label
for one phone, '<start> <end> <label>'.
Args:
phn_file: String or file-like object.
Returns:
List of 3-tuples (start, end, label) where `start` and `end` are sample
indices where the phone is active, and `label` is a phone string.
"""
def _read(f):
"""Read .phn CSV data from file object `f`."""
try:
reader = csv.reader(f, delimiter=' ', quotechar='"')
results = []
for row in reader:
if len(row) != 3:
continue
start, end, label = row
results.append((int(start), int(end), label))
return results
except (IOError, UnicodeDecodeError, TypeError) as e:
# If reading fails, reraise with the filename for more context.
name = getattr(f, 'name', '(no name)')
raise IOError(f'Error reading .phn file {name}: {e}')
if isinstance(phn_file, str):
with open(phn_file, 'rt') as f:
return _read(f)
else:
return _read(phn_file)
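# Worked example (illustrative only). A TIMIT-style .phn file containing
#     0 2400 sil
#     2400 5600 ae
# is parsed by get_phone_times into
#     [(0, 2400, 'sil'), (2400, 5600, 'ae')]
# e.g. get_phone_times(io.StringIO('0 2400 sil\n2400 5600 ae\n')), assuming
# `io` is imported at the call site.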
def run_frontend(carl: frontend.CarlFrontend,
audio_samples: np.ndarray) -> np.ndarray:
"""Reset and run CarlFrontend on audio samples.
Convenience function to reset and run a CarlFrontend on some audio samples,
zero-padding as necessary to get a whole number of blocks.
Args:
carl: CarlFrontend.
    audio_samples: 1D array of audio samples of any length. It is zero-padded
      if necessary to make a whole number of blocks.
Returns:
2D array of shape [num_frames, frontend.block_size] of frames.
"""
carl.reset()
audio_samples = np.asarray(audio_samples, dtype=np.float32)
# Zero pad to a whole number of blocks.
padding = (-len(audio_samples)) % carl.block_size
audio_samples = np.append(audio_samples, np.zeros(padding, np.float32))
return carl.process_samples(audio_samples)
ItemT = TypeVar('ItemT')
def run_in_parallel(items: Iterable[ItemT],
num_threads: int,
fun: Callable[[ItemT], Any]) -> None:
"""Run tasks concurrently on multiple threads.
This function conceptually runs the for loop
for item in items:
fun(item)
with multiple threads. Note that `fun` must release the GIL to actually get
performance benefits [https://wiki.python.org/moin/GlobalInterpreterLock].
Args:
items: Iterable.
num_threads: Integer, number of worker threads.
fun: Function taking one item as its input.
"""
stop_worker = object()
# This implementation follows the example in the Queue documentation:
# https://docs.python.org/3/library/queue.html#queue.Queue.join
def _worker():
"""One worker thread."""
while True:
item = q.get()
if item is stop_worker:
break
fun(item)
q.task_done()
q = queue.Queue()
threads = []
for _ in range(num_threads):
t = threading.Thread(target=_worker)
t.start()
threads.append(t)
for item in items:
q.put(item)
q.join() # Block until all tasks are done.
for _ in range(num_threads): # Stop workers.
q.put(stop_worker)
for t in threads:
t.join()
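# A minimal usage sketch (the file list and `convert` function are
# hypothetical). run_in_parallel applies `fun` to every item using a pool of
# worker threads; it only pays off if `fun` releases the GIL (I/O or
# C-extension work):
#
#   def convert(wav_file):
#     ...  # heavy I/O or C-extension work per file
#
#   run_in_parallel(['a.wav', 'b.wav', 'c.wav'], num_threads=4, fun=convert)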
def balance_weights(example_counts: Dict[str, int],
fg_classes: Iterable[str],
fg_balance_exponent: float,
bg_fraction: float,
bg_balance_exponent: float) -> Dict[str, float]:
"""Computes weights to partially normalize for class imbalance.
Compute balancing weights from example counts. The weights partially normalize
for class imbalance, keeping some bias in favor of more frequent classes, like
L. S. Yaeger, B. J. Webb, R. F. Lyon. "Combining neural networks and
context-driven search for online, printed handwriting recognition in the
Newton." AI Magazine 19.1 (1998): 73-73.
http://dicklyon.com/tech/Mondello/AIMag-Lyon.pdf
Args:
example_counts: Dict, where `examples_counts[label]` is the number of
available examples for class `label`.
fg_classes: List of strings, class labels that are in the "foreground".
Classes not in this list are "background".
fg_balance_exponent: Float, an exponent between 0.0 and 1.0 for balancing
foreground classes. A value of 1.0 implies full normalization.
bg_fraction: Float between 0.0 and 1.0, the fraction of the balanced dataset
to devote to background classes.
bg_balance_exponent: Float, balancing exponent for background classes.
Returns:
weights dict, where `weights[label]` is a value between 0.0 and 1.0, the
fraction of examples of class `label` that should be retained.
"""
# Split phones to "foreground" classes of interest and "background" classes.
fg_classes = list(fg_classes)
bg_classes = list(set(example_counts.keys()) - set(fg_classes))
fg_counts = np.array([example_counts[k] for k in fg_classes])
bg_counts = np.array([example_counts[k] for k in bg_classes])
fg_weights = np.maximum(0, fg_counts)**-fg_balance_exponent
bg_weights = np.maximum(0, bg_counts)**-bg_balance_exponent
bg_total = fg_weights.dot(fg_counts) * (bg_fraction / (1.0 - bg_fraction))
# Normalize bg_weights such that background examples add up to bg_total.
bg_weights *= bg_total / bg_weights.dot(bg_counts)
weights = np.concatenate((fg_weights, bg_weights))
weights /= weights.max() # Rescale max weight to 1.0.
return dict(zip(fg_classes + bg_classes, weights))
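# A minimal usage sketch (the counts and exponents are made-up values). The
# returned dict maps each label to a keep-fraction between 0.0 and 1.0,
# suitable for passing to Dataset.subsample below:
#
#   counts = {'ae': 5000, 'iy': 2000, 'sil': 40000}
#   weights = balance_weights(counts,
#                             fg_classes=['ae', 'iy'],
#                             fg_balance_exponent=0.7,
#                             bg_fraction=0.25,
#                             bg_balance_exponent=0.5)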
class Dataset:
"""Dataset for phone embedding model."""
def __init__(self,
examples: Dict[str, np.ndarray],
metadata: Dict[str, Any]):
"""Constructor.
Args:
examples: Dict, where `examples[phone]` is a 3D array of examples with
label `phone` of shape (num_examples, num_frames, num_channels).
metadata: Dict.
"""
self.examples = examples
self.metadata = metadata
self._validate()
self._drop_empty_classes()
def _validate(self) -> None:
"""Validates example array shapes."""
for k, v in self.examples.items():
if not isinstance(v, np.ndarray):
raise TypeError(f'"{k}": expected numpy array, {type(v)} found')
elif v.shape[1:] != (self.num_frames, self.num_channels):
raise ValueError(f'"{k}": shape {v.shape} mismatches expected shape '
f'Nx{self.num_frames}x{self.num_channels}')
elif not np.all(np.isfinite(v)):
raise ValueError(f'"{k}": array has nonfinite value')
def _drop_empty_classes(self) -> None:
"""Drop empty classes from `self.examples`."""
self.examples = {k: v for k, v in self.examples.items() if len(v)}
@property
def num_frames(self) -> int:
return int(self.metadata['num_frames_left_context'] + 1)
@property
def num_channels(self) -> int:
return int(self.metadata['num_channels'])
@property
def example_counts(self) -> Dict[str, int]:
return _map_dict_values(len, self.examples)
def get_xy_arrays(self,
phones: Iterable[str],
shuffle: bool = False) -> Tuple[np.ndarray, np.ndarray]:
"""Gets dataset as a pair of arrays x and y, as in sklearn or tf.estimator.
This function converts `example_dict` to two numpy arrays x and y, of
observations and labels, as used in sklearn and tf.estimator. The x array
is created by concatenating `example_dict[phones[i]]` along the first axis
    while y is a 1D array of the same length containing the corresponding label indices,
x = concatenate([example_dict[phones[0]], example_dict[phones[1]], ...])
y = [0, 0, 0, 0, ... 0, 1, 1, 1, 1, ... 1, ...]
Args:
phones: List of phone labels. This determines which phones are included
in the output and the enumeration of label indices in y.
shuffle: Bool. If true, the output is shuffled.
Returns:
(x, y) 2-tuple of numpy arrays.
"""
x, y = [], []
for i, phone in enumerate(phones):
if phone in self.examples:
x.append(self.examples[phone].astype(np.float32))
y.append(np.full(len(self.examples[phone]), i, dtype=np.int32))
x = np.concatenate(x)
y = np.concatenate(y)
if shuffle:
i = np.random.permutation(len(x))
x, y = x[i], y[i]
return x, y
def subsample(self, fraction: Dict[str, float]) -> None:
"""Subsamples examples according to `fraction`.
This function randomly subsamples `examples[phone]` according to
`fraction[phone]`. For instance if weights = {'ae': 0.6, 'sil': 0.1}, then
the subsampling retains 60% of 'ae' examples and 10% of 'sil' examples. This
is useful to compensate for class imbalance.
If `examples[phone]` becomes empty after subsampling, it is deleted from the
dict. If a particular phone is not in `fraction`, zero is assumed and
`examples[phone]` is deleted.
Args:
fraction: Dict. `fraction[phone]` is a value between 0.0 and 1.0
specifying the fraction of examples to keep for label `phone`.
Raises:
ValueError: If fraction is invalid.
"""
for phone in self.examples:
fraction_phone = fraction.get(phone, 0.0)
if not 0.0 <= fraction_phone <= 1.0:
raise ValueError(f'fraction["{phone}"] = {fraction_phone} is not '
'between 0.0 and 1.0')
count = len(self.examples[phone])
subsampled_count = int(round(fraction_phone * count))
i = np.random.permutation(count)[:subsampled_count]
self.examples[phone] = self.examples[phone][i]
self._drop_empty_classes()
def split(self, fraction: float) -> Tuple['Dataset', 'Dataset']:
"""Split and return two Datasets.
Args:
fraction: Float, a fraction between 0.0 and 1.0.
Returns:
2-tuple of two Datasets. The first has a random sampling of `fraction` of
the examples for each class, and the second has the other examples.
"""
if not 0.0 <= fraction <= 1.0:
raise ValueError(f'fraction = {fraction} is not between 0.0 and 1.0')
examples_a = {}
examples_b = {}
for phone in self.examples:
count = len(self.examples[phone])
split_count = int(round(fraction * count))
i = np.random.permutation(count)
examples_a[phone] = self.examples[phone][i[:split_count]]
examples_b[phone] = self.examples[phone][i[split_count:]]
return (Dataset(examples_a, self.metadata),
Dataset(examples_b, self.metadata))
def write_npz(self, npz_file: Any) -> None:
"""Writes Dataset to .npz file.
Args:
npz_file: String or file-like object.
"""
self._validate()
self._drop_empty_classes()
contents = _map_dict_values(lambda v: v.astype(np.float32), self.examples)
contents['dataset_metadata'] = json.dumps(self.metadata).encode('utf8')
np.savez(npz_file, **contents)
def read_dataset_npz(npz_file: Any) -> Dataset:
"""Reads Dataset from .npz file.
Args:
    npz_file: String or readable file-like object.
Returns:
Dataset.
"""
contents = np.load(npz_file)
if 'dataset_metadata' not in contents.files:
raise ValueError('dataset_metadata missing from NPZ file')
metadata = json.loads(contents['dataset_metadata'].item().decode('utf8'))
examples = {
k: v.astype(np.float32)
for k, v in contents.items()
if k != 'dataset_metadata'
}
return Dataset(examples, metadata)
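# A minimal usage sketch (the file name and phone list are assumptions):
#
#   dataset = read_dataset_npz('/tmp/phone_dataset.npz')
#   train, test = dataset.split(0.8)
#   x_train, y_train = train.get_xy_arrays(['ae', 'iy', 'sil'], shuffle=True)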
|
randomHotBaronClick.py
|
from random import choice
from time import sleep
from win32api import keybd_event, GetAsyncKeyState
from win32con import KEYEVENTF_KEYUP
from tkinter import StringVar, Button,Label,Entry,Tk,DoubleVar
from queue import Queue
from threading import Thread
from os import path
import json
root = Tk()
FileGUI=StringVar()
timeBetweenPresses=DoubleVar()
timeBetweenPresses.set(.01)
keys=StringVar()
print(path.exists("config.json"))
if path.exists("config.json"):
with open("config.json") as f:
data = json.load(f)
print(data)
print(data["default"])
keys.set(data["default"])
else:
keys.set("55555566667789")
row=0
keysLB=Label(root, text="Key Weights")
timeLB=Label(root, text="Delay After Click")
ButtonMasherLB=Label(root, text="Delay After Click")
root.title("Madhatter's Button Masher")
current = set()
pressKey=False
lastUsed=keys.get()
class controller:
def __init__(self):
self.keyControlq=Queue()
self.stopQueue=Queue()
self.prevLeftClick=False
self.prevRightClick=False
        self.startListeners()
def selectRandomKey(self):
global lastUsed
key=choice(keys.get())
lastUsed=keys.get()
sleep(timeBetweenPresses.get())
keybd_event(VK_CODE[key],0,0,0)
sleep(0.01)
keybd_event(VK_CODE[key],0 ,KEYEVENTF_KEYUP ,0)
    def startListeners(self):
self.hotkeys=Thread(target=self.hotkeyListener)
self.hotkeys.start()
self.click=Thread(target=self.clickListener)
self.click.start()
def clickListener(self):
while self.stopQueue.empty():
sleep(0.01)
if not self.keyControlq.empty():
leftClickState=GetAsyncKeyState(VK_CODE["leftClick"])
rightClickState=GetAsyncKeyState(VK_CODE["rightClick"])
if self.prevRightClick and not rightClickState:
print("Right Click Released")
self.selectRandomKey()
if self.prevLeftClick and not leftClickState:
print("Right Click Released")
self.selectRandomKey()
self.prevLeftClick=leftClickState
self.prevRightClick = rightClickState
print("click listener stopped")
def hotkeyListener(self):
depressed=False
while self.stopQueue.empty():
sleep(0.01)
shift=GetAsyncKeyState(VK_CODE['shift'])
r=GetAsyncKeyState(VK_CODE['r'])
keyCombo=shift and r
if keyCombo:
if not depressed:
print("hotkey Toggle")
self.toggle()
depressed=True
shift=GetAsyncKeyState(VK_CODE['shift'])
lastUsed=keys.get()
elif depressed:
depressed=False
print("key listener stopped")
def toggle(self):
if self.keyControlq.empty():
self.keyControlq.put("toggleKeyPressing")
timeEntry.config({"background": "Green"})
else:
timeEntry.config({"background": "White"})
with self.keyControlq.mutex:
self.keyControlq.queue.clear()
def close(self):
self.stopQueue.put("stop")
VK_CODE = {'leftClick':0x01,
'rightClick':0x02,
'backspace':0x08,
'shift':0x10,
'0':0x30,
'1':0x31,
'2':0x32,
'3':0x33,
'4':0x34,
'5':0x35,
'6':0x36,
'7':0x37,
'8':0x38,
'9':0x39,
'a':0x41,
'b':0x42,
'c':0x43,
'd':0x44,
'e':0x45,
'f':0x46,
'g':0x47,
'h':0x48,
'i':0x49,
'j':0x4A,
'k':0x4B,
'l':0x4C,
'm':0x4D,
'n':0x4E,
'o':0x4F,
'p':0x50,
'q':0x51,
'r':0x52,
's':0x53,
't':0x54,
'u':0x55,
'v':0x56,
'w':0x57,
'x':0x58,
'y':0x59,
'z':0x5A,
'+':0xBB,
',':0xBC,
'-':0xBD,
'.':0xBE,
'/':0xBF,
';':0xBA,
'[':0xDB,
'\\':0xDC,
']':0xDD,
"'":0xDE,
           '`':0xC0}
ctr = controller()  # instantiate after VK_CODE is defined so the listener threads can resolve key codes
keysEntry = Entry(root,textvariable=keys)
timeEntry = Entry(root,textvariable=timeBetweenPresses)
keysLB.grid(row=row,column=0)
keysEntry.grid(row=row,column=1)
row+=1
timeLB.grid(row=row,column=0)
timeEntry.grid(row=row,column=1)
row+=1
startStop=Button(root,text="Start/Stop (shift+r)",command=ctr.toggle)
startStop.grid(row=row,column=1)
root.mainloop()
ctr.close()
save_dict={"default":lastUsed}
print(save_dict)
with open("config.json", 'w') as json_file:
json.dump(save_dict, json_file)
|
main.py
|
import logging
from datetime import datetime
from functools import partial
from multiprocessing import Process, Queue
from time import time
from typing import List, Tuple
from asciimatics.effects import Print
from asciimatics.event import Event, KeyboardEvent
from asciimatics.exceptions import ResizeScreenError, StopApplication
from asciimatics.scene import Scene
from asciimatics.screen import Screen
from skunkbooth.data.defaults import LOG_FILE, PIC_DIR
from skunkbooth.utils.asciiGen import Blocks
from skunkbooth.utils.filterManager import filterManager
from skunkbooth.utils.frames import (
FilterFrame, GalleryFrame, ImageSelectionModel, MainFrame, PreviewFrame
)
from skunkbooth.utils.videoManager import videoManager
from skunkbooth.utils.webcam import Webcam
# Initialize logger
logging.basicConfig(
filename=LOG_FILE,
filemode="w",
level=logging.INFO,
format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s'
)
def global_shortcuts(event: Event) -> None:
"""Event handler for global shortcuts"""
ctrlQCode = Screen.ctrl('q')
ctrlWCode = Screen.ctrl('w')
if isinstance(event, KeyboardEvent):
c = event.key_code
# Stop on q, esc, ctrl+q and ctrl+w
if c in (Screen.KEY_ESCAPE, ord('q'), ctrlQCode, ctrlWCode):
raise StopApplication("User pressed quit")
def main() -> None:
"""Main driver function"""
# Video saving
vidBuf = Queue(32767)
vid = Process(target=videoManager, args=[vidBuf])
vid.start()
def toggleFlag(flag: List[int]) -> None:
"""Temp function for toggling video recording from inside screen"""
flag[0] = not flag[0]
# re-initialize VideoIO for new file name
if flag[0]:
VID_FILE = f"{PIC_DIR}/Video-{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.avi"
logging.info(f"Recording new video - {VID_FILE}")
vidBuf.put(VID_FILE)
else:
logging.info("Recording stopped.")
TOP_MARGIN = 4
image_selection = ImageSelectionModel()
record = [True]
toggleRecord = partial(toggleFlag, record)
screen = Screen.open(unicode_aware=True)
logging.info(
"Screen initialized Height:{} Width:{}".format(screen.height-8, screen.width)
)
# last_scene = None
filters = filterManager()
converter = Blocks(screen.height, screen.width, uni=True, fill_background=True)
def CamDimensions(height: int, width: int) -> Tuple[int, int, int]:
"""Calculate dimensions for vertical squeeze screen sizes"""
if width / height >= 4:
height -= 8
var_dim = int(height * 4) # Max width is around twice height in most cases
offset = int(width / 2 - var_dim / 2.5 - width / 5)
return (height, var_dim, offset)
# Add margins of 1/6x,y if no vertical squeeze
height = int(height * 2 / 3)
width = int(width * 2 / 3)
return (height, width, 2)
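    # Worked examples of the logic above (illustrative only, values follow the
    # integer rounding in the code): a wide 120x30 terminal takes the
    # vertical-squeeze branch, so CamDimensions(height=30, width=120) returns
    # (22, 88, 0); a 100x40 terminal keeps the 1/6 margins, so
    # CamDimensions(height=40, width=100) returns (26, 66, 2).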
(webcam_height, webcam_width, offset) = CamDimensions(screen.height, screen.width)
logging.info(
"Webcam Height:{} Webcam Width:{} Offset:{}".format(
webcam_height, webcam_width, offset
)
)
webcam = Webcam(converter, filters, webcam_height, webcam_width)
effects = []
camera_effect = Print(screen, webcam, y=TOP_MARGIN - 1, x=int(
screen.width / 6) + offset, transparent=False)
effects.append(MainFrame(screen, webcam, toggleRecord, camera_effect))
fFrame = FilterFrame(screen, filters)
scenes = [
Scene(effects, -1, name="Main"),
Scene([GalleryFrame(screen, model=image_selection)], -1, name="Gallery"),
Scene([fFrame], -1, name="Filters"),
Scene([PreviewFrame(screen, model=image_selection)], -1, name="Preview")
]
screen.set_scenes(scenes, unhandled_input=global_shortcuts)
b = a = 0
frame = 1/40
while True:
try:
if screen.has_resized():
screen.close()
screen = Screen.open(unicode_aware=True)
effects = []
(webcam_height, webcam_width, offset) = CamDimensions(
screen.height, screen.width
)
webcam.resize(webcam_height, webcam_width)
converter.resize(screen.height, screen.width)
camera_effect = Print(screen, webcam, y=TOP_MARGIN - 1, x=int(
screen.width / 6) + offset)
record = [True]
effects.append(MainFrame(screen, webcam, partial(toggleFlag, record), camera_effect))
fNext = FilterFrame(screen, filters, data=fFrame._data)
fFrame = fNext
scenes = [
Scene(effects, -1, name="Main"),
Scene([GalleryFrame(screen, model=image_selection)], -1, name="Gallery"),
Scene([fFrame], -1, name="Filters"),
Scene([PreviewFrame(screen, model=image_selection)], -1, name="Preview")
]
screen.set_scenes(scenes, unhandled_input=global_shortcuts)
screen.draw_next_frame()
if webcam.image is not None and record[0]:
vidBuf.put(webcam.image)
b = time()
if b - a < frame:
screen.wait_for_input(a - b + frame)
else:
screen.wait_for_input(0)
a = b
except ResizeScreenError:
logging.info("Resizing screen")
# last_scene = e.scene
except (StopApplication, KeyboardInterrupt):
vidBuf.put(None)
logging.info("Stopping application")
screen.close()
if not vidBuf.empty(): # TODO: Make this nicer than a print statement
logging.info("Program stopped, saving remaining video")
print("Saving video...")
vid.join()
quit(0)
if __name__ == "__main__":
main()
|
utils.py
|
"""Utilities shared by tests."""
import collections
import contextlib
import io
import logging
import os
import re
import selectors
import socket
import socketserver
import sys
import tempfile
import threading
import time
import unittest
import weakref
from unittest import mock
from http.server import HTTPServer
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from asyncio import base_events
from asyncio import events
from asyncio import format_helpers
from asyncio import futures
from asyncio import tasks
from asyncio.log import logger
from test import support
def data_file(filename):
if hasattr(support, 'TEST_HOME_DIR'):
fullname = os.path.join(support.TEST_HOME_DIR, filename)
if os.path.isfile(fullname):
return fullname
fullname = os.path.join(os.path.dirname(__file__), filename)
if os.path.isfile(fullname):
return fullname
raise FileNotFoundError(filename)
ONLYCERT = data_file('ssl_cert.pem')
ONLYKEY = data_file('ssl_key.pem')
SIGNED_CERTFILE = data_file('keycert3.pem')
SIGNING_CA = data_file('pycacert.pem')
PEERCERT = {
'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Nov 28 19:09:06 2027 GMT',
'notBefore': 'Jan 19 19:09:06 2018 GMT',
'serialNumber': '82EDBF41C880919C',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
def simple_server_sslcontext():
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(ONLYCERT, ONLYKEY)
server_context.check_hostname = False
server_context.verify_mode = ssl.CERT_NONE
return server_context
def simple_client_sslcontext(*, disable_verify=True):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.check_hostname = False
if disable_verify:
client_context.verify_mode = ssl.CERT_NONE
return client_context
def dummy_ssl_context():
if ssl is None:
return None
else:
return ssl.SSLContext(ssl.PROTOCOL_TLS)
def run_briefly(loop):
async def once():
pass
gen = once()
t = loop.create_task(gen)
# Don't log a warning if the task is not done after run_until_complete().
# It occurs if the loop is stopped or if a task raises a BaseException.
t._log_destroy_pending = False
try:
loop.run_until_complete(t)
finally:
gen.close()
def run_until(loop, pred, timeout=30):
deadline = time.time() + timeout
while not pred():
if timeout is not None:
timeout = deadline - time.time()
if timeout <= 0:
raise futures.TimeoutError()
loop.run_until_complete(tasks.sleep(0.001, loop=loop))
def run_once(loop):
"""Legacy API to run once through the event loop.
This is the recommended pattern for test code. It will poll the
selector once and run all callbacks scheduled in response to I/O
events.
"""
loop.call_soon(loop.stop)
loop.run_forever()
class SilentWSGIRequestHandler(WSGIRequestHandler):
def get_stderr(self):
return io.StringIO()
def log_message(self, format, *args):
pass
class SilentWSGIServer(WSGIServer):
request_timeout = 2
def get_request(self):
request, client_addr = super().get_request()
request.settimeout(self.request_timeout)
return request, client_addr
def handle_error(self, request, client_address):
pass
class SSLWSGIServerMixin:
def finish_request(self, request, client_address):
# The relative location of our test directory (which
# contains the ssl key and certificate files) differs
# between the stdlib and stand-alone asyncio.
# Prefer our own if we can find it.
here = os.path.join(os.path.dirname(__file__), '..', 'tests')
if not os.path.isdir(here):
here = os.path.join(os.path.dirname(os.__file__),
'test', 'test_asyncio')
keyfile = os.path.join(here, 'ssl_key.pem')
certfile = os.path.join(here, 'ssl_cert.pem')
context = ssl.SSLContext()
context.load_cert_chain(certfile, keyfile)
ssock = context.wrap_socket(request, server_side=True)
try:
self.RequestHandlerClass(ssock, client_address, self)
ssock.close()
except OSError:
# maybe socket has been closed by peer
pass
class SSLWSGIServer(SSLWSGIServerMixin, SilentWSGIServer):
pass
def _run_test_server(*, address, use_ssl=False, server_cls, server_ssl_cls):
def app(environ, start_response):
status = '200 OK'
headers = [('Content-type', 'text/plain')]
start_response(status, headers)
return [b'Test message']
# Run the test WSGI server in a separate thread in order not to
# interfere with event handling in the main thread
server_class = server_ssl_cls if use_ssl else server_cls
httpd = server_class(address, SilentWSGIRequestHandler)
httpd.set_app(app)
httpd.address = httpd.server_address
server_thread = threading.Thread(
target=lambda: httpd.serve_forever(poll_interval=0.05))
server_thread.start()
try:
yield httpd
finally:
httpd.shutdown()
httpd.server_close()
server_thread.join()
if hasattr(socket, 'AF_UNIX'):
class UnixHTTPServer(socketserver.UnixStreamServer, HTTPServer):
def server_bind(self):
socketserver.UnixStreamServer.server_bind(self)
self.server_name = '127.0.0.1'
self.server_port = 80
class UnixWSGIServer(UnixHTTPServer, WSGIServer):
request_timeout = 2
def server_bind(self):
UnixHTTPServer.server_bind(self)
self.setup_environ()
def get_request(self):
request, client_addr = super().get_request()
request.settimeout(self.request_timeout)
# Code in the stdlib expects that get_request
# will return a socket and a tuple (host, port).
# However, this isn't true for UNIX sockets,
# as the second return value will be a path;
# hence we return some fake data sufficient
# to get the tests going
return request, ('127.0.0.1', '')
class SilentUnixWSGIServer(UnixWSGIServer):
def handle_error(self, request, client_address):
pass
class UnixSSLWSGIServer(SSLWSGIServerMixin, SilentUnixWSGIServer):
pass
def gen_unix_socket_path():
with tempfile.NamedTemporaryFile() as file:
return file.name
@contextlib.contextmanager
def unix_socket_path():
path = gen_unix_socket_path()
try:
yield path
finally:
try:
os.unlink(path)
except OSError:
pass
@contextlib.contextmanager
def run_test_unix_server(*, use_ssl=False):
with unix_socket_path() as path:
yield from _run_test_server(address=path, use_ssl=use_ssl,
server_cls=SilentUnixWSGIServer,
server_ssl_cls=UnixSSLWSGIServer)
@contextlib.contextmanager
def run_test_server(*, host='127.0.0.1', port=0, use_ssl=False):
yield from _run_test_server(address=(host, port), use_ssl=use_ssl,
server_cls=SilentWSGIServer,
server_ssl_cls=SSLWSGIServer)
def make_test_protocol(base):
dct = {}
for name in dir(base):
if name.startswith('__') and name.endswith('__'):
# skip magic names
continue
dct[name] = MockCallback(return_value=None)
return type('TestProtocol', (base,) + base.__bases__, dct)()
class TestSelector(selectors.BaseSelector):
def __init__(self):
self.keys = {}
def register(self, fileobj, events, data=None):
key = selectors.SelectorKey(fileobj, 0, events, data)
self.keys[fileobj] = key
return key
def unregister(self, fileobj):
return self.keys.pop(fileobj)
def select(self, timeout):
return []
def get_map(self):
return self.keys
class TestLoop(base_events.BaseEventLoop):
"""Loop for unittests.
    It manages its own time directly.
    If something is scheduled to be executed later, then on the next loop
    iteration, after all ready handlers are done, the generator passed to
    __init__ is called.
    The generator should look like this:
def gen():
...
when = yield ...
... = yield time_advance
    The value returned by yield is the absolute time of the next scheduled handler.
    The value passed to yield is the time advance used to move the loop's time forward.
"""
def __init__(self, gen=None):
super().__init__()
if gen is None:
def gen():
yield
self._check_on_close = False
else:
self._check_on_close = True
self._gen = gen()
next(self._gen)
self._time = 0
self._clock_resolution = 1e-9
self._timers = []
self._selector = TestSelector()
self.readers = {}
self.writers = {}
self.reset_counters()
self._transports = weakref.WeakValueDictionary()
def time(self):
return self._time
def advance_time(self, advance):
"""Move test time forward."""
if advance:
self._time += advance
def close(self):
super().close()
if self._check_on_close:
try:
self._gen.send(0)
except StopIteration:
pass
else: # pragma: no cover
raise AssertionError("Time generator is not finished")
def _add_reader(self, fd, callback, *args):
self.readers[fd] = events.Handle(callback, args, self, None)
def _remove_reader(self, fd):
self.remove_reader_count[fd] += 1
if fd in self.readers:
del self.readers[fd]
return True
else:
return False
def assert_reader(self, fd, callback, *args):
if fd not in self.readers:
raise AssertionError(f'fd {fd} is not registered')
handle = self.readers[fd]
if handle._callback != callback:
raise AssertionError(
f'unexpected callback: {handle._callback} != {callback}')
if handle._args != args:
raise AssertionError(
f'unexpected callback args: {handle._args} != {args}')
def assert_no_reader(self, fd):
if fd in self.readers:
raise AssertionError(f'fd {fd} is registered')
def _add_writer(self, fd, callback, *args):
self.writers[fd] = events.Handle(callback, args, self, None)
def _remove_writer(self, fd):
self.remove_writer_count[fd] += 1
if fd in self.writers:
del self.writers[fd]
return True
else:
return False
def assert_writer(self, fd, callback, *args):
assert fd in self.writers, 'fd {} is not registered'.format(fd)
handle = self.writers[fd]
assert handle._callback == callback, '{!r} != {!r}'.format(
handle._callback, callback)
assert handle._args == args, '{!r} != {!r}'.format(
handle._args, args)
def _ensure_fd_no_transport(self, fd):
if not isinstance(fd, int):
try:
fd = int(fd.fileno())
except (AttributeError, TypeError, ValueError):
# This code matches selectors._fileobj_to_fd function.
raise ValueError("Invalid file object: "
"{!r}".format(fd)) from None
try:
transport = self._transports[fd]
except KeyError:
pass
else:
raise RuntimeError(
'File descriptor {!r} is used by transport {!r}'.format(
fd, transport))
def add_reader(self, fd, callback, *args):
"""Add a reader callback."""
self._ensure_fd_no_transport(fd)
return self._add_reader(fd, callback, *args)
def remove_reader(self, fd):
"""Remove a reader callback."""
self._ensure_fd_no_transport(fd)
return self._remove_reader(fd)
def add_writer(self, fd, callback, *args):
"""Add a writer callback.."""
self._ensure_fd_no_transport(fd)
return self._add_writer(fd, callback, *args)
def remove_writer(self, fd):
"""Remove a writer callback."""
self._ensure_fd_no_transport(fd)
return self._remove_writer(fd)
def reset_counters(self):
self.remove_reader_count = collections.defaultdict(int)
self.remove_writer_count = collections.defaultdict(int)
def _run_once(self):
super()._run_once()
for when in self._timers:
advance = self._gen.send(when)
self.advance_time(advance)
self._timers = []
def call_at(self, when, callback, *args, context=None):
self._timers.append(when)
return super().call_at(when, callback, *args, context=context)
def _process_events(self, event_list):
return
def _write_to_self(self):
pass
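# A minimal usage sketch of TestLoop's time generator (illustrative only, not
# part of the original tests). The generator is sent the absolute time of the
# next scheduled handler and yields how far to advance the fake clock:
#
#   def gen():
#       when = yield        # receives the deadline of the scheduled callback
#       yield 0.1           # advance the clock by 0.1s so the callback fires
#
#   loop = TestLoop(gen)
#   loop.call_later(0.1, loop.stop)
#   loop.run_forever()
#   loop.close()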
def MockCallback(**kwargs):
return mock.Mock(spec=['__call__'], **kwargs)
class MockPattern(str):
"""A regex based str with a fuzzy __eq__.
Use this helper with 'mock.assert_called_with', or anywhere
where a regex comparison between strings is needed.
For instance:
mock_call.assert_called_with(MockPattern('spam.*ham'))
"""
def __eq__(self, other):
return bool(re.search(str(self), other, re.S))
class MockInstanceOf:
def __init__(self, type):
self._type = type
def __eq__(self, other):
return isinstance(other, self._type)
def get_function_source(func):
source = format_helpers._get_function_source(func)
if source is None:
raise ValueError("unable to get the source of %r" % (func,))
return source
class TestCase(unittest.TestCase):
@staticmethod
def close_loop(loop):
executor = loop._default_executor
if executor is not None:
executor.shutdown(wait=True)
loop.close()
def set_event_loop(self, loop, *, cleanup=True):
assert loop is not None
# ensure that the event loop is passed explicitly in asyncio
events.set_event_loop(None)
if cleanup:
self.addCleanup(self.close_loop, loop)
def new_test_loop(self, gen=None):
loop = TestLoop(gen)
self.set_event_loop(loop)
return loop
def unpatch_get_running_loop(self):
events._get_running_loop = self._get_running_loop
def setUp(self):
self._get_running_loop = events._get_running_loop
events._get_running_loop = lambda: None
self._thread_cleanup = support.threading_setup()
def tearDown(self):
self.unpatch_get_running_loop()
events.set_event_loop(None)
# Detect CPython bug #23353: ensure that yield/yield-from is not used
# in an except block of a generator
self.assertEqual(sys.exc_info(), (None, None, None))
self.doCleanups()
support.threading_cleanup(*self._thread_cleanup)
support.reap_children()
@contextlib.contextmanager
def disable_logger():
"""Context manager to disable asyncio logger.
For example, it can be used to ignore warnings in debug mode.
"""
old_level = logger.level
try:
logger.setLevel(logging.CRITICAL+1)
yield
finally:
logger.setLevel(old_level)
def mock_nonblocking_socket(proto=socket.IPPROTO_TCP, type=socket.SOCK_STREAM,
family=socket.AF_INET):
"""Create a mock of a non-blocking socket."""
sock = mock.MagicMock(socket.socket)
sock.proto = proto
sock.type = type
sock.family = family
sock.gettimeout.return_value = 0.0
return sock
|
Tamura.py
|
import numpy as np
from util import Util as u
import cv2
from threading import Thread, Lock, Event
from queue import Queue
import time
class TamFeat(object):
q = Queue(maxsize=4)
def __init__(self, img):
t = time.time()
(self.__coarseness, varCrs) = self.__generateCoarseness(img)
print("Coarseness Calc-Time : %f secs\n" % (time.time() - t))
(self.__contrast, self.__kurtosis, varCon) = self.__generateContrastAndKurtosis(img)
self.__img_hor_x = cv2.filter2D(img, -1, np.array([[1,1,1],[0,0,0],[-1,-1,-1]], dtype=np.int16))
self.__img_vert_y = cv2.filter2D(img, -1, np.array([[-1,0,1],[-1,0,1],[-1,0,1]], dtype=np.int16))
self.__delg_img = np.round((np.add(self.__img_hor_x, self.__img_vert_y, dtype=float) * 0.5)).astype(np.int8)
self.__theta_img = np.tanh(np.divide((self.__img_vert_y).astype(float), (self.__img_hor_x).astype(float), dtype=float, out=np.zeros_like((self.__img_vert_y).astype(float)), where=self.__img_hor_x != 0)) + (float(np.pi) / 2.0)
(self.__linelikeness, varLin) = self.__generateLineLikeness(self.__delg_img, self.__theta_img)
(self.__directionality, varDir) = self.__generateDirectionality(self.__delg_img, self.__theta_img)
self.__regularity = self.__generateRegularity(np.sqrt(varCrs), np.sqrt(varDir), np.sqrt(varCon), np.sqrt(varLin))
self.__roughness = self.__generateRoughness(self.__coarseness, self.__contrast)
def __generateCoarseness(self, src_img):
def __tds_opt(tds, mode='s'):
for t in tds:
if (mode == 's'):
t.start()
else:
t.join()
lock = Lock()
sbest = np.zeros(src_img.shape, np.uint32, 'C')
for x in range(0, (src_img.shape)[0], 1):
for y in range(0, (src_img.shape)[1], 1):
emax = np.empty(0, np.dtype([('E', float), ('K', int)]), 'C')
#print((x,y))
for k in range(1, 7, 1):
tds = [Thread(target=self.__nebAvg, name='Cor0', args=(x + np.float_power(2, k-1), y, k, src_img, lock, Event(), 0)), Thread(target=self.__nebAvg, name='Cor1', args=(x - np.float_power(2, k-1), y, k, src_img, lock, Event(), 1)), Thread(target=self.__nebAvg, name='Cor2', args=(x, y + np.float_power(2, k-1), k, src_img, lock, Event(), 2)), Thread(target=self.__nebAvg, name='Cor3', args=(x, y - np.float_power(2, k-1), k, src_img, lock, Event(), 3))]
__tds_opt(tds)
__tds_opt(tds, 'j')
nbavgs = self.__getFromQueue()
emax = np.insert(emax, emax.size, (np.abs(nbavgs[0] - nbavgs[1]), k-1), 0)
emax = np.insert(emax, emax.size, (np.abs(nbavgs[2] - nbavgs[3]), k-1), 0)
#emax = np.insert(emax, emax.size, (np.abs(self.__nebAvg(x + np.float_power(2, k-1), y, k, src_img) - self.__nebAvg(x - np.float_power(2, k-1), y, k, src_img)), k-1), 0)
#emax = np.insert(emax, emax.size, (np.abs(self.__nebAvg(x, y + np.float_power(2, k-1), k, src_img) - self.__nebAvg(x, y - np.float_power(2, k-1), k, src_img)), k-1), 0)
emax.sort(axis=0, kind='mergesort', order='E')
sbest[x, y] = np.float_power(2, (emax[emax.size-1])[1])
varCrs = self.__generateVariance(u.getArrayOfGrayLevelsWithFreq(sbest, lvldtype=np.uint32), np.mean(sbest, axis=None, dtype=float))
return ((float(np.sum(sbest, axis=None, dtype=float) / float(sbest.size))), varCrs)
def __nebAvg(self, x, y, k, src_img, lck, evt, pos):
lck.acquire()
avg = 0.0
const = np.float_power(2, k-1)
xh = int(np.round(x + const - 1))
xl = int(np.round(x - const))
yh = int(np.round(y + const - 1))
yl = int(np.round(y - const))
(xl, xh, yl, yh) = self.__checkSigns(xl, xh, yl, yh, src_img.shape)
for r in range(xl, xh, 1):
for c in range(yl, yh, 1):
avg = avg + (float(src_img[r, c]) / float(np.float_power(2, 2*k)))
(TamFeat.q).put((avg, pos))
lck.release()
evt.set()
#return avg
def __getFromQueue(self):
nbavgs = [0.0, 0.0, 0.0, 0.0]
while ((TamFeat.q).empty() == False):
item = (TamFeat.q).get()
nbavgs[ item[1] ] = item[0]
(TamFeat.q).task_done()
(TamFeat.q).join()
return nbavgs
def __checkSigns(self, xl, xh, yl, yh, shape):
if (xl < 0):
xl = 0
if (xl > shape[0]):
xl = shape[0]
if (xh < 0):
xh = 0
if (xh > shape[0]):
xh = shape[0]
if (yl < 0):
yl = 0
if (yl > shape[1]):
yl = shape[1]
if (yh < 0):
yh = 0
if (yh > shape[1]):
yh = shape[1]
return (xl, xh, yl, yh)
def __generateContrastAndKurtosis(self, src_img):
glvlwthfreq = u.getArrayOfGrayLevelsWithFreq(src_img)
m = np.mean(src_img, axis=None, dtype=float)
variance = self.__generateVariance(glvlwthfreq, m)
kurtosis = 0.0
for tup in glvlwthfreq:
kurtosis = kurtosis + (np.float_power((float(tup[0]) - m), 4) * (float(tup[1]) / float(src_img.size)))
kurtosis = kurtosis / np.float_power(variance, 2)
contrast = float(np.sqrt(variance)) / np.float_power(kurtosis, 0.25)
return (contrast, kurtosis, variance)
def __generateVariance(self, matlvls, m):
gls = np.ascontiguousarray(matlvls['glvl'], dtype=float)
frq = np.ascontiguousarray(matlvls['freq'], dtype=float)
totpix = frq.sum(axis=None, dtype=float)
variance = 0.0
for g in range(0, matlvls.size, 1):
variance = variance + (np.float_power((gls[g] - m), 2) * (frq[g] / totpix))
return variance
def __generateLineLikeness(self, delg_img, theta_img, d=4, t=12):
dirlevels = u.getArrayOfGrayLevelsWithFreq(theta_img, lvldtype=float)
ditfctcm = np.zeros((dirlevels.size, dirlevels.size), dtype=np.uint32, order='C')
for i in range(0, (theta_img.shape)[0], 1):
for j in range(0, (theta_img.shape)[1], 1):
if (np.fabs(delg_img[i,j]) > t):
x = int(np.round(np.fabs(d * np.cos(theta_img[i, j]))))
y = int(np.round(np.fabs(d * np.sin(theta_img[i, j]))))
if ((x < 0) | (x >= (theta_img.shape)[0]) | (y < 0) | (y >= (theta_img.shape)[1])):
continue
else:
if ((theta_img[x, y] > (theta_img[i, j] - 1)) & (theta_img[x, y] < (theta_img[i, j] + 1))):
idx1, idx2 = u.search(dirlevels, theta_img[i, j], 0, dirlevels.size-1), u.search(dirlevels, theta_img[x, y], 0, dirlevels.size-1)
ditfctcm[idx1, idx2] = ditfctcm[idx1, idx2] + 1
else:
continue
varLin = self.__generateVariance(u.getArrayOfGrayLevelsWithFreq(ditfctcm, lvldtype=np.uint32), np.mean(ditfctcm, axis=None, dtype=float))
return (self.__lineLikenessSubPart(ditfctcm, dirlevels), varLin)
def __lineLikenessSubPart(self, ditfctcm, dirlevels):
dir = 0.0
for i in range(0, (ditfctcm.shape)[0], 1):
for j in range(0, (ditfctcm.shape)[0], 1):
dir = dir + float(ditfctcm[i, j]) * np.cos((((dirlevels[i])[0] - (dirlevels[j])[0]) * 2.0 * np.pi) / dirlevels.size)
dir = dir / ditfctcm.sum(axis=None, dtype=float)
return dir
def __generateDirectionality(self, delg_img, theta_img, t=12):
temp = np.zeros_like(theta_img)
for i in range(0, (delg_img.shape)[0], 1):
for j in range(0, (delg_img.shape)[1], 1):
if (delg_img[i, j] > t):
temp[i, j] = theta_img[i, j]
varDir = self.__generateVariance(u.getArrayOfGrayLevelsWithFreq(temp, lvldtype=float), np.mean(temp, axis=None, dtype=float))
return ((1 / np.sqrt(varDir)), varDir)
def __generateRegularity(self, sdCrs, sdDir, sdCon, sdLin, r=0.4):
return (1 - (r * (sdCrs + sdDir + sdCon + sdLin)))
def __generateRoughness(self, coarseness, contrast):
return (contrast + coarseness)
def getCoarseness(self):
return self.__coarseness
def getContrast(self):
return self.__contrast
def getKurtosis(self):
return self.__kurtosis
def getPrewittHorizontalEdgeImg(self):
return self.__img_hor_x
def getPrewittVerticalEdgeImg(self):
return self.__img_vert_y
def getCombinedPrewittImg(self):
return (self.__delg_img).astype(np.uint8)
def getPrewittDirFactOfImg(self):
return self.__theta_img
def getLineLikeness(self):
return self.__linelikeness
def getDirectionality(self):
return self.__directionality
def getRegularity(self):
return self.__regularity
def getRoughness(self):
return self.__roughness
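# A minimal usage sketch (the image path is a placeholder; any single-channel
# grayscale image works). Feature extraction is expensive, so a small crop is
# used here.
if __name__ == '__main__':
    sample = cv2.imread('sample.png', cv2.IMREAD_GRAYSCALE)
    if sample is None:
        raise SystemExit('sample.png not found')
    feat = TamFeat(sample[:64, :64])
    print('coarseness     :', feat.getCoarseness())
    print('contrast       :', feat.getContrast())
    print('directionality :', feat.getDirectionality())
    print('roughness      :', feat.getRoughness())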
|
pixrc.py
|
#!/usr/bin/env python
import ConfigParser
import datetime
import errno
import logd
import logging
import logging.config
import optparse
import os
import serial
import simple_stats
import socket
import struct
import sys
import threading
import time
logging.config.fileConfig("/etc/sololink.conf")
logger = logging.getLogger("pixrc")
logger.info("starting (20141114_1132)")
config = ConfigParser.SafeConfigParser()
config.read("/etc/sololink.conf")
rcDsmDev = config.get("solo", "rcDsmDev")
rcDsmBaud = config.getint("solo", "rcDsmBaud")
# Only accept RC packets from one of these
rcSourceIps = config.get("solo", "rcSourceIps").split(",")
# RC packets arrive on this port
rcDestPort = config.getint("solo", "rcDestPort")
logger.info("accept RC packets from %s on port %d",
rcSourceIps.__str__(), rcDestPort)
CHANNEL_BITS = 11
CHANNEL_MASK = 0x7ff
SOCKET_TIMEOUT = 0.4
DSM_INTERVAL = 0.020
dsmBeat = 0
rcBeat = 0
# Debug only; enable/disable sending packets to Pixhawk
DSM_SEND_ENABLE = True
rcChansOut = None
# What we send Pixhawk if we stop receiving RC packets
# PWM values: [ throttle, roll, pitch, yaw ]
rcFailsafeChans = [ 0, 1500, 1500, 1500 ]
logger.info("RC failsafe packet is %s", rcFailsafeChans.__str__())
# RC packets are received as a timestamp, sequence number, then channel data.
# * timestamp is microseconds since some epoch (64 bits)
# * sequence simply increments each packet (16 bits) and can be used to detect
# missed or out of order packets.
# * channel data is a PWM value per channel (16 bits)
#
# All data is received little-endian. A typical packet is as follows (8
# channels, all 1500):
#
# timestamp seq ch0 ch1 ch2 ch3 ch4 ch5 ch6 ch7
# |----- 3966001072 ----| 3965 1500 1500 1500 1500 1500 1500 1500 1500
# b0 5f 64 ec 00 00 00 00 7d 0f dc 05 dc 05 dc 05 dc 05 dc 05 dc 05 dc 05 dc 05
# DSM is sent in 16-byte chunks, each containing 8 2-byte words:
#
# Word Data
# 0 0x00AB (magic)
# 1..7 (chNum << 11) | (chData & 0x7ff)
#
# For more than 7 channels, another packet is sent, starting with the magic.
# 16 bytes are always sent; unused channel slots are filled with 0xffff.
#
# Each word is sent big-endian. A typical byte stream might be as follows
# (8 channels, all =750):
#
# magic --0-- --1-- --2-- --3-- --4-- --5-- --6--
# 00 ab 02 ee 0a ee 12 ee 1a ee 22 ee 2a ee 32 ee
#
# magic --7-- --x-- --x-- --x-- --x-- --x-- --x--
# 00 ab 3a ee ff ff ff ff ff ff ff ff ff ff ff ff
#
# Note that the arriving UDP packet is PWM values in microseconds.
# Those are scaled and offset to 11-bit channel values (see dsmPack below).
def rcUnpack(packedData):
"""Unpack RC packet.
Returns a tuple (timestamp, sequence, channels[]).
"""
# Length of received packet determines how many channels there
# are: packet is 8 + 2 + (2 * numChans) bytes
# Sanity-check received packet
dataLen = len(packedData)
if dataLen < 10 or (dataLen & 1) != 0:
logger.warn("rcUnpack: malformed packet received (length = %d)", dataLen)
return None
# require 4..14 channels
numChans = (dataLen - 10) / 2
if numChans < 4 or numChans > 14:
logger.warn("rcUnpack: malformed packet received (%d channels)", numChans)
return None
timestamp, sequence = struct.unpack("<QH", packedData[:10])
channels = [ ]
for i in range(10, dataLen, 2):
channels.extend(struct.unpack("<H", packedData[i:i+2]))
return timestamp, sequence, channels
def dsmPack(channels):
"""Pack channels into DSM packet."""
if channels is None:
return None
dsmPacket = ""
channelsLeft = len(channels)
channelNum = 0
while channelsLeft > 0:
dsmPacket += struct.pack(">H", 171)
# pack 7 channels before needing another magic
for c in range(0, 7):
if channelsLeft > 0:
# channel value is 1000...2000
# needs to be 174...1874
value = channels[channelNum] # 1000...2000
value = value * 1700 / 1000 - 1526
if(value < 0):
value = 0
if(value > 2000):
                    value = 2000
chan = (channelNum << CHANNEL_BITS) | (value & CHANNEL_MASK)
dsmPacket += struct.pack(">H", chan)
channelsLeft -= 1
channelNum += 1
else:
dsmPacket += struct.pack(">H", 65535)
return dsmPacket
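# Worked example (for illustration): with channels = [1500, 1500], each PWM
# value maps to 1500 * 1700 / 1000 - 1526 = 1024, so dsmPack produces the
# words 0x00AB (magic), 0x0400 (ch0: 0<<11 | 1024) and 0x0C00 (ch1: 1<<11 | 1024)
# followed by five 0xFFFF filler words, i.e. the byte stream
#   00 ab 04 00 0c 00 ff ff ff ff ff ff ff ff ff ff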
def dsmSend(devName, baudRate):
global dsmBeat
logger.info("dsmSend running")
if opts.sim:
logger.info("not sending to pixhawk")
else:
logger.info("opening %s at %d", devName, baudRate)
try:
serialPort = serial.Serial(devName, baudRate)
except serial.SerialException as excOpen:
logger.error(excOpen.__str__())
return
pixOutLogger = None
#pixOutLogger = logd.PixOutLogger()
while True:
dsmBeat += 1
rcDsmLock.acquire()
dsmBytes = dsmPack(rcChansOut)
rcDsmLock.release()
if dsmBytes is None:
logger.debug("dsmSend: None")
else:
logger.debug("dsmSend: %s",
[hex(ord(c)) for c in dsmBytes].__str__())
if dsmBytes is not None and not opts.sim:
if pixOutLogger:
pixOutLogger.log_packet(dsmBytes)
serialPort.write(dsmBytes)
time.sleep(DSM_INTERVAL)
def rcReceive(udpPortNum):
global rcBeat
global rcChansOut
logger.info("rcReceive running")
stats = simple_stats.SimpleStats()
pixInLogger = None
#pixInLogger = logd.PixInLogger()
# Open socket
logger.info("rcReceive: listening on port %d", udpPortNum)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("", udpPortNum))
# We require the timestamp on the RC packet to be greater than the
# previous one, or the packet is dropped. However, if we see enough "old"
# packets in a row, we assume the timestamp has restarted, and start
# accepting packets again.
rcTimePrev = None
oldPktCnt = 0
oldPktRestart = 5 # 100 msec at 20 msec / packet
nextWarnTime = None
warnInterval = datetime.timedelta(seconds = 5)
# The sequence number is used to detect and count dropped packets. We
# increment dropCnt for "small" gaps in the sequence (incrementing it by
# the number of packets dropped), and increment discCnt for "large" gaps
# (discontinuities). goodCnt is incremented for packets receive in sequence.
rcSeqPrev = None
dropCnt = 0
goodCnt = 0
discCnt = 0
discGap = 5
# Packets received less than this much time after the previous one are
# discarded, under the assumption that it is part of a burst. The last
# packet in a burst is discarded (even if it is not old), and then the
# next one is the first used.
pktIntervalMin = datetime.timedelta(milliseconds=2)
# Log packet statistics periodically
logInterval = datetime.timedelta(seconds = 10)
logTime = datetime.datetime.now() + logInterval
rcBeatTime = None
rcBeatIntervalMax = datetime.timedelta(seconds = 0.040)
nowDsm = None
sock.settimeout(SOCKET_TIMEOUT)
while True:
rcBeat += 1
# The following rcBeatInterval stuff was added because we were getting
# "errors" when checking for rcBeat to increment at least every 1.5
# times the socket timeout. The error check was loosened up and the
# interval measurement added to see just how bad it is.
if rcBeatTime is not None:
rcBeatInterval = datetime.datetime.now() - rcBeatTime
if rcBeatIntervalMax < rcBeatInterval:
rcBeatIntervalMax = rcBeatInterval
logger.info("rcBeatIntervalMax is now %f seconds",
rcBeatIntervalMax.total_seconds())
rcBeatTime = datetime.datetime.now()
try:
rcBytes, addr = sock.recvfrom(256)
except socket.timeout:
now = datetime.datetime.now()
if nextWarnTime is None or now >= nextWarnTime:
logger.warn("socket timeout: sending failsafe packet")
nextWarnTime = now + warnInterval
rcTime = 0
rcSeq = 0
rcChans = rcFailsafeChans
rcTimePrev = rcTime
else:
nowDsmLast = nowDsm
nowDsm = datetime.datetime.now()
now = nowDsm
if pixInLogger:
pixInLogger.log_packet(rcBytes)
if nowDsmLast is not None:
delta_us = (nowDsm - nowDsmLast).total_seconds() * 1000000
stats.update(delta_us)
if nextWarnTime is not None:
logger.info("received packet after timeout")
nextWarnTime = None
# only accept RC packets from Artoo
if not addr[0] in rcSourceIps:
logger.warn("packet from %s ignored", addr[0])
continue
rcTime, rcSeq, rcChans = rcUnpack(rcBytes)
# Check sequence - just require that the timestamp is increasing.
# But... if we see enough "old" packets in a row (oldPktRestart),
# we assume the timestamp has started over and start accepting
# packets again.
if rcTimePrev is not None and \
rcTime <= rcTimePrev and \
oldPktCnt < oldPktRestart:
logger.warn("old packet ignored (%s <= %s)",
rcTime.__str__(), rcTimePrev.__str__())
oldPktCnt += 1
continue
rcTimePrev = rcTime
oldPktCnt = 0
# The packet is later than the previous one; look for missed
# packets (diagnostic).
# 64K packets wraps after about 21m 50s; test with wrap at 256 (5s)
rcSeqMax = 65536
#rcSeqMax = 256
#rcSeq = rcSeq & (rcSeqMax - 1)
if rcSeqPrev is None:
rcSeqPrev = rcSeq
else:
# mod-64K subtract
gap = rcSeq - rcSeqPrev
if gap < 0:
gap += rcSeqMax
if gap == 1:
goodCnt += 1
else:
if gap <= discGap:
dropCnt += (gap - 1)
else:
discCnt += 1
logger.info("gap=%d good=%d drop=%d disc=%d",
gap, goodCnt, dropCnt, discCnt)
rcSeqPrev = rcSeq
logger.debug("%s %s %s",
rcTime.__str__(), rcSeq.__str__(), rcChans.__str__())
if now > logTime:
logger.info("good=%d drop=%d disc=%d", goodCnt, dropCnt, discCnt)
count = stats.count()
if count > 0:
logger.info("n=%d avg=%0.0f min=%0.0f max=%0.0f stdev=%0.1f",
count, stats.average(),
stats.min(), stats.max(), stats.stdev())
stats.reset()
logTime += logInterval
# Make new RC data available to dsmSend thread. rcChans was either set
# to the new RC data if we got it, or to the failsafe packet if not.
rcDsmLock.acquire()
rcChansOut = rcChans
rcDsmLock.release()
parser = optparse.OptionParser("pixrc.py [options]")
parser.add_option("--sim", action="store_true", default=False,
help="do not send to Pixhawk")
(opts, args) = parser.parse_args()
os.nice(-20)
# Don't let the RC receive thread update the RC data while the DSM send thread
# is using it
rcDsmLock = threading.Lock()
if DSM_SEND_ENABLE:
sender = threading.Thread(name = "dsmSend", target = dsmSend, args = (rcDsmDev, rcDsmBaud))
sender.daemon = True
sender.start()
receiver = threading.Thread(name = "rcReceive", target = rcReceive, args = (rcDestPort, ))
receiver.daemon = True
receiver.start()
# When this module exits, the threads will be killed.
# This loop watches to see that both threads are still running, and if either
# stops, it exits, causing init to restart this module.
pollSleep = max(DSM_INTERVAL, SOCKET_TIMEOUT) * 5
# * 1.5 resulted in occasional errors; added rcBeatInterval logging
while True:
oldDsmBeat = dsmBeat
oldRcBeat = rcBeat
time.sleep(pollSleep)
if DSM_SEND_ENABLE:
if dsmBeat == oldDsmBeat:
logger.error("dsmSend thread appears to be dead; exiting")
logger.info("dsmBeat=%d pollSleep=%d", dsmBeat, pollSleep)
sys.exit(1)
if rcBeat == oldRcBeat:
logger.error("rcReceive thread appears to be dead; exiting")
logger.info("rcBeat=%d pollSleep=%d", rcBeat, pollSleep)
sys.exit(1)
|
utils.py
|
import gc
import json
import string
import orjson
import torch
import pickle
import shutil
import time
from tqdm import tqdm
import multiprocessing
from pathlib import Path
from termcolor import colored
from functools import lru_cache
from nltk.stem.snowball import SnowballStemmer
PUNCS = set(string.punctuation) - {'-'}
STEMMER = SnowballStemmer('porter', ignore_stopwords=False)
@lru_cache(maxsize=100000)
def stem_word(w):
return STEMMER.stem(w)
def stem_cand(c):
return ' '.join([stem_word(w) for w in c.split()]).lower()
def get_device(gpu):
return torch.device('cpu' if gpu is None else f'cuda:{gpu}')
def mean(nums):
return sum(nums) / len(nums)
def get_batches(input_list, batch_size):
return [input_list[i: i + batch_size] for i in range(0, len(input_list), batch_size)]
def get_possible_spans(word_idxs, num_wordpieces, max_word_gram, max_subword_gram):
possible_spans = []
num_words = len(word_idxs)
max_gram = min(max_word_gram, num_words)
for len_span in range(max_gram, 1, -1):
for i in range(num_words - len_span + 1):
l_idx = word_idxs[i]
r_idx = word_idxs[i + len_span] - 1 if i + len_span < num_words else num_wordpieces - 1
if r_idx - l_idx + 1 <= max_subword_gram:
possible_spans.append((l_idx, r_idx))
return possible_spans
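# Illustrative sketch (not part of the original module): one way get_possible_spans might
# be called, assuming word_idxs holds the wordpiece index at which each word starts. The
# numbers are hypothetical: a 4-word sentence whose last word splits into two wordpieces.
def _possible_spans_example():
    word_idxs = [0, 1, 2, 3]   # word -> index of its first wordpiece
    num_wordpieces = 5         # the last word occupies wordpieces 3 and 4
    # Enumerates multi-word spans (longest first) whose subword width stays within bounds.
    return get_possible_spans(word_idxs, num_wordpieces, max_word_gram=3, max_subword_gram=4)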
class KMP:
@staticmethod
def kmp(main_str, pattern):
"""
KMP string matching: collect the begin indices at which ``pattern`` matches in ``main_str``.
return: List[int] a list of begin indices (empty if there is no match)
"""
results = []
if not main_str or not pattern:
return results
nex = KMP.get_next(pattern)
i = 0 # the pointer of main_str
j = 0 # the pointer of pattern
while i < len(main_str):
while i < len(main_str) and j < len(pattern):
if j == -1 or main_str[i] == pattern[j]:
i += 1
j += 1
else:
j = nex[j]
if j == len(pattern): # matched
results.append(i - j)
i += 1
j = 0
else:
break
return results
@staticmethod
def get_next(pattern):
"""
"""
nex = [0] * len(pattern)
nex[0] = -1
i = 0
j = -1
while i < len(pattern) - 1:  # len(pattern)-1 avoids going out of bounds, because -1 was inserted at the front of nex
if j == -1 or pattern[i] == pattern[j]:
i += 1
j += 1
nex[i] = j  # this is the key difference from plain matching: record next[i]
else:
j = nex[j]
return nex
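# Illustrative sketch (not part of the original module): KMP.kmp compares elements with ==,
# so it works both on plain strings and on token lists. The inputs below are hypothetical.
def _kmp_usage_example():
    assert KMP.kmp('abxabyab', 'ab') == [0, 3, 6]
    assert KMP.kmp(['deep', 'learning', 'is', 'deep', 'learning'],
                   ['deep', 'learning']) == [0, 3]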
class Log:
@staticmethod
def info(message):
print(colored(message, 'green'))
class String:
@staticmethod
def removeprefix(s: str, prefix: str) -> str:
return s[len(prefix):] if s.startswith(prefix) else s[:]
@staticmethod
def removesuffix(s: str, suffix: str) -> str:
return s[:-len(suffix)] if suffix and s.endswith(suffix) else s[:]
class IO:
@staticmethod
def is_valid_file(filepath):
filepath = Path(filepath)
return filepath.exists() and filepath.stat().st_size > 0
@staticmethod
def load(path):
raise NotImplementedError
@staticmethod
def dump(data, path):
raise NotImplementedError
# class Json(IO):
# @staticmethod
# def load(path):
# with open(path) as rf:
# data = json.load(rf)
# return data
# @staticmethod
# def loads(jsonline):
# return json.loads(jsonline)
# @staticmethod
# def dump(data, path):
# with open(path, 'w') as wf:
# json.dump(data, wf, indent=4, ensure_ascii=False)
# @staticmethod
# def dumps(data):
# return json.dumps(data, ensure_ascii=False)
class OrJson(IO):
@staticmethod
def load(path):
with open(path) as rf:
data = orjson.loads(rf.read())
return data
@staticmethod
def loads(jsonline):
return orjson.loads(jsonline)
@staticmethod
def dump(data, path):
with open(path, 'w') as wf:
wf.write(orjson.dumps(data, option=orjson.OPT_INDENT_2 | orjson.OPT_NON_STR_KEYS).decode())
@staticmethod
def dumps(data):
return orjson.dumps(data, option=orjson.OPT_NON_STR_KEYS).decode()
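# Route the legacy Json name to the orjson-backed implementation above; the json-based
# version is kept only as the commented-out reference.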
Json = OrJson
class JsonLine(IO):
@staticmethod
def load(path, use_tqdm=False):
with open(path) as rf:
lines = rf.read().splitlines()
if use_tqdm:
lines = tqdm(lines, ncols=100, desc='Load JsonLine')
return [json.loads(l) for l in lines]
@staticmethod
def dump(instances, path):
assert type(instances) == list
lines = [json.dumps(d, ensure_ascii=False) for d in instances]
with open(path, 'w') as wf:
wf.write('\n'.join(lines))
class OrJsonLine(IO):
@staticmethod
def load(path):
with open(path) as rf:
lines = rf.read().splitlines()
return [orjson.loads(l) for l in lines]
@staticmethod
def dump(instances, path):
assert type(instances) == list
lines = [orjson.dumps(d, option=orjson.OPT_NON_STR_KEYS).decode() for d in instances]
with open(path, 'w') as wf:
wf.write('\n'.join(lines))
class TextFile(IO):
@staticmethod
def load(path):
with open(path) as rf:
text = rf.read()
return text
@staticmethod
def readlines(path, skip_empty_line=False):
with open(path) as rf:
lines = rf.read().splitlines()
if skip_empty_line:
return [l for l in lines if l]
return lines
@staticmethod
def dump(text, path):
with open(path, 'w') as wf:
wf.write(text)
@staticmethod
def dumplist(target_list, path):
with open(path, 'w') as wf:
wf.write('\n'.join([str(o) for o in target_list]) + '\n')
class Pickle:
@staticmethod
def load(path):
with open(path, 'rb') as rf:
gc.disable()
data = pickle.load(rf)
gc.enable()
return data
@staticmethod
def dump(data, path):
with open(path, 'wb') as wf:
gc.disable()
pickle.dump(data, wf, protocol=4)
gc.enable()
@staticmethod
def batch_dump(instances, dirpath, num_files=10):
assert type(instances) == list
dirpath = Path(dirpath)
if dirpath.exists():
shutil.rmtree(dirpath)
dirpath.mkdir(exist_ok=True)
num_instances = len(instances)
batch_size = num_instances // num_files
threads = []
print('start batch dumping...', end='')
time1 = time.perf_counter()
for i in range(0, num_instances, batch_size):
filepath = dirpath / str(len(threads))
thread = multiprocessing.Process(target=Pickle.dump, args=(instances[i: i + batch_size], filepath))
threads.append(thread)
for t in threads:
t.start()
for t in threads:
t.join()
time2 = time.perf_counter()
print(f'OK in {time2-time1:.1f} secs')
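# Illustrative sketch (not part of the original module): batch_dump writes numbered chunk
# files into dirpath, so reloading them in numeric order reassembles the original list.
# The directory name is hypothetical and len(instances) is assumed to be >= num_files.
def _pickle_batch_dump_example(instances, dirpath='/tmp/pickle_batches'):
    Pickle.batch_dump(instances, dirpath, num_files=4)
    chunk_files = sorted(Path(dirpath).iterdir(), key=lambda p: int(p.name))
    return [obj for f in chunk_files for obj in Pickle.load(f)]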
class Process:
@staticmethod
def par(func, iterables, num_processes, desc=''):
pool = multiprocessing.Pool(processes=num_processes)
results = []
for r in tqdm(pool.imap(func=func, iterable=iterables), total=len(iterables), ncols=100, desc=desc):
results.append(r)
pool.close()
pool.join()
return results
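# Illustrative sketch (not part of the original module): Process.par fans work out over a
# multiprocessing pool with a tqdm progress bar. The callable must be picklable, so a
# module-level function such as stem_cand (defined above) works where a lambda would not.
def _process_par_example():
    phrases = ['Deep Learning', 'Graph Neural Networks', 'Question Answering']
    return Process.par(func=stem_cand, iterables=phrases, num_processes=2, desc='stem')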
if __name__ == '__main__':
print(OrJson.dumps({1: 2, 3: 'sheaf'}))
|
coap.py
|
import logging.config
import os
import random
import socket
import threading
import time
from coapthon import defines
from coapthon.layers.blocklayer import BlockLayer
from coapthon.layers.messagelayer import MessageLayer
from coapthon.layers.observelayer import ObserveLayer
from coapthon.layers.requestlayer import RequestLayer
from coapthon.messages.message import Message
from coapthon.messages.request import Request
from coapthon.messages.response import Response
from coapthon.serializer import Serializer
import collections
__author__ = 'Giacomo Tanganelli'
logger = logging.getLogger(__name__)
class CoAP(object):
"""
Client class to perform requests to remote servers.
"""
def __init__(self, server, starting_mid, callback, sock=None, cb_ignore_read_exception=None, cb_ignore_write_exception=None):
"""
Initialize the client.
:param server: Server address for incoming connections
:param callback: the callback function to be invoked when a response is received
:param starting_mid: used for testing purposes
:param sock: if a socket has been created externally, it can be used directly
:param cb_ignore_read_exception: Callback function to handle exception raised during the socket read operation
:param cb_ignore_write_exception: Callback function to handle exception raised during the socket write operation
"""
self._currentMID = starting_mid
self._server = server
self._callback = callback
self._cb_ignore_read_exception = cb_ignore_read_exception
self._cb_ignore_write_exception = cb_ignore_write_exception
self.stopped = threading.Event()
self.to_be_stopped = []
self._messageLayer = MessageLayer(self._currentMID)
self._blockLayer = BlockLayer()
self._observeLayer = ObserveLayer()
self._requestLayer = RequestLayer(self)
addrinfo = socket.getaddrinfo(self._server[0], None)[0]
if sock is not None:
self._socket = sock
elif addrinfo[0] == socket.AF_INET:
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
else:
self._socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._receiver_thread = None
def close(self):
"""
Stop the client.
"""
self.stopped.set()
for event in self.to_be_stopped:
event.set()
if self._receiver_thread is not None:
self._receiver_thread.join()
self._socket.close()
@property
def current_mid(self):
"""
Return the current MID.
:return: the current mid
"""
return self._currentMID
@current_mid.setter
def current_mid(self, c):
"""
Set the current MID.
:param c: the mid to set
"""
assert isinstance(c, int)
self._currentMID = c
def send_message(self, message):
"""
Prepare a message to send on the UDP socket. Eventually set retransmissions.
:param message: the message to send
"""
if isinstance(message, Request):
request = self._requestLayer.send_request(message)
request = self._observeLayer.send_request(request)
request = self._blockLayer.send_request(request)
transaction = self._messageLayer.send_request(request)
self.send_datagram(transaction.request)
if transaction.request.type == defines.Types["CON"]:
self._start_retransmission(transaction, transaction.request)
elif isinstance(message, Message):
message = self._observeLayer.send_empty(message)
message = self._messageLayer.send_empty(None, None, message)
self.send_datagram(message)
@staticmethod
def _wait_for_retransmit_thread(transaction):
"""
Only one retransmit thread at a time, wait for other to finish
"""
if hasattr(transaction, 'retransmit_thread'):
while transaction.retransmit_thread is not None:
logger.debug("Waiting for retransmit thread to finish ...")
time.sleep(0.01)
continue
def _send_block_request(self, transaction):
"""
A former request resulted in a block wise transfer. With this method, the block wise transfer
will be continued, including triggering of the retry mechanism.
:param transaction: The former transaction including the request which should be continued.
"""
transaction = self._messageLayer.send_request(transaction.request)
# ... but don't forget to reset the acknowledge flag
transaction.request.acknowledged = False
self.send_datagram(transaction.request)
if transaction.request.type == defines.Types["CON"]:
self._start_retransmission(transaction, transaction.request)
def send_datagram(self, message):
"""
Send a message over the UDP socket.
:param message: the message to send
"""
host, port = message.destination
logger.debug("send_datagram - " + str(message))
serializer = Serializer()
raw_message = serializer.serialize(message)
try:
self._socket.sendto(raw_message, (host, port))
except Exception as e:
if self._cb_ignore_write_exception is not None and isinstance(self._cb_ignore_write_exception, collections.Callable):
if not self._cb_ignore_write_exception(e, self):
raise
# if you're explicitly setting that you don't want a response, don't wait for it
# https://tools.ietf.org/html/rfc7967#section-2.1
for opt in message.options:
if opt.number == defines.OptionRegistry.NO_RESPONSE.number:
if opt.value == 26:
return
if self._receiver_thread is None or not self._receiver_thread.is_alive():
self._receiver_thread = threading.Thread(target=self.receive_datagram)
self._receiver_thread.daemon = True
self._receiver_thread.start()
def _start_retransmission(self, transaction, message):
"""
Start the retransmission task.
:type transaction: Transaction
:param transaction: the transaction that owns the message that needs retransmission
:type message: Message
:param message: the message that needs the retransmission task
"""
with transaction:
if message.type == defines.Types['CON']:
future_time = random.uniform(defines.ACK_TIMEOUT, (defines.ACK_TIMEOUT * defines.ACK_RANDOM_FACTOR))
transaction.retransmit_stop = threading.Event()
self.to_be_stopped.append(transaction.retransmit_stop)
transaction.retransmit_thread = threading.Thread(target=self._retransmit,
name=str('%s-Retry-%d' % (threading.current_thread().name, message.mid)),
args=(transaction, message, future_time, 0))
transaction.retransmit_thread.start()
def _retransmit(self, transaction, message, future_time, retransmit_count):
"""
Thread function to retransmit the message in the future
:param transaction: the transaction that owns the message that needs retransmission
:param message: the message that needs the retransmission task
:param future_time: the amount of time to wait before a new attempt
:param retransmit_count: the number of retransmissions
"""
with transaction:
logger.debug("retransmit loop ... enter")
while retransmit_count <= defines.MAX_RETRANSMIT \
and (not message.acknowledged and not message.rejected) \
and not transaction.retransmit_stop.is_set():
transaction.retransmit_stop.wait(timeout=future_time)
if not message.acknowledged and not message.rejected and not transaction.retransmit_stop.is_set():
retransmit_count += 1
future_time *= 2
if retransmit_count < defines.MAX_RETRANSMIT:
logger.debug("retransmit loop ... retransmit Request")
self.send_datagram(message)
if message.acknowledged or message.rejected:
message.timeouted = False
else:
logger.warning("Give up on message {message}".format(message=message.line_print))
message.timeouted = True
# Inform the user, that nothing was received
self._callback(None)
try:
self.to_be_stopped.remove(transaction.retransmit_stop)
except ValueError:
pass
transaction.retransmit_stop = None
transaction.retransmit_thread = None
logger.debug("retransmit loop ... exit")
def receive_datagram(self):
"""
Receive datagram from the UDP socket and invoke the callback function.
"""
logger.debug("Start receiver Thread")
while not self.stopped.is_set():
self._socket.settimeout(0.1)
try:
datagram, addr = self._socket.recvfrom(1152)
except socket.timeout: # pragma: no cover
continue
except Exception as e: # pragma: no cover
if self._cb_ignore_read_exception is not None and isinstance(self._cb_ignore_read_exception, collections.Callable):
if self._cb_ignore_read_exception(e, self):
continue
return
else: # pragma: no cover
if len(datagram) == 0:
logger.debug("Exiting receiver Thread due to orderly shutdown on server end")
return
serializer = Serializer()
try:
host, port = addr
except ValueError:
host, port, tmp1, tmp2 = addr
source = (host, port)
message = serializer.deserialize(datagram, source)
if isinstance(message, Response):
logger.debug("receive_datagram - " + str(message))
transaction, send_ack = self._messageLayer.receive_response(message)
if transaction is None: # pragma: no cover
continue
self._wait_for_retransmit_thread(transaction)
if send_ack:
self._send_ack(transaction)
self._blockLayer.receive_response(transaction)
if transaction.block_transfer:
self._send_block_request(transaction)
continue
elif transaction is None: # pragma: no cover
self._send_rst(transaction)
return
self._observeLayer.receive_response(transaction)
if transaction.notification: # pragma: no cover
ack = Message()
ack.type = defines.Types['ACK']
ack = self._messageLayer.send_empty(transaction, transaction.response, ack)
self.send_datagram(ack)
self._callback(transaction.response)
else:
self._callback(transaction.response)
elif isinstance(message, Message):
self._messageLayer.receive_empty(message)
logger.debug("Exiting receiver Thread due to request")
def _send_ack(self, transaction):
"""
Sends an ACK message for the response.
:param transaction: transaction that holds the response
"""
ack = Message()
ack.type = defines.Types['ACK']
if not transaction.response.acknowledged:
ack = self._messageLayer.send_empty(transaction, transaction.response, ack)
self.send_datagram(ack)
def _send_rst(self, transaction): # pragma: no cover
"""
Sends an RST message for the response.
:param transaction: transaction that holds the response
"""
rst = Message()
rst.type = defines.Types['RST']
if not transaction.response.acknowledged:
rst = self._messageLayer.send_empty(transaction, transaction.response, rst)
self.send_datagram(rst)
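# Illustrative sketch (not part of the original module), assuming the usual coapthon
# Request attributes (destination, type, code, uri_path) and the defines tables imported
# above; the host, port and resource path below are hypothetical.
def _coap_get_example(host='127.0.0.1', port=5683, path='basic'):
    def on_response(response):
        print(response.payload if response is not None else 'no response')
    client = CoAP((host, port), random.randint(1, 65535), on_response)
    request = Request()
    request.destination = (host, port)
    request.type = defines.Types["CON"]
    request.code = defines.Codes.GET.number
    request.uri_path = path
    client.send_message(request)
    time.sleep(2)  # give the background receiver thread a moment to deliver the response
    client.close()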
|
http.py
|
# -*- coding: utf-8 -*-
"""
This module contains some helpers to deal with the real http
world.
"""
import threading
import logging
import select
import socket
import time
import os
import six
import webob
from six.moves import http_client
from waitress.server import TcpWSGIServer
def get_free_port():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 0))
ip, port = s.getsockname()
s.close()
ip = os.environ.get('WEBTEST_SERVER_BIND', '127.0.0.1')
return ip, port
def check_server(host, port, path_info='/', timeout=3, retries=30):
"""Perform a request until the server reply"""
if retries < 0:
return 0
conn = http_client.HTTPConnection(host, port, timeout=timeout)
time.sleep(.3)
for i in range(retries):
try:
conn.request('GET', path_info)
res = conn.getresponse()
return res.status
except (socket.error, http_client.HTTPException):
time.sleep(.3)
return 0
class StopableWSGIServer(TcpWSGIServer):
"""StopableWSGIServer is a TcpWSGIServer which run in a separated thread.
This allow to use tools like casperjs or selenium.
Server instance have an ``application_url`` attribute formated with the
server host and port.
"""
was_shutdown = False
def __init__(self, application, *args, **kwargs):
super(StopableWSGIServer, self).__init__(self.wrapper, *args, **kwargs)
self.runner = None
self.test_app = application
self.application_url = 'http://%s:%s/' % (self.adj.host, self.adj.port)
def wrapper(self, environ, start_response):
"""Wrap the wsgi application to override some path:
``/__application__``: allow to ping the server.
``/__file__?__file__={path}``: serve the file found at ``path``
"""
if '__file__' in environ['PATH_INFO']:
req = webob.Request(environ)
resp = webob.Response()
resp.content_type = 'text/html; charset=UTF-8'
filename = req.params.get('__file__')
if os.path.isfile(filename):
with open(filename, 'rb') as fd:
    body = fd.read()
body = body.replace(six.b('http://localhost/'),
six.b('http://%s/' % req.host))
resp.body = body
else:
resp.status = '404 Not Found'
return resp(environ, start_response)
elif '__application__' in environ['PATH_INFO']:
return webob.Response('server started')(environ, start_response)
return self.test_app(environ, start_response)
def run(self):
"""Run the server"""
try:
self.asyncore.loop(.5, map=self._map)
except select.error:
if not self.was_shutdown:
raise
def shutdown(self):
"""Shutdown the server"""
# avoid showing traceback related to asyncore
self.was_shutdown = True
self.logger.setLevel(logging.FATAL)
while self._map:
triggers = list(self._map.values())
for trigger in triggers:
trigger.handle_close()
self.maintenance(0)
self.task_dispatcher.shutdown()
return True
@classmethod
def create(cls, application, **kwargs):
"""Start a server to serve ``application``. Return a server
instance."""
host, port = get_free_port()
if 'port' not in kwargs:
kwargs['port'] = port
if 'host' not in kwargs:
kwargs['host'] = host
if 'expose_tracebacks' not in kwargs:
kwargs['expose_tracebacks'] = True
server = cls(application, **kwargs)
server.runner = threading.Thread(target=server.run)
server.runner.start()
return server
def wait(self, retries=30):
"""Wait until the server is started"""
running = check_server(self.adj.host, self.adj.port,
'/__application__', retries=retries)
if running:
return True
try:
self.shutdown()
finally:
return False
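# Illustrative sketch (not part of the original module): create() starts waitress in a
# background thread, wait() polls the /__application__ ping route until it answers, and
# shutdown() tears everything down. The minimal WSGI app below is hypothetical.
def _stopable_server_example():
    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello']
    server = StopableWSGIServer.create(app)
    try:
        assert server.wait()
        return check_server(server.adj.host, server.adj.port, '/')  # 200 if the app answered
    finally:
        server.shutdown()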
|
search.py
|
# -*- coding: utf-8 -*-
"""
chemspipy.search
~~~~~~~~~~~~~~~~
A wrapper for asynchronous search requests.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import datetime
import logging
import threading
import time
from six.moves import range
from . import errors, objects, utils
log = logging.getLogger(__name__)
# TODO: Use Sequence abc metaclass?
class Results(object):
"""Container class to perform a search on a background thread and hold the results when ready."""
def __init__(self, cs, searchfunc, searchargs, raise_errors=False, max_requests=40):
"""Generally shouldn't be instantiated directly. See :meth:`~chemspipy.api.ChemSpider.search` instead.
:param ChemSpider cs: ``ChemSpider`` session.
:param function searchfunc: Search function that returns a transaction ID.
:param tuple searchargs: Arguments for the search function.
:param bool raise_errors: If True, raise exceptions. If False, store on ``exception`` property.
:param int max_requests: Maximum number of times to check if search results are ready.
"""
log.debug('Results init')
self._cs = cs
self._raise_errors = raise_errors
self._max_requests = max_requests
self._status = 'Created'
self._exception = None
self._qid = None
self._message = None
self._start = None
self._end = None
self._results = []
self._searchthread = threading.Thread(name='SearchThread', target=self._search, args=(cs, searchfunc, searchargs))
self._searchthread.start()
def _search(self, cs, searchfunc, searchargs):
"""Perform the search and retrieve the results."""
log.debug('Searching in background thread')
self._start = datetime.datetime.utcnow()
try:
self._qid = searchfunc(*searchargs)
log.debug('Setting qid: %s' % self._qid)
for _ in range(self._max_requests):
log.debug('Checking status: %s' % self._qid)
status = cs.filter_status(self._qid)
self._status = status['status']
self._message = status.get('message', '')
log.debug(status)
time.sleep(0.2)
if status['status'] == 'Complete':
break
elif status['status'] in {'Failed', 'Unknown', 'Suspended', 'Not Found'}:
raise errors.ChemSpiPyServerError('Search Failed: %s' % status.get('message', ''))
else:
raise errors.ChemSpiPyTimeoutError('Search took too long')
log.debug('Search success!')
self._end = datetime.datetime.utcnow()
if status['count'] > 0:
self._results = [objects.Compound(cs, csid) for csid in cs.filter_results(self._qid)]
log.debug('Results: %s', self._results)
elif not self._message:
self._message = 'No results found'
except Exception as e:
# Catch and store exception so we can raise it in the main thread
self._exception = e
self._end = datetime.datetime.utcnow()
if self._status == 'Created':
self._status = 'Failed'
def ready(self):
"""Return True if the search finished.
:rtype: bool
"""
return not self._searchthread.is_alive()
def success(self):
"""Return True if the search finished with no errors.
:rtype: bool
"""
return self.ready() and not self._exception
def wait(self):
"""Block until the search has completed and optionally raise any resulting exception."""
log.debug('Waiting for search to finish')
self._searchthread.join()
if self._exception and self._raise_errors:
raise self._exception
@property
def status(self):
"""Current status string returned by ChemSpider.
:return: 'Unknown', 'Created', 'Scheduled', 'Processing', 'Suspended', 'PartialResultReady', 'ResultReady'
:rtype: string
"""
return self._status
@property
def exception(self):
"""Any Exception raised during the search. Blocks until the search is finished."""
self.wait() # TODO: If raise_errors=True this will raise the exception when trying to access it?
return self._exception
@property
def qid(self):
"""Search query ID.
:rtype: string
"""
return self._qid
@property
def message(self):
"""A contextual message about the search. Blocks until the search is finished.
:rtype: string
"""
self.wait()
return self._message
@property
def count(self):
"""The number of search results. Blocks until the search is finished.
:rtype: int
"""
return len(self)
@property
def duration(self):
"""The time taken to perform the search. Blocks until the search is finished.
:rtype: :py:class:`datetime.timedelta`
"""
self.wait()
return self._end - self._start
@utils.memoized_property
def sdf(self):
"""Get an SDF containing all the search results.
:return: SDF containing the search results.
:rtype: bytes
"""
self.wait()
return self._cs.filter_results_sdf(self._qid)
def __getitem__(self, index):
"""Get a single result or a slice of results. Blocks until the search is finished.
This means a Results instance can be treated like a normal Python list. For example::
cs.search('glucose')[2]
cs.search('glucose')[0:2]
An IndexError will be raised if the index is greater than the total number of results.
"""
self.wait()
return self._results.__getitem__(index)
def __len__(self):
self.wait()
return self._results.__len__()
def __iter__(self):
self.wait()
return iter(self._results)
def __repr__(self):
if self.success():
return 'Results(%s)' % self._results
else:
return 'Results(%s)' % self.status
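# Illustrative sketch (not part of the original module), assuming a ChemSpider session
# created elsewhere with a valid API key; cs.search() is expected to hand back a Results
# instance immediately, and every accessor below blocks until the background thread is done.
def _results_usage_example(cs):
    results = cs.search('glucose')
    results.wait()
    return list(results) if results.success() else results.message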
|
main.py
|
import requests
import threading
import time
import random
from contextlib import contextmanager
import logging
class Crawler:
""" Requests urls in chunks and then calls the processor func to process the chunk of responses...
This is done so that the processor func does not have to worry about writing to the db too often.
Requests are randomized a little bit - to avoid being blocked (not sure if this even helps though).
"""
def __init__(self, urls, processor_func, pagination_func=None, response_chunk_size=32, cookies=None):
self.urls = urls
self.processor_func = processor_func
self.pagination_func = pagination_func
self.response_chunk = []
self.response_chunk_size = response_chunk_size
self.cookies = cookies if cookies else {}
def run(self):
""" Start crawling... configured on init. """
error_count = 0
while len(self.urls) >= 1 and error_count < 10:
try:
response = self.make_request(self.urls.pop())
self.response_chunk.append(response)
with self.process_response_chunk():
self.wait_before_next_request()
if self.pagination_func:
try:
next_url = self.pagination_func(response)
if isinstance(next_url, str):
    self.urls.append(next_url)
except Exception:
error_count += 1
logging.exception("Error in pagination func")
continue
error_count = 0
except (requests.exceptions.HTTPError, requests.exceptions.Timeout) as e:
error_count += 1
logging.exception(e)
if error_count == 10:
raise ValueError("PANIC - 10 Errors occurred in crawler")
def make_request(self, url):
tries = 0
while tries < 10:
try:
response = requests.get(url, headers=self.request_headers(), cookies=self.cookies, timeout=5)
response.raise_for_status()
return response
except requests.exceptions.Timeout:
tries += 1
raise requests.exceptions.Timeout
@contextmanager
def process_response_chunk(self):
if len(self.response_chunk) >= self.response_chunk_size \
or (len(self.response_chunk) > 0 and len(self.urls) == 0):
processor_func_thread = threading.Thread(target=self.processor_func, args=[self.response_chunk])
processor_func_thread.start()
yield
processor_func_thread.join(timeout=5)
self.response_chunk = []
else:
yield
@staticmethod
def wait_before_next_request():
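        # Sleep length is drawn from a weighted distribution: most requests pause for at
        # most a few seconds, while a small fraction wait 30-90 s so the traffic pattern
        # looks less mechanical.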
time.sleep(random.choices([
random.randint(60, 90),
random.randint(30, 40),
random.randint(5, 10),
random.randint(2, 3),
random.randint(1, 2),
random.randint(0, 1),
0
], weights=[
2,
10,
10,
100,
200,
500,
1000
], k=1)[0])
@staticmethod
def request_headers():
return {
"User-Agent":
random.choice([
"Mozilla/5.0 (Linux; U; Android 2.2) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile "
"Safari/533.1",
"Mozilla/5.0 (Linux; Android 4.2.1; en-us; Nexus 5 Build/JOP40D) AppleWebKit/535.19 (KHTML, "
"like Gecko; googleweblight) Chrome/38.0.1025.166 Mobile Safari/535.19",
"Mozilla/5.0 (Linux; Android 6.0.1; RedMi Note 5 Build/RB3N5C; wv) AppleWebKit/537.36 (KHTML, "
"like Gecko) Version/4.0 Chrome/68.0.3440.91 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 7.1.2; AFTMM Build/NS6265; wv) AppleWebKit/537.36 (KHTML, like Gecko) "
"Version/4.0 Chrome/70.0.3538.110 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 6.0; LG-H631 Build/MRA58K) AppleWebKit/537.36 (KHTML, like Gecko) "
"Version/4.0 Chrome/38.0.2125.102 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 7.1.2; AFTMM Build/NS6264; wv) AppleWebKit/537.36 (KHTML, like Gecko) "
"Version/4.0 Chrome/59.0.3071.125 Mobile Safari/537.36",
"Mozilla/5.0 (iPhone; CPU iPhone OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) "
"Mobile/15E148",
"Mozilla/5.0 (iPad; CPU OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) "
"Mobile/15E148",
"Mozilla/5.0 (iPhone; CPU iPhone OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) "
"Version/12.1 Mobile/15E148 Safari/604.1",
"Mozilla/5.0 (iPhone; CPU iPhone OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) "
"Mobile/15E148",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/74.0.3729.169 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 "
"Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/605.1.15 (KHTML, like Gecko) "
"Version/11.1.2 Safari/605.1.15",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 "
"Safari/537.36 "
]),
"Accept-Language":
random.choice([
"de",
"de,en;q=0.5",
"de-CH,en;q=0.5",
"en-US,en;q=0.5"
]),
"Accept-Encoding":
random.choice([
"gzip, deflate, br, compress",
"gzip, deflate, br"
]),
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,"
"*/*;q=0.8, application/signed-exchange;v=b3;q=0.9"
}
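# Illustrative sketch (not part of the original module): wiring the Crawler to a chunk
# processor. The URLs and the print-based processor are hypothetical stand-ins for the
# real persistence logic.
def _crawler_example():
    def save_chunk(responses):
        for response in responses:
            print(response.url, response.status_code, len(response.text))
    crawler = Crawler(
        urls=['https://example.com/page/1', 'https://example.com/page/2'],
        processor_func=save_chunk,
        response_chunk_size=2,
    )
    crawler.run()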
|
test_client.py
|
from __future__ import annotations
import asyncio
import functools
import gc
import inspect
import logging
import os
import pickle
import random
import subprocess
import sys
import threading
import traceback
import types
import warnings
import weakref
import zipfile
from collections import deque
from collections.abc import Generator
from contextlib import contextmanager, suppress
from functools import partial
from operator import add
from threading import Semaphore
from time import sleep
from typing import Any
import psutil
import pytest
import yaml
from tlz import concat, first, identity, isdistinct, merge, pluck, valmap
import dask
import dask.bag as db
from dask import delayed
from dask.optimization import SubgraphCallable
from dask.utils import parse_timedelta, stringify, tmpfile
from distributed import (
CancelledError,
Executor,
LocalCluster,
Nanny,
TimeoutError,
Worker,
fire_and_forget,
get_client,
get_worker,
performance_report,
profile,
secede,
)
from distributed.client import (
Client,
Future,
_get_global_client,
as_completed,
default_client,
ensure_default_client,
futures_of,
get_task_metadata,
temp_default_client,
tokenize,
wait,
)
from distributed.cluster_dump import load_cluster_dump
from distributed.comm import CommClosedError
from distributed.compatibility import LINUX, WINDOWS
from distributed.core import Status
from distributed.metrics import time
from distributed.objects import HasWhat, WhoHas
from distributed.scheduler import (
COMPILED,
CollectTaskMetaDataPlugin,
KilledWorker,
Scheduler,
)
from distributed.sizeof import sizeof
from distributed.utils import is_valid_xml, mp_context, sync, tmp_text
from distributed.utils_test import (
TaskStateMetadataPlugin,
_UnhashableCallable,
async_wait_for,
asyncinc,
captured_logger,
cluster,
dec,
div,
double,
gen_cluster,
gen_test,
geninc,
get_cert,
inc,
map_varying,
nodebug,
popen,
pristine_loop,
randominc,
save_sys_modules,
slowadd,
slowdec,
slowinc,
throws,
tls_only_security,
varying,
wait_for,
)
pytestmark = pytest.mark.ci1
@gen_cluster(client=True)
async def test_submit(c, s, a, b):
x = c.submit(inc, 10, key="x")
assert not x.done()
assert isinstance(x, Future)
assert x.client is c
result = await x
assert result == 11
assert x.done()
y = c.submit(inc, 20, key="y")
z = c.submit(add, x, y)
result = await z
assert result == 11 + 21
s.validate_state()
@gen_cluster(client=True)
async def test_map(c, s, a, b):
L1 = c.map(inc, range(5))
assert len(L1) == 5
assert isdistinct(x.key for x in L1)
assert all(isinstance(x, Future) for x in L1)
result = await L1[0]
assert result == inc(0)
assert len(s.tasks) == 5
L2 = c.map(inc, L1)
result = await L2[1]
assert result == inc(inc(1))
assert len(s.tasks) == 10
# assert L1[0].key in s.tasks[L2[0].key]
total = c.submit(sum, L2)
result = await total
assert result == sum(map(inc, map(inc, range(5))))
L3 = c.map(add, L1, L2)
result = await L3[1]
assert result == inc(1) + inc(inc(1))
L4 = c.map(add, range(3), range(4))
results = await c.gather(L4)
assert results == list(map(add, range(3), range(4)))
def f(x, y=10):
return x + y
L5 = c.map(f, range(5), y=5)
results = await c.gather(L5)
assert results == list(range(5, 10))
y = c.submit(f, 10)
L6 = c.map(f, range(5), y=y)
results = await c.gather(L6)
assert results == list(range(20, 25))
s.validate_state()
@gen_cluster(client=True)
async def test_map_empty(c, s, a, b):
L1 = c.map(inc, [], pure=False)
assert len(L1) == 0
results = await c.gather(L1)
assert results == []
@gen_cluster(client=True)
async def test_map_keynames(c, s, a, b):
futures = c.map(inc, range(4), key="INC")
assert all(f.key.startswith("INC") for f in futures)
assert isdistinct(f.key for f in futures)
futures2 = c.map(inc, [5, 6, 7, 8], key="INC")
assert [f.key for f in futures] != [f.key for f in futures2]
keys = ["inc-1", "inc-2", "inc-3", "inc-4"]
futures = c.map(inc, range(4), key=keys)
assert [f.key for f in futures] == keys
@gen_cluster(client=True)
async def test_map_retries(c, s, a, b):
args = [
[ZeroDivisionError("one"), 2, 3],
[4, 5, 6],
[ZeroDivisionError("seven"), ZeroDivisionError("eight"), 9],
]
x, y, z = c.map(*map_varying(args), retries=2)
assert await x == 2
assert await y == 4
assert await z == 9
x, y, z = c.map(*map_varying(args), retries=1, pure=False)
assert await x == 2
assert await y == 4
with pytest.raises(ZeroDivisionError, match="eight"):
await z
x, y, z = c.map(*map_varying(args), retries=0, pure=False)
with pytest.raises(ZeroDivisionError, match="one"):
await x
assert await y == 4
with pytest.raises(ZeroDivisionError, match="seven"):
await z
@gen_cluster(client=True)
async def test_map_batch_size(c, s, a, b):
result = c.map(inc, range(100), batch_size=10)
result = await c.gather(result)
assert result == list(range(1, 101))
result = c.map(add, range(100), range(100), batch_size=10)
result = await c.gather(result)
assert result == list(range(0, 200, 2))
# mismatch shape
result = c.map(add, range(100, 200), range(10), batch_size=2)
result = await c.gather(result)
assert result == list(range(100, 120, 2))
@gen_cluster(client=True)
async def test_custom_key_with_batches(c, s, a, b):
"""Test of <https://github.com/dask/distributed/issues/4588>"""
futs = c.map(
lambda x: x**2,
range(10),
batch_size=5,
key=[str(x) for x in range(10)],
)
assert len(futs) == 10
await wait(futs)
@gen_cluster(client=True)
async def test_compute_retries(c, s, a, b):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
# Sanity check for varying() use
x = c.compute(delayed(varying(args))())
with pytest.raises(ZeroDivisionError, match="one"):
await x
# Same retries for all
x = c.compute(delayed(varying(args))(), retries=1)
with pytest.raises(ZeroDivisionError, match="two"):
await x
x = c.compute(delayed(varying(args))(), retries=2)
assert await x == 3
args.append(4)
x = c.compute(delayed(varying(args))(), retries=2)
assert await x == 3
@gen_cluster(client=True)
async def test_compute_retries_annotations(c, s, a, b):
# Per-future retries
xargs = [ZeroDivisionError("one"), ZeroDivisionError("two"), 30, 40]
yargs = [ZeroDivisionError("five"), ZeroDivisionError("six"), 70]
zargs = [80, 90, 100]
with dask.annotate(retries=2):
x = delayed(varying(xargs))()
y = delayed(varying(yargs))()
x, y = c.compute([x, y], optimize_graph=False)
gc.collect()
assert await x == 30
with pytest.raises(ZeroDivisionError, match="five"):
await y
x = delayed(varying(xargs))()
with dask.annotate(retries=2):
y = delayed(varying(yargs))()
z = delayed(varying(zargs))()
x, y, z = c.compute([x, y, z], optimize_graph=False)
with pytest.raises(ZeroDivisionError, match="one"):
await x
assert await y == 70
assert await z == 80
def test_retries_get(c):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
x = delayed(varying(args))()
assert x.compute(retries=5) == 3
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
x = delayed(varying(args))()
with pytest.raises(ZeroDivisionError):
x.compute()
@gen_cluster(client=True)
async def test_compute_persisted_retries(c, s, a, b):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
# Sanity check
x = c.persist(delayed(varying(args))())
fut = c.compute(x)
with pytest.raises(ZeroDivisionError, match="one"):
await fut
x = c.persist(delayed(varying(args))())
fut = c.compute(x, retries=1)
with pytest.raises(ZeroDivisionError, match="two"):
await fut
x = c.persist(delayed(varying(args))())
fut = c.compute(x, retries=2)
assert await fut == 3
args.append(4)
x = c.persist(delayed(varying(args))())
fut = c.compute(x, retries=3)
assert await fut == 3
@gen_cluster(client=True)
async def test_persist_retries(c, s, a, b):
# Same retries for all
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
x = c.persist(delayed(varying(args))(), retries=1)
x = c.compute(x)
with pytest.raises(ZeroDivisionError, match="two"):
await x
x = c.persist(delayed(varying(args))(), retries=2)
x = c.compute(x)
assert await x == 3
@gen_cluster(client=True)
async def test_persist_retries_annotations(c, s, a, b):
# Per-key retries
xargs = [ZeroDivisionError("one"), ZeroDivisionError("two"), 30, 40]
yargs = [ZeroDivisionError("five"), ZeroDivisionError("six"), 70]
zargs = [80, 90, 100]
x = delayed(varying(xargs))()
with dask.annotate(retries=2):
y = delayed(varying(yargs))()
z = delayed(varying(zargs))()
x, y, z = c.persist([x, y, z], optimize_graph=False)
x, y, z = c.compute([x, y, z])
with pytest.raises(ZeroDivisionError, match="one"):
await x
assert await y == 70
assert await z == 80
@gen_cluster(client=True)
async def test_retries_dask_array(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones((10, 10), chunks=(3, 3))
future = c.compute(x.sum(), retries=2)
y = await future
assert y == 100
@gen_cluster(client=True)
async def test_future_repr(c, s, a, b):
pd = pytest.importorskip("pandas")
x = c.submit(inc, 10)
y = c.submit(pd.DataFrame, {"x": [1, 2, 3]})
await x
await y
for func in [repr, lambda x: x._repr_html_()]:
assert str(x.key) in func(x)
assert str(x.status) in func(x)
assert str(x.status) in repr(c.futures[x.key])
assert "int" in func(x)
assert "pandas" in func(y)
assert "DataFrame" in func(y)
@gen_cluster(client=True)
async def test_future_tuple_repr(c, s, a, b):
da = pytest.importorskip("dask.array")
y = da.arange(10, chunks=(5,)).persist()
f = futures_of(y)[0]
for func in [repr, lambda x: x._repr_html_()]:
for k in f.key:
assert str(k) in func(f)
@gen_cluster(client=True)
async def test_Future_exception(c, s, a, b):
x = c.submit(div, 1, 0)
result = await x.exception()
assert isinstance(result, ZeroDivisionError)
x = c.submit(div, 1, 1)
result = await x.exception()
assert result is None
def test_Future_exception_sync(c):
x = c.submit(div, 1, 0)
assert isinstance(x.exception(), ZeroDivisionError)
x = c.submit(div, 1, 1)
assert x.exception() is None
@gen_cluster(client=True)
async def test_Future_release(c, s, a, b):
# Released Futures should be removed timely from the Client
x = c.submit(div, 1, 1)
await x
x.release()
await asyncio.sleep(0)
assert not c.futures
x = c.submit(slowinc, 1, delay=0.5)
x.release()
await asyncio.sleep(0)
assert not c.futures
x = c.submit(div, 1, 0)
await x.exception()
x.release()
await asyncio.sleep(0)
assert not c.futures
def test_Future_release_sync(c):
# Released Futures should be removed timely from the Client
x = c.submit(div, 1, 1)
x.result()
x.release()
wait_for(lambda: not c.futures, timeout=0.3)
x = c.submit(slowinc, 1, delay=0.8)
x.release()
wait_for(lambda: not c.futures, timeout=0.3)
x = c.submit(div, 1, 0)
x.exception()
x.release()
wait_for(lambda: not c.futures, timeout=0.3)
def test_short_tracebacks(loop, c):
tblib = pytest.importorskip("tblib")
future = c.submit(div, 1, 0)
try:
future.result()
except Exception:
_, _, tb = sys.exc_info()
tb = tblib.Traceback(tb).to_dict()
n = 0
while tb is not None:
n += 1
tb = tb["tb_next"]
assert n < 5
@gen_cluster(client=True)
async def test_map_naming(c, s, a, b):
L1 = c.map(inc, range(5))
L2 = c.map(inc, range(5))
assert [x.key for x in L1] == [x.key for x in L2]
L3 = c.map(inc, [1, 1, 1, 1])
assert len({x._state for x in L3}) == 1
L4 = c.map(inc, [1, 1, 1, 1], pure=False)
assert len({x._state for x in L4}) == 4
@gen_cluster(client=True)
async def test_submit_naming(c, s, a, b):
a = c.submit(inc, 1)
b = c.submit(inc, 1)
assert a._state is b._state
c = c.submit(inc, 1, pure=False)
assert c.key != a.key
@gen_cluster(client=True)
async def test_exceptions(c, s, a, b):
x = c.submit(div, 1, 2)
result = await x
assert result == 1 / 2
x = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
await x
x = c.submit(div, 10, 2) # continues to operate
result = await x
assert result == 10 / 2
@gen_cluster()
async def test_gc(s, a, b):
c = await Client(s.address, asynchronous=True)
x = c.submit(inc, 10)
await x
assert s.tasks[x.key].who_has
x.__del__()
await async_wait_for(
lambda: x.key not in s.tasks or not s.tasks[x.key].who_has, timeout=0.3
)
await c.close()
def test_thread(c):
x = c.submit(inc, 1)
assert x.result() == 2
x = c.submit(slowinc, 1, delay=0.3)
with pytest.raises(TimeoutError):
x.result(timeout="10 ms")
assert x.result() == 2
def test_sync_exceptions(c):
x = c.submit(div, 10, 2)
assert x.result() == 5
y = c.submit(div, 10, 0)
try:
y.result()
assert False
except ZeroDivisionError:
pass
z = c.submit(div, 10, 5)
assert z.result() == 2
@gen_cluster(client=True)
async def test_gather(c, s, a, b):
x = c.submit(inc, 10)
y = c.submit(inc, x)
result = await c.gather(x)
assert result == 11
result = await c.gather([x])
assert result == [11]
result = await c.gather({"x": x, "y": [y]})
assert result == {"x": 11, "y": [12]}
@gen_cluster(client=True)
async def test_gather_mismatched_client(c, s, a, b):
c2 = await Client(s.address, asynchronous=True)
x = c.submit(inc, 10)
y = c2.submit(inc, 5)
with pytest.raises(ValueError, match="Futures created by another client"):
await c.gather([x, y])
@gen_cluster(client=True)
async def test_gather_lost(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
y = c.submit(inc, 1, workers=b.address)
await a.close()
with pytest.raises(Exception):
await c.gather([x, y])
def test_gather_sync(c):
x = c.submit(inc, 1)
assert c.gather(x) == 2
y = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
c.gather([x, y])
[xx] = c.gather([x, y], errors="skip")
assert xx == 2
@gen_cluster(client=True)
async def test_gather_strict(c, s, a, b):
x = c.submit(div, 2, 1)
y = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
await c.gather([x, y])
[xx] = await c.gather([x, y], errors="skip")
assert xx == 2
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_gather_skip(c, s, a):
x = c.submit(div, 1, 0, priority=10)
y = c.submit(slowinc, 1, delay=0.5)
with captured_logger(logging.getLogger("distributed.scheduler")) as sched:
with captured_logger(logging.getLogger("distributed.client")) as client:
L = await c.gather([x, y], errors="skip")
assert L == [2]
assert not client.getvalue()
assert not sched.getvalue()
@gen_cluster(client=True)
async def test_limit_concurrent_gathering(c, s, a, b):
futures = c.map(inc, range(100))
await c.gather(futures)
assert len(a.outgoing_transfer_log) + len(b.outgoing_transfer_log) < 100
@gen_cluster(client=True)
async def test_get(c, s, a, b):
future = c.get({"x": (inc, 1)}, "x", sync=False)
assert isinstance(future, Future)
result = await future
assert result == 2
futures = c.get({"x": (inc, 1)}, ["x"], sync=False)
assert isinstance(futures[0], Future)
result = await c.gather(futures)
assert result == [2]
futures = c.get({}, [], sync=False)
result = await c.gather(futures)
assert result == []
result = await c.get(
{("x", 1): (inc, 1), ("x", 2): (inc, ("x", 1))}, ("x", 2), sync=False
)
assert result == 3
def test_get_sync(c):
assert c.get({"x": (inc, 1)}, "x") == 2
def test_no_future_references(c):
from weakref import WeakSet
ws = WeakSet()
futures = c.map(inc, range(10))
ws.update(futures)
del futures
import gc
gc.collect()
start = time()
while list(ws):
sleep(0.01)
assert time() < start + 30
def test_get_sync_optimize_graph_passes_through(c):
bag = db.range(10, npartitions=3).map(inc)
dask.compute(bag.sum(), optimize_graph=False)
@gen_cluster(client=True)
async def test_gather_errors(c, s, a, b):
def f(a, b):
raise TypeError
def g(a, b):
raise AttributeError
future_f = c.submit(f, 1, 2)
future_g = c.submit(g, 1, 2)
with pytest.raises(TypeError):
await c.gather(future_f)
with pytest.raises(AttributeError):
await c.gather(future_g)
await a.close()
@gen_cluster(client=True)
async def test_wait(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
z = c.submit(inc, 2)
done, not_done = await wait([x, y, z])
assert done == {x, y, z}
assert not_done == set()
assert x.status == y.status == "finished"
@gen_cluster(client=True)
async def test_wait_first_completed(c, s, a, b):
x = c.submit(slowinc, 1)
y = c.submit(slowinc, 1)
z = c.submit(inc, 2)
done, not_done = await wait([x, y, z], return_when="FIRST_COMPLETED")
assert done == {z}
assert not_done == {x, y}
assert z.status == "finished"
assert x.status == "pending"
assert y.status == "pending"
@gen_cluster(client=True)
async def test_wait_timeout(c, s, a, b):
future = c.submit(sleep, 0.3)
with pytest.raises(TimeoutError):
await wait(future, timeout=0.01)
def test_wait_sync(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
done, not_done = wait([x, y])
assert done == {x, y}
assert not_done == set()
assert x.status == y.status == "finished"
future = c.submit(sleep, 0.3)
with pytest.raises(TimeoutError):
wait(future, timeout=0.01)
def test_wait_informative_error_for_timeouts(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
try:
wait(x, y)
except Exception as e:
assert "timeout" in str(e)
assert "list" in str(e)
@gen_cluster(client=True)
async def test_garbage_collection(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
assert c.refcount[x.key] == 2
x.__del__()
await asyncio.sleep(0)
assert c.refcount[x.key] == 1
z = c.submit(inc, y)
y.__del__()
await asyncio.sleep(0)
result = await z
assert result == 3
ykey = y.key
y.__del__()
await asyncio.sleep(0)
assert ykey not in c.futures
@gen_cluster(client=True)
async def test_garbage_collection_with_scatter(c, s, a, b):
[future] = await c.scatter([1])
assert future.key in c.futures
assert future.status == "finished"
assert s.who_wants[future.key] == {c.id}
key = future.key
assert c.refcount[key] == 1
future.__del__()
await asyncio.sleep(0)
assert c.refcount[key] == 0
while key in s.tasks and s.tasks[key].who_has:
await asyncio.sleep(0.1)
@gen_cluster(client=True)
async def test_recompute_released_key(c, s, a, b):
x = c.submit(inc, 100)
result1 = await x
xkey = x.key
del x
import gc
gc.collect()
await asyncio.sleep(0)
assert c.refcount[xkey] == 0
# 1 second batching needs a second action to trigger
while xkey in s.tasks and s.tasks[xkey].who_has or xkey in a.data or xkey in b.data:
await asyncio.sleep(0.1)
x = c.submit(inc, 100)
assert x.key in c.futures
result2 = await x
assert result1 == result2
@pytest.mark.slow
@gen_cluster(client=True)
async def test_long_tasks_dont_trigger_timeout(c, s, a, b):
from time import sleep
x = c.submit(sleep, 3)
await x
@pytest.mark.skip
@gen_cluster(client=True)
async def test_missing_data_heals(c, s, a, b):
a.validate = False
b.validate = False
x = c.submit(inc, 1)
y = c.submit(inc, x)
z = c.submit(inc, y)
await wait([x, y, z])
# Secretly delete y's key
if y.key in a.data:
del a.data[y.key]
a.release_key(y.key, stimulus_id="test")
if y.key in b.data:
del b.data[y.key]
b.release_key(y.key, stimulus_id="test")
await asyncio.sleep(0)
w = c.submit(add, y, z)
result = await w
assert result == 3 + 4
@pytest.mark.skip
@gen_cluster(client=True)
async def test_gather_robust_to_missing_data(c, s, a, b):
a.validate = False
b.validate = False
x, y, z = c.map(inc, range(3))
await wait([x, y, z]) # everything computed
for f in [x, y]:
for w in [a, b]:
if f.key in w.data:
del w.data[f.key]
await asyncio.sleep(0)
w.release_key(f.key, stimulus_id="test")
xx, yy, zz = await c.gather([x, y, z])
assert (xx, yy, zz) == (1, 2, 3)
@pytest.mark.skip
@gen_cluster(client=True)
async def test_gather_robust_to_nested_missing_data(c, s, a, b):
a.validate = False
b.validate = False
w = c.submit(inc, 1)
x = c.submit(inc, w)
y = c.submit(inc, x)
z = c.submit(inc, y)
await wait([z])
for worker in [a, b]:
for datum in [y, z]:
if datum.key in worker.data:
del worker.data[datum.key]
await asyncio.sleep(0)
worker.release_key(datum.key, stimulus_id="test")
result = await c.gather([z])
assert result == [inc(inc(inc(inc(1))))]
@gen_cluster(client=True)
async def test_tokenize_on_futures(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
tok = tokenize(x)
assert tokenize(x) == tokenize(x)
assert tokenize(x) == tokenize(y)
c.futures[x.key].finish()
assert tok == tokenize(y)
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_submit(c, s, a, b):
x = c.submit(inc, 1, workers={a.ip})
y = c.submit(inc, x, workers={b.ip})
await wait([x, y])
assert s.host_restrictions[x.key] == {a.ip}
assert x.key in a.data
assert s.host_restrictions[y.key] == {b.ip}
assert y.key in b.data
@gen_cluster(client=True)
async def test_restrictions_ip_port(c, s, a, b):
x = c.submit(inc, 1, workers={a.address})
y = c.submit(inc, x, workers={b.address})
await wait([x, y])
assert s.worker_restrictions[x.key] == {a.address}
assert x.key in a.data
assert s.worker_restrictions[y.key] == {b.address}
assert y.key in b.data
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_map(c, s, a, b):
L = c.map(inc, range(5), workers={a.ip})
await wait(L)
assert set(a.data) == {x.key for x in L}
assert not b.data
for x in L:
assert s.host_restrictions[x.key] == {a.ip}
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_get(c, s, a, b):
dsk = {"x": 1, "y": (inc, "x"), "z": (inc, "y")}
futures = c.get(dsk, ["y", "z"], workers=a.ip, sync=False)
result = await c.gather(futures)
assert result == [2, 3]
assert "y" in a.data
assert "z" in a.data
assert len(b.data) == 0
@gen_cluster(client=True)
async def test_restrictions_get_annotate(c, s, a, b):
x = 1
with dask.annotate(workers=a.address):
y = delayed(inc)(x)
with dask.annotate(workers=b.address):
z = delayed(inc)(y)
futures = c.get(z.__dask_graph__(), [y.key, z.key], sync=False)
result = await c.gather(futures)
assert result == [2, 3]
assert y.key in a.data
assert z.key in b.data
@gen_cluster(client=True)
async def dont_test_bad_restrictions_raise_exception(c, s, a, b):
z = c.submit(inc, 2, workers={"bad-address"})
try:
await z
assert False
except ValueError as e:
assert "bad-address" in str(e)
assert z.key in str(e)
@gen_cluster(client=True)
async def test_remove_worker(c, s, a, b):
L = c.map(inc, range(20))
await wait(L)
await b.close()
assert b.address not in s.workers
result = await c.gather(L)
assert result == list(map(inc, range(20)))
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_errors_dont_block(c, s, w):
L = [c.submit(inc, 1), c.submit(throws, 1), c.submit(inc, 2), c.submit(throws, 2)]
while not (L[0].status == L[2].status == "finished"):
await asyncio.sleep(0.01)
result = await c.gather([L[0], L[2]])
assert result == [2, 3]
@gen_cluster(client=True)
async def test_submit_quotes(c, s, a, b):
def assert_list(x, z=[]):
return isinstance(x, list) and isinstance(z, list)
x = c.submit(assert_list, [1, 2, 3])
result = await x
assert result
x = c.submit(assert_list, [1, 2, 3], z=[4, 5, 6])
result = await x
assert result
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(assert_list, [x, y])
result = await z
assert result
@gen_cluster(client=True)
async def test_map_quotes(c, s, a, b):
def assert_list(x, z=[]):
return isinstance(x, list) and isinstance(z, list)
L = c.map(assert_list, [[1, 2, 3], [4]])
result = await c.gather(L)
assert all(result)
L = c.map(assert_list, [[1, 2, 3], [4]], z=[10])
result = await c.gather(L)
assert all(result)
L = c.map(assert_list, [[1, 2, 3], [4]], [[]] * 3)
result = await c.gather(L)
assert all(result)
@gen_cluster()
async def test_two_consecutive_clients_share_results(s, a, b):
c = await Client(s.address, asynchronous=True)
x = c.submit(random.randint, 0, 1000, pure=True)
xx = await x
f = await Client(s.address, asynchronous=True)
y = f.submit(random.randint, 0, 1000, pure=True)
yy = await y
assert xx == yy
await c.close()
await f.close()
@gen_cluster(client=True)
async def test_submit_then_get_with_Future(c, s, a, b):
x = c.submit(slowinc, 1)
dsk = {"y": (inc, x)}
result = await c.get(dsk, "y", sync=False)
assert result == 3
@gen_cluster(client=True)
async def test_aliases(c, s, a, b):
x = c.submit(inc, 1)
dsk = {"y": x}
result = await c.get(dsk, "y", sync=False)
assert result == 2
@gen_cluster(client=True)
async def test_aliases_2(c, s, a, b):
dsk_keys = [
({"x": (inc, 1), "y": "x", "z": "x", "w": (add, "y", "z")}, ["y", "w"]),
({"x": "y", "y": 1}, ["x"]),
({"x": 1, "y": "x", "z": "y", "w": (inc, "z")}, ["w"]),
]
for dsk, keys in dsk_keys:
result = await c.gather(c.get(dsk, keys, sync=False))
assert list(result) == list(dask.get(dsk, keys))
await asyncio.sleep(0)
@gen_cluster(client=True)
async def test_scatter(c, s, a, b):
d = await c.scatter({"y": 20})
assert isinstance(d["y"], Future)
assert a.data.get("y") == 20 or b.data.get("y") == 20
y_who_has = s.get_who_has(keys=["y"])["y"]
assert a.address in y_who_has or b.address in y_who_has
assert s.get_nbytes(summary=False) == {"y": sizeof(20)}
yy = await c.gather([d["y"]])
assert yy == [20]
[x] = await c.scatter([10])
assert isinstance(x, Future)
assert a.data.get(x.key) == 10 or b.data.get(x.key) == 10
xx = await c.gather([x])
x_who_has = s.get_who_has(keys=[x.key])[x.key]
assert s.tasks[x.key].who_has
assert (
s.workers[a.address] in s.tasks[x.key].who_has
or s.workers[b.address] in s.tasks[x.key].who_has
)
assert s.get_nbytes(summary=False) == {"y": sizeof(20), x.key: sizeof(10)}
assert xx == [10]
z = c.submit(add, x, d["y"]) # submit works on Future
result = await z
assert result == 10 + 20
result = await c.gather([z, x])
assert result == [30, 10]
@gen_cluster(client=True)
async def test_scatter_types(c, s, a, b):
d = await c.scatter({"x": 1})
assert isinstance(d, dict)
assert list(d) == ["x"]
for seq in [[1], (1,), {1}, frozenset([1])]:
L = await c.scatter(seq)
assert isinstance(L, type(seq))
assert len(L) == 1
s.validate_state()
seq = await c.scatter(range(5))
assert isinstance(seq, list)
assert len(seq) == 5
s.validate_state()
@gen_cluster(client=True)
async def test_scatter_non_list(c, s, a, b):
x = await c.scatter(1)
assert isinstance(x, Future)
result = await x
assert result == 1
@gen_cluster(client=True)
async def test_scatter_tokenize_local(c, s, a, b):
from dask.base import normalize_token
class MyObj:
pass
L = []
@normalize_token.register(MyObj)
def f(x):
L.append(x)
return "x"
obj = MyObj()
future = await c.scatter(obj)
assert L and L[0] is obj
@gen_cluster(client=True)
async def test_scatter_singletons(c, s, a, b):
np = pytest.importorskip("numpy")
pd = pytest.importorskip("pandas")
for x in [1, np.ones(5), pd.DataFrame({"x": [1, 2, 3]})]:
future = await c.scatter(x)
result = await future
assert str(result) == str(x)
@gen_cluster(client=True)
async def test_scatter_typename(c, s, a, b):
future = await c.scatter(123)
assert future.key.startswith("int")
@gen_cluster(client=True)
async def test_scatter_hash(c, s, a, b):
x = await c.scatter(123)
y = await c.scatter(123)
assert x.key == y.key
z = await c.scatter(123, hash=False)
assert z.key != y.key
@gen_cluster(client=True)
async def test_scatter_hash_2(c, s, a, b):
[a] = await c.scatter([1])
[b] = await c.scatter([1])
assert a.key == b.key
s.validate_state()
@gen_cluster(client=True)
async def test_get_releases_data(c, s, a, b):
await c.gather(c.get({"x": (inc, 1)}, ["x"], sync=False))
import gc
gc.collect()
while c.refcount["x"]:
await asyncio.sleep(0.01)
def test_current(s, a, b):
with Client(s["address"]) as c:
assert Client.current() is c
with pytest.raises(ValueError):
Client.current()
with Client(s["address"]) as c:
assert Client.current() is c
def test_global_clients(loop):
assert _get_global_client() is None
with pytest.raises(ValueError):
default_client()
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
assert _get_global_client() is c
assert default_client() is c
with Client(s["address"], loop=loop) as f:
assert _get_global_client() is f
assert default_client() is f
assert default_client(c) is c
assert default_client(f) is f
assert _get_global_client() is None
@gen_cluster(client=True)
async def test_exception_on_exception(c, s, a, b):
x = c.submit(lambda: 1 / 0)
y = c.submit(inc, x)
with pytest.raises(ZeroDivisionError):
await y
z = c.submit(inc, y)
with pytest.raises(ZeroDivisionError):
await z
@gen_cluster(client=True)
async def test_get_task_prefix_states(c, s, a, b):
x = await c.submit(inc, 1)
res = s.get_task_prefix_states()
data = {
"inc": {
"erred": 0,
"memory": 1,
"processing": 0,
"released": 0,
"waiting": 0,
}
}
assert res == data
del x
while s.get_task_prefix_states() == data:
await asyncio.sleep(0.01)
res = s.get_task_prefix_states()
assert res == {}
@gen_cluster(client=True)
async def test_get_nbytes(c, s, a, b):
[x] = await c.scatter([1])
assert s.get_nbytes(summary=False) == {x.key: sizeof(1)}
y = c.submit(inc, x)
await y
assert s.get_nbytes(summary=False) == {x.key: sizeof(1), y.key: sizeof(2)}
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_nbytes_determines_worker(c, s, a, b):
x = c.submit(identity, 1, workers=[a.ip])
y = c.submit(identity, tuple(range(100)), workers=[b.ip])
await c.gather([x, y])
z = c.submit(lambda x, y: None, x, y)
await z
assert s.tasks[z.key].who_has == {s.workers[b.address]}
@gen_cluster(client=True)
async def test_if_intermediates_clear_on_error(c, s, a, b):
x = delayed(div, pure=True)(1, 0)
y = delayed(div, pure=True)(1, 2)
z = delayed(add, pure=True)(x, y)
f = c.compute(z)
with pytest.raises(ZeroDivisionError):
await f
s.validate_state()
assert not any(ts.who_has for ts in s.tasks.values())
@gen_cluster(
client=True, config={"distributed.scheduler.default-task-durations": {"f": "1ms"}}
)
async def test_pragmatic_move_small_data_to_large_data(c, s, a, b):
np = pytest.importorskip("numpy")
lists = c.map(np.ones, [10000] * 10, pure=False)
sums = c.map(np.sum, lists)
total = c.submit(sum, sums)
def f(x, y):
return None
results = c.map(f, lists, [total] * 10)
await wait([total])
await wait(results)
assert (
sum(
s.tasks[r.key].who_has.issubset(s.tasks[l.key].who_has)
for l, r in zip(lists, results)
)
>= 9
)
@gen_cluster(client=True)
async def test_get_with_non_list_key(c, s, a, b):
dsk = {("x", 0): (inc, 1), 5: (inc, 2)}
x = await c.get(dsk, ("x", 0), sync=False)
y = await c.get(dsk, 5, sync=False)
assert x == 2
assert y == 3
@gen_cluster(client=True)
async def test_get_with_error(c, s, a, b):
dsk = {"x": (div, 1, 0), "y": (inc, "x")}
with pytest.raises(ZeroDivisionError):
await c.get(dsk, "y", sync=False)
def test_get_with_error_sync(c):
dsk = {"x": (div, 1, 0), "y": (inc, "x")}
with pytest.raises(ZeroDivisionError):
c.get(dsk, "y")
@gen_cluster(client=True)
async def test_directed_scatter(c, s, a, b):
await c.scatter([1, 2, 3], workers=[a.address])
assert len(a.data) == 3
assert not b.data
await c.scatter([4, 5], workers=[b.name])
assert len(b.data) == 2
def test_directed_scatter_sync(c, s, a, b, loop):
futures = c.scatter([1, 2, 3], workers=[b["address"]])
has_what = sync(loop, c.scheduler.has_what)
assert len(has_what[b["address"]]) == len(futures)
assert len(has_what[a["address"]]) == 0
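# scatter/gather with direct=True transfers data straight between client and workers;
# the scheduler op-counter assertions below check that the scheduler itself never
# performed the scatter/gather.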
@gen_cluster(client=True)
async def test_scatter_direct(c, s, a, b):
future = await c.scatter(123, direct=True)
assert future.key in a.data or future.key in b.data
assert s.tasks[future.key].who_has
assert future.status == "finished"
result = await future
assert result == 123
assert not s.counters["op"].components[0]["scatter"]
result = await future
assert not s.counters["op"].components[0]["gather"]
result = await c.gather(future)
assert not s.counters["op"].components[0]["gather"]
@gen_cluster()
async def test_scatter_direct_2(s, a, b):
c = await Client(s.address, asynchronous=True, heartbeat_interval=10)
last = s.clients[c.id].last_seen
while s.clients[c.id].last_seen == last:
await asyncio.sleep(0.10)
await c.close()
@gen_cluster(client=True)
async def test_scatter_direct_numpy(c, s, a, b):
np = pytest.importorskip("numpy")
x = np.ones(5)
future = await c.scatter(x, direct=True)
result = await future
assert np.allclose(x, result)
assert not s.counters["op"].components[0]["scatter"]
@gen_cluster(client=True)
async def test_scatter_direct_broadcast(c, s, a, b):
future2 = await c.scatter(456, direct=True, broadcast=True)
assert future2.key in a.data
assert future2.key in b.data
assert s.tasks[future2.key].who_has == {s.workers[a.address], s.workers[b.address]}
result = await future2
assert result == 456
assert not s.counters["op"].components[0]["scatter"]
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_scatter_direct_balanced(c, s, *workers):
futures = await c.scatter([1, 2, 3], direct=True)
assert sorted(len(w.data) for w in workers) == [0, 1, 1, 1]
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_scatter_direct_broadcast_target(c, s, *workers):
futures = await c.scatter([123, 456], direct=True, workers=workers[0].address)
assert futures[0].key in workers[0].data
assert futures[1].key in workers[0].data
futures = await c.scatter(
[123, 456],
direct=True,
broadcast=True,
workers=[w.address for w in workers[:3]],
)
    # wrap in all(): a bare generator expression passed to assert is always truthy
    assert all(
        f.key in w.data and s.workers[w.address] in s.tasks[f.key].who_has
        for f in futures
        for w in workers[:3]
    )
@gen_cluster(client=True, nthreads=[])
async def test_scatter_direct_empty(c, s):
with pytest.raises((ValueError, TimeoutError)):
await c.scatter(123, direct=True, timeout=0.1)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 5)
async def test_scatter_direct_spread_evenly(c, s, *workers):
futures = []
for i in range(10):
future = await c.scatter(i, direct=True)
futures.append(future)
assert all(w.data for w in workers)
@pytest.mark.parametrize("direct", [True, False])
@pytest.mark.parametrize("broadcast", [True, False])
def test_scatter_gather_sync(c, direct, broadcast):
futures = c.scatter([1, 2, 3], direct=direct, broadcast=broadcast)
results = c.gather(futures, direct=direct)
assert results == [1, 2, 3]
delayed(inc)(1).compute(direct=direct)
@gen_cluster(client=True)
async def test_gather_direct(c, s, a, b):
futures = await c.scatter([1, 2, 3])
data = await c.gather(futures, direct=True)
assert data == [1, 2, 3]
@gen_cluster(client=True)
async def test_many_submits_spread_evenly(c, s, a, b):
L = [c.submit(inc, i) for i in range(10)]
await wait(L)
assert a.data and b.data
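# Tracebacks of failed remote tasks are preserved: Future.traceback() returns the
# remote traceback object, or None if the task succeeded.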
@gen_cluster(client=True)
async def test_traceback(c, s, a, b):
x = c.submit(div, 1, 0)
tb = await x.traceback()
assert any("x / y" in line for line in pluck(3, traceback.extract_tb(tb)))
@gen_cluster(client=True)
async def test_get_traceback(c, s, a, b):
try:
await c.get({"x": (div, 1, 0)}, "x", sync=False)
except ZeroDivisionError:
exc_type, exc_value, exc_traceback = sys.exc_info()
L = traceback.format_tb(exc_traceback)
assert any("x / y" in line for line in L)
@gen_cluster(client=True)
async def test_gather_traceback(c, s, a, b):
x = c.submit(div, 1, 0)
try:
await c.gather(x)
except ZeroDivisionError:
exc_type, exc_value, exc_traceback = sys.exc_info()
L = traceback.format_tb(exc_traceback)
assert any("x / y" in line for line in L)
def test_traceback_sync(c):
x = c.submit(div, 1, 0)
tb = x.traceback()
assert any(
"x / y" in line
for line in concat(traceback.extract_tb(tb))
if isinstance(line, str)
)
y = c.submit(inc, x)
tb2 = y.traceback()
assert set(pluck(3, traceback.extract_tb(tb2))).issuperset(
set(pluck(3, traceback.extract_tb(tb)))
)
z = c.submit(div, 1, 2)
tb = z.traceback()
assert tb is None
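# upload_file tests: each upload ships the file (plain module, zip or egg) to every
# worker and makes it importable there; re-uploading with new contents should take
# effect for subsequently submitted tasks.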
@gen_cluster(client=True)
async def test_upload_file(c, s, a, b):
def g():
import myfile
return myfile.f()
with save_sys_modules():
for value in [123, 456]:
with tmp_text("myfile.py", f"def f():\n return {value}") as fn:
await c.upload_file(fn)
x = c.submit(g, pure=False)
result = await x
assert result == value
@gen_cluster(client=True)
async def test_upload_file_refresh_delayed(c, s, a, b):
with save_sys_modules():
for value in [123, 456]:
with tmp_text("myfile.py", f"def f():\n return {value}") as fn:
await c.upload_file(fn)
sys.path.append(os.path.dirname(fn))
from myfile import f
b = delayed(f)()
bb = c.compute(b, sync=False)
result = await c.gather(bb)
assert result == value
@gen_cluster(client=True)
async def test_upload_file_no_extension(c, s, a, b):
with tmp_text("myfile", "") as fn:
await c.upload_file(fn)
@gen_cluster(client=True)
async def test_upload_file_zip(c, s, a, b):
def g():
import myfile
return myfile.f()
with save_sys_modules():
try:
for value in [123, 456]:
with tmp_text(
"myfile.py", f"def f():\n return {value}"
) as fn_my_file:
with zipfile.ZipFile("myfile.zip", "w") as z:
z.write(fn_my_file, arcname=os.path.basename(fn_my_file))
await c.upload_file("myfile.zip")
x = c.submit(g, pure=False)
result = await x
assert result == value
finally:
if os.path.exists("myfile.zip"):
os.remove("myfile.zip")
@gen_cluster(client=True)
async def test_upload_file_egg(c, s, a, b):
def g():
import package_1
import package_2
return package_1.a, package_2.b
# c.upload_file tells each worker to
# - put this file in their local_directory
# - modify their sys.path to include it
# we don't care about the local_directory
# but we do care about restoring the path
with save_sys_modules():
for value in [123, 456]:
with tmpfile() as dirname:
os.mkdir(dirname)
with open(os.path.join(dirname, "setup.py"), "w") as f:
f.write("from setuptools import setup, find_packages\n")
f.write(
'setup(name="my_package", packages=find_packages(), version="{}")\n'.format(
value
)
)
# test a package with an underscore in the name
package_1 = os.path.join(dirname, "package_1")
os.mkdir(package_1)
with open(os.path.join(package_1, "__init__.py"), "w") as f:
f.write(f"a = {value}\n")
# test multiple top-level packages
package_2 = os.path.join(dirname, "package_2")
os.mkdir(package_2)
with open(os.path.join(package_2, "__init__.py"), "w") as f:
f.write(f"b = {value}\n")
# compile these into an egg
subprocess.check_call(
[sys.executable, "setup.py", "bdist_egg"], cwd=dirname
)
egg_root = os.path.join(dirname, "dist")
# first file ending with '.egg'
egg_name = [
fname for fname in os.listdir(egg_root) if fname.endswith(".egg")
][0]
egg_path = os.path.join(egg_root, egg_name)
await c.upload_file(egg_path)
os.remove(egg_path)
x = c.submit(g, pure=False)
result = await x
assert result == (value, value)
@gen_cluster(client=True)
async def test_upload_large_file(c, s, a, b):
assert a.local_directory
assert b.local_directory
with tmp_text("myfile", "abc") as fn:
with tmp_text("myfile2", "def") as fn2:
await c._upload_large_file(fn, remote_filename="x")
await c._upload_large_file(fn2)
for w in [a, b]:
assert os.path.exists(os.path.join(w.local_directory, "x"))
assert os.path.exists(os.path.join(w.local_directory, "myfile2"))
with open(os.path.join(w.local_directory, "x")) as f:
assert f.read() == "abc"
with open(os.path.join(w.local_directory, "myfile2")) as f:
assert f.read() == "def"
def test_upload_file_sync(c):
def g():
import myfile
return myfile.x
with tmp_text("myfile.py", "x = 123") as fn:
c.upload_file(fn)
x = c.submit(g)
assert x.result() == 123
@gen_cluster(client=True)
async def test_upload_file_exception(c, s, a, b):
with tmp_text("myfile.py", "syntax-error!") as fn:
with pytest.raises(SyntaxError):
await c.upload_file(fn)
def test_upload_file_exception_sync(c):
with tmp_text("myfile.py", "syntax-error!") as fn:
with pytest.raises(SyntaxError):
c.upload_file(fn)
@gen_cluster(client=True, nthreads=[])
async def test_upload_file_new_worker(c, s):
def g():
import myfile
return myfile.x
with tmp_text("myfile.py", "x = 123") as fn:
await c.upload_file(fn)
async with Worker(s.address):
x = await c.submit(g)
assert x == 123
@pytest.mark.skip
@gen_cluster()
async def test_multiple_clients(s, a, b):
a = await Client(s.address, asynchronous=True)
b = await Client(s.address, asynchronous=True)
x = a.submit(inc, 1)
y = b.submit(inc, 2)
assert x.client is a
assert y.client is b
xx = await x
yy = await y
assert xx == 2
assert yy == 3
z = a.submit(add, x, y)
assert z.client is a
zz = await z
assert zz == 5
await a.close()
await b.close()
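# Client.compute on delayed objects returns Futures immediately; plain Python values
# in the input list pass through unchanged (see the `aa == 3` check below).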
@gen_cluster(client=True)
async def test_async_compute(c, s, a, b):
from dask.delayed import delayed
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
[yy, zz, aa] = c.compute([y, z, 3], sync=False)
assert isinstance(yy, Future)
assert isinstance(zz, Future)
assert aa == 3
result = await c.gather([yy, zz])
assert result == [2, 0]
assert isinstance(c.compute(y), Future)
assert isinstance(c.compute([y]), (tuple, list))
@gen_cluster(client=True)
async def test_async_compute_with_scatter(c, s, a, b):
d = await c.scatter({("x", 1): 1, ("y", 1): 2})
x, y = d[("x", 1)], d[("y", 1)]
from dask.delayed import delayed
z = delayed(add)(delayed(inc)(x), delayed(inc)(y))
zz = c.compute(z)
[result] = await c.gather([zz])
assert result == 2 + 3
def test_sync_compute(c):
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
yy, zz = c.compute([y, z], sync=True)
assert (yy, zz) == (2, 0)
@gen_cluster(client=True)
async def test_remote_scatter_gather(c, s, a, b):
x, y, z = await c.scatter([1, 2, 3])
assert x.key in a.data or x.key in b.data
assert y.key in a.data or y.key in b.data
assert z.key in a.data or z.key in b.data
xx, yy, zz = await c.gather([x, y, z])
assert (xx, yy, zz) == (1, 2, 3)
@gen_cluster(client=True)
async def test_remote_submit_on_Future(c, s, a, b):
x = c.submit(lambda x: x + 1, 1)
y = c.submit(lambda x: x + 1, x)
result = await y
assert result == 3
def test_start_is_idempotent(c):
c.start()
c.start()
c.start()
x = c.submit(inc, 1)
assert x.result() == 2
@gen_cluster(client=True)
async def test_client_with_scheduler(c, s, a, b):
assert s.nthreads == {a.address: a.nthreads, b.address: b.nthreads}
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(add, x, y)
result = await x
assert result == 1 + 1
result = await z
assert result == 1 + 1 + 1 + 2
A, B, C = await c.scatter([1, 2, 3])
AA, BB, xx = await c.gather([A, B, x])
assert (AA, BB, xx) == (1, 2, 2)
result = await c.get({"x": (inc, 1), "y": (add, "x", 10)}, "y", sync=False)
assert result == 12
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_allow_restrictions(c, s, a, b):
aws = s.workers[a.address]
    bws = s.workers[b.address]
x = c.submit(inc, 1, workers=a.ip)
await x
assert s.tasks[x.key].who_has == {aws}
assert not s.loose_restrictions
x = c.submit(inc, 2, workers=a.ip, allow_other_workers=True)
await x
assert s.tasks[x.key].who_has == {aws}
assert x.key in s.loose_restrictions
L = c.map(inc, range(3, 13), workers=a.ip, allow_other_workers=True)
await wait(L)
assert all(s.tasks[f.key].who_has == {aws} for f in L)
assert {f.key for f in L}.issubset(s.loose_restrictions)
x = c.submit(inc, 15, workers="127.0.0.3", allow_other_workers=True)
await x
assert s.tasks[x.key].who_has
assert x.key in s.loose_restrictions
L = c.map(inc, range(15, 25), workers="127.0.0.3", allow_other_workers=True)
await wait(L)
assert all(s.tasks[f.key].who_has for f in L)
assert {f.key for f in L}.issubset(s.loose_restrictions)
with pytest.raises(ValueError):
c.submit(inc, 1, allow_other_workers=True)
with pytest.raises(ValueError):
c.map(inc, [1], allow_other_workers=True)
with pytest.raises(TypeError):
c.submit(inc, 20, workers="127.0.0.1", allow_other_workers="Hello!")
with pytest.raises(TypeError):
c.map(inc, [20], workers="127.0.0.1", allow_other_workers="Hello!")
def test_bad_address():
with pytest.raises(OSError, match="connect"):
Client("123.123.123.123:1234", timeout=0.1)
with pytest.raises(OSError, match="connect"):
Client("127.0.0.1:1234", timeout=0.1)
def test_informative_error_on_cluster_type():
with pytest.raises(TypeError) as exc_info:
Client(LocalCluster)
assert "Scheduler address must be a string or a Cluster instance" in str(
exc_info.value
)
@gen_cluster(client=True)
async def test_long_error(c, s, a, b):
def bad(x):
raise ValueError("a" * 100000)
x = c.submit(bad, 10)
try:
await x
except ValueError as e:
assert len(str(e)) < 100000
tb = await x.traceback()
assert all(
len(line) < 100000
for line in concat(traceback.extract_tb(tb))
if isinstance(line, str)
)
@gen_cluster(client=True)
async def test_map_on_futures_with_kwargs(c, s, a, b):
def f(x, y=10):
return x + y
futures = c.map(inc, range(10))
futures2 = c.map(f, futures, y=20)
results = await c.gather(futures2)
assert results == [i + 1 + 20 for i in range(10)]
future = c.submit(inc, 100)
future2 = c.submit(f, future, y=200)
result = await future2
assert result == 100 + 1 + 200
class BadlySerializedObject:
def __getstate__(self):
return 1
def __setstate__(self, state):
raise TypeError("hello!")
class FatallySerializedObject:
def __getstate__(self):
return 1
def __setstate__(self, state):
print("This should never have been deserialized, closing")
import sys
sys.exit(0)
@gen_cluster(client=True)
async def test_badly_serialized_input(c, s, a, b):
o = BadlySerializedObject()
future = c.submit(inc, o)
futures = c.map(inc, range(10))
L = await c.gather(futures)
assert list(L) == list(map(inc, range(10)))
assert future.status == "error"
with pytest.raises(Exception) as info:
await future
assert "hello!" in str(info.value)
@pytest.mark.skip
@gen_test()
async def test_badly_serialized_input_stderr(capsys, c):
o = BadlySerializedObject()
future = c.submit(inc, o)
while True:
sleep(0.01)
out, err = capsys.readouterr()
if "hello!" in err:
break
assert future.status == "error"
def test_repr(loop):
funcs = [str, repr, lambda x: x._repr_html_()]
with cluster(nworkers=3, worker_kwargs={"memory_limit": "2 GiB"}) as (s, [a, b, c]):
with Client(s["address"], loop=loop) as c:
for func in funcs:
text = func(c)
assert c.scheduler.address in text
assert "threads=3" in text or "Total threads: </strong>" in text
assert "6.00 GiB" in text
if "<table" not in text:
assert len(text) < 80
for func in funcs:
text = func(c)
assert "No scheduler connected" in text
@gen_cluster(client=True)
async def test_repr_async(c, s, a, b):
c._repr_html_()
@gen_cluster(client=True, worker_kwargs={"memory_limit": None})
async def test_repr_no_memory_limit(c, s, a, b):
c._repr_html_()
@gen_test()
async def test_repr_localcluster():
cluster = await LocalCluster(
processes=False, dashboard_address=":0", asynchronous=True
)
client = await Client(cluster, asynchronous=True)
try:
text = client._repr_html_()
assert cluster.scheduler.address in text
assert is_valid_xml(client._repr_html_())
finally:
await client.close()
await cluster.close()
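# The forget/release tests exercise scheduler reference counting: a task is dropped
# only once no client holds it and no remaining task depends on it.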
@gen_cluster(client=True)
async def test_forget_simple(c, s, a, b):
x = c.submit(inc, 1, retries=2)
y = c.submit(inc, 2)
z = c.submit(add, x, y, workers=[a.ip], allow_other_workers=True)
await wait([x, y, z])
assert not s.waiting_data.get(x.key)
assert not s.waiting_data.get(y.key)
assert set(s.tasks) == {x.key, y.key, z.key}
s.client_releases_keys(keys=[x.key], client=c.id)
assert x.key in s.tasks
s.client_releases_keys(keys=[z.key], client=c.id)
assert x.key not in s.tasks
assert z.key not in s.tasks
assert not s.tasks[y.key].dependents
s.client_releases_keys(keys=[y.key], client=c.id)
assert not s.tasks
@gen_cluster(client=True)
async def test_forget_complex(e, s, A, B):
a, b, c, d = await e.scatter(list(range(4)))
ab = e.submit(add, a, b)
cd = e.submit(add, c, d)
ac = e.submit(add, a, c)
acab = e.submit(add, ac, ab)
await wait([a, b, c, d, ab, ac, cd, acab])
assert set(s.tasks) == {f.key for f in [ab, ac, cd, acab, a, b, c, d]}
s.client_releases_keys(keys=[ab.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ab, ac, cd, acab, a, b, c, d]}
s.client_releases_keys(keys=[b.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ac, cd, acab, a, c, d]}
s.client_releases_keys(keys=[acab.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ac, cd, a, c, d]}
assert b.key not in s.tasks
while b.key in A.data or b.key in B.data:
await asyncio.sleep(0.01)
s.client_releases_keys(keys=[ac.key], client=e.id)
assert set(s.tasks) == {f.key for f in [cd, a, c, d]}
@gen_cluster(client=True)
async def test_forget_in_flight(e, s, A, B):
delayed2 = partial(delayed, pure=True)
a, b, c, d = (delayed2(slowinc)(i) for i in range(4))
ab = delayed2(slowadd)(a, b, dask_key_name="ab")
cd = delayed2(slowadd)(c, d, dask_key_name="cd")
ac = delayed2(slowadd)(a, c, dask_key_name="ac")
acab = delayed2(slowadd)(ac, ab, dask_key_name="acab")
x, y = e.compute([ac, acab])
s.validate_state()
for i in range(5):
await asyncio.sleep(0.01)
s.validate_state()
s.client_releases_keys(keys=[y.key], client=e.id)
s.validate_state()
for k in [acab.key, ab.key, b.key]:
assert k not in s.tasks
@gen_cluster(client=True)
async def test_forget_errors(c, s, a, b):
x = c.submit(div, 1, 0)
y = c.submit(inc, x)
z = c.submit(inc, y)
await wait([y])
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key in s.exceptions_blame
s.client_releases_keys(keys=[z.key], client=c.id)
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key not in s.exceptions_blame
s.client_releases_keys(keys=[x.key], client=c.id)
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key not in s.exceptions_blame
s.client_releases_keys(keys=[y.key], client=c.id)
assert x.key not in s.exceptions
assert x.key not in s.exceptions_blame
assert y.key not in s.exceptions_blame
assert z.key not in s.exceptions_blame
def test_repr_sync(c):
s = str(c)
r = repr(c)
assert c.scheduler.address in s
assert c.scheduler.address in r
assert str(2) in s # nworkers
assert "cores" in s or "threads" in s
@gen_cluster(client=True)
async def test_waiting_data(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(add, x, y, workers=[a.ip], allow_other_workers=True)
await wait([x, y, z])
assert not s.waiting_data.get(x.key)
assert not s.waiting_data.get(y.key)
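# Several clients can share one scheduler; wants_what / who_wants track which client
# still references each key, so data survives until every interested client releases it.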
@gen_cluster()
async def test_multi_client(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
assert set(s.client_comms) == {c.id, f.id}
x = c.submit(inc, 1)
y = f.submit(inc, 2)
y2 = c.submit(inc, 2)
assert y.key == y2.key
await wait([x, y])
assert s.wants_what == {
c.id: {x.key, y.key},
f.id: {y.key},
"fire-and-forget": set(),
}
assert s.who_wants == {x.key: {c.id}, y.key: {c.id, f.id}}
await c.close()
while c.id in s.wants_what:
await asyncio.sleep(0.01)
assert c.id not in s.wants_what
assert c.id not in s.who_wants[y.key]
assert x.key not in s.who_wants
await f.close()
while s.tasks:
await asyncio.sleep(0.01)
def long_running_client_connection(address):
with pristine_loop():
c = Client(address)
x = c.submit(lambda x: x + 1, 10)
x.result()
sleep(100)
@gen_cluster()
async def test_cleanup_after_broken_client_connection(s, a, b):
proc = mp_context.Process(target=long_running_client_connection, args=(s.address,))
proc.daemon = True
proc.start()
while not s.tasks:
await asyncio.sleep(0.01)
proc.terminate()
while s.tasks:
await asyncio.sleep(0.01)
@gen_cluster()
async def test_multi_garbage_collection(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
x = c.submit(inc, 1)
y = f.submit(inc, 2)
y2 = c.submit(inc, 2)
assert y.key == y2.key
await wait([x, y])
x.__del__()
while x.key in a.data or x.key in b.data:
await asyncio.sleep(0.01)
assert s.wants_what == {c.id: {y.key}, f.id: {y.key}, "fire-and-forget": set()}
assert s.who_wants == {y.key: {c.id, f.id}}
y.__del__()
    while y.key in s.wants_what[f.id]:
await asyncio.sleep(0.01)
await asyncio.sleep(0.1)
assert y.key in a.data or y.key in b.data
assert s.wants_what == {c.id: {y.key}, f.id: set(), "fire-and-forget": set()}
assert s.who_wants == {y.key: {c.id}}
y2.__del__()
while y.key in a.data or y.key in b.data:
await asyncio.sleep(0.01)
assert not any(v for v in s.wants_what.values())
assert not s.who_wants
await c.close()
await f.close()
@gen_cluster(client=True)
async def test__broadcast(c, s, a, b):
x, y = await c.scatter([1, 2], broadcast=True)
assert a.data == b.data == {x.key: 1, y.key: 2}
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test__broadcast_integer(c, s, *workers):
x, y = await c.scatter([1, 2], broadcast=2)
assert len(s.tasks[x.key].who_has) == 2
assert len(s.tasks[y.key].who_has) == 2
@gen_cluster(client=True)
async def test__broadcast_dict(c, s, a, b):
d = await c.scatter({"x": 1}, broadcast=True)
assert a.data == b.data == {"x": 1}
def test_broadcast(c, s, a, b):
x, y = c.scatter([1, 2], broadcast=True)
has_what = sync(c.loop, c.scheduler.has_what)
assert {k: set(v) for k, v in has_what.items()} == {
a["address"]: {x.key, y.key},
b["address"]: {x.key, y.key},
}
[z] = c.scatter([3], broadcast=True, workers=[a["address"]])
has_what = sync(c.loop, c.scheduler.has_what)
assert {k: set(v) for k, v in has_what.items()} == {
a["address"]: {x.key, y.key, z.key},
b["address"]: {x.key, y.key},
}
@gen_cluster(client=True)
async def test_proxy(c, s, a, b):
msg = await c.scheduler.proxy(msg={"op": "identity"}, worker=a.address)
assert msg["id"] == a.identity()["id"]
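# Cancellation tests: cancelling a future releases it on the scheduler and also
# cancels dependent tasks that have not finished yet.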
@gen_cluster(client=True)
async def test_cancel(c, s, a, b):
x = c.submit(slowinc, 1)
y = c.submit(slowinc, x)
while y.key not in s.tasks:
await asyncio.sleep(0.01)
await c.cancel([x])
assert x.cancelled()
assert "cancel" in str(x)
s.validate_state()
while not y.cancelled():
await asyncio.sleep(0.01)
assert not s.tasks
s.validate_state()
@gen_cluster(client=True)
async def test_cancel_tuple_key(c, s, a, b):
x = c.submit(inc, 1, key=("x", 0, 1))
await x
await c.cancel(x)
with pytest.raises(CancelledError):
await x
@gen_cluster()
async def test_cancel_multi_client(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
x = c.submit(slowinc, 1)
y = f.submit(slowinc, 1)
assert x.key == y.key
await c.cancel([x])
assert x.cancelled()
assert not y.cancelled()
while y.key not in s.tasks:
await asyncio.sleep(0.01)
out = await y
assert out == 2
with pytest.raises(CancelledError):
await x
await c.close()
await f.close()
@gen_cluster(client=True)
async def test_cancel_collection(c, s, a, b):
L = c.map(double, [[1], [2], [3]])
x = db.Bag({("b", i): f for i, f in enumerate(L)}, "b", 3)
await c.cancel(x)
await c.cancel([x])
assert all(f.cancelled() for f in L)
while s.tasks:
await asyncio.sleep(0.01)
def test_cancel_sync(c):
x = c.submit(slowinc, 1, key="x")
y = c.submit(slowinc, x, key="y")
z = c.submit(slowinc, y, key="z")
c.cancel([y])
start = time()
while not z.cancelled():
sleep(0.01)
assert time() < start + 30
assert x.result() == 2
z.cancel()
assert z.cancelled()
@gen_cluster(client=True)
async def test_future_type(c, s, a, b):
x = c.submit(inc, 1)
await wait([x])
assert x.type == int
assert "int" in str(x)
@gen_cluster(client=True)
async def test_traceback_clean(c, s, a, b):
x = c.submit(div, 1, 0)
try:
await x
except Exception as e:
f = e
exc_type, exc_value, tb = sys.exc_info()
while tb:
assert "scheduler" not in tb.tb_frame.f_code.co_filename
assert "worker" not in tb.tb_frame.f_code.co_filename
tb = tb.tb_next
@gen_cluster(client=True)
async def test_map_different_lengths(c, s, a, b):
assert len(c.map(add, [1, 2], [1, 2, 3])) == 2
def test_Future_exception_sync_2(loop, capsys):
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
assert dask.base.get_scheduler() == c.get
out, err = capsys.readouterr()
assert len(out.strip().split("\n")) == 1
assert dask.base.get_scheduler() != c.get
@gen_cluster(timeout=60, client=True)
async def test_async_persist(c, s, a, b):
from dask.delayed import Delayed, delayed
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
w = delayed(add)(y, z)
yy, ww = c.persist([y, w])
assert type(yy) == type(y)
assert type(ww) == type(w)
assert len(yy.dask) == 1
assert len(ww.dask) == 1
assert len(w.dask) > 1
assert y.__dask_keys__() == yy.__dask_keys__()
assert w.__dask_keys__() == ww.__dask_keys__()
while y.key not in s.tasks and w.key not in s.tasks:
await asyncio.sleep(0.01)
assert s.who_wants[y.key] == {c.id}
assert s.who_wants[w.key] == {c.id}
yyf, wwf = c.compute([yy, ww])
yyy, www = await c.gather([yyf, wwf])
assert yyy == inc(1)
assert www == add(inc(1), dec(1))
assert isinstance(c.persist(y), Delayed)
assert isinstance(c.persist([y]), (list, tuple))
@gen_cluster(client=True)
async def test__persist(c, s, a, b):
pytest.importorskip("dask.array")
import dask.array as da
x = da.ones((10, 10), chunks=(5, 10))
y = 2 * (x + 1)
assert len(y.dask) == 6
yy = c.persist(y)
assert len(y.dask) == 6
assert len(yy.dask) == 2
assert all(isinstance(v, Future) for v in yy.dask.values())
assert yy.__dask_keys__() == y.__dask_keys__()
g, h = c.compute([y, yy])
gg, hh = await c.gather([g, h])
assert (gg == hh).all()
def test_persist(c):
pytest.importorskip("dask.array")
import dask.array as da
x = da.ones((10, 10), chunks=(5, 10))
y = 2 * (x + 1)
assert len(y.dask) == 6
yy = c.persist(y)
assert len(y.dask) == 6
assert len(yy.dask) == 2
assert all(isinstance(v, Future) for v in yy.dask.values())
assert yy.__dask_keys__() == y.__dask_keys__()
zz = yy.compute()
z = y.compute()
assert (zz == z).all()
@gen_cluster(timeout=60, client=True)
async def test_long_traceback(c, s, a, b):
from distributed.protocol.pickle import dumps
def deep(n):
if n == 0:
1 / 0
else:
return deep(n - 1)
x = c.submit(deep, 200)
await wait([x])
assert len(dumps(c.futures[x.key].traceback)) < 10000
assert isinstance(c.futures[x.key].exception, ZeroDivisionError)
@gen_cluster(client=True)
async def test_wait_on_collections(c, s, a, b):
L = c.map(double, [[1], [2], [3]])
x = db.Bag({("b", i): f for i, f in enumerate(L)}, "b", 3)
await wait(x)
assert all(f.key in a.data or f.key in b.data for f in L)
@gen_cluster(client=True)
async def test_futures_of_get(c, s, a, b):
x, y, z = c.map(inc, [1, 2, 3])
assert set(futures_of(0)) == set()
assert set(futures_of(x)) == {x}
assert set(futures_of([x, y, z])) == {x, y, z}
assert set(futures_of([x, [y], [[z]]])) == {x, y, z}
assert set(futures_of({"x": x, "y": [y]})) == {x, y}
b = db.Bag({("b", i): f for i, f in enumerate([x, y, z])}, "b", 3)
assert set(futures_of(b)) == {x, y, z}
sg = SubgraphCallable(
{"x": x, "y": y, "z": z, "out": (add, (add, (add, x, y), z), "in")},
"out",
("in",),
)
assert set(futures_of(sg)) == {x, y, z}
def test_futures_of_class():
da = pytest.importorskip("dask.array")
assert futures_of([da.Array]) == []
@gen_cluster(client=True)
async def test_futures_of_cancelled_raises(c, s, a, b):
x = c.submit(inc, 1)
await c.cancel([x])
with pytest.raises(CancelledError):
await x
with pytest.raises(CancelledError):
await c.get({"x": (inc, x), "y": (inc, 2)}, ["x", "y"], sync=False)
with pytest.raises(CancelledError):
c.submit(inc, x)
with pytest.raises(CancelledError):
c.submit(add, 1, y=x)
with pytest.raises(CancelledError):
c.map(add, [1], y=x)
assert "y" not in s.tasks
@pytest.mark.skip
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_dont_delete_recomputed_results(c, s, w):
x = c.submit(inc, 1) # compute first time
await wait([x])
x.__del__() # trigger garbage collection
await asyncio.sleep(0)
xx = c.submit(inc, 1) # compute second time
start = time()
while xx.key not in w.data: # data shows up
await asyncio.sleep(0.01)
assert time() < start + 1
while time() < start + (s.delete_interval + 100) / 1000: # and stays
assert xx.key in w.data
await asyncio.sleep(0.01)
@gen_cluster(nthreads=[], client=True)
async def test_fatally_serialized_input(c, s):
o = FatallySerializedObject()
future = c.submit(inc, o)
while not s.tasks:
await asyncio.sleep(0.01)
@pytest.mark.skip(reason="Use fast random selection now")
@gen_cluster(client=True)
async def test_balance_tasks_by_stacks(c, s, a, b):
x = c.submit(inc, 1)
await wait(x)
y = c.submit(inc, 2)
await wait(y)
assert len(a.data) == len(b.data) == 1
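# Client.run executes a function directly on workers, outside the task graph, and
# returns a dict keyed by worker address; run_on_scheduler does the same on the scheduler.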
@gen_cluster(client=True)
async def test_run(c, s, a, b):
results = await c.run(inc, 1)
assert results == {a.address: 2, b.address: 2}
results = await c.run(inc, 1, workers=[a.address])
assert results == {a.address: 2}
results = await c.run(inc, 1, workers=[])
assert results == {}
@gen_cluster(client=True)
async def test_run_handles_picklable_data(c, s, a, b):
futures = c.map(inc, range(10))
await wait(futures)
def func():
return {}, set(), [], (), 1, "hello", b"100"
results = await c.run_on_scheduler(func)
assert results == func()
results = await c.run(func)
assert results == {w.address: func() for w in [a, b]}
def test_run_sync(c, s, a, b):
def func(x, y=10):
return x + y
result = c.run(func, 1, y=2)
assert result == {a["address"]: 3, b["address"]: 3}
result = c.run(func, 1, y=2, workers=[a["address"]])
assert result == {a["address"]: 3}
@gen_cluster(client=True)
async def test_run_coroutine(c, s, a, b):
results = await c.run(geninc, 1, delay=0.05)
assert results == {a.address: 2, b.address: 2}
results = await c.run(geninc, 1, delay=0.05, workers=[a.address])
assert results == {a.address: 2}
results = await c.run(geninc, 1, workers=[])
assert results == {}
with pytest.raises(RuntimeError, match="hello"):
await c.run(throws, 1)
results = await c.run(asyncinc, 2, delay=0.01)
assert results == {a.address: 3, b.address: 3}
def test_run_coroutine_sync(c, s, a, b):
result = c.run(geninc, 2, delay=0.01)
assert result == {a["address"]: 3, b["address"]: 3}
result = c.run(geninc, 2, workers=[a["address"]])
assert result == {a["address"]: 3}
t1 = time()
result = c.run(geninc, 2, delay=10, wait=False)
t2 = time()
assert result is None
assert t2 - t1 <= 1.0
@gen_cluster(client=True)
async def test_run_coroutine_deprecated(c, s, a, b):
async def foo():
return "bar"
with pytest.warns(FutureWarning, match="Client.run "):
results = await c.run_coroutine(foo)
assert results == {a.address: "bar", b.address: "bar"}
@gen_cluster(client=True)
async def test_run_exception(c, s, a, b):
class MyError(Exception):
pass
def raise_exception(dask_worker, addr):
if addr == dask_worker.address:
raise MyError("informative message")
return 123
with pytest.raises(MyError, match="informative message"):
await c.run(raise_exception, addr=a.address)
with pytest.raises(MyError, match="informative message"):
await c.run(raise_exception, addr=a.address, on_error="raise")
with pytest.raises(ValueError, match="on_error must be"):
await c.run(raise_exception, addr=a.address, on_error="invalid")
out = await c.run(raise_exception, addr=a.address, on_error="return")
assert isinstance(out[a.address], MyError)
assert out[b.address] == 123
out = await c.run(raise_exception, addr=a.address, on_error="ignore")
assert out == {b.address: 123}
@gen_cluster(client=True, config={"distributed.comm.timeouts.connect": "200ms"})
async def test_run_rpc_error(c, s, a, b):
a.stop()
with pytest.raises(OSError, match="Timed out trying to connect"):
await c.run(inc, 1)
with pytest.raises(OSError, match="Timed out trying to connect"):
await c.run(inc, 1, on_error="raise")
out = await c.run(inc, 1, on_error="return")
assert isinstance(out[a.address], OSError)
assert out[b.address] == 2
out = await c.run(inc, 1, on_error="ignore")
assert out == {b.address: 2}
def test_diagnostic_ui(loop):
with cluster() as (s, [a, b]):
a_addr = a["address"]
b_addr = b["address"]
with Client(s["address"], loop=loop) as c:
d = c.nthreads()
assert d == {a_addr: 1, b_addr: 1}
d = c.nthreads([a_addr])
assert d == {a_addr: 1}
d = c.nthreads(a_addr)
assert d == {a_addr: 1}
d = c.nthreads(a["address"])
assert d == {a_addr: 1}
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(inc, 3)
wait([x, y, z])
d = c.who_has()
assert set(d) == {x.key, y.key, z.key}
assert all(w in [a_addr, b_addr] for v in d.values() for w in v)
assert all(d.values())
d = c.who_has([x, y])
assert set(d) == {x.key, y.key}
d = c.who_has(x)
assert set(d) == {x.key}
d = c.has_what()
assert set(d) == {a_addr, b_addr}
assert all(k in [x.key, y.key, z.key] for v in d.values() for k in v)
d = c.has_what([a_addr])
assert set(d) == {a_addr}
d = c.has_what(a_addr)
assert set(d) == {a_addr}
def test_diagnostic_nbytes_sync(c):
incs = c.map(inc, [1, 2, 3])
doubles = c.map(double, [1, 2, 3])
wait(incs + doubles)
assert c.nbytes(summary=False) == {k.key: sizeof(1) for k in incs + doubles}
assert c.nbytes(summary=True) == {"inc": sizeof(1) * 3, "double": sizeof(1) * 3}
@gen_cluster(client=True)
async def test_diagnostic_nbytes(c, s, a, b):
incs = c.map(inc, [1, 2, 3])
doubles = c.map(double, [1, 2, 3])
await wait(incs + doubles)
assert s.get_nbytes(summary=False) == {k.key: sizeof(1) for k in incs + doubles}
assert s.get_nbytes(summary=True) == {"inc": sizeof(1) * 3, "double": sizeof(1) * 3}
@gen_cluster(client=True, nthreads=[])
async def test_worker_aliases(c, s):
a = Worker(s.address, name="alice")
b = Worker(s.address, name="bob")
w = Worker(s.address, name=3)
await asyncio.gather(a, b, w)
L = c.map(inc, range(10), workers="alice")
future = await c.scatter(123, workers=3)
await wait(L)
assert len(a.data) == 10
assert len(b.data) == 0
assert dict(w.data) == {future.key: 123}
for i, alias in enumerate([3, [3], "alice"]):
result = await c.submit(lambda x: x + 1, i, workers=alias)
assert result == i + 1
await asyncio.gather(a.close(), b.close(), w.close())
def test_persist_get_sync(c):
x, y = delayed(1), delayed(2)
xx = delayed(add)(x, x)
yy = delayed(add)(y, y)
xxyy = delayed(add)(xx, yy)
xxyy2 = c.persist(xxyy)
xxyy3 = delayed(add)(xxyy2, 10)
assert xxyy3.compute() == ((1 + 1) + (2 + 2)) + 10
@gen_cluster(client=True)
async def test_persist_get(c, s, a, b):
x, y = delayed(1), delayed(2)
xx = delayed(add)(x, x)
yy = delayed(add)(y, y)
xxyy = delayed(add)(xx, yy)
xxyy2 = c.persist(xxyy)
xxyy3 = delayed(add)(xxyy2, 10)
await asyncio.sleep(0.5)
result = await c.gather(c.get(xxyy3.dask, xxyy3.__dask_keys__(), sync=False))
assert result[0] == ((1 + 1) + (2 + 2)) + 10
result = await c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
result = await c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
result = await c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
def test_client_num_fds(loop):
with cluster() as (s, [a, b]):
proc = psutil.Process()
with Client(s["address"], loop=loop) as c: # first client to start loop
before = proc.num_fds() # measure
for i in range(4):
with Client(s["address"], loop=loop): # start more clients
pass
start = time()
while proc.num_fds() > before:
sleep(0.01)
assert time() < start + 10, (before, proc.num_fds())
@gen_cluster()
async def test_startup_close_startup(s, a, b):
c = await Client(s.address, asynchronous=True)
await c.close()
c = await Client(s.address, asynchronous=True)
await c.close()
def test_startup_close_startup_sync(loop):
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
sleep(0.1)
with Client(s["address"]) as c:
pass
with Client(s["address"]) as c:
pass
sleep(0.1)
with Client(s["address"]) as c:
pass
@gen_cluster(client=True)
async def test_badly_serialized_exceptions(c, s, a, b):
def f():
class BadlySerializedException(Exception):
def __reduce__(self):
raise TypeError()
raise BadlySerializedException("hello world")
x = c.submit(f)
with pytest.raises(Exception, match="hello world"):
await x
# Set rebalance() to work predictably on small amounts of managed memory. By default, it
# uses optimistic memory, which would only be possible to test by allocating very large
# amounts of managed memory, so that they would hide variations in unmanaged memory.
REBALANCE_MANAGED_CONFIG = {
"distributed.worker.memory.rebalance.measure": "managed",
"distributed.worker.memory.rebalance.sender-min": 0,
"distributed.worker.memory.rebalance.sender-recipient-gap": 0,
}
@gen_cluster(client=True, config=REBALANCE_MANAGED_CONFIG)
async def test_rebalance(c, s, a, b):
"""Test Client.rebalance(). These are just to test the Client wrapper around
Scheduler.rebalance(); for more thorough tests on the latter see test_scheduler.py.
"""
futures = await c.scatter(range(100), workers=[a.address])
assert len(a.data) == 100
assert len(b.data) == 0
await c.rebalance()
assert len(a.data) == 50
assert len(b.data) == 50
@gen_cluster(nthreads=[("", 1)] * 3, client=True, config=REBALANCE_MANAGED_CONFIG)
async def test_rebalance_workers_and_keys(client, s, a, b, c):
"""Test Client.rebalance(). These are just to test the Client wrapper around
Scheduler.rebalance(); for more thorough tests on the latter see test_scheduler.py.
"""
futures = await client.scatter(range(100), workers=[a.address])
assert (len(a.data), len(b.data), len(c.data)) == (100, 0, 0)
# Passing empty iterables is not the same as omitting the arguments
await client.rebalance([])
await client.rebalance(workers=[])
assert (len(a.data), len(b.data), len(c.data)) == (100, 0, 0)
# Limit rebalancing to two arbitrary keys and two arbitrary workers.
await client.rebalance([futures[3], futures[7]], [a.address, b.address])
assert (len(a.data), len(b.data), len(c.data)) == (98, 2, 0)
with pytest.raises(KeyError):
await client.rebalance(workers=["notexist"])
def test_rebalance_sync():
with dask.config.set(REBALANCE_MANAGED_CONFIG):
with Client(n_workers=2, processes=False, dashboard_address=":0") as c:
s = c.cluster.scheduler
a = c.cluster.workers[0]
b = c.cluster.workers[1]
futures = c.scatter(range(100), workers=[a.address])
assert len(a.data) == 100
assert len(b.data) == 0
c.rebalance()
assert len(a.data) == 50
assert len(b.data) == 50
@gen_cluster(client=True)
async def test_rebalance_unprepared(c, s, a, b):
"""Client.rebalance() internally waits for unfinished futures"""
futures = c.map(slowinc, range(10), delay=0.05, workers=a.address)
# Let the futures reach the scheduler
await asyncio.sleep(0.1)
# We didn't wait enough for futures to complete. However, Client.rebalance() will
# block until all futures are completed before invoking Scheduler.rebalance().
await c.rebalance(futures)
s.validate_state()
@gen_cluster(client=True)
async def test_rebalance_raises_on_explicit_missing_data(c, s, a, b):
"""rebalance() raises KeyError if explicitly listed futures disappear"""
f = Future("x", client=c, state="memory")
with pytest.raises(KeyError, match="Could not rebalance keys:"):
await c.rebalance(futures=[f])
@gen_cluster(client=True)
async def test_receive_lost_key(c, s, a, b):
x = c.submit(inc, 1, workers=[a.address])
await x
await a.close()
while x.status == "finished":
await asyncio.sleep(0.01)
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_unrunnable_task_runs(c, s, a, b):
x = c.submit(inc, 1, workers=[a.ip])
await x
await a.close()
while x.status == "finished":
await asyncio.sleep(0.01)
assert s.tasks[x.key] in s.unrunnable
assert s.get_task_status(keys=[x.key]) == {x.key: "no-worker"}
w = await Worker(s.address, loop=s.loop)
while x.status != "finished":
await asyncio.sleep(0.01)
assert s.tasks[x.key] not in s.unrunnable
result = await x
assert result == 2
await w.close()
@gen_cluster(client=True, nthreads=[])
async def test_add_worker_after_tasks(c, s):
futures = c.map(inc, range(10))
n = await Nanny(s.address, nthreads=2, loop=s.loop)
await c.gather(futures)
await n.close()
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_workers_register_indirect_data(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
y = c.submit(inc, x, workers=b.ip)
await y
assert b.data[x.key] == 1
assert s.tasks[x.key].who_has == {s.workers[a.address], s.workers[b.address]}
assert s.workers[b.address].has_what == {s.tasks[x.key], s.tasks[y.key]}
s.validate_state()
@gen_cluster(client=True)
async def test_submit_on_cancelled_future(c, s, a, b):
x = c.submit(inc, 1)
await x
await c.cancel(x)
with pytest.raises(CancelledError):
c.submit(inc, x)
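# replicate() copies keys onto up to n workers (optionally restricted to a subset of
# workers); n=None replicates onto every worker.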
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate(c, s, *workers):
[a, b] = await c.scatter([1, 2])
await s.replicate(keys=[a.key, b.key], n=5)
s.validate_state()
assert len(s.tasks[a.key].who_has) == 5
assert len(s.tasks[b.key].who_has) == 5
assert sum(a.key in w.data for w in workers) == 5
assert sum(b.key in w.data for w in workers) == 5
@gen_cluster(client=True)
async def test_replicate_tuple_keys(c, s, a, b):
x = delayed(inc)(1, dask_key_name=("x", 1))
f = c.persist(x)
await c.replicate(f, n=5)
s.validate_state()
assert a.data and b.data
await c.rebalance(f)
s.validate_state()
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate_workers(c, s, *workers):
[a, b] = await c.scatter([1, 2], workers=[workers[0].address])
await s.replicate(
keys=[a.key, b.key], n=5, workers=[w.address for w in workers[:5]]
)
assert len(s.tasks[a.key].who_has) == 5
assert len(s.tasks[b.key].who_has) == 5
assert sum(a.key in w.data for w in workers[:5]) == 5
assert sum(b.key in w.data for w in workers[:5]) == 5
assert sum(a.key in w.data for w in workers[5:]) == 0
assert sum(b.key in w.data for w in workers[5:]) == 0
await s.replicate(keys=[a.key, b.key], n=1)
assert len(s.tasks[a.key].who_has) == 1
assert len(s.tasks[b.key].who_has) == 1
assert sum(a.key in w.data for w in workers) == 1
assert sum(b.key in w.data for w in workers) == 1
s.validate_state()
await s.replicate(keys=[a.key, b.key], n=None) # all
assert len(s.tasks[a.key].who_has) == 10
assert len(s.tasks[b.key].who_has) == 10
s.validate_state()
await s.replicate(
keys=[a.key, b.key], n=1, workers=[w.address for w in workers[:5]]
)
assert sum(a.key in w.data for w in workers[:5]) == 1
assert sum(b.key in w.data for w in workers[:5]) == 1
assert sum(a.key in w.data for w in workers[5:]) == 5
assert sum(b.key in w.data for w in workers[5:]) == 5
s.validate_state()
class CountSerialization:
def __init__(self):
self.n = 0
def __setstate__(self, n):
self.n = n + 1
def __getstate__(self):
return self.n
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate_tree_branching(c, s, *workers):
obj = CountSerialization()
[future] = await c.scatter([obj])
await s.replicate(keys=[future.key], n=10)
max_count = max(w.data[future.key].n for w in workers)
assert max_count > 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_client_replicate(c, s, *workers):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
await c.replicate([x, y], n=5)
assert len(s.tasks[x.key].who_has) == 5
assert len(s.tasks[y.key].who_has) == 5
await c.replicate([x, y], n=3)
assert len(s.tasks[x.key].who_has) == 3
assert len(s.tasks[y.key].who_has) == 3
await c.replicate([x, y])
s.validate_state()
assert len(s.tasks[x.key].who_has) == 10
assert len(s.tasks[y.key].who_has) == 10
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1), ("127.0.0.2", 1), ("127.0.0.2", 1)],
)
async def test_client_replicate_host(client, s, a, b, c):
aws = s.workers[a.address]
bws = s.workers[b.address]
cws = s.workers[c.address]
x = client.submit(inc, 1, workers="127.0.0.2")
await wait([x])
assert s.tasks[x.key].who_has == {bws} or s.tasks[x.key].who_has == {cws}
await client.replicate([x], workers=["127.0.0.2"])
assert s.tasks[x.key].who_has == {bws, cws}
await client.replicate([x], workers=["127.0.0.1"])
assert s.tasks[x.key].who_has == {aws, bws, cws}
def test_client_replicate_sync(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
c.replicate([x, y], n=2)
who_has = c.who_has()
assert len(who_has[x.key]) == len(who_has[y.key]) == 2
with pytest.raises(ValueError):
c.replicate([x], n=0)
assert y.result() == 3
@pytest.mark.skipif(WINDOWS, reason="Windows timer too coarse-grained")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 4)] * 1)
async def test_task_load_adapts_quickly(c, s, a):
future = c.submit(slowinc, 1, delay=0.2) # slow
await wait(future)
assert 0.15 < s.task_prefixes["slowinc"].duration_average < 0.4
futures = c.map(slowinc, range(10), delay=0) # very fast
await wait(futures)
assert 0 < s.task_prefixes["slowinc"].duration_average < 0.1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
async def test_even_load_after_fast_functions(c, s, a, b):
x = c.submit(inc, 1, workers=a.address) # very fast
y = c.submit(inc, 2, workers=b.address) # very fast
await wait([x, y])
futures = c.map(inc, range(2, 11))
await wait(futures)
assert any(f.key in a.data for f in futures)
assert any(f.key in b.data for f in futures)
# assert abs(len(a.data) - len(b.data)) <= 3
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
async def test_even_load_on_startup(c, s, a, b):
x, y = c.map(inc, [1, 2])
await wait([x, y])
assert len(a.data) == len(b.data) == 1
@pytest.mark.skip
@gen_cluster(client=True, nthreads=[("127.0.0.1", 2)] * 2)
async def test_contiguous_load(c, s, a, b):
w, x, y, z = c.map(inc, [1, 2, 3, 4])
await wait([w, x, y, z])
groups = [set(a.data), set(b.data)]
assert {w.key, x.key} in groups
assert {y.key, z.key} in groups
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_balanced_with_submit(c, s, *workers):
L = [c.submit(slowinc, i) for i in range(4)]
await wait(L)
for w in workers:
assert len(w.data) == 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_balanced_with_submit_and_resident_data(c, s, *workers):
[x] = await c.scatter([10], broadcast=True)
L = [c.submit(slowinc, x, pure=False) for i in range(4)]
await wait(L)
for w in workers:
assert len(w.data) == 2
@gen_cluster(client=True, nthreads=[("127.0.0.1", 20)] * 2)
async def test_scheduler_saturates_cores(c, s, a, b):
for delay in [0, 0.01, 0.1]:
futures = c.map(slowinc, range(100), delay=delay)
futures = c.map(slowinc, futures, delay=delay / 10)
while not s.tasks:
if s.tasks:
assert all(
len(p) >= 20
for w in s.workers.values()
for p in w.processing.values()
)
await asyncio.sleep(0.01)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 20)] * 2)
async def test_scheduler_saturates_cores_random(c, s, a, b):
for delay in [0, 0.01, 0.1]:
futures = c.map(randominc, range(100), scale=0.1)
while not s.tasks:
if s.tasks:
assert all(
len(p) >= 20
for w in s.workers.values()
for p in w.processing.values()
)
await asyncio.sleep(0.01)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_cancel_clears_processing(c, s, *workers):
da = pytest.importorskip("dask.array")
x = c.submit(slowinc, 1, delay=0.2)
while not s.tasks:
await asyncio.sleep(0.01)
await c.cancel(x)
while any(v for w in s.workers.values() for v in w.processing):
await asyncio.sleep(0.01)
s.validate_state()
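# set_as_default / default_client tests: a client registered as default becomes dask's
# global scheduler until it closes or another default client supersedes it.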
def test_default_get():
with cluster() as (s, [a, b]):
pre_get = dask.base.get_scheduler()
pytest.raises(KeyError, dask.config.get, "shuffle")
with Client(s["address"], set_as_default=True) as c:
assert dask.base.get_scheduler() == c.get
assert dask.config.get("shuffle") == "tasks"
assert dask.base.get_scheduler() == pre_get
pytest.raises(KeyError, dask.config.get, "shuffle")
c = Client(s["address"], set_as_default=False)
assert dask.base.get_scheduler() == pre_get
pytest.raises(KeyError, dask.config.get, "shuffle")
c.close()
c = Client(s["address"], set_as_default=True)
assert dask.config.get("shuffle") == "tasks"
assert dask.base.get_scheduler() == c.get
c.close()
assert dask.base.get_scheduler() == pre_get
pytest.raises(KeyError, dask.config.get, "shuffle")
with Client(s["address"]) as c:
assert dask.base.get_scheduler() == c.get
with Client(s["address"], set_as_default=False) as c:
assert dask.base.get_scheduler() != c.get
assert dask.base.get_scheduler() != c.get
with Client(s["address"], set_as_default=True) as c1:
assert dask.base.get_scheduler() == c1.get
with Client(s["address"], set_as_default=True) as c2:
assert dask.base.get_scheduler() == c2.get
assert dask.base.get_scheduler() == c1.get
assert dask.base.get_scheduler() == pre_get
@gen_cluster(client=True)
async def test_ensure_default_client(c, s, a, b):
assert c is default_client()
async with Client(s.address, set_as_default=False, asynchronous=True) as c2:
assert c is default_client()
assert c2 is not default_client()
ensure_default_client(c2)
assert c is not default_client()
assert c2 is default_client()
def test_ensure_default_get_deprecated():
with pytest.warns(FutureWarning, match="`ensure_default_get` is deprecated"):
from distributed.client import ensure_default_get
assert ensure_default_get is ensure_default_client
@gen_cluster()
async def test_set_as_default(s, a, b):
with pytest.raises(ValueError):
default_client()
async with Client(s.address, set_as_default=False, asynchronous=True) as c1:
with pytest.raises(ValueError):
default_client()
async with Client(s.address, set_as_default=True, asynchronous=True) as c2:
assert default_client() is c2
async with Client(s.address, set_as_default=True, asynchronous=True) as c3:
assert default_client() is c3
async with Client(
s.address, set_as_default=False, asynchronous=True
) as c4:
assert default_client() is c3
await c4.scheduler_comm.close()
while c4.status != "running":
await asyncio.sleep(0.01)
assert default_client() is c3
with pytest.raises(ValueError):
default_client()
@gen_cluster(client=True)
async def test_get_processing(c, s, a, b):
processing = await c.processing()
assert processing == valmap(tuple, s.processing)
futures = c.map(
slowinc, range(10), delay=0.1, workers=[a.address], allow_other_workers=True
)
await asyncio.sleep(0.2)
x = await c.processing()
assert set(x) == {a.address, b.address}
x = await c.processing(workers=[a.address])
assert isinstance(x[a.address], (list, tuple))
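# These tests call scheduler RPC endpoints directly via c.scheduler (ncores, has_what,
# nbytes, who_has) and compare the results with the scheduler's internal state.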
@gen_cluster(client=True)
async def test_get_foo(c, s, a, b):
futures = c.map(inc, range(10))
await wait(futures)
x = await c.scheduler.ncores()
assert x == s.nthreads
x = await c.scheduler.ncores(workers=[a.address])
assert x == {a.address: s.nthreads[a.address]}
x = await c.scheduler.has_what()
assert valmap(sorted, x) == valmap(sorted, s.has_what)
x = await c.scheduler.has_what(workers=[a.address])
assert valmap(sorted, x) == {a.address: sorted(s.has_what[a.address])}
x = await c.scheduler.nbytes(summary=False)
assert x == s.get_nbytes(summary=False)
x = await c.scheduler.nbytes(keys=[futures[0].key], summary=False)
assert x == {futures[0].key: s.tasks[futures[0].key].nbytes}
x = await c.scheduler.who_has()
assert valmap(sorted, x) == valmap(sorted, s.who_has)
x = await c.scheduler.who_has(keys=[futures[0].key])
assert valmap(sorted, x) == {futures[0].key: sorted(s.who_has[futures[0].key])}
def assert_dict_key_equal(expected, actual):
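"""Assert both dicts have the same keys and that corresponding values are element-wise equal, regardless of container type."""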
assert set(expected.keys()) == set(actual.keys())
for k in actual.keys():
ev = expected[k]
av = actual[k]
assert list(ev) == list(av)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_get_foo_lost_keys(c, s, u, v, w):
x = c.submit(inc, 1, workers=[u.address])
y = await c.scatter(3, workers=[v.address])
await wait([x, y])
ua, va, wa = u.address, v.address, w.address
d = await c.scheduler.has_what()
assert_dict_key_equal(d, {ua: [x.key], va: [y.key], wa: []})
d = await c.scheduler.has_what(workers=[ua, va])
assert_dict_key_equal(d, {ua: [x.key], va: [y.key]})
d = await c.scheduler.who_has()
assert_dict_key_equal(d, {x.key: [ua], y.key: [va]})
d = await c.scheduler.who_has(keys=[x.key, y.key])
assert_dict_key_equal(d, {x.key: [ua], y.key: [va]})
await u.close()
await v.close()
d = await c.scheduler.has_what()
assert_dict_key_equal(d, {wa: []})
d = await c.scheduler.has_what(workers=[ua, va])
assert_dict_key_equal(d, {ua: [], va: []})
# The scattered key cannot be recomputed so it is forgotten
d = await c.scheduler.who_has()
assert_dict_key_equal(d, {x.key: []})
# ... but when passed explicitly, it is included in the result
d = await c.scheduler.who_has(keys=[x.key, y.key])
assert_dict_key_equal(d, {x.key: [], y.key: []})
@pytest.mark.slow
@gen_cluster(
client=True, Worker=Nanny, clean_kwargs={"threads": False, "processes": False}
)
async def test_bad_tasks_fail(c, s, a, b):
f = c.submit(sys.exit, 0)
with captured_logger(logging.getLogger("distributed.scheduler")) as logger:
with pytest.raises(KilledWorker) as info:
await f
text = logger.getvalue()
assert f.key in text
assert info.value.last_worker.nanny in {a.address, b.address}
await asyncio.gather(a.close(), b.close())
def test_get_processing_sync(c, s, a, b):
processing = c.processing()
assert not any(v for v in processing.values())
futures = c.map(
slowinc, range(10), delay=0.1, workers=[a["address"]], allow_other_workers=False
)
sleep(0.2)
aa = a["address"]
bb = b["address"]
processing = c.processing()
assert set(c.processing(aa)) == {aa}
assert set(c.processing([aa])) == {aa}
c.cancel(futures)
def test_close_idempotent(c):
c.close()
c.close()
c.close()
@nodebug
def test_get_returns_early(c):
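# get() should raise as soon as the "x" task errors instead of waiting for the sleeping "y" task to finish.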
start = time()
with suppress(RuntimeError):
result = c.get({"x": (throws, 1), "y": (sleep, 1)}, ["x", "y"])
assert time() < start + 0.5
# Futures should be released and forgotten
wait_for(lambda: not c.futures, timeout=0.1)
wait_for(lambda: not any(c.processing().values()), timeout=3)
x = c.submit(inc, 1)
x.result()
with suppress(RuntimeError):
result = c.get({"x": (throws, 1), x.key: (inc, 1)}, ["x", x.key])
assert x.key in c.futures
@pytest.mark.slow
@gen_cluster(Worker=Nanny, client=True, timeout=60)
async def test_Client_clears_references_after_restart(c, s, a, b):
x = c.submit(inc, 1)
assert x.key in c.refcount
await c.restart()
assert x.key not in c.refcount
key = x.key
del x
import gc
gc.collect()
await asyncio.sleep(0)
assert key not in c.refcount
@gen_cluster(Worker=Nanny, client=True)
async def test_restart_timeout_is_logged(c, s, a, b):
with captured_logger(logging.getLogger("distributed.client")) as logger:
await c.restart(timeout="0.5s")
text = logger.getvalue()
assert "Restart timed out after 0.50 seconds" in text
def test_get_stops_work_after_error(c):
with pytest.raises(RuntimeError):
c.get({"x": (throws, 1), "y": (sleep, 1.5)}, ["x", "y"])
start = time()
while any(c.processing().values()):
sleep(0.01)
assert time() < start + 0.5
def test_as_completed_list(c):
seq = c.map(inc, range(5))
seq2 = list(as_completed(seq))
assert set(c.gather(seq2)) == {1, 2, 3, 4, 5}
def test_as_completed_results(c):
seq = c.map(inc, range(5))
seq2 = list(as_completed(seq, with_results=True))
assert set(pluck(1, seq2)) == {1, 2, 3, 4, 5}
assert set(pluck(0, seq2)) == set(seq)
@pytest.mark.parametrize("with_results", [True, False])
def test_as_completed_batches(c, with_results):
n = 50
futures = c.map(slowinc, range(n), delay=0.01)
out = []
for batch in as_completed(futures, with_results=with_results).batches():
assert isinstance(batch, (tuple, list))
sleep(0.05)
out.extend(batch)
assert len(out) == n
if with_results:
assert set(pluck(1, out)) == set(range(1, n + 1))
else:
assert set(out) == set(futures)
def test_as_completed_next_batch(c):
futures = c.map(slowinc, range(2), delay=0.1)
ac = as_completed(futures)
assert not ac.is_empty()
assert ac.next_batch(block=False) == []
assert set(ac.next_batch(block=True)).issubset(futures)
while not ac.is_empty():
assert set(ac.next_batch(block=True)).issubset(futures)
assert ac.is_empty()
assert not ac.has_ready()
@gen_cluster(nthreads=[])
async def test_status(s):
c = await Client(s.address, asynchronous=True)
assert c.status == "running"
x = c.submit(inc, 1)
await c.close()
assert c.status == "closed"
@gen_cluster(client=True)
async def test_async_whowhat(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
who_has = await c.who_has()
has_what = await c.has_what()
assert type(who_has) is WhoHas
assert type(has_what) is HasWhat
assert who_has == {x.key: (a.address,)}
assert has_what == {a.address: (x.key,), b.address: ()}
def test_client_repr_html(c):
x = c.submit(inc, 1)
who_has = c.who_has()
has_what = c.has_what()
assert type(who_has) is WhoHas
assert type(has_what) is HasWhat
@gen_cluster(client=True)
async def test_persist_optimize_graph(c, s, a, b):
i = 10
for method in [c.persist, c.compute]:
b = db.range(i, npartitions=2)
i += 1
b2 = b.map(inc)
b3 = b2.map(inc)
b4 = method(b3, optimize_graph=False)
await wait(b4)
assert set(map(stringify, b3.__dask_keys__())).issubset(s.tasks)
b = db.range(i, npartitions=2)
i += 1
b2 = b.map(inc)
b3 = b2.map(inc)
b4 = method(b3, optimize_graph=True)
await wait(b4)
assert not any(stringify(k) in s.tasks for k in b2.__dask_keys__())
@gen_cluster(client=True, nthreads=[])
async def test_scatter_raises_if_no_workers(c, s):
with pytest.raises(TimeoutError):
await c.scatter(1, timeout=0.5)
@pytest.mark.slow
def test_reconnect(loop):
w = Worker("127.0.0.1", 9393, loop=loop)
loop.add_callback(w.start)
scheduler_cli = [
"dask-scheduler",
"--host",
"127.0.0.1",
"--port",
"9393",
"--no-dashboard",
]
with popen(scheduler_cli):
c = Client("127.0.0.1:9393", loop=loop)
c.wait_for_workers(1, timeout=10)
x = c.submit(inc, 1)
assert x.result(timeout=10) == 2
start = time()
while c.status != "connecting":
assert time() < start + 10
sleep(0.01)
assert x.status == "cancelled"
with pytest.raises(CancelledError):
x.result(timeout=10)
with popen(scheduler_cli):
start = time()
while c.status != "running":
sleep(0.1)
assert time() < start + 10
start = time()
while len(c.nthreads()) != 1:
sleep(0.05)
assert time() < start + 10
x = c.submit(inc, 1)
assert x.result(timeout=10) == 2
start = time()
while True:
assert time() < start + 10
try:
x.result(timeout=10)
assert False
except CommClosedError:
continue
except CancelledError:
break
sync(loop, w.close, timeout=1)
c.close()
class UnhandledException(Exception):
pass
@contextmanager
def catch_unhandled_exceptions() -> Generator[None, None, None]:
loop = asyncio.get_running_loop()
ctx: dict[str, Any] | None = None
old_handler = loop.get_exception_handler()
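# Using loop.set_exception_handler as a decorator registers `_` as the handler;
# it records the context of any unhandled exception so it can be re-raised on exit.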
@loop.set_exception_handler
def _(loop: object, context: dict[str, Any]) -> None:
nonlocal ctx
ctx = context
try:
yield
finally:
loop.set_exception_handler(old_handler)
if ctx:
raise UnhandledException(ctx["message"]) from ctx.get("exception")
@gen_cluster(client=True, nthreads=[], client_kwargs={"timeout": 0.5})
async def test_reconnect_timeout(c, s):
with catch_unhandled_exceptions(), captured_logger(
logging.getLogger("distributed.client")
) as logger:
await s.close()
while c.status != "closed":
await c._update_scheduler_info()
await asyncio.sleep(0.05)
text = logger.getvalue()
assert "Failed to reconnect" in text
@pytest.mark.avoid_ci(reason="hangs on github actions ubuntu-latest CI")
@pytest.mark.slow
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@pytest.mark.parametrize("worker,count,repeat", [(Worker, 100, 5), (Nanny, 10, 20)])
def test_open_close_many_workers(loop, worker, count, repeat):
proc = psutil.Process()
with cluster(nworkers=0, active_rpc_timeout=2) as (s, _):
gc.collect()
before = proc.num_fds()
done = Semaphore(0)
running = weakref.WeakKeyDictionary()
workers = set()
status = True
async def start_worker(sleep, duration, repeat=1):
for i in range(repeat):
await asyncio.sleep(sleep)
if not status:
return
w = worker(s["address"], loop=loop)
running[w] = None
await w
workers.add(w)
addr = w.worker_address
running[w] = addr
await asyncio.sleep(duration)
await w.close()
del w
await asyncio.sleep(0)
done.release()
for i in range(count):
loop.add_callback(
start_worker, random.random() / 5, random.random() / 5, repeat=repeat
)
with Client(s["address"], loop=loop) as c:
sleep(1)
for i in range(count):
done.acquire(timeout=5)
gc.collect()
if not running:
break
start = time()
while c.nthreads():
sleep(0.2)
assert time() < start + 10
while len(workers) < count * repeat:
sleep(0.2)
status = False
[c.sync(w.close) for w in list(workers)]
for w in workers:
assert w.status == Status.closed
start = time()
while proc.num_fds() > before:
print("fds:", before, proc.num_fds())
sleep(0.1)
if time() > start + 10:
if worker == Worker: # this is an esoteric case
print("File descriptors did not clean up")
break
else:
raise ValueError("File descriptors did not clean up")
@gen_cluster()
async def test_idempotence(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
# Submit
x = c.submit(inc, 1)
await x
log = list(s.transition_log)
len_single_submit = len(log) # see last assert
y = f.submit(inc, 1)
assert x.key == y.key
await y
await asyncio.sleep(0.1)
log2 = list(s.transition_log)
assert log == log2
# Error
a = c.submit(div, 1, 0)
await wait(a)
assert a.status == "error"
log = list(s.transition_log)
b = f.submit(div, 1, 0)
assert a.key == b.key
await wait(b)
await asyncio.sleep(0.1)
log2 = list(s.transition_log)
assert log == log2
s.transition_log.clear()
# Simultaneous Submit
d = c.submit(inc, 2)
e = c.submit(inc, 2)
await wait([d, e])
assert len(s.transition_log) == len_single_submit
await c.close()
await f.close()
def test_scheduler_info(c):
info = c.scheduler_info()
assert isinstance(info, dict)
assert len(info["workers"]) == 2
assert isinstance(info["started"], float)
def test_write_scheduler_file(c):
info = c.scheduler_info()
with tmpfile("json") as scheduler_file:
c.write_scheduler_file(scheduler_file)
with Client(scheduler_file=scheduler_file) as c2:
info2 = c2.scheduler_info()
assert c.scheduler.address == c2.scheduler.address
# test that a ValueError is raised if the scheduler_file
# attribute is already set
with pytest.raises(ValueError):
c.write_scheduler_file(scheduler_file)
def test_get_versions_sync(c):
requests = pytest.importorskip("requests")
v = c.get_versions()
assert v["scheduler"] is not None
assert v["client"] is not None
assert len(v["workers"]) == 2
for k, v in v["workers"].items():
assert v is not None
c.get_versions(check=True)
# Smoke test: the check=True call above should not raise
v = c.get_versions(packages=["requests"])
assert v["client"]["packages"]["requests"] == requests.__version__
@gen_cluster(client=True)
async def test_get_versions_async(c, s, a, b):
v = await c.get_versions(check=True)
assert v.keys() == {"scheduler", "client", "workers"}
@gen_cluster(client=True, config={"distributed.comm.timeouts.connect": "200ms"})
async def test_get_versions_rpc_error(c, s, a, b):
a.stop()
v = await c.get_versions()
assert v.keys() == {"scheduler", "client", "workers"}
assert v["workers"].keys() == {b.address}
def test_threaded_get_within_distributed(c):
import dask.multiprocessing
for get in [dask.local.get_sync, dask.multiprocessing.get, dask.threaded.get]:
def f():
return get({"x": (lambda: 1,)}, "x")
future = c.submit(f)
assert future.result() == 1
@gen_cluster(client=True)
async def test_lose_scattered_data(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
await a.close()
await asyncio.sleep(0.1)
assert x.status == "cancelled"
assert x.key not in s.tasks
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_partially_lose_scattered_data(e, s, a, b, c):
x = await e.scatter(1, workers=a.address)
await e.replicate(x, n=2)
await a.close()
await asyncio.sleep(0.1)
assert x.status == "finished"
assert s.get_task_status(keys=[x.key]) == {x.key: "memory"}
@gen_cluster(client=True)
async def test_scatter_compute_lose(c, s, a, b):
[x] = await c.scatter([[1, 2, 3, 4]], workers=a.address)
y = c.submit(inc, 1, workers=b.address)
z = c.submit(slowadd, x, y, delay=0.2)
await asyncio.sleep(0.1)
await a.close()
with pytest.raises(CancelledError):
await wait(z)
assert x.status == "cancelled"
assert y.status == "finished"
assert z.status == "cancelled"
@gen_cluster(client=True)
async def test_scatter_compute_store_lose(c, s, a, b):
"""
Create irreplaceable data on one machine and let a dependent computation
occur on another machine and complete.
Then kill the machine with the irreplaceable data. What happens to the
completed result? How about after it GCs and tries to come back?
"""
x = await c.scatter(1, workers=a.address)
xx = c.submit(inc, x, workers=a.address)
y = c.submit(inc, 1)
z = c.submit(slowadd, xx, y, delay=0.2, workers=b.address)
await wait(z)
await a.close()
while x.status == "finished":
await asyncio.sleep(0.01)
# assert xx.status == 'finished'
assert y.status == "finished"
assert z.status == "finished"
zz = c.submit(inc, z)
await wait(zz)
zkey = z.key
del z
while s.get_task_status(keys=[zkey]) != {zkey: "released"}:
await asyncio.sleep(0.01)
xxkey = xx.key
del xx
while x.key in s.tasks and zkey not in s.tasks and xxkey not in s.tasks:
await asyncio.sleep(0.01)
@gen_cluster(client=True)
async def test_scatter_compute_store_lose_processing(c, s, a, b):
"""
Create irreplaceable data on one machine and let a dependent computation
occur on another machine and complete.
Then kill the machine with the irreplaceable data. What happens to the
completed result? How about after it GCs and tries to come back?
"""
[x] = await c.scatter([1], workers=a.address)
y = c.submit(slowinc, x, delay=0.2)
z = c.submit(inc, y)
await asyncio.sleep(0.1)
await a.close()
while x.status == "finished":
await asyncio.sleep(0.01)
assert y.status == "cancelled"
assert z.status == "cancelled"
@gen_cluster()
async def test_serialize_future(s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
future = c1.submit(lambda: 1)
result = await future
for ci in (c1, c2):
for ctxman in ci.as_current, lambda: temp_default_client(ci):
with ctxman():
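# Deserializing a Future should bind it to whichever client is current/default inside this context.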
future2 = pickle.loads(pickle.dumps(future))
assert future2.client is ci
assert stringify(future2.key) in ci.futures
result2 = await future2
assert result == result2
await c1.close()
await c2.close()
@gen_cluster()
async def test_temp_default_client(s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
with temp_default_client(c1):
assert default_client() is c1
assert default_client(c2) is c2
with temp_default_client(c2):
assert default_client() is c2
assert default_client(c1) is c1
await c1.close()
await c2.close()
@gen_cluster(client=True)
async def test_as_current(c, s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
with temp_default_client(c):
assert Client.current() is c
with pytest.raises(ValueError):
Client.current(allow_global=False)
with c1.as_current():
assert Client.current() is c1
assert Client.current(allow_global=True) is c1
with c2.as_current():
assert Client.current() is c2
assert Client.current(allow_global=True) is c2
await c1.close()
await c2.close()
def test_as_current_is_thread_local(s):
l1 = threading.Lock()
l2 = threading.Lock()
l3 = threading.Lock()
l4 = threading.Lock()
l1.acquire()
l2.acquire()
l3.acquire()
l4.acquire()
def run1():
with Client(s["address"]) as c:
with c.as_current():
l1.acquire()
l2.release()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
l3.acquire()
l4.release()
def run2():
with Client(s["address"]) as c:
with c.as_current():
l1.release()
l2.acquire()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
l3.release()
l4.acquire()
t1 = threading.Thread(target=run1)
t2 = threading.Thread(target=run2)
t1.start()
t2.start()
t1.join()
t2.join()
@gen_cluster()
async def test_as_current_is_task_local(s, a, b):
l1 = asyncio.Lock()
l2 = asyncio.Lock()
l3 = asyncio.Lock()
l4 = asyncio.Lock()
await l1.acquire()
await l2.acquire()
await l3.acquire()
await l4.acquire()
async def run1():
async with Client(s.address, asynchronous=True) as c:
with c.as_current():
await l1.acquire()
l2.release()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
await l3.acquire()
l4.release()
async def run2():
async with Client(s.address, asynchronous=True) as c:
with c.as_current():
l1.release()
await l2.acquire()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
l3.release()
await l4.acquire()
await asyncio.gather(run1(), run2())
@nodebug # test timing is fragile
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_persist_workers_annotate(e, s, a, b, c):
with dask.annotate(workers=a.address, allow_other_workers=False):
L1 = [delayed(inc)(i) for i in range(4)]
with dask.annotate(workers=b.address, allow_other_workers=False):
total = delayed(sum)(L1)
with dask.annotate(workers=c.address, allow_other_workers=True):
L2 = [delayed(add)(i, total) for i in L1]
with dask.annotate(workers=b.address, allow_other_workers=True):
total2 = delayed(sum)(L2)
# TODO: once annotations are faithfully forwarded upon graph optimization,
# we shouldn't need to disable that here.
out = e.persist(L1 + L2 + [total, total2], optimize_graph=False)
await wait(out)
assert all(v.key in a.data for v in L1)
assert total.key in b.data
assert s.loose_restrictions == {total2.key} | {v.key for v in L2}
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_persist_workers_annotate2(e, s, a, b, c):
def key_to_worker(key):
return a.address
L1 = [delayed(inc)(i) for i in range(4)]
for x in L1:
assert all(layer.annotations is None for layer in x.dask.layers.values())
with dask.annotate(workers=key_to_worker):
out = e.persist(L1, optimize_graph=False)
await wait(out)
for x in L1:
assert all(layer.annotations is None for layer in x.dask.layers.values())
for v in L1:
assert s.worker_restrictions[v.key] == {a.address}
@nodebug # test timing is fragile
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_persist_workers(e, s, a, b, c):
L1 = [delayed(inc)(i) for i in range(4)]
total = delayed(sum)(L1)
L2 = [delayed(add)(i, total) for i in L1]
total2 = delayed(sum)(L2)
out = e.persist(
L1 + L2 + [total, total2],
workers=[a.address, b.address],
allow_other_workers=True,
)
await wait(out)
for v in L1 + L2 + [total, total2]:
assert s.worker_restrictions[v.key] == {a.address, b.address}
assert not any(c.address in r for r in s.worker_restrictions)
assert s.loose_restrictions == {total.key, total2.key} | {v.key for v in L1 + L2}
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_compute_workers_annotate(e, s, a, b, c):
with dask.annotate(workers=a.address, allow_other_workers=True):
L1 = [delayed(inc)(i) for i in range(4)]
with dask.annotate(workers=b.address, allow_other_workers=True):
total = delayed(sum)(L1)
with dask.annotate(workers=[c.address]):
L2 = [delayed(add)(i, total) for i in L1]
# TODO: once annotations are faithfully forwarded upon graph optimization,
# we shouldn't need to disable that here.
out = e.compute(L1 + L2 + [total], optimize_graph=False)
await wait(out)
for v in L1:
assert s.worker_restrictions[v.key] == {a.address}
for v in L2:
assert s.worker_restrictions[v.key] == {c.address}
assert s.worker_restrictions[total.key] == {b.address}
assert s.loose_restrictions == {total.key} | {v.key for v in L1}
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_compute_workers(e, s, a, b, c):
L1 = [delayed(inc)(i) for i in range(4)]
total = delayed(sum)(L1)
L2 = [delayed(add)(i, total) for i in L1]
out = e.compute(
L1 + L2 + [total],
workers=[a.address, b.address],
allow_other_workers=True,
)
await wait(out)
for v in L1 + L2 + [total]:
assert s.worker_restrictions[v.key] == {a.address, b.address}
assert not any(c.address in r for r in s.worker_restrictions)
assert s.loose_restrictions == {total.key} | {v.key for v in L1 + L2}
@gen_cluster(client=True)
async def test_compute_nested_containers(c, s, a, b):
da = pytest.importorskip("dask.array")
np = pytest.importorskip("numpy")
x = da.ones(10, chunks=(5,)) + 1
future = c.compute({"x": [x], "y": 123})
result = await future
assert isinstance(result, dict)
assert (result["x"][0] == np.ones(10) + 1).all()
assert result["y"] == 123
@gen_cluster(client=True)
async def test_scatter_type(c, s, a, b):
[future] = await c.scatter([1])
assert future.type == int
d = await c.scatter({"x": 1.0})
assert d["x"].type == float
@gen_cluster(client=True)
async def test_retire_workers_2(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
await s.retire_workers(workers=[a.address])
assert b.data == {x.key: 1}
assert s.who_has == {x.key: {b.address}}
assert s.has_what == {b.address: {x.key}}
assert a.address not in s.workers
@gen_cluster(client=True, nthreads=[("", 1)] * 10)
async def test_retire_many_workers(c, s, *workers):
futures = await c.scatter(list(range(100)))
await s.retire_workers(workers=[w.address for w in workers[:7]])
results = await c.gather(futures)
assert results == list(range(100))
while len(s.workers) != 3:
await asyncio.sleep(0.01)
assert len(s.has_what) == len(s.nthreads) == 3
assert all(future.done() for future in futures)
assert all(s.tasks[future.key].state == "memory" for future in futures)
assert await c.gather(futures) == list(range(100))
# Don't count how many tasks landed on each worker.
# Normally, tasks would be distributed evenly over the surviving workers. However,
# here all workers share the same process memory, so you'll get an unintuitive
# distribution of tasks if for any reason one transfer takes longer than 2 seconds
# and, as a consequence, the Active Memory Manager ends up running for two iterations.
# This is something that will happen more frequently on low-powered CI machines.
# See test_active_memory_manager.py for tests that robustly verify the statistical
# distribution of tasks after worker retirement.
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 3)] * 2,
config={"distributed.scheduler.default-task-durations": {"f": "10ms"}},
)
async def test_weight_occupancy_against_data_movement(c, s, a, b):
await s.extensions["stealing"].stop()
def f(x, y=0, z=0):
sleep(0.01)
return x
y = await c.scatter([[1, 2, 3, 4]], workers=[a.address])
z = await c.scatter([1], workers=[b.address])
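# The large dependency (y) lives on a and the small one (z) on b; the scheduler should
# weigh occupancy against data movement, so most tasks land on a while b still gets some.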
futures = c.map(f, [1, 2, 3, 4], y=y, z=z)
await wait(futures)
assert sum(f.key in a.data for f in futures) >= 2
assert sum(f.key in b.data for f in futures) >= 1
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1), ("127.0.0.1", 10)],
config={"distributed.scheduler.default-task-durations": {"f": "10ms"}},
)
async def test_distribute_tasks_by_nthreads(c, s, a, b):
await s.extensions["stealing"].stop()
def f(x, y=0):
sleep(0.01)
return x
y = await c.scatter([1], broadcast=True)
futures = c.map(f, range(20), y=y)
await wait(futures)
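# Worker b has ten threads to a's one, so it should end up holding far more of the results.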
assert len(b.data) > 2 * len(a.data)
@gen_cluster(client=True, clean_kwargs={"threads": False})
async def test_add_done_callback(c, s, a, b):
S = set()
def f(future):
future.add_done_callback(g)
def g(future):
S.add((future.key, future.status))
u = c.submit(inc, 1, key="u")
v = c.submit(throws, "hello", key="v")
w = c.submit(slowinc, 2, delay=0.3, key="w")
x = c.submit(inc, 3, key="x")
u.add_done_callback(f)
v.add_done_callback(f)
w.add_done_callback(f)
await wait((u, v, w, x))
x.add_done_callback(f)
while len(S) < 4:
await asyncio.sleep(0.01)
assert S == {(f.key, f.status) for f in (u, v, w, x)}
@gen_cluster(client=True)
async def test_normalize_collection(c, s, a, b):
x = delayed(inc)(1)
y = delayed(inc)(x)
z = delayed(inc)(y)
yy = c.persist(y)
zz = c.normalize_collection(z)
assert len(z.dask) == len(y.dask) + 1
assert isinstance(zz.dask[y.key], Future)
assert len(zz.dask) < len(z.dask)
@gen_cluster(client=True)
async def test_normalize_collection_dask_array(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=(5,))
y = x + 1
yy = c.persist(y)
z = y.sum()
zdsk = dict(z.dask)
zz = c.normalize_collection(z)
assert z.dask == zdsk # do not mutate input
assert len(z.dask) > len(zz.dask)
assert any(isinstance(v, Future) for v in zz.dask.values())
for k, v in yy.dask.items():
assert zz.dask[k].key == v.key
result1 = await c.compute(z)
result2 = await c.compute(zz)
assert result1 == result2
@pytest.mark.slow
def test_normalize_collection_with_released_futures(c):
da = pytest.importorskip("dask.array")
x = da.arange(2**20, chunks=2**10)
y = x.persist()
wait(y)
sol = y.sum().compute()
# Start releasing futures
del y
# Try to reuse futures. Previously this was a race condition,
# and the call to `.compute()` would error out due to missing
# futures on the scheduler at compute time.
normalized = c.normalize_collection(x)
res = normalized.sum().compute()
assert res == sol
@pytest.mark.xfail(reason="https://github.com/dask/distributed/issues/4404")
@gen_cluster(client=True)
async def test_auto_normalize_collection(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=5)
assert len(x.dask) == 2
with dask.config.set(optimizations=[c._optimize_insert_futures]):
y = x.map_blocks(slowinc, delay=1, dtype=x.dtype)
yy = c.persist(y)
await wait(yy)
start = time()
future = c.compute(y.sum())
await future
end = time()
assert end - start < 1
start = time()
z = c.persist(y + 1)
await wait(z)
end = time()
assert end - start < 1
@pytest.mark.xfail(reason="https://github.com/dask/distributed/issues/4404")
def test_auto_normalize_collection_sync(c):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=5)
y = x.map_blocks(slowinc, delay=1, dtype=x.dtype)
yy = c.persist(y)
wait(yy)
with dask.config.set(optimizations=[c._optimize_insert_futures]):
start = time()
y.sum().compute()
end = time()
assert end - start < 1
def assert_no_data_loss(scheduler):
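"""Assert that no key transitioned from memory to released with a recommendation straight back to waiting, which would mean already-computed data was thrown away."""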
for key, start, finish, recommendations, _ in scheduler.transition_log:
if start == "memory" and finish == "released":
for k, v in recommendations.items():
assert not (k == key and v == "waiting")
@gen_cluster(client=True)
async def test_interleave_computations(c, s, a, b):
import distributed
distributed.g = s
xs = [delayed(slowinc)(i, delay=0.02) for i in range(30)]
ys = [delayed(slowdec)(x, delay=0.02) for x in xs]
zs = [delayed(slowadd)(x, y, delay=0.02) for x, y in zip(xs, ys)]
total = delayed(sum)(zs)
future = c.compute(total)
done = ("memory", "released")
await asyncio.sleep(0.1)
x_keys = [x.key for x in xs]
y_keys = [y.key for y in ys]
z_keys = [z.key for z in zs]
while not s.tasks or any(w.processing for w in s.workers.values()):
await asyncio.sleep(0.05)
x_done = sum(state in done for state in s.get_task_status(keys=x_keys).values())
y_done = sum(state in done for state in s.get_task_status(keys=y_keys).values())
z_done = sum(state in done for state in s.get_task_status(keys=z_keys).values())
assert x_done >= y_done >= z_done
assert x_done < y_done + 10
assert y_done < z_done + 10
assert_no_data_loss(s)
@pytest.mark.skip(reason="Now prefer first-in-first-out")
@gen_cluster(client=True)
async def test_interleave_computations_map(c, s, a, b):
xs = c.map(slowinc, range(30), delay=0.02)
ys = c.map(slowdec, xs, delay=0.02)
zs = c.map(slowadd, xs, ys, delay=0.02)
done = ("memory", "released")
x_keys = [x.key for x in xs]
y_keys = [y.key for y in ys]
z_keys = [z.key for z in zs]
while not s.tasks or any(w.processing for w in s.workers.values()):
await asyncio.sleep(0.05)
x_done = sum(state in done for state in s.get_task_status(keys=x_keys).values())
y_done = sum(state in done for state in s.get_task_status(keys=y_keys).values())
z_done = sum(state in done for state in s.get_task_status(keys=z_keys).values())
assert x_done >= y_done >= z_done
assert x_done < y_done + 10
assert y_done < z_done + 10
@gen_cluster(client=True)
async def test_scatter_dict_workers(c, s, a, b):
await c.scatter({"a": 10}, workers=[a.address, b.address])
assert "a" in a.data or "a" in b.data
@pytest.mark.slow
@gen_test()
async def test_client_timeout():
"""`await Client(...)` keeps retrying for 10 seconds if it can't find the Scheduler
straight away
"""
with dask.config.set({"distributed.comm.timeouts.connect": "10s"}):
c = Client("127.0.0.1:57484", asynchronous=True)
client_start_fut = asyncio.ensure_future(c)
await asyncio.sleep(2)
async with Scheduler(port=57484, dashboard_address=":0"):
await client_start_fut
assert await c.run_on_scheduler(lambda: 123) == 123
await c.close()
@gen_cluster(client=True)
async def test_submit_list_kwargs(c, s, a, b):
futures = await c.scatter([1, 2, 3])
def f(L=None):
return sum(L)
future = c.submit(f, L=futures)
result = await future
assert result == 1 + 2 + 3
@gen_cluster(client=True)
async def test_map_list_kwargs(c, s, a, b):
futures = await c.scatter([1, 2, 3])
def f(i, L=None):
return i + sum(L)
futures = c.map(f, range(10), L=futures)
results = await c.gather(futures)
assert results == [i + 6 for i in range(10)]
@gen_cluster(client=True)
async def test_dont_clear_waiting_data(c, s, a, b):
x = await c.scatter(1)
y = c.submit(slowinc, x, delay=0.5)
while y.key not in s.tasks:
await asyncio.sleep(0.01)
key = x.key
del x
for i in range(5):
assert s.waiting_data[key]
await asyncio.sleep(0)
@gen_cluster(client=True)
async def test_recreate_error_delayed(c, s, a, b):
x0 = delayed(dec)(2)
y0 = delayed(dec)(1)
x = delayed(div)(1, x0)
y = delayed(div)(1, y0)
tot = delayed(sum)(x, y)
f = c.compute(tot)
assert f.status == "pending"
error_f = await c._get_errored_future(f)
function, args, kwargs = await c._get_components_from_future(error_f)
assert f.status == "error"
assert function.__name__ == "div"
assert args == (1, 0)
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_futures(c, s, a, b):
x0 = c.submit(dec, 2)
y0 = c.submit(dec, 1)
x = c.submit(div, 1, x0)
y = c.submit(div, 1, y0)
tot = c.submit(sum, x, y)
f = c.compute(tot)
assert f.status == "pending"
error_f = await c._get_errored_future(f)
function, args, kwargs = await c._get_components_from_future(error_f)
assert f.status == "error"
assert function.__name__ == "div"
assert args == (1, 0)
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_collection(c, s, a, b):
b = db.range(10, npartitions=4)
b = b.map(lambda x: 1 / x)
b = b.persist()
f = c.compute(b)
error_f = await c._get_errored_future(f)
function, args, kwargs = await c._get_components_from_future(error_f)
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
dd = pytest.importorskip("dask.dataframe")
import pandas as pd
df = dd.from_pandas(pd.DataFrame({"a": [0, 1, 2, 3, 4]}), chunksize=2)
def make_err(x):
# because pandas would happily work with NaN
if x == 0:
raise ValueError
return x
df2 = df.a.map(make_err)
f = c.compute(df2)
error_f = await c._get_errored_future(f)
function, args, kwargs = await c._get_components_from_future(error_f)
with pytest.raises(ValueError):
function(*args, **kwargs)
# with persist
df3 = c.persist(df2)
error_f = await c._get_errored_future(df3)
function, args, kwargs = await c._get_components_from_future(error_f)
with pytest.raises(ValueError):
function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_array(c, s, a, b):
da = pytest.importorskip("dask.array")
pytest.importorskip("scipy")
z = (da.linalg.inv(da.zeros((10, 10), chunks=10)) + 1).sum()
zz = z.persist()
error_f = await c._get_errored_future(zz)
function, args, kwargs = await c._get_components_from_future(error_f)
assert "0.,0.,0." in str(args).replace(" ", "") # args contain actual arrays
def test_recreate_error_sync(c):
x0 = c.submit(dec, 2)
y0 = c.submit(dec, 1)
x = c.submit(div, 1, x0)
y = c.submit(div, 1, y0)
tot = c.submit(sum, x, y)
f = c.compute(tot)
with pytest.raises(ZeroDivisionError):
c.recreate_error_locally(f)
assert f.status == "error"
def test_recreate_error_not_error(c):
f = c.submit(dec, 2)
with pytest.raises(ValueError, match="No errored futures passed"):
c.recreate_error_locally(f)
@gen_cluster(client=True)
async def test_recreate_task_delayed(c, s, a, b):
x0 = delayed(dec)(2)
y0 = delayed(dec)(2)
x = delayed(div)(1, x0)
y = delayed(div)(1, y0)
tot = delayed(sum)([x, y])
f = c.compute(tot)
assert f.status == "pending"
function, args, kwargs = await c._get_components_from_future(f)
assert f.status == "finished"
assert function.__name__ == "sum"
assert args == ([1, 1],)
assert function(*args, **kwargs) == 2
@gen_cluster(client=True)
async def test_recreate_task_futures(c, s, a, b):
x0 = c.submit(dec, 2)
y0 = c.submit(dec, 2)
x = c.submit(div, 1, x0)
y = c.submit(div, 1, y0)
tot = c.submit(sum, [x, y])
f = c.compute(tot)
assert f.status == "pending"
function, args, kwargs = await c._get_components_from_future(f)
assert f.status == "finished"
assert function.__name__ == "sum"
assert args == ([1, 1],)
assert function(*args, **kwargs) == 2
@gen_cluster(client=True)
async def test_recreate_task_collection(c, s, a, b):
b = db.range(10, npartitions=4)
b = b.map(lambda x: int(3628800 / (x + 1)))
b = b.persist()
f = c.compute(b)
function, args, kwargs = await c._get_components_from_future(f)
assert function(*args, **kwargs) == [
3628800,
1814400,
1209600,
907200,
725760,
604800,
518400,
453600,
403200,
362880,
]
dd = pytest.importorskip("dask.dataframe")
import pandas as pd
df = dd.from_pandas(pd.DataFrame({"a": [0, 1, 2, 3, 4]}), chunksize=2)
df2 = df.a.map(lambda x: x + 1)
f = c.compute(df2)
function, args, kwargs = await c._get_components_from_future(f)
expected = pd.DataFrame({"a": [1, 2, 3, 4, 5]})["a"]
assert function(*args, **kwargs).equals(expected)
# with persist
df3 = c.persist(df2)
# recreate_task_locally only works with futures
with pytest.raises(AttributeError):
function, args, kwargs = await c._get_components_from_future(df3)
f = c.compute(df3)
function, args, kwargs = await c._get_components_from_future(f)
assert function(*args, **kwargs).equals(expected)
@gen_cluster(client=True)
async def test_recreate_task_array(c, s, a, b):
da = pytest.importorskip("dask.array")
z = (da.zeros((10, 10), chunks=10) + 1).sum()
f = c.compute(z)
function, args, kwargs = await c._get_components_from_future(f)
assert function(*args, **kwargs) == 100
def test_recreate_task_sync(c):
x0 = c.submit(dec, 2)
y0 = c.submit(dec, 2)
x = c.submit(div, 1, x0)
y = c.submit(div, 1, y0)
tot = c.submit(sum, [x, y])
f = c.compute(tot)
assert c.recreate_task_locally(f) == 2
@gen_cluster(client=True)
async def test_retire_workers(c, s, a, b):
assert set(s.workers) == {a.address, b.address}
await c.retire_workers(workers=[a.address], close_workers=True)
assert set(s.workers) == {b.address}
while a.status != Status.closed:
await asyncio.sleep(0.01)
class MyException(Exception):
pass
@gen_cluster(client=True)
async def test_robust_unserializable(c, s, a, b):
class Foo:
def __getstate__(self):
raise MyException()
with pytest.raises(MyException):
future = c.submit(identity, Foo())
futures = c.map(inc, range(10))
results = await c.gather(futures)
assert results == list(map(inc, range(10)))
assert a.data and b.data
@gen_cluster(client=True)
async def test_robust_undeserializable(c, s, a, b):
class Foo:
def __getstate__(self):
return 1
def __setstate__(self, state):
raise MyException("hello")
future = c.submit(identity, Foo())
with pytest.raises(MyException):
await future
futures = c.map(inc, range(10))
results = await c.gather(futures)
assert results == list(map(inc, range(10)))
assert a.data and b.data
@gen_cluster(client=True)
async def test_robust_undeserializable_function(c, s, a, b):
class Foo:
def __getstate__(self):
return 1
def __setstate__(self, state):
raise MyException("hello")
def __call__(self, *args):
return 1
future = c.submit(Foo(), 1)
with pytest.raises(MyException):
await future
futures = c.map(inc, range(10))
results = await c.gather(futures)
assert results == list(map(inc, range(10)))
assert a.data and b.data
@gen_cluster(client=True)
async def test_fire_and_forget(c, s, a, b):
future = c.submit(slowinc, 1, delay=0.1)
import distributed
def f(x):
distributed.foo = 123
try:
fire_and_forget(c.submit(f, future))
while not hasattr(distributed, "foo"):
await asyncio.sleep(0.01)
assert distributed.foo == 123
finally:
del distributed.foo
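# Once the fire-and-forget task finishes it is forgotten by the scheduler;
# only the future still referenced by the client should remain.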
while len(s.tasks) > 1:
await asyncio.sleep(0.01)
assert set(s.who_wants) == {future.key}
assert set(s.tasks) == {future.key}
@gen_cluster(client=True)
async def test_fire_and_forget_err(c, s, a, b):
fire_and_forget(c.submit(div, 1, 0))
await asyncio.sleep(0.1)
# erred task should clear out quickly
start = time()
while s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 1
def test_quiet_client_close(loop):
with captured_logger(logging.getLogger("distributed")) as logger:
with Client(
loop=loop,
processes=False,
dashboard_address=":0",
threads_per_worker=4,
) as c:
futures = c.map(slowinc, range(1000), delay=0.01)
sleep(0.200) # stop part-way
sleep(0.1) # let things settle
out = logger.getvalue()
lines = out.strip().split("\n")
assert len(lines) <= 2
for line in lines:
assert (
not line
or "Reconnecting" in line
or "garbage" in line
or set(line) == {"-"}
), line
@pytest.mark.slow
def test_quiet_client_close_when_cluster_is_closed_before_client(loop):
with captured_logger(logging.getLogger("tornado.application")) as logger:
cluster = LocalCluster(loop=loop, n_workers=1, dashboard_address=":0")
client = Client(cluster, loop=loop)
cluster.close()
client.close()
out = logger.getvalue()
assert "CancelledError" not in out
@gen_cluster()
async def test_close(s, a, b):
c = await Client(s.address, asynchronous=True)
future = c.submit(inc, 1)
await wait(future)
assert c.id in s.wants_what
await c.close()
while c.id in s.wants_what or s.tasks:
await asyncio.sleep(0.01)
def test_threadsafe(c):
def f(_):
d = deque(maxlen=50)
for i in range(100):
future = c.submit(inc, random.randint(0, 100))
d.append(future)
sleep(0.001)
c.gather(list(d))
total = c.submit(sum, list(d))
return total.result()
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(20) as e:
results = list(e.map(f, range(20)))
assert results and all(results)
del results
@pytest.mark.slow
def test_threadsafe_get(c):
da = pytest.importorskip("dask.array")
x = da.arange(100, chunks=(10,))
def f(_):
total = 0
for i in range(20):
total += (x + random.randint(0, 20)).sum().compute()
sleep(0.001)
return total
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(30) as e:
results = list(e.map(f, range(30)))
assert results and all(results)
@pytest.mark.slow
def test_threadsafe_compute(c):
da = pytest.importorskip("dask.array")
x = da.arange(100, chunks=(10,))
def f(_):
total = 0
for i in range(20):
future = c.compute((x + random.randint(0, 20)).sum())
total += future.result()
sleep(0.001)
return total
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(30) as e:
    results = list(e.map(f, range(30)))
assert results and all(results)
@gen_cluster(client=True)
async def test_identity(c, s, a, b):
assert c.id.lower().startswith("client")
assert a.id.lower().startswith("worker")
assert b.id.lower().startswith("worker")
assert s.id.lower().startswith("scheduler")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 4)] * 2)
async def test_get_client(c, s, a, b):
assert get_client() is c
assert c.asynchronous
def f(x):
import distributed
client = get_client()
assert not client.asynchronous
assert client is distributed.tmp_client
future = client.submit(inc, x)
return future.result()
import distributed
distributed.tmp_client = c
try:
futures = c.map(f, range(5))
results = await c.gather(futures)
assert results == list(map(inc, range(5)))
finally:
del distributed.tmp_client
def test_get_client_no_cluster():
# Clean up any global workers added by other tests. This test requires that
# there are no global workers.
Worker._instances.clear()
msg = "No global client found and no address provided"
with pytest.raises(ValueError, match=rf"^{msg}$"):
get_client()
@gen_cluster(client=True)
async def test_serialize_collections(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.arange(10, chunks=(5,)).persist()
def f(x):
assert isinstance(x, da.Array)
return x.sum().compute()
future = c.submit(f, x)
result = await future
assert result == sum(range(10))
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 1)
async def test_secede_simple(c, s, a):
def f():
client = get_client()
secede()
return client.submit(inc, 1).result()
result = await c.submit(f)
assert result == 2
@gen_cluster(client=True)
async def test_secede_balances(c, s, a, b):
"""Ensure that tasks scheduled from a seceded thread can be scheduled
elsewhere"""
def f(x):
client = get_client()
secede()
futures = client.map(inc, range(10), pure=False)
total = client.submit(sum, futures).result()
return total
futures = c.map(f, range(10), workers=[a.address])
results = await c.gather(futures)
# We dispatch 10 tasks and every task generates 11 more tasks
# 10 * 11 + 10
assert a.executed_count + b.executed_count == 120
assert a.executed_count >= 10
assert b.executed_count > 0
assert results == [sum(map(inc, range(10)))] * 10
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_long_running_not_in_occupancy(c, s, a):
# https://github.com/dask/distributed/issues/5332
from distributed import Lock
l = Lock()
await l.acquire()
def long_running(lock):
sleep(0.1)
secede()
lock.acquire()
f = c.submit(long_running, l)
while f.key not in s.tasks:
await asyncio.sleep(0.01)
assert s.workers[a.address].occupancy == parse_timedelta(
dask.config.get("distributed.scheduler.unknown-task-duration")
)
while s.workers[a.address].occupancy:
await asyncio.sleep(0.01)
await a.heartbeat()
ts = s.tasks[f.key]
ws = s.workers[a.address]
s.set_duration_estimate(ts, ws)
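# A seceded long-running task must not count towards the worker's occupancy.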
assert s.workers[a.address].occupancy == 0
s.reevaluate_occupancy(0)
assert s.workers[a.address].occupancy == 0
await l.release()
await f
@gen_cluster(client=True)
async def test_sub_submit_priority(c, s, a, b):
def func():
client = get_client()
f = client.submit(slowinc, 1, delay=0.5, key="slowinc")
client.gather(f)
future = c.submit(func, key="f")
while len(s.tasks) != 2:
await asyncio.sleep(0.001)
# lower values schedule first
assert s.tasks["f"].priority > s.tasks["slowinc"].priority, (
s.tasks["f"].priority,
s.tasks["slowinc"].priority,
)
def test_get_client_sync(c, s, a, b):
results = c.run(lambda: get_worker().scheduler.address)
assert results == {w["address"]: s["address"] for w in [a, b]}
results = c.run(lambda: get_client().scheduler.address)
assert results == {w["address"]: s["address"] for w in [a, b]}
@gen_cluster(client=True)
async def test_serialize_collections_of_futures(c, s, a, b):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
from dask.dataframe.utils import assert_eq
df = pd.DataFrame({"x": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=2).persist()
future = await c.scatter(ddf)
ddf2 = await future
df2 = await c.compute(ddf2)
assert_eq(df, df2)
def test_serialize_collections_of_futures_sync(c):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
from dask.dataframe.utils import assert_eq
df = pd.DataFrame({"x": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=2).persist()
future = c.scatter(ddf)
result = future.result()
assert_eq(result.compute(), df)
assert future.type == dd.DataFrame
assert c.submit(lambda x, y: assert_eq(x.compute(), y), future, df).result()
def _dynamic_workload(x, delay=0.01):
if delay == "random":
sleep(random.random() / 2)
else:
sleep(delay)
if x > 4:
return 4
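# Secede from the worker's thread pool so this thread does not hold a task slot
# while it blocks on the child tasks submitted below.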
secede()
client = get_client()
futures = client.map(
_dynamic_workload, [x + i + 1 for i in range(2)], pure=False, delay=delay
)
total = client.submit(sum, futures)
return total.result()
def test_dynamic_workloads_sync(c):
future = c.submit(_dynamic_workload, 0, delay=0.02)
assert future.result(timeout=20) == 52
@pytest.mark.slow
def test_dynamic_workloads_sync_random(c):
future = c.submit(_dynamic_workload, 0, delay="random")
assert future.result(timeout=20) == 52
@pytest.mark.skipif(COMPILED, reason="Fails with cythonized scheduler")
@gen_cluster(client=True)
async def test_bytes_keys(c, s, a, b):
key = b"inc-123"
future = c.submit(inc, 1, key=key)
result = await future
assert type(future.key) is bytes
assert set(s.tasks) == {key}
assert key in a.data or key in b.data
assert result == 2
@gen_cluster(client=True)
async def test_unicode_ascii_keys(c, s, a, b):
uni_type = str
key = "inc-123"
future = c.submit(inc, 1, key=key)
result = await future
assert type(future.key) is uni_type
assert set(s.tasks) == {key}
assert key in a.data or key in b.data
assert result == 2
@gen_cluster(client=True)
async def test_unicode_keys(c, s, a, b):
uni_type = str
key = "inc-123\u03bc"
future = c.submit(inc, 1, key=key)
result = await future
assert type(future.key) is uni_type
assert set(s.tasks) == {key}
assert key in a.data or key in b.data
assert result == 2
future2 = c.submit(inc, future)
result2 = await future2
assert result2 == 3
future3 = await c.scatter({"data-123": 123})
result3 = await future3["data-123"]
assert result3 == 123
def test_use_synchronous_client_in_async_context(loop, c):
async def f():
x = await c.scatter(123)
y = c.submit(inc, x)
z = await c.gather(y)
return z
z = sync(loop, f)
assert z == 124
def test_quiet_quit_when_cluster_leaves(loop_in_thread):
loop = loop_in_thread
with LocalCluster(loop=loop, dashboard_address=":0", silence_logs=False) as cluster:
with captured_logger("distributed.comm") as sio:
with Client(cluster, loop=loop) as client:
futures = client.map(lambda x: x + 1, range(10))
sleep(0.05)
cluster.close()
sleep(0.05)
text = sio.getvalue()
assert not text
def test_warn_executor(loop, s, a, b):
with warnings.catch_warnings(record=True) as record:
with Executor(s["address"], loop=loop) as c:
pass
assert any("Client" in str(r.message) for r in record)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_future(c, s, a, b):
x = c.submit(slowdec, 1, delay=0.5)
future = c.submit(slowinc, 1, delay=0.5)
await asyncio.sleep(0.1)
results = await asyncio.gather(
c.call_stack(future), c.call_stack(keys=[future.key])
)
assert all(list(first(result.values())) == [future.key] for result in results)
assert results[0] == results[1]
result = results[0]
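# Determine which worker is actually executing the slowinc task so we know whose call stack to inspect.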
ts = a.tasks.get(future.key)
if ts is not None and ts.state == "executing":
w = a
else:
w = b
assert list(result) == [w.address]
assert list(result[w.address]) == [future.key]
assert "slowinc" in str(result)
assert "slowdec" not in str(result)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_all(c, s, a, b):
future = c.submit(slowinc, 1, delay=0.8)
while not a.executing_count and not b.executing_count:
await asyncio.sleep(0.01)
result = await c.call_stack()
w = a if a.executing_count else b
assert list(result) == [w.address]
assert list(result[w.address]) == [future.key]
assert "slowinc" in str(result)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_collections(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.random.random(100, chunks=(10,)).map_blocks(slowinc, delay=0.5).persist()
while not a.executing_count and not b.executing_count:
await asyncio.sleep(0.001)
result = await c.call_stack(x)
assert result
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_collections_all(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.random.random(100, chunks=(10,)).map_blocks(slowinc, delay=0.5).persist()
while not a.executing_count and not b.executing_count:
await asyncio.sleep(0.001)
result = await c.call_stack()
assert result
@pytest.mark.flaky(condition=WINDOWS, reruns=10, reruns_delay=5)
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "100ms"})
async def test_profile(c, s, a, b):
futures = c.map(slowinc, range(10), delay=0.05, workers=a.address)
await wait(futures)
x = await c.profile(start=time() + 10, stop=time() + 20)
assert not x["count"]
x = await c.profile(start=0, stop=time())
assert (
x["count"]
== sum(p["count"] for _, p in a.profile_history) + a.profile_recent["count"]
)
y = await c.profile(start=time() - 0.300, stop=time())
assert 0 < y["count"] < x["count"]
assert not any(p["count"] for _, p in b.profile_history)
result = await c.profile(workers=b.address)
assert not result["count"]
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "100ms"})
async def test_profile_keys(c, s, a, b):
x = c.map(slowinc, range(10), delay=0.05, workers=a.address)
y = c.map(slowdec, range(10), delay=0.05, workers=a.address)
await wait(x + y)
xp = await c.profile("slowinc")
yp = await c.profile("slowdec")
p = await c.profile()
assert p["count"] == xp["count"] + yp["count"]
with captured_logger(logging.getLogger("distributed")) as logger:
prof = await c.profile("does-not-exist")
assert prof == profile.create()
out = logger.getvalue()
assert not out
@gen_cluster()
async def test_client_with_name(s, a, b):
with captured_logger("distributed.scheduler") as sio:
client = await Client(s.address, asynchronous=True, name="foo")
assert "foo" in client.id
await client.close()
text = sio.getvalue()
assert "foo" in text
@gen_cluster(client=True)
async def test_future_defaults_to_default_client(c, s, a, b):
x = c.submit(inc, 1)
await wait(x)
future = Future(x.key)
assert future.client is c
@gen_cluster(client=True)
async def test_future_auto_inform(c, s, a, b):
x = c.submit(inc, 1)
await wait(x)
client = await Client(s.address, asynchronous=True)
future = Future(x.key, client)
while future.status != "finished":
await asyncio.sleep(0.01)
await client.close()
def test_client_async_before_loop_starts():
with pristine_loop() as loop:
client = Client(asynchronous=True, loop=loop)
assert client.asynchronous
client.close()
@pytest.mark.slow
@gen_cluster(client=True, Worker=Nanny, timeout=60, nthreads=[("127.0.0.1", 3)] * 2)
async def test_nested_compute(c, s, a, b):
def fib(x):
assert get_worker().get_current_task()
if x < 2:
return x
a = delayed(fib)(x - 1)
b = delayed(fib)(x - 2)
c = a + b
return c.compute()
future = c.submit(fib, 8)
result = await future
assert result == 21
assert len(s.transition_log) > 50
@gen_cluster(client=True)
async def test_task_metadata(c, s, a, b):
await c.set_metadata("x", 1)
result = await c.get_metadata("x")
assert result == 1
future = c.submit(inc, 1)
key = future.key
await wait(future)
await c.set_metadata(key, 123)
result = await c.get_metadata(key)
assert result == 123
del future
while key in s.tasks:
await asyncio.sleep(0.01)
with pytest.raises(KeyError):
await c.get_metadata(key)
result = await c.get_metadata(key, None)
assert result is None
await c.set_metadata(["x", "a"], 1)
result = await c.get_metadata("x")
assert result == {"a": 1}
await c.set_metadata(["x", "b"], 2)
result = await c.get_metadata("x")
assert result == {"a": 1, "b": 2}
result = await c.get_metadata(["x", "a"])
assert result == 1
await c.set_metadata(["x", "a", "c", "d"], 1)
result = await c.get_metadata("x")
assert result == {"a": {"c": {"d": 1}}, "b": 2}
@gen_cluster(client=True, Worker=Nanny)
async def test_logs(c, s, a, b):
await wait(c.map(inc, range(5)))
logs = await c.get_scheduler_logs(n=5)
assert logs
for _, msg in logs:
assert "distributed.scheduler" in msg
w_logs = await c.get_worker_logs(n=5)
assert set(w_logs.keys()) == {a.worker_address, b.worker_address}
for log in w_logs.values():
for _, msg in log:
assert "distributed.worker" in msg
n_logs = await c.get_worker_logs(nanny=True)
assert set(n_logs.keys()) == {a.worker_address, b.worker_address}
for log in n_logs.values():
for _, msg in log:
assert "distributed.nanny" in msg
n_logs = await c.get_worker_logs(nanny=True, workers=[a.worker_address])
assert set(n_logs.keys()) == {a.worker_address}
for log in n_logs.values():
for _, msg in log:
assert "distributed.nanny" in msg
@gen_cluster(client=True)
async def test_avoid_delayed_finalize(c, s, a, b):
x = delayed(inc)(1)
future = c.compute(x)
result = await future
assert result == 2
assert list(s.tasks) == [future.key] == [x.key]
@gen_cluster()
async def test_config_scheduler_address(s, a, b):
with dask.config.set({"scheduler-address": s.address}):
with captured_logger("distributed.client") as sio:
c = await Client(asynchronous=True)
assert c.scheduler.address == s.address
text = sio.getvalue()
assert s.address in text
await c.close()
@gen_cluster(client=True)
async def test_warn_when_submitting_large_values(c, s, a, b):
with warnings.catch_warnings(record=True) as record:
future = c.submit(lambda x: x + 1, b"0" * 2000000)
text = str(record[0].message)
assert "2.00 MB" in text or "1.91 MiB" in text
assert "large" in text
assert "..." in text
assert "'000" in text
assert "000'" in text
assert len(text) < 2000
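# Submitting the same large value repeatedly should emit at most one warning.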
with warnings.catch_warnings(record=True) as record:
data = b"0" * 2000000
for i in range(10):
future = c.submit(lambda x, y: x, data, i)
assert len(record) < 2
@gen_cluster(client=True)
async def test_unhashable_function(c, s, a, b):
func = _UnhashableCallable()
result = await c.submit(func, 1)
assert result == 2
@gen_cluster()
async def test_client_name(s, a, b):
with dask.config.set({"client-name": "hello-world"}):
c = await Client(s.address, asynchronous=True)
assert any("hello-world" in name for name in list(s.clients))
await c.close()
def test_client_doesnt_close_given_loop(loop_in_thread, s, a, b):
with Client(s["address"], loop=loop_in_thread) as c:
assert c.submit(inc, 1).result() == 2
with Client(s["address"], loop=loop_in_thread) as c:
assert c.submit(inc, 2).result() == 3
@gen_cluster(client=True, nthreads=[])
async def test_quiet_scheduler_loss(c, s):
c._periodic_callbacks["scheduler-info"].interval = 10
with captured_logger(logging.getLogger("distributed.client")) as logger:
await s.close()
await c._update_scheduler_info()
text = logger.getvalue()
assert "BrokenPipeError" not in text
def test_dashboard_link(loop, monkeypatch):
monkeypatch.setenv("USER", "myusername")
with cluster(scheduler_kwargs={"dashboard_address": ":12355"}) as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
with dask.config.set(
{"distributed.dashboard.link": "{scheme}://foo-{USER}:{port}/status"}
):
link = "http://foo-myusername:12355/status"
assert link == c.dashboard_link
text = c._repr_html_()
assert link in text
@gen_test()
async def test_dashboard_link_inproc():
async with Client(processes=False, asynchronous=True, dashboard_address=":0") as c:
with dask.config.set({"distributed.dashboard.link": "{host}"}):
assert "/" not in c.dashboard_link
@gen_test()
async def test_client_timeout_2():
with dask.config.set({"distributed.comm.timeouts.connect": "10ms"}):
start = time()
c = Client("127.0.0.1:3755", asynchronous=True)
with pytest.raises((TimeoutError, IOError)):
await c
stop = time()
assert c.status == "closed"
await c.close()
assert stop - start < 1
@gen_test()
async def test_client_active_bad_port():
import tornado.httpserver
import tornado.web
application = tornado.web.Application([(r"/", tornado.web.RequestHandler)])
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(8080)
with dask.config.set({"distributed.comm.timeouts.connect": "10ms"}):
c = Client("127.0.0.1:8080", asynchronous=True)
with pytest.raises((TimeoutError, IOError)):
await c
await c._close(fast=True)
http_server.stop()
@pytest.mark.parametrize("direct", [True, False])
@gen_cluster(client=True, client_kwargs={"serializers": ["dask", "msgpack"]})
async def test_turn_off_pickle(c, s, a, b, direct):
np = pytest.importorskip("numpy")
assert (await c.submit(inc, 1)) == 2
await c.submit(np.ones, 5)
await c.scatter(1)
# Can't send complex data
with pytest.raises(TypeError):
await c.scatter(inc)
# can send complex tasks (this uses pickle regardless)
future = c.submit(lambda x: x, inc)
await wait(future)
# but can't receive complex results
with pytest.raises(TypeError):
await c.gather(future, direct=direct)
# Run works
result = await c.run(lambda: 1)
assert list(result.values()) == [1, 1]
result = await c.run_on_scheduler(lambda: 1)
assert result == 1
# But not with complex return values
with pytest.raises(TypeError):
await c.run(lambda: inc)
with pytest.raises(TypeError):
await c.run_on_scheduler(lambda: inc)
@gen_cluster()
async def test_de_serialization(s, a, b):
np = pytest.importorskip("numpy")
c = await Client(
s.address,
asynchronous=True,
serializers=["msgpack", "pickle"],
deserializers=["msgpack"],
)
try:
# Can send complex data
future = await c.scatter(np.ones(5))
# But can not retrieve it
with pytest.raises(TypeError):
result = await future
finally:
await c.close()
@gen_cluster()
async def test_de_serialization_none(s, a, b):
np = pytest.importorskip("numpy")
c = await Client(s.address, asynchronous=True, deserializers=["msgpack"])
try:
# Can send complex data
future = await c.scatter(np.ones(5))
# But can not retrieve it
with pytest.raises(TypeError):
result = await future
finally:
await c.close()
@gen_cluster()
async def test_client_repr_closed(s, a, b):
c = await Client(s.address, asynchronous=True)
await c.close()
c._repr_html_()
def test_client_repr_closed_sync(loop):
with Client(loop=loop, processes=False, dashboard_address=":0") as c:
pass
c._repr_html_()
@pytest.mark.xfail(reason="https://github.com/dask/dask/pull/6807")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_nested_prioritization(c, s, w):
x = delayed(inc)(1, dask_key_name=("a", 2))
y = delayed(inc)(2, dask_key_name=("a", 10))
o = dask.order.order(merge(x.__dask_graph__(), y.__dask_graph__()))
fx, fy = c.compute([x, y])
await wait([fx, fy])
assert (o[x.key] < o[y.key]) == (
s.tasks[stringify(fx.key)].priority < s.tasks[stringify(fy.key)].priority
)
@gen_cluster(client=True)
async def test_scatter_error_cancel(c, s, a, b):
# https://github.com/dask/distributed/issues/2038
def bad_fn(x):
raise Exception("lol")
x = await c.scatter(1)
y = c.submit(bad_fn, x)
del x
await wait(y)
assert y.status == "error"
await asyncio.sleep(0.1)
assert y.status == "error" # not cancelled
@pytest.mark.parametrize("workers_arg", [False, True])
@pytest.mark.parametrize("direct", [False, True])
@pytest.mark.parametrize("broadcast", [False, True, 10])
@gen_cluster(
client=True,
nthreads=[("", 1)] * 10,
config={"distributed.worker.memory.pause": False},
)
async def test_scatter_and_replicate_avoid_paused_workers(
c, s, *workers, workers_arg, direct, broadcast
):
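    # Pause every worker except workers 3 and 7; the scatter/replicate calls below
    # should end up placing data only on the two workers that remain running.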
paused_workers = [w for i, w in enumerate(workers) if i not in (3, 7)]
for w in paused_workers:
w.status = Status.paused
while any(s.workers[w.address].status != Status.paused for w in paused_workers):
await asyncio.sleep(0.01)
f = await c.scatter(
{"x": 1},
workers=[w.address for w in workers[1:-1]] if workers_arg else None,
broadcast=broadcast,
direct=direct,
)
if not broadcast:
await c.replicate(f, n=10)
expect = [i in (3, 7) for i in range(10)]
actual = [("x" in w.data) for w in workers]
assert actual == expect
@pytest.mark.xfail(reason="GH#5409 Dask-Default-Threads are frequently detected")
def test_no_threads_lingering():
if threading.active_count() < 40:
return
active = dict(threading._active)
print(f"==== Found {len(active)} active threads: ====")
for t in active.values():
print(t)
assert False
@gen_cluster()
async def test_direct_async(s, a, b):
c = await Client(s.address, asynchronous=True, direct_to_workers=True)
assert c.direct_to_workers
await c.close()
c = await Client(s.address, asynchronous=True, direct_to_workers=False)
assert not c.direct_to_workers
await c.close()
def test_direct_sync(c):
assert not c.direct_to_workers
def f():
return get_client().direct_to_workers
assert c.submit(f).result()
@gen_cluster()
async def test_mixing_clients(s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
future = c1.submit(inc, 1)
with pytest.raises(ValueError):
c2.submit(inc, future)
assert not c2.futures # Don't create Futures on second Client
await c1.close()
await c2.close()
@gen_cluster(client=True)
async def test_tuple_keys(c, s, a, b):
x = dask.delayed(inc)(1, dask_key_name=("x", 1))
y = dask.delayed(inc)(x, dask_key_name=("y", 1))
future = c.compute(y)
assert (await future) == 3
@gen_cluster(client=True)
async def test_multiple_scatter(c, s, a, b):
futures = await asyncio.gather(*(c.scatter(1, direct=True) for _ in range(5)))
x = await futures[0]
x = await futures[0]
@gen_cluster(client=True)
async def test_map_large_kwargs_in_graph(c, s, a, b):
np = pytest.importorskip("numpy")
x = np.random.random(100000)
futures = c.map(lambda a, b: a + b, range(100), b=x)
while not s.tasks:
await asyncio.sleep(0.01)
assert len(s.tasks) == 101
assert any(k.startswith("ndarray") for k in s.tasks)
@gen_cluster(client=True)
async def test_retry(c, s, a, b):
def f():
assert dask.config.get("foo")
with dask.config.set(foo=False):
future = c.submit(f)
with pytest.raises(AssertionError):
await future
with dask.config.set(foo=True):
await future.retry()
await future
@gen_cluster(client=True)
async def test_retry_dependencies(c, s, a, b):
def f():
return dask.config.get("foo")
x = c.submit(f)
y = c.submit(inc, x)
with pytest.raises(KeyError):
await y
with dask.config.set(foo=100):
await y.retry()
result = await y
assert result == 101
await y.retry()
await x.retry()
result = await y
assert result == 101
@gen_cluster(client=True)
async def test_released_dependencies(c, s, a, b):
def f(x):
return dask.config.get("foo") + 1
x = c.submit(inc, 1, key="x")
y = c.submit(f, x, key="y")
del x
with pytest.raises(KeyError):
await y
with dask.config.set(foo=100):
await y.retry()
result = await y
assert result == 101
@gen_cluster(client=True, clean_kwargs={"threads": False})
async def test_profile_bokeh(c, s, a, b):
pytest.importorskip("bokeh.plotting")
from bokeh.model import Model
await c.gather(c.map(slowinc, range(10), delay=0.2))
state, figure = await c.profile(plot=True)
assert isinstance(figure, Model)
with tmpfile("html") as fn:
try:
await c.profile(filename=fn)
except PermissionError:
if WINDOWS:
pytest.xfail()
assert os.path.exists(fn)
@gen_cluster(client=True)
async def test_get_mix_futures_and_SubgraphCallable(c, s, a, b):
future = c.submit(add, 1, 2)
subgraph = SubgraphCallable(
{"_2": (add, "_0", "_1"), "_3": (add, future, "_2")}, "_3", ("_0", "_1")
)
dsk = {"a": 1, "b": 2, "c": (subgraph, "a", "b"), "d": (subgraph, "c", "b")}
future2 = c.get(dsk, "d", sync=False)
result = await future2
assert result == 11
# Nested subgraphs
subgraph2 = SubgraphCallable(
{
"_2": (subgraph, "_0", "_1"),
"_3": (subgraph, "_2", "_1"),
"_4": (add, "_3", future2),
},
"_4",
("_0", "_1"),
)
dsk2 = {"e": 1, "f": 2, "g": (subgraph2, "e", "f")}
result = await c.get(dsk2, "g", sync=False)
assert result == 22
@gen_cluster(client=True)
async def test_get_mix_futures_and_SubgraphCallable_dask_dataframe(c, s, a, b):
dd = pytest.importorskip("dask.dataframe")
import pandas as pd
df = pd.DataFrame({"x": range(1, 11)})
ddf = dd.from_pandas(df, npartitions=2).persist()
ddf = ddf.map_partitions(lambda x: x)
ddf["x"] = ddf["x"].astype("f8")
ddf = ddf.map_partitions(lambda x: x)
ddf["x"] = ddf["x"].astype("f8")
result = await c.compute(ddf)
assert result.equals(df.astype("f8"))
def test_direct_to_workers(s, loop):
with Client(s["address"], loop=loop, direct_to_workers=True) as client:
future = client.scatter(1)
future.result()
resp = client.run_on_scheduler(lambda dask_scheduler: dask_scheduler.events)
assert "gather" not in str(resp)
@gen_cluster(client=True)
async def test_instances(c, s, a, b):
assert list(Client._instances) == [c]
assert list(Scheduler._instances) == [s]
assert set(Worker._instances) == {a, b}
@gen_cluster(client=True)
async def test_wait_for_workers(c, s, a, b):
future = asyncio.ensure_future(c.wait_for_workers(n_workers=3))
await asyncio.sleep(0.22) # 2 chances
assert not future.done()
w = await Worker(s.address)
start = time()
await future
assert time() < start + 1
await w.close()
with pytest.raises(TimeoutError) as info:
await c.wait_for_workers(n_workers=10, timeout="1 ms")
assert "2/10" in str(info.value).replace(" ", "")
assert "1 ms" in str(info.value)
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@pytest.mark.asyncio
@pytest.mark.parametrize("Worker", [Worker, Nanny])
async def test_file_descriptors_dont_leak(Worker):
pytest.importorskip("pandas")
df = dask.datasets.timeseries(freq="10s", dtypes={"x": int, "y": float})
proc = psutil.Process()
before = proc.num_fds()
async with Scheduler(dashboard_address=":0") as s:
async with Worker(s.address), Worker(s.address), Client(
s.address, asynchronous=True
):
assert proc.num_fds() > before
await df.sum().persist()
start = time()
while proc.num_fds() > before:
await asyncio.sleep(0.01)
assert time() < start + 10, (before, proc.num_fds())
@gen_test()
async def test_dashboard_link_cluster():
class MyCluster(LocalCluster):
@property
def dashboard_link(self):
return "http://foo.com"
async with MyCluster(
processes=False, asynchronous=True, dashboard_address=":0"
) as cluster:
async with Client(cluster, asynchronous=True) as client:
assert "http://foo.com" in client._repr_html_()
@gen_test()
async def test_shutdown():
async with Scheduler(dashboard_address=":0") as s:
async with Worker(s.address) as w:
async with Client(s.address, asynchronous=True) as c:
await c.shutdown()
assert s.status == Status.closed
assert w.status == Status.closed
@pytest.mark.asyncio
async def test_shutdown_localcluster(cleanup):
async with LocalCluster(
n_workers=1, asynchronous=True, processes=False, dashboard_address=":0"
) as lc:
async with Client(lc, asynchronous=True) as c:
await c.shutdown()
assert lc.scheduler.status == Status.closed
@gen_test()
async def test_config_inherited_by_subprocess():
with dask.config.set(foo=100):
async with LocalCluster(
n_workers=1,
asynchronous=True,
processes=True,
dashboard_address=":0",
) as lc:
async with Client(lc, asynchronous=True) as c:
assert await c.submit(dask.config.get, "foo") == 100
@gen_cluster(client=True)
async def test_futures_of_sorted(c, s, a, b):
pytest.importorskip("dask.dataframe")
df = await dask.datasets.timeseries(dtypes={"x": int}).persist()
futures = futures_of(df)
for k, f in zip(df.__dask_keys__(), futures):
assert str(k) in str(f)
@pytest.mark.flaky(reruns=10, reruns_delay=5)
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "10ms"})
async def test_profile_server(c, s, a, b):
for i in range(5):
try:
x = c.map(slowinc, range(10), delay=0.01, workers=a.address, pure=False)
await wait(x)
await asyncio.gather(
c.run(slowinc, 1, delay=0.5), c.run_on_scheduler(slowdec, 1, delay=0.5)
)
p = await c.profile(server=True) # All worker servers
assert "slowinc" in str(p)
p = await c.profile(scheduler=True) # Scheduler
assert "slowdec" in str(p)
except AssertionError:
if i == 4:
raise
else:
pass
else:
break
@gen_cluster(client=True)
async def test_await_future(c, s, a, b):
future = c.submit(inc, 1)
async def f(): # flake8: noqa
result = await future
assert result == 2
await f()
future = c.submit(div, 1, 0)
async def f():
with pytest.raises(ZeroDivisionError):
await future
await f()
@gen_cluster(client=True)
async def test_as_completed_async_for(c, s, a, b):
futures = c.map(inc, range(10))
ac = as_completed(futures)
results = []
async def f():
async for future in ac:
result = await future
results.append(result)
await f()
assert set(results) == set(range(1, 11))
@gen_cluster(client=True)
async def test_as_completed_async_for_results(c, s, a, b):
futures = c.map(inc, range(10))
ac = as_completed(futures, with_results=True)
results = []
async def f():
async for future, result in ac:
results.append(result)
await f()
assert set(results) == set(range(1, 11))
assert not s.counters["op"].components[0]["gather"]
@gen_cluster(client=True)
async def test_as_completed_async_for_cancel(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(sleep, 0.3)
ac = as_completed([x, y])
async def _():
await asyncio.sleep(0.1)
await y.cancel(asynchronous=True)
c.loop.add_callback(_)
L = []
async def f():
async for future in ac:
L.append(future)
await f()
assert L == [x, y]
@gen_test()
async def test_async_with():
async with Client(processes=False, dashboard_address=":0", asynchronous=True) as c:
assert await c.submit(lambda x: x + 1, 10) == 11
assert c.status == "closed"
assert c.cluster.status == Status.closed
def test_client_sync_with_async_def(loop):
async def ff():
await asyncio.sleep(0.01)
return 1
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
assert sync(loop, ff) == 1
assert c.sync(ff) == 1
@pytest.mark.skip(reason="known intermittent failure")
@gen_cluster(client=True)
async def test_dont_hold_on_to_large_messages(c, s, a, b):
np = pytest.importorskip("numpy")
da = pytest.importorskip("dask.array")
x = np.random.random(1000000)
xr = weakref.ref(x)
d = da.from_array(x, chunks=(100000,))
d = d.persist()
del x
start = time()
while xr() is not None:
if time() > start + 5:
# Help diagnosing
from types import FrameType
x = xr()
if x is not None:
del x
rc = sys.getrefcount(xr())
refs = gc.get_referrers(xr())
print("refs to x:", rc, refs, gc.isenabled())
frames = [r for r in refs if isinstance(r, FrameType)]
for i, f in enumerate(frames):
print(
"frames #%d:" % i,
f.f_code.co_name,
f.f_code.co_filename,
sorted(f.f_locals),
)
pytest.fail("array should have been destroyed")
await asyncio.sleep(0.200)
@gen_cluster(client=True)
async def test_run_on_scheduler_async_def(c, s, a, b):
async def f(dask_scheduler):
await asyncio.sleep(0.01)
dask_scheduler.foo = "bar"
await c.run_on_scheduler(f)
assert s.foo == "bar"
async def f(dask_worker):
await asyncio.sleep(0.01)
dask_worker.foo = "bar"
await c.run(f)
assert a.foo == "bar"
assert b.foo == "bar"
@gen_cluster(client=True)
async def test_run_on_scheduler_async_def_wait(c, s, a, b):
async def f(dask_scheduler):
await asyncio.sleep(0.01)
dask_scheduler.foo = "bar"
await c.run_on_scheduler(f, wait=False)
while not hasattr(s, "foo"):
await asyncio.sleep(0.01)
assert s.foo == "bar"
async def f(dask_worker):
await asyncio.sleep(0.01)
dask_worker.foo = "bar"
await c.run(f, wait=False)
while not hasattr(a, "foo") or not hasattr(b, "foo"):
await asyncio.sleep(0.01)
assert a.foo == "bar"
assert b.foo == "bar"
@pytest.mark.skipif(WINDOWS, reason="frequently kills off the whole test suite")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 2)] * 2)
async def test_performance_report(c, s, a, b):
pytest.importorskip("bokeh")
da = pytest.importorskip("dask.array")
async def f(stacklevel, mode=None):
"""
We wrap this in a function so that the assertions aren't in the
        performance report itself
Also, we want this comment to appear
"""
x = da.random.random((1000, 1000), chunks=(100, 100))
with tmpfile(extension="html") as fn:
async with performance_report(
filename=fn, stacklevel=stacklevel, mode=mode
):
await c.compute((x + x.T).sum())
with open(fn) as f:
data = f.read()
return data
    # Ensure default kwarg maintains backward compatibility
data = await f(stacklevel=1)
assert "Also, we want this comment to appear" in data
assert "bokeh" in data
assert "random" in data
assert "Dask Performance Report" in data
assert "x = da.random" in data
assert "Threads: 4" in data
assert "No logs to report" in data
assert dask.__version__ in data
# stacklevel=2 captures code two frames back -- which in this case
# is the testing function
data = await f(stacklevel=2)
assert "async def test_performance_report(c, s, a, b):" in data
assert "Dask Performance Report" in data
# stacklevel=0 or lower is overridden to stacklevel=1 so we don't see
# distributed internals
data = await f(stacklevel=0)
assert "Also, we want this comment to appear" in data
assert "Dask Performance Report" in data
data = await f(stacklevel=1, mode="inline")
assert "cdn.bokeh.org" not in data
data = await f(stacklevel=1, mode="cdn")
assert "cdn.bokeh.org" in data
@gen_cluster(nthreads=[])
async def test_client_gather_semaphore_loop(s):
async with Client(s.address, asynchronous=True) as c:
assert c._gather_semaphore._loop is c.loop.asyncio_loop
@gen_cluster(client=True)
async def test_as_completed_condition_loop(c, s, a, b):
seq = c.map(inc, range(5))
ac = as_completed(seq)
assert ac.condition._loop == c.loop.asyncio_loop
def test_client_connectionpool_semaphore_loop(s, a, b):
with Client(s["address"]) as c:
assert c.rpc.semaphore._loop is c.loop.asyncio_loop
@pytest.mark.slow
@gen_cluster(nthreads=[], timeout=60)
async def test_mixed_compression(s):
pytest.importorskip("lz4")
da = pytest.importorskip("dask.array")
async with Nanny(
s.address, nthreads=1, config={"distributed.comm.compression": None}
):
async with Nanny(
s.address, nthreads=1, config={"distributed.comm.compression": "lz4"}
):
async with Client(s.address, asynchronous=True) as c:
await c.get_versions()
x = da.ones((10000, 10000))
y = x + x.T
await c.compute(y.sum())
@gen_cluster(client=True)
async def test_futures_in_subgraphs(c, s, a, b):
"""Regression test of <https://github.com/dask/distributed/issues/4145>"""
dd = pytest.importorskip("dask.dataframe")
import pandas as pd
ddf = dd.from_pandas(
pd.DataFrame(
dict(
uid=range(50),
enter_time=pd.date_range(
start="2020-01-01", end="2020-09-01", periods=50, tz="UTC"
),
)
),
npartitions=5,
)
ddf = ddf[ddf.uid.isin(range(29))].persist()
ddf["local_time"] = ddf.enter_time.dt.tz_convert("US/Central")
ddf["day"] = ddf.enter_time.dt.day_name()
ddf = await c.submit(dd.categorical.categorize, ddf, columns=["day"], index=False)
@gen_cluster(client=True)
async def test_get_task_metadata(c, s, a, b):
# Populate task metadata
await c.register_worker_plugin(TaskStateMetadataPlugin())
async with get_task_metadata() as tasks:
f = c.submit(slowinc, 1)
await f
metadata = tasks.metadata
assert f.key in metadata
assert metadata[f.key] == s.tasks.get(f.key).metadata
state = tasks.state
assert f.key in state
assert state[f.key] == "memory"
assert not any(isinstance(p, CollectTaskMetaDataPlugin) for p in s.plugins)
@gen_cluster(client=True)
async def test_get_task_metadata_multiple(c, s, a, b):
# Populate task metadata
await c.register_worker_plugin(TaskStateMetadataPlugin())
# Ensure that get_task_metadata only collects metadata for
# tasks which are submitted and completed within its context
async with get_task_metadata() as tasks1:
f1 = c.submit(slowinc, 1)
await f1
async with get_task_metadata() as tasks2:
f2 = c.submit(slowinc, 2)
await f2
metadata1 = tasks1.metadata
metadata2 = tasks2.metadata
assert len(metadata1) == 2
assert sorted(metadata1.keys()) == sorted([f1.key, f2.key])
assert metadata1[f1.key] == s.tasks.get(f1.key).metadata
assert metadata1[f2.key] == s.tasks.get(f2.key).metadata
assert len(metadata2) == 1
assert list(metadata2.keys()) == [f2.key]
assert metadata2[f2.key] == s.tasks.get(f2.key).metadata
@gen_cluster(client=True)
async def test_register_worker_plugin_exception(c, s, a, b):
class MyPlugin:
def setup(self, worker=None):
raise ValueError("Setup failed")
with pytest.raises(ValueError, match="Setup failed"):
await c.register_worker_plugin(MyPlugin())
@gen_cluster(client=True)
async def test_log_event(c, s, a, b):
# Log an event from inside a task
def foo():
get_worker().log_event("topic1", {"foo": "bar"})
assert not await c.get_events("topic1")
await c.submit(foo)
events = await c.get_events("topic1")
assert len(events) == 1
assert events[0][1] == {"foo": "bar"}
# Log an event while on the scheduler
def log_scheduler(dask_scheduler):
dask_scheduler.log_event("topic2", {"woo": "hoo"})
await c.run_on_scheduler(log_scheduler)
events = await c.get_events("topic2")
assert len(events) == 1
assert events[0][1] == {"woo": "hoo"}
# Log an event from the client process
await c.log_event("topic2", ("alice", "bob"))
events = await c.get_events("topic2")
assert len(events) == 2
assert events[1][1] == ("alice", "bob")
@gen_cluster(client=True)
async def test_annotations_task_state(c, s, a, b):
da = pytest.importorskip("dask.array")
with dask.annotate(qux="bar", priority=100):
x = da.ones(10, chunks=(5,))
with dask.config.set(optimization__fuse__active=False):
x = await x.persist()
assert all(
{"qux": "bar", "priority": 100} == ts.annotations for ts in s.tasks.values()
)
@pytest.mark.parametrize("fn", ["compute", "persist"])
@gen_cluster(client=True)
async def test_annotations_compute_time(c, s, a, b, fn):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=(5,))
with dask.annotate(foo="bar"):
# Turn off optimization to avoid rewriting layers and picking up annotations
# that way. Instead, we want `compute`/`persist` to be able to pick them up.
fut = getattr(c, fn)(x, optimize_graph=False)
await wait(fut)
assert s.tasks
assert all(ts.annotations == {"foo": "bar"} for ts in s.tasks.values())
@pytest.mark.xfail(reason="https://github.com/dask/dask/issues/7036")
@gen_cluster(client=True)
async def test_annotations_survive_optimization(c, s, a, b):
da = pytest.importorskip("dask.array")
with dask.annotate(foo="bar"):
x = da.ones(10, chunks=(5,))
ann = x.__dask_graph__().layers[x.name].annotations
assert ann is not None
assert ann.get("foo", None) == "bar"
(xx,) = dask.optimize(x)
ann = xx.__dask_graph__().layers[x.name].annotations
assert ann is not None
assert ann.get("foo", None) == "bar"
@gen_cluster(client=True)
async def test_annotations_priorities(c, s, a, b):
da = pytest.importorskip("dask.array")
with dask.annotate(priority=15):
x = da.ones(10, chunks=(5,))
with dask.config.set(optimization__fuse__active=False):
x = await x.persist()
assert all("15" in str(ts.priority) for ts in s.tasks.values())
assert all(ts.priority[0] == -15 for ts in s.tasks.values())
assert all({"priority": 15} == ts.annotations for ts in s.tasks.values())
@gen_cluster(client=True)
async def test_annotations_workers(c, s, a, b):
da = pytest.importorskip("dask.array")
with dask.annotate(workers=[a.address]):
x = da.ones(10, chunks=(5,))
with dask.config.set(optimization__fuse__active=False):
x = await x.persist()
assert all({"workers": (a.address,)} == ts.annotations for ts in s.tasks.values())
assert all({a.address} == ts.worker_restrictions for ts in s.tasks.values())
assert a.data
assert not b.data
@gen_cluster(client=True)
async def test_annotations_retries(c, s, a, b):
da = pytest.importorskip("dask.array")
with dask.annotate(retries=2):
x = da.ones(10, chunks=(5,))
with dask.config.set(optimization__fuse__active=False):
x = await x.persist()
assert all(ts.retries == 2 for ts in s.tasks.values())
assert all(ts.annotations == {"retries": 2} for ts in s.tasks.values())
@gen_cluster(client=True)
async def test_annotations_blockwise_unpack(c, s, a, b):
da = pytest.importorskip("dask.array")
np = pytest.importorskip("numpy")
from dask.array.utils import assert_eq
# A flaky doubling function -- need extra args because it is called before
# application to establish dtype/meta.
scale = varying([ZeroDivisionError("one"), ZeroDivisionError("two"), 2, 2])
def flaky_double(x):
return scale() * x
# A reliable double function.
def reliable_double(x):
return 2 * x
x = da.ones(10, chunks=(5,))
# The later annotations should not override the earlier annotations
with dask.annotate(retries=2):
y = x.map_blocks(flaky_double, meta=np.array((), dtype=float))
with dask.annotate(retries=0):
z = y.map_blocks(reliable_double, meta=np.array((), dtype=float))
with dask.config.set(optimization__fuse__active=False):
z = await c.compute(z)
assert_eq(z, np.ones(10) * 4.0)
@gen_cluster(
client=True,
nthreads=[
("127.0.0.1", 1),
("127.0.0.1", 1, {"resources": {"GPU": 1}}),
],
)
async def test_annotations_resources(c, s, a, b):
da = pytest.importorskip("dask.array")
with dask.annotate(resources={"GPU": 1}):
x = da.ones(10, chunks=(5,))
with dask.config.set(optimization__fuse__active=False):
x = await x.persist()
assert all([{"GPU": 1} == ts.resource_restrictions for ts in s.tasks.values()])
assert all([{"resources": {"GPU": 1}} == ts.annotations for ts in s.tasks.values()])
@gen_cluster(
client=True,
nthreads=[
("127.0.0.1", 1),
("127.0.0.1", 1, {"resources": {"GPU": 1}}),
],
)
async def test_annotations_resources_culled(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones((2, 2, 2), chunks=1)
with dask.annotate(resources={"GPU": 1}):
y = x.map_blocks(lambda x0: x0, meta=x._meta)
z = y[0, 0, 0]
(z,) = c.compute([z], optimize_graph=False)
await z
# it worked!
@gen_cluster(client=True)
async def test_annotations_loose_restrictions(c, s, a, b):
da = pytest.importorskip("dask.array")
# Eventually fails if allow_other_workers=False
with dask.annotate(workers=["fake"], allow_other_workers=True):
x = da.ones(10, chunks=(5,))
with dask.config.set(optimization__fuse__active=False):
x = await x.persist()
assert all(not ts.worker_restrictions for ts in s.tasks.values())
assert all({"fake"} == ts.host_restrictions for ts in s.tasks.values())
assert all(
[
{"workers": ("fake",), "allow_other_workers": True} == ts.annotations
for ts in s.tasks.values()
]
)
@gen_cluster(client=True)
async def test_workers_collection_restriction(c, s, a, b):
da = pytest.importorskip("dask.array")
future = c.compute(da.arange(10), workers=a.address)
await future
assert a.data and not b.data
@gen_cluster(client=True, nthreads=[("127.0.0.1", 0)])
async def test_get_client_functions_spawn_clusters(c, s, a):
# see gh4565
scheduler_addr = c.scheduler.address
def f(x):
with LocalCluster(
n_workers=1,
processes=False,
dashboard_address=":0",
worker_dashboard_address=":0",
) as cluster2:
with Client(cluster2) as c1:
c2 = get_client()
c1_scheduler = c1.scheduler.address
c2_scheduler = c2.scheduler.address
assert c1_scheduler != c2_scheduler
assert c2_scheduler == scheduler_addr
await c.gather(c.map(f, range(2)))
await a.close()
c_default = default_client()
assert c is c_default
def test_computation_code_walk_frames():
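    # Client._get_computation_code should return the source of the calling frame,
    # and fall back to the next frame up the stack when this test module is listed
    # in the ignore-modules config.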
test_function_code = inspect.getsource(test_computation_code_walk_frames)
code = Client._get_computation_code()
assert test_function_code == code
def nested_call():
return Client._get_computation_code()
assert nested_call() == inspect.getsource(nested_call)
with pytest.raises(TypeError, match="Ignored modules must be a list"):
with dask.config.set(
{"distributed.diagnostics.computations.ignore-modules": "test_client"}
):
code = Client._get_computation_code()
with dask.config.set(
{"distributed.diagnostics.computations.ignore-modules": ["test_client"]}
):
import sys
upper_frame_code = inspect.getsource(sys._getframe(1))
code = Client._get_computation_code()
assert code == upper_frame_code
assert nested_call() == upper_frame_code
def test_computation_object_code_dask_compute(client):
da = pytest.importorskip("dask.array")
x = da.ones((10, 10), chunks=(3, 3))
future = x.sum().compute()
y = future
test_function_code = inspect.getsource(test_computation_object_code_dask_compute)
def fetch_comp_code(dask_scheduler):
computations = list(dask_scheduler.computations)
assert len(computations) == 1
comp = computations[0]
assert len(comp.code) == 1
return comp.code[0]
code = client.run_on_scheduler(fetch_comp_code)
assert code == test_function_code
def test_computation_object_code_not_available(client):
np = pytest.importorskip("numpy")
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
df = pd.DataFrame({"a": range(10)})
ddf = dd.from_pandas(df, npartitions=3)
result = np.where(ddf.a > 4)
def fetch_comp_code(dask_scheduler):
computations = list(dask_scheduler.computations)
assert len(computations) == 1
comp = computations[0]
assert len(comp.code) == 1
return comp.code[0]
code = client.run_on_scheduler(fetch_comp_code)
assert code == "<Code not available>"
@gen_cluster(client=True)
async def test_computation_object_code_dask_persist(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones((10, 10), chunks=(3, 3))
future = x.sum().persist()
await future
test_function_code = inspect.getsource(
test_computation_object_code_dask_persist.__wrapped__
)
computations = list(s.computations)
assert len(computations) == 1
comp = computations[0]
assert len(comp.code) == 1
assert comp.code[0] == test_function_code
@gen_cluster(client=True)
async def test_computation_object_code_client_submit_simple(c, s, a, b):
def func(x):
return x
fut = c.submit(func, 1)
await fut
test_function_code = inspect.getsource(
test_computation_object_code_client_submit_simple.__wrapped__
)
computations = list(s.computations)
assert len(computations) == 1
comp = computations[0]
assert len(comp.code) == 1
assert comp.code[0] == test_function_code
@gen_cluster(client=True)
async def test_computation_object_code_client_submit_list_comp(c, s, a, b):
def func(x):
return x
futs = [c.submit(func, x) for x in range(10)]
await c.gather(futs)
test_function_code = inspect.getsource(
test_computation_object_code_client_submit_list_comp.__wrapped__
)
computations = list(s.computations)
assert len(computations) == 1
comp = computations[0]
# Code is deduplicated
assert len(comp.code) == 1
assert comp.code[0] == test_function_code
@gen_cluster(client=True)
async def test_computation_object_code_client_submit_dict_comp(c, s, a, b):
def func(x):
return x
futs = {x: c.submit(func, x) for x in range(10)}
await c.gather(futs)
test_function_code = inspect.getsource(
test_computation_object_code_client_submit_dict_comp.__wrapped__
)
computations = list(s.computations)
assert len(computations) == 1
comp = computations[0]
# Code is deduplicated
assert len(comp.code) == 1
assert comp.code[0] == test_function_code
@gen_cluster(client=True)
async def test_computation_object_code_client_map(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones((10, 10), chunks=(3, 3))
future = c.compute(x.sum(), retries=2)
y = await future
test_function_code = inspect.getsource(
test_computation_object_code_client_map.__wrapped__
)
computations = list(s.computations)
assert len(computations) == 1
comp = computations[0]
assert len(comp.code) == 1
assert comp.code[0] == test_function_code
@gen_cluster(client=True)
async def test_computation_object_code_client_compute(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones((10, 10), chunks=(3, 3))
future = c.compute(x.sum(), retries=2)
y = await future
test_function_code = inspect.getsource(
test_computation_object_code_client_compute.__wrapped__
)
computations = list(s.computations)
assert len(computations) == 1
comp = computations[0]
assert len(comp.code) == 1
assert comp.code[0] == test_function_code
@gen_cluster(client=True, Worker=Nanny)
async def test_upload_directory(c, s, a, b, tmp_path):
from dask.distributed import UploadDirectory
# Be sure to exclude code coverage reports
files_start = {f for f in os.listdir() if not f.startswith(".coverage")}
with open(tmp_path / "foo.py", "w") as f:
f.write("x = 123")
with open(tmp_path / "bar.py", "w") as f:
f.write("from foo import x")
plugin = UploadDirectory(tmp_path, restart=True, update_path=True)
await c.register_worker_plugin(plugin)
[name] = a.plugins
assert os.path.split(tmp_path)[-1] in name
def f():
import bar
return bar.x
results = await c.run(f)
assert results[a.worker_address] == 123
assert results[b.worker_address] == 123
async with Nanny(s.address, local_directory=tmp_path / "foo", name="foo") as n:
results = await c.run(f)
assert results[n.worker_address] == 123
files_end = {f for f in os.listdir() if not f.startswith(".coverage")}
assert files_start == files_end # no change
@gen_cluster(client=True)
async def test_exception_text(c, s, a, b):
def bad(x):
raise Exception(x)
future = c.submit(bad, 123)
await wait(future)
ts = s.tasks[future.key]
assert isinstance(ts.exception_text, str)
assert "123" in ts.exception_text
assert "Exception(x)" in ts.traceback_text
assert "bad" in ts.traceback_text
@gen_cluster(client=True)
async def test_async_task(c, s, a, b):
async def f(x):
return x + 1
future = c.submit(f, 10)
result = await future
assert result == 11
@gen_cluster(client=True)
async def test_async_task_with_partial(c, s, a, b):
async def f(x, y):
return x + y + 1
future = c.submit(functools.partial(f, 1), 10)
result = await future
assert result == 12
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_events_subscribe_topic(c, s, a):
log = []
def user_event_handler(event):
log.append(event)
c.subscribe_topic("test-topic", user_event_handler)
while not s.event_subscriber["test-topic"]:
await asyncio.sleep(0.01)
a.log_event("test-topic", {"important": "event"})
while len(log) != 1:
await asyncio.sleep(0.01)
time_, msg = log[0]
assert isinstance(time_, float)
assert msg == {"important": "event"}
c.unsubscribe_topic("test-topic")
while s.event_subscriber["test-topic"]:
await asyncio.sleep(0.01)
a.log_event("test-topic", {"forget": "me"})
while len(s.events["test-topic"]) == 1:
await asyncio.sleep(0.01)
assert len(log) == 1
async def async_user_event_handler(event):
log.append(event)
await asyncio.sleep(0)
c.subscribe_topic("test-topic", async_user_event_handler)
while not s.event_subscriber["test-topic"]:
await asyncio.sleep(0.01)
a.log_event("test-topic", {"async": "event"})
while len(log) == 1:
await asyncio.sleep(0.01)
assert len(log) == 2
time_, msg = log[1]
assert isinstance(time_, float)
assert msg == {"async": "event"}
# Even though the middle event was not subscribed to, the scheduler still
    # knows about all of them, and we can retrieve them
all_events = await c.get_events(topic="test-topic")
assert len(all_events) == 3
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_events_subscribe_topic_cancelled(c, s, a):
event_handler_started = asyncio.Event()
exc_info = None
async def user_event_handler(event):
nonlocal exc_info
c.unsubscribe_topic("test-topic")
event_handler_started.set()
with pytest.raises(asyncio.CancelledError) as exc_info:
await asyncio.sleep(0.5)
c.subscribe_topic("test-topic", user_event_handler)
while not s.event_subscriber["test-topic"]:
await asyncio.sleep(0.01)
a.log_event("test-topic", {})
await event_handler_started.wait()
await c._close(fast=True)
assert exc_info is not None
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_events_all_servers_use_same_channel(c, s, a):
"""Ensure that logs from all server types (scheduler, worker, nanny)
and the clients themselves arrive"""
log = []
def user_event_handler(event):
log.append(event)
c.subscribe_topic("test-topic", user_event_handler)
while not s.event_subscriber["test-topic"]:
await asyncio.sleep(0.01)
async with Nanny(s.address) as n:
a.log_event("test-topic", "worker")
n.log_event("test-topic", "nanny")
s.log_event("test-topic", "scheduler")
await c.log_event("test-topic", "client")
while not len(log) == 4 == len(set(log)):
await asyncio.sleep(0.1)
@gen_cluster(client=True, nthreads=[])
async def test_events_unsubscribe_raises_if_unknown(c, s):
with pytest.raises(ValueError, match="No event handler known for topic unknown"):
c.unsubscribe_topic("unknown")
@gen_cluster(client=True)
async def test_log_event_warn(c, s, a, b):
def foo():
get_worker().log_event(["foo", "warn"], "Hello!")
with pytest.warns(Warning, match="Hello!"):
await c.submit(foo)
@gen_cluster(client=True)
async def test_log_event_warn_dask_warns(c, s, a, b):
from dask.distributed import warn
def foo():
warn("Hello!")
with pytest.warns(Warning, match="Hello!"):
await c.submit(foo)
@gen_cluster(client=True, Worker=Nanny)
async def test_print(c, s, a, b, capsys):
from dask.distributed import print
def foo():
print("Hello!", 123, sep=":")
await c.submit(foo)
out, err = capsys.readouterr()
assert "Hello!:123" in out
@gen_cluster(client=True, Worker=Nanny)
async def test_print_non_msgpack_serializable(c, s, a, b, capsys):
from dask.distributed import print
def foo():
print(object())
await c.submit(foo)
out, err = capsys.readouterr()
assert "<object object at" in out
def test_print_simple(capsys):
from dask.distributed import print
print("Hello!", 123, sep=":")
out, err = capsys.readouterr()
assert "Hello!:123" in out
def _verify_cluster_dump(url, format: str, addresses: set[str]) -> dict:
fsspec = pytest.importorskip("fsspec") # for load_cluster_dump
url = str(url) + (".msgpack.gz" if format == "msgpack" else ".yaml")
state = load_cluster_dump(url)
assert isinstance(state, dict)
assert "scheduler" in state
assert "workers" in state
assert "versions" in state
assert state["workers"].keys() == addresses
return state
def test_dump_cluster_state_write_from_scheduler(
c, s, a, b, tmp_path, monkeypatch: pytest.MonkeyPatch
):
monkeypatch.chdir(tmp_path)
scheduler_dir = tmp_path / "scheduler"
scheduler_dir.mkdir()
c.run_on_scheduler(os.chdir, str(scheduler_dir))
c.dump_cluster_state("not-url")
assert (tmp_path / "not-url.msgpack.gz").is_file()
c.dump_cluster_state("file://is-url")
assert (scheduler_dir / "is-url.msgpack.gz").is_file()
c.dump_cluster_state("file://local-explicit", write_from_scheduler=False)
assert (tmp_path / "local-explicit.msgpack.gz").is_file()
c.dump_cluster_state("scheduler-explicit", write_from_scheduler=True)
assert (scheduler_dir / "scheduler-explicit.msgpack.gz").is_file()
@pytest.mark.parametrize("local", [True, False])
@pytest.mark.parametrize("_format", ["msgpack", "yaml"])
def test_dump_cluster_state_sync(c, s, a, b, tmp_path, _format, local):
filename = tmp_path / "foo"
if not local:
pytest.importorskip("fsspec")
# Make it look like an fsspec path
filename = f"file://{filename}"
c.dump_cluster_state(filename, format=_format)
_verify_cluster_dump(filename, _format, {a["address"], b["address"]})
@pytest.mark.parametrize("local", [True, False])
@pytest.mark.parametrize("_format", ["msgpack", "yaml"])
@gen_cluster(client=True)
async def test_dump_cluster_state_async(c, s, a, b, tmp_path, _format, local):
filename = tmp_path / "foo"
if not local:
pytest.importorskip("fsspec")
# Make it look like an fsspec path
filename = f"file://{filename}"
await c.dump_cluster_state(filename, format=_format)
_verify_cluster_dump(filename, _format, {a.address, b.address})
@pytest.mark.parametrize("local", [True, False])
@gen_cluster(client=True)
async def test_dump_cluster_state_json(c, s, a, b, tmp_path, local):
filename = tmp_path / "foo"
if not local:
pytest.importorskip("fsspec")
# Make it look like an fsspec path
filename = f"file://{filename}"
with pytest.raises(ValueError, match="Unsupported format"):
await c.dump_cluster_state(filename, format="json")
@gen_cluster(client=True)
async def test_dump_cluster_state_exclude_default(c, s, a, b, tmp_path):
futs = c.map(inc, range(10))
while len(s.tasks) != len(futs):
await asyncio.sleep(0.01)
excluded_by_default = [
"run_spec",
]
filename = tmp_path / "foo"
await c.dump_cluster_state(
filename=filename,
format="yaml",
)
with open(f"{filename}.yaml") as fd:
state = yaml.safe_load(fd)
assert "workers" in state
assert len(state["workers"]) == len(s.workers)
for worker, worker_dump in state["workers"].items():
for k, task_dump in worker_dump["tasks"].items():
assert not any(blocked in task_dump for blocked in excluded_by_default)
assert k in s.tasks
assert "scheduler" in state
assert "tasks" in state["scheduler"]
tasks = state["scheduler"]["tasks"]
assert len(tasks) == len(futs)
for k, task_dump in tasks.items():
assert not any(blocked in task_dump for blocked in excluded_by_default)
assert k in s.tasks
await c.dump_cluster_state(
filename=filename,
format="yaml",
exclude=(),
)
with open(f"{filename}.yaml") as fd:
state = yaml.safe_load(fd)
assert "workers" in state
assert len(state["workers"]) == len(s.workers)
for worker, worker_dump in state["workers"].items():
for k, task_dump in worker_dump["tasks"].items():
assert all(blocked in task_dump for blocked in excluded_by_default)
assert k in s.tasks
assert "scheduler" in state
assert "tasks" in state["scheduler"]
tasks = state["scheduler"]["tasks"]
assert len(tasks) == len(futs)
for k, task_dump in tasks.items():
assert all(blocked in task_dump for blocked in excluded_by_default)
assert k in s.tasks
class TestClientSecurityLoader:
@contextmanager
def config_loader(self, monkeypatch, loader):
module_name = "totally_fake_module_name_1"
module = types.ModuleType(module_name)
module.loader = loader
with monkeypatch.context() as m:
m.setitem(sys.modules, module_name, module)
with dask.config.set(
{"distributed.client.security-loader": f"{module_name}.loader"}
):
yield
@pytest.mark.asyncio
async def test_security_loader(self, monkeypatch):
security = tls_only_security()
async with Scheduler(
dashboard_address=":0", protocol="tls", security=security
) as scheduler:
def loader(info):
assert info == {"address": scheduler.address}
return security
with self.config_loader(monkeypatch, loader):
async with Client(scheduler.address, asynchronous=True) as client:
assert client.security is security
@pytest.mark.asyncio
async def test_security_loader_ignored_if_explicit_security_provided(
self, monkeypatch
):
security = tls_only_security()
def loader(info):
assert False
async with Scheduler(
dashboard_address=":0", protocol="tls", security=security
) as scheduler:
with self.config_loader(monkeypatch, loader):
async with Client(
scheduler.address, security=security, asynchronous=True
) as client:
assert client.security is security
@pytest.mark.asyncio
async def test_security_loader_ignored_if_returns_none(self, monkeypatch):
"""Test that if a security loader is configured, but it returns `None`,
then the default security configuration is used"""
ca_file = get_cert("tls-ca-cert.pem")
keycert = get_cert("tls-key-cert.pem")
config = {
"distributed.comm.require-encryption": True,
"distributed.comm.tls.ca-file": ca_file,
"distributed.comm.tls.client.cert": keycert,
"distributed.comm.tls.scheduler.cert": keycert,
"distributed.comm.tls.worker.cert": keycert,
}
def loader(info):
loader.called = True
return None
with dask.config.set(config):
async with Scheduler(dashboard_address=":0", protocol="tls") as scheduler:
# Smoketest to make sure config was picked up (so we're actually testing something)
assert scheduler.security.tls_client_cert
assert scheduler.security.tls_scheduler_cert
with self.config_loader(monkeypatch, loader):
async with Client(scheduler.address, asynchronous=True) as client:
assert (
client.security.tls_client_cert
== scheduler.security.tls_client_cert
)
assert loader.called
@pytest.mark.asyncio
async def test_security_loader_import_failed(self):
security = tls_only_security()
with dask.config.set(
{"distributed.client.security-loader": "totally_fake_module_name_2.loader"}
):
with pytest.raises(ImportError, match="totally_fake_module_name_2.loader"):
async with Client("tls://bad-address:8888", asynchronous=True):
pass
@gen_cluster(client=True, nthreads=[])
async def test_wait_for_workers_updates_info(c, s):
async with Worker(s.address):
await c.wait_for_workers(1)
assert c.scheduler_info()["workers"]
SQLiPy.py
"""
Name: SQLiPy
Version: 0.1
Date: 9/3/2015
Author: Josh Berry - josh.berry@codewatch.org
Github: https://github.com/codewatchorg/sqlipy
Description: This plugin leverages the SQLMap API to initiate SQLMap scans against the target.
This plugin requires the beta version of Jython as it uses the JSON module.
I used this blog post to quickly understand and leverage the SQLMap API (thrilled that someone figured this out for me):
http://volatile-minds.blogspot.com/2013/04/unofficial-sqlmap-restful-api.html
The following Burp plugins were reviewed to help develop this:
- Payload Parser: https://github.com/infodel
- Burp SAMl: https://github.com/Meatballs1/burp_saml
- ActiveScan++:
- WCF Binary SOAP Handler: http://blog.securityps.com/2013/02/burp-suite-plugin-view-and-modify-wcf.html
- WSDL Wizard: https://github.com/SmeegeSec/WSDLWizard/blob/master/WSDLWizard.py
- co2: https://code.google.com/p/burp-co2/
"""
from burp import IBurpExtender
from burp import IBurpExtenderCallbacks
from burp import IContextMenuFactory
from burp import IHttpRequestResponse
from burp import IMessageEditorController
from burp import IMessageEditorTabFactory
from burp import ITab
from burp import IMessageEditorTab
from burp import IScannerCheck
from burp import IScanIssue
from javax import swing
from javax.swing.filechooser import FileNameExtensionFilter
from java.awt import GridBagLayout
from java import awt
import subprocess
import re
import urllib2
import sys
import json
import threading
import time
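# Illustrative sketch, not used by the plugin: the typical request flow against the
# SQLMap API that SQLiPy builds on. Only the /scan/<task>/status and /scan/<task>/data
# calls appear elsewhere in this file; the /task/new and /scan/<task>/start endpoints
# and their payloads are assumptions based on the public sqlmapapi interface, so treat
# this helper (and its name) as a hypothetical example rather than a reference.
def _example_sqlmap_api_scan(api_ip, api_port, target_url):
  base = 'http://' + api_ip + ':' + api_port  # api_port is a string, e.g. '8775'
  # Create a new task and keep its id
  task = json.load(urllib2.urlopen(base + '/task/new'))['taskid']
  # Start a scan for the target URL; options are posted as a JSON body
  req = urllib2.Request(base + '/scan/' + task + '/start', json.dumps({'url': target_url}))
  req.add_header('Content-Type', 'application/json')
  json.load(urllib2.urlopen(req))
  # Poll the status endpoint until the scan reaches a terminal state
  while True:
    status = json.load(urllib2.urlopen(base + '/scan/' + task + '/status'))
    if status['status'] == 'terminated':
      break
    time.sleep(30)
  # Fetch whatever SQLMap enumerated for this task
  return json.load(urllib2.urlopen(base + '/scan/' + task + '/data'))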
class SqlMapScanIssue(IScanIssue):
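  # Minimal IScanIssue implementation; instances are passed to Burp via
  # callbacks.addScanIssue() to report SQLMap findings in the Scanner results.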
def __init__(self, httpService, url, httpMessages, name, detail, confidence, severity):
self.HttpService = httpService
self.vulnurl = url
self.HttpMessages = httpMessages
self.vulnname = name
self.vulndetail = detail
self.vulnsev = severity
self.vulnconf = confidence
return
def getUrl(self):
return self.vulnurl
def getIssueName(self):
return self.vulnname
def getIssueType(self):
return 0
def getSeverity(self):
return self.vulnsev
def getConfidence(self):
return self.vulnconf
def getIssueBackground(self):
return None
def getRemediationBackground(self):
return None
def getIssueDetail(self):
return self.vulndetail
def getRemediationDetail(self):
return None
def getHttpMessages(self):
return self.HttpMessages
def getHttpService(self):
return self.HttpService
class ThreadExtender(IBurpExtender, IContextMenuFactory, ITab, IScannerCheck):
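  # Intended to run in a background thread, one instance per SQLMap task: it polls
  # the API until the scan terminates and reports any findings as a SqlMapScanIssue.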
def __init__(self, burpobject, sqlmapip, sqlmapport, sqlmaptask, url, httpmessage, cbacks):
self.burpobject = burpobject
self.sqlmapip = sqlmapip
self.sqlmapport = sqlmapport
self.sqlmaptask = sqlmaptask
self.url = url
self.httpmessage = httpmessage
self.cbacks = cbacks
def checkResults(self):
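    # Give the scan time to start, then poll the SQLMap API every 30 seconds
    # until it reports a terminal status for this task.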
time.sleep(30)
print 'Checking results on task: '+self.sqlmaptask+'\n'
while True:
try:
req = urllib2.Request('http://' + self.sqlmapip + ':' + self.sqlmapport + '/scan/' + self.sqlmaptask + '/status')
req.add_header('Content-Type', 'application/json')
resp = json.load(urllib2.urlopen(req))
if resp['status'] == "running":
print 'Scan for task '+self.sqlmaptask+' is still running.\n'
time.sleep(30)
elif resp['status'] == "terminated":
if resp['returncode'] == 0:
print 'Scan for task '+self.sqlmaptask+' completed. Gathering results.\n'
dbtype = ''
payloads = ''
banner = ''
cu = ''
cdb = ''
hostname = ''
isdba = ''
lusers = ''
lprivs = ''
lroles = ''
ldbs = ''
lpswds = ''
try:
req = urllib2.Request('http://' + self.sqlmapip + ':' + self.sqlmapport + '/scan/' + self.sqlmaptask + '/data')
req.add_header('Content-Type', 'application/json')
resp = json.load(urllib2.urlopen(req))
vulnerable = False
for findings in resp['data']:
vulnerable = True
# Get basic scan info
if findings['type'] == 0:
dbtype = findings['value'][0]['dbms']
for items in findings['value']:
firstpayload = True
for k in items['data']:
if firstpayload:
payloads = '<li>'+items['data'][k]['payload']+'</li>'
firstpayload = False
else:
payloads = payloads + '<li>'+items['data'][k]['payload']+'</li>'
if firstpayload == False:
payloads = '<ul>' + payloads + '</ul><BR>'
# Get banner info
if findings['type'] == 2:
banner = findings['value']+'<BR>'
# Get Current Users
elif findings['type'] == 3:
cu = 'Current User: '+findings['value']+'<BR>'
# Get Current Database
elif findings['type'] == 4:
cdb = 'Current Database: '+findings['value']+'<BR>'
# Get Hostname
elif findings['type'] == 5:
hostname = 'Hostname: '+findings['value']+'<BR>'
# Is the user a DBA?
elif findings['type'] == 6:
if findings['value'] == True:
isdba = 'Is a DBA: Yes'+'<BR>'
else:
isdba = 'Is a DBA: No'+'<BR>'
# Get list of users
elif findings['type'] == 7:
firstuser = True
for user in findings['value']:
if firstuser:
lusers = '<li>'+user+'</li>'
firstuser = False
else:
lusers = lusers + '<li>'+user+'</li>'
if firstuser == False:
lusers = 'Users:<ul>' + lusers + '</ul><BR>'
# Get list of passwords
elif findings['type'] == 8:
userdata = ''
userpswds = ''
firstuser = True
for users in findings['value']:
firstpswd = True
if firstuser:
firstuser = False
userdata = '<li>'+users+'</li>'
else:
userdata = userdata + '<li>'+users+'</li>'
for pswd in findings['value'][users]:
if firstpswd:
                        firstpswd = False
userpswds = '<li>'+pswd+'</li>'
else:
userpswds = userpswds + '<li>'+pswd+'</li>'
lpswds = lpswds + userdata + '<ul>'+userpswds+'</ul>'
userdata = ''
userpswds = ''
if firstuser == False:
lpswds = 'Password Hashes per User:<ul>'+lpswds+'</ul><BR>'
# Get list of privileges
elif findings['type'] == 9:
userdata = ''
userprivs = ''
firstuser = True
for users in findings['value']:
firstpriv = True
if firstuser:
firstuser = False
userdata = '<li>'+users+'</li>'
else:
userdata = userdata + '<li>'+users+'</li>'
for priv in findings['value'][users]:
if firstpriv:
firstpriv = False
userprivs = '<li>'+priv+'</li>'
else:
userprivs = userprivs + '<li>'+priv+'</li>'
lprivs = lprivs + userdata + '<ul>'+userprivs+'</ul>'
userdata = ''
userprivs = ''
if firstuser == False:
lprivs = 'Privileges per User:<ul>'+lprivs+'</ul><BR>'
# Get list of roles
elif findings['type'] == 10:
userdata = ''
userroles = ''
firstuser = True
for users in findings['value']:
firstrole = True
if firstuser:
firstuser = False
userdata = '<li>'+users+'</li>'
else:
userdata = userdata + '<li>'+users+'</li>'
for role in findings['value'][users]:
if firstrole:
firstrole = False
userroles = '<li>'+role+'</li>'
else:
userroles = userroles + '<li>'+role+'</li>'
lroles = lroles + userdata + '<ul>'+userroles+'</ul>'
userdata = ''
userroles = ''
if firstuser == False:
lroles = 'Roles per User:<ul>'+lroles+'</ul><BR>'
# Get list of DBs
elif findings['type'] == 11:
firstdb = True
for db in findings['value']:
if firstdb:
ldbs = '<li>'+db+'</li>'
firstdb = False
else:
ldbs = ldbs + '<li>'+db+'</li>'
if firstdb == False:
ldbs = 'Databases:<ul>' + ldbs + '</ul><BR>'
if vulnerable:
scanIssue = SqlMapScanIssue(self.httpmessage.getHttpService(), self.url, [self.httpmessage], 'SQLMap Scan Finding',
'The application has been found to be vulnerable to SQL injection by SQLMap. The following payloads successfully identified SQL injection vulnerabilities:<p>'+payloads+'</p><p>Enumerated Data:</p><BR><p>'+dbtype+': '+banner+'</p><p>'+cu+'</p><p>'+cdb+'</p><p>'+hostname+'</p><p>'+isdba+'</p><p>'+lusers+'</p><p>'+lpswds+'</p><p>'+lprivs+'</p><p>'+lroles+'</p><p>'+ldbs+'</p>', 'Certain', 'High')
self.cbacks.addScanIssue(scanIssue)
print 'SQLi vulnerabilities were found for task '+self.sqlmaptask+' and have been reported.\n'
else:
print 'Scan completed for task '+self.sqlmaptask+' but SQLi vulnerabilities were not found.\n'
break
except:
print 'No results for SQLMap task: '+self.sqlmaptask+'\n'
break
else:
print 'SQLMap scan failed for task: '+self.sqlmaptask+'\n'
break
else:
print 'SQLMap scan failed for task: '+self.sqlmaptask+'\n'
break
except:
print 'Thread failed to get results for SQLMap task: ' + self.sqlmaptask+'\n'
break
class BurpExtender(IBurpExtender, IContextMenuFactory, ITab):
pythonfile = ''
apifile = ''
tamperfile = ''
threads = []
scanMessage = ''
# Implement IBurpExtender
def registerExtenderCallbacks(self, callbacks):
# Print information about the plugin, set extension name, setup basic stuff
self.printHeader()
callbacks.setExtensionName("SQLiPy")
self._callbacks = callbacks
self._helpers = callbacks.getHelpers()
callbacks.registerContextMenuFactory(self)
# Create SQLMap API configuration JPanel
self._jPanel = swing.JPanel()
self._jPanel.setLayout(awt.GridBagLayout())
self._jPanelConstraints = awt.GridBagConstraints()
# Create panel for IP info
self._jLabelIPListen = swing.JLabel("Listen on IP:")
self._jPanelConstraints.fill = awt.GridBagConstraints.HORIZONTAL
self._jPanelConstraints.gridx = 0
self._jPanelConstraints.gridy = 0
self._jPanel.add(self._jLabelIPListen, self._jPanelConstraints)
self._jTextFieldIPListen = swing.JTextField("",15)
self._jPanelConstraints.fill = awt.GridBagConstraints.HORIZONTAL
self._jPanelConstraints.gridx = 1
self._jPanelConstraints.gridy = 0
self._jPanel.add(self._jTextFieldIPListen, self._jPanelConstraints)
# Create panel for Port info
self._jLabelPortListen = swing.JLabel("Listen on Port:")
self._jPanelConstraints.fill = awt.GridBagConstraints.HORIZONTAL
self._jPanelConstraints.gridx = 0
self._jPanelConstraints.gridy = 1
self._jPanel.add(self._jLabelPortListen, self._jPanelConstraints)
self._jTextFieldPortListen = swing.JTextField("",3)
self._jPanelConstraints.fill = awt.GridBagConstraints.HORIZONTAL
self._jPanelConstraints.gridx = 1
self._jPanelConstraints.gridy = 1
self._jPanel.add(self._jTextFieldPortListen, self._jPanelConstraints)
# Create panel to contain Python button
self._jLabelPython = swing.JLabel("Select Python:")
self._jPanelConstraints.fill = awt.GridBagConstraints.HORIZONTAL
self._jPanelConstraints.gridx = 0
self._jPanelConstraints.gridy = 2
self._jPanel.add(self._jLabelPython, self._jPanelConstraints)
self._jButtonSetPython = swing.JButton('Python', actionPerformed=self.setPython)
self._jPanelConstraints.fill = awt.GridBagConstraints.HORIZONTAL
self._jPanelConstraints.gridx = 1
self._jPanelConstraints.gridy = 2
self._jPanel.add(self._jButtonSetPython, self._jPanelConstraints)
# Create panel to contain API button
self._jLabelAPI = swing.JLabel("Select API:")
self._jPanelConstraints.fill = awt.GridBagConstraints.HORIZONTAL
self._jPanelConstraints.gridx = 0
self._jPanelConstraints.gridy = 3
self._jPanel.add(self._jLabelAPI, self._jPanelConstraints)
self._jButtonSetAPI = swing.JButton('SQLMap API', actionPerformed=self.setAPI)
self._jPanelConstraints.fill = awt.GridBagConstraints.HORIZONTAL
self._jPanelConstraints.gridx = 1
self._jPanelConstraints.gridy = 3
self._jPanel.add(self._jButtonSetAPI, self._jPanelConstraints)
# Create panel to execute API
self._jButtonStartAPI = swing.JButton('Start API', actionPerformed=self.startAPI)
self._jPanelConstraints.fill = awt.GridBagConstraints.HORIZONTAL
self._jPanelConstraints.gridx = 0
self._jPanelConstraints.gridy = 4
self._jPanelConstraints.gridwidth = 2
self._jPanel.add(self._jButtonStartAPI, self._jPanelConstraints)
# Create SQLMap scanner panel
# Combobox Values
levelValues = [1,2,3,4,5]
riskValues = [0,1,2,3]
threadValues = [1,2,3,4,5,6,7,8,9,10]
delayValues = [0,1,2,3,4,5]
timeoutValues = [1,5,10,15,20,25,30,35,40,45,50,55,60]
retryValues = [1,2,3,4,5,6,7,8,9,10]
dbmsValues = ['Any', 'MySQL', 'Oracle', 'PostgreSQL', 'Microsoft SQL Server', 'Microsoft Access', 'SQLite', 'Firebird', 'Sybase', 'SAP MaxDB', 'DB2']
osValues = ['Any', 'Linux', 'Windows']
# GUI components
self._jLabelScanText = swing.JLabel()
self._jLabelScanIPListen = swing.JLabel()
self._jLabelScanPortListen = swing.JLabel()
self._jTextFieldScanIPListen = swing.JTextField()
self._jTextFieldScanPortListen = swing.JTextField()
self._jSeparator1 = swing.JSeparator()
self._jLabelURL = swing.JLabel()
self._jTextFieldURL = swing.JTextField()
self._jLabelData = swing.JLabel()
self._jTextData = swing.JTextArea()
self._jScrollPaneData = swing.JScrollPane(self._jTextData)
self._jLabelCookie = swing.JLabel()
self._jTextFieldCookie = swing.JTextField()
self._jLabelReferer = swing.JLabel()
self._jTextFieldReferer = swing.JTextField()
self._jLabelUA = swing.JLabel()
self._jTextFieldUA = swing.JTextField()
self._jSeparator2 = swing.JSeparator()
self._jLabelParam = swing.JLabel()
self._jTextFieldParam = swing.JTextField()
self._jCheckTO = swing.JCheckBox()
self._jSeparator3 = swing.JSeparator()
self._jComboLevel = swing.JComboBox(levelValues)
self._jLabelLevel = swing.JLabel()
self._jLabelRisk = swing.JLabel()
self._jComboRisk = swing.JComboBox(riskValues)
self._jSeparator4 = swing.JSeparator()
self._jCheckHPP = swing.JCheckBox('Param Pollution')
self._jCheckCU = swing.JCheckBox('Current User')
self._jCheckDB = swing.JCheckBox('Current DB')
self._jCheckHost = swing.JCheckBox('Hostname')
self._jCheckDBA = swing.JCheckBox('Is DBA?')
self._jCheckUsers = swing.JCheckBox('List Users')
self._jCheckPrivs = swing.JCheckBox('List Privs')
self._jCheckPswds = swing.JCheckBox('List Passwords')
self._jCheckRoles = swing.JCheckBox('List Roles')
self._jCheckDBs = swing.JCheckBox('List DBs')
self._jSeparator5 = swing.JSeparator()
self._jLabelThreads = swing.JLabel()
self._jLabelDelay = swing.JLabel()
self._jLabelTimeout = swing.JLabel()
self._jLabelRetry = swing.JLabel()
self._jComboThreads = swing.JComboBox(threadValues)
self._jComboDelay = swing.JComboBox(delayValues)
self._jComboTimeout = swing.JComboBox(timeoutValues)
self._jComboRetry = swing.JComboBox(retryValues)
self._jSeparator6 = swing.JSeparator()
self._jLabelDBMS = swing.JLabel()
self._jComboDBMS = swing.JComboBox(dbmsValues)
self._jLabelOS = swing.JLabel()
self._jComboOS = swing.JComboBox(osValues)
self._jSeparator7 = swing.JSeparator()
self._jLabelProxy = swing.JLabel()
self._jTextFieldProxy = swing.JTextField()
self._jSeparator8 = swing.JSeparator()
self._jLabelTamper = swing.JLabel()
self._jButtonSetTamper = swing.JButton('Tamper', actionPerformed=self.setTamper)
self._jButtonStartScan = swing.JButton('Start Scan', actionPerformed=self.startScan)
self._jLabelScanAPI = swing.JLabel()
# Configure GUI
self._jLabelScanText.setText('API Listening On:')
self._jLabelScanIPListen.setText('SQLMap API IP:')
self._jLabelScanPortListen.setText('SQLMap API Port:')
self._jLabelURL.setText('URL:')
self._jLabelData.setText('Post Data:')
self._jTextData.setColumns(20)
self._jTextData.setRows(5)
self._jTextData.setLineWrap(True)
self._jScrollPaneData.setVerticalScrollBarPolicy(swing.JScrollPane.VERTICAL_SCROLLBAR_ALWAYS)
self._jLabelCookie.setText('Cookies:')
self._jLabelReferer.setText('Referer:')
self._jLabelUA.setText('User-Agent:')
self._jLabelParam.setText('Test Parameter(s):')
self._jCheckTO.setText('Text Only')
self._jLabelLevel.setText('Level:')
self._jLabelRisk.setText('Risk:')
self._jComboLevel.setSelectedIndex(2)
self._jComboRisk.setSelectedIndex(1)
self._jComboThreads.setSelectedIndex(0)
self._jComboDelay.setSelectedIndex(0)
self._jComboTimeout.setSelectedIndex(6)
self._jComboRetry.setSelectedIndex(2)
self._jComboDBMS.setSelectedIndex(0)
self._jComboOS.setSelectedIndex(0)
self._jLabelThreads.setText('Threads:')
self._jLabelDelay.setText('Delay:')
self._jLabelTimeout.setText('Timeout:')
self._jLabelRetry.setText('Retries:')
self._jLabelDBMS.setText('DBMS Backend:')
self._jLabelOS.setText('Operating System:')
self._jLabelProxy.setText('Proxy (HTTP://IP:Port):')
self._jLabelTamper.setText('Tamper Script:')
# Configure locations
self._jLabelScanText.setBounds(15, 16, 126, 20)
self._jLabelScanIPListen.setBounds(15, 58, 115, 20)
self._jLabelScanPortListen.setBounds(402, 55, 129, 20)
self._jTextFieldScanIPListen.setBounds(167, 52, 206, 26)
self._jTextFieldScanPortListen.setBounds(546, 52, 63, 26)
self._jSeparator1.setBounds(15, 96, 790, 10)
self._jLabelURL.setBounds(15, 117, 35, 20)
self._jTextFieldURL.setBounds(166, 114, 535, 26)
self._jLabelData.setBounds(15, 156, 73, 20)
self._jTextData.setColumns(20)
self._jTextData.setRows(5)
self._jScrollPaneData.setBounds(166, 156, 535, 96)
self._jLabelCookie.setBounds(15, 271, 61, 20)
self._jTextFieldCookie.setBounds(166, 271, 535, 26)
self._jLabelReferer.setBounds(15, 320, 57, 20)
self._jTextFieldReferer.setBounds(166, 320, 535, 26)
self._jLabelUA.setBounds(15, 374, 86, 20)
self._jTextFieldUA.setBounds(166, 371, 535, 26)
self._jSeparator2.setBounds(15, 459, 790, 10)
self._jLabelParam.setBounds(15, 483, 132, 20)
self._jTextFieldParam.setBounds(165, 480, 366, 26)
self._jCheckTO.setBounds(584, 479, 101, 29)
self._jSeparator3.setBounds(15, 526, 790, 10)
self._jComboLevel.setBounds(165, 544, 180, 26)
self._jLabelLevel.setBounds(15, 547, 42, 20)
self._jLabelRisk.setBounds(430, 547, 35, 20)
self._jComboRisk.setBounds(518, 544, 180, 26)
self._jSeparator4.setBounds(15, 588, 790, 10)
self._jCheckHPP.setBounds(15, 608, 145, 29)
self._jCheckCU.setBounds(191, 608, 123, 29)
self._jCheckDB.setBounds(340, 608, 111, 29)
self._jCheckHost.setBounds(469, 608, 103, 29)
self._jCheckDBA.setBounds(599, 608, 105, 29)
self._jCheckUsers.setBounds(15, 655, 101, 29)
self._jCheckPswds.setBounds(191, 655, 135, 29)
self._jCheckPrivs.setBounds(344, 655, 95, 29)
self._jCheckRoles.setBounds(469, 655, 99, 29)
self._jCheckDBs.setBounds(599, 655, 89, 29)
self._jSeparator5.setBounds(15, 696, 790, 10)
self._jLabelThreads.setBounds(15, 719, 63, 20)
self._jLabelDelay.setBounds(193, 719, 45, 20)
self._jLabelTimeout.setBounds(346, 719, 65, 20)
self._jLabelRetry.setBounds(522, 719, 48, 20)
self._jComboThreads.setBounds(100, 716, 78, 26)
self._jComboDelay.setBounds(253, 716, 78, 26)
self._jComboTimeout.setBounds(429, 716, 78, 26)
self._jComboRetry.setBounds(585, 716, 78, 26)
self._jSeparator6.setBounds(15, 758, 790, 10)
self._jLabelDBMS.setBounds(15, 781, 110, 20)
self._jComboDBMS.setBounds(143, 778, 191, 26)
self._jLabelOS.setBounds(352, 781, 132, 20)
self._jComboOS.setBounds(502, 778, 191, 26)
self._jSeparator7.setBounds(15, 820, 790, 10)
self._jLabelProxy.setBounds(15, 844, 171, 20)
self._jTextFieldProxy.setBounds(204, 841, 256, 26)
self._jSeparator8.setBounds(15, 887, 790, 10)
self._jLabelTamper.setBounds(482, 844, 106, 20)
self._jButtonSetTamper.setBounds(606, 840, 87, 29)
self._jButtonStartScan.setBounds(346, 905, 103, 29)
self._jLabelScanAPI.setBounds(167, 16, 200, 20)
# Create main panel
self._jScanPanel = swing.JPanel()
self._jScanPanel.setLayout(None)
self._jScanPanel.setPreferredSize(awt.Dimension(1000,1000))
self._jScanPanel.add(self._jLabelScanText)
self._jScanPanel.add(self._jLabelScanIPListen)
self._jScanPanel.add(self._jLabelScanPortListen)
self._jScanPanel.add(self._jTextFieldScanIPListen)
self._jScanPanel.add(self._jTextFieldScanPortListen)
self._jScanPanel.add(self._jSeparator1)
self._jScanPanel.add(self._jLabelURL)
self._jScanPanel.add(self._jTextFieldURL)
self._jScanPanel.add(self._jLabelData)
self._jScanPanel.add(self._jScrollPaneData)
self._jScanPanel.add(self._jLabelCookie)
self._jScanPanel.add(self._jTextFieldCookie)
self._jScanPanel.add(self._jLabelReferer)
self._jScanPanel.add(self._jTextFieldReferer)
self._jScanPanel.add(self._jLabelUA)
self._jScanPanel.add(self._jTextFieldUA)
self._jScanPanel.add(self._jSeparator2)
self._jScanPanel.add(self._jLabelParam)
self._jScanPanel.add(self._jTextFieldParam)
self._jScanPanel.add(self._jCheckTO)
self._jScanPanel.add(self._jSeparator3)
self._jScanPanel.add(self._jComboLevel)
self._jScanPanel.add(self._jLabelLevel)
self._jScanPanel.add(self._jLabelRisk)
self._jScanPanel.add(self._jComboRisk)
self._jScanPanel.add(self._jSeparator4)
self._jScanPanel.add(self._jCheckHPP)
self._jScanPanel.add(self._jCheckCU)
self._jScanPanel.add(self._jCheckDB)
self._jScanPanel.add(self._jCheckHost)
self._jScanPanel.add(self._jCheckDBA)
self._jScanPanel.add(self._jCheckUsers)
self._jScanPanel.add(self._jCheckPswds)
self._jScanPanel.add(self._jCheckPrivs)
self._jScanPanel.add(self._jCheckRoles)
self._jScanPanel.add(self._jCheckDBs)
self._jScanPanel.add(self._jSeparator5)
self._jScanPanel.add(self._jLabelThreads)
self._jScanPanel.add(self._jLabelDelay)
self._jScanPanel.add(self._jLabelTimeout)
self._jScanPanel.add(self._jLabelRetry)
self._jScanPanel.add(self._jComboThreads)
self._jScanPanel.add(self._jComboDelay)
self._jScanPanel.add(self._jComboTimeout)
self._jScanPanel.add(self._jComboRetry)
self._jScanPanel.add(self._jSeparator6)
self._jScanPanel.add(self._jLabelDBMS)
self._jScanPanel.add(self._jComboDBMS)
self._jScanPanel.add(self._jLabelOS)
self._jScanPanel.add(self._jComboOS)
self._jScanPanel.add(self._jSeparator7)
self._jScanPanel.add(self._jLabelProxy)
self._jScanPanel.add(self._jTextFieldProxy)
self._jScanPanel.add(self._jSeparator8)
self._jScanPanel.add(self._jLabelTamper)
self._jScanPanel.add(self._jButtonSetTamper)
self._jScanPanel.add(self._jButtonStartScan)
self._jScanPanel.add(self._jLabelScanAPI)
self._jScrollPaneMain = swing.JScrollPane(self._jScanPanel)
self._jScrollPaneMain.setViewportView(self._jScanPanel)
self._jScrollPaneMain.setPreferredSize(awt.Dimension(999,999))
# Setup Tabs
self._jConfigTab = swing.JTabbedPane()
self._jConfigTab.addTab("SQLMap API", self._jPanel)
self._jConfigTab.addTab("SQLMap Scanner", self._jScrollPaneMain)
callbacks.customizeUiComponent(self._jConfigTab)
callbacks.addSuiteTab(self)
return
# Create a menu item if the appropriate section of the UI is selected
def createMenuItems(self, invocation):
menu = []
# Which part of the interface the user selects
ctx = invocation.getInvocationContext()
# Show the menu item when the user selects a request in the message editor (context 0) or message viewer (context 2)
if ctx == 0 or ctx == 2:
menu.append(swing.JMenuItem("SQLiPy Scan", None, actionPerformed=lambda x, inv=invocation: self.sqlMapScan(inv)))
return menu if menu else None
def getTabCaption(self):
return 'SQLiPy'
def getUiComponent(self):
return self._jConfigTab
def sqlMapScan(self, invocation):
# Check initial message for proper request/response and set variables - Burp will not return valid info otherwise
try:
invMessage = invocation.getSelectedMessages()
message = invMessage[0]
reqInfo = self._helpers.analyzeRequest(message)
reqUrl = str(reqInfo.getUrl())
reqBody = message.getRequest()
bodyData = self._helpers.bytesToString(reqBody[reqInfo.getBodyOffset():])
reqHeaders = newHeaders = list(reqInfo.getHeaders())
referer = ''
ua = ''
cookie = ''
for header in reqHeaders:
if re.search('^Referer', header, re.IGNORECASE) is not None:
referer = re.sub('^Referer\:\s+', '', header, flags=re.IGNORECASE)
elif re.search('^User-Agent', header, re.IGNORECASE) is not None:
ua = re.sub('^User-Agent\:\s+', '', header, flags=re.IGNORECASE)
elif re.search('^Cookie', header, re.IGNORECASE) is not None:
cookie = re.sub('^Cookie\:\s+', '', header, flags=re.IGNORECASE)
self._jTextFieldURL.setText(reqUrl)
self._jTextData.setText(bodyData)
self._jTextFieldCookie.setText(cookie)
self._jTextFieldUA.setText(ua)
self._jTextFieldReferer.setText(referer)
self._jConfigTab.setSelectedComponent(self._jScrollPaneMain)
self.scanMessage = message
self.scanUrl = reqInfo.getUrl()
parentTab = self._jConfigTab.getParent()
parentTab.setSelectedComponent(self._jConfigTab)
except:
print 'Failed to add data to scan tab.'
def printHeader(self):
print 'SQLiPy\nBurp interface to SQLMap via the SQLMap API\njosh.berry@codewatch.org\n\n'
def setAPI(self, e):
selectFile = swing.JFileChooser()
filter = swing.filechooser.FileNameExtensionFilter("python files", ["py"])
selectFile.addChoosableFileFilter(filter)
returnedFile = selectFile.showDialog(self._jPanel, "SQLMap API")
if returnedFile == swing.JFileChooser.APPROVE_OPTION:
file = selectFile.getSelectedFile()
self.apifile = file.getPath()
print 'Selected API at ' + file.getPath()
self._jLabelAPI.setText('API set to: ' + file.getPath())
def setPython(self, e):
selectFile = swing.JFileChooser()
returnedFile = selectFile.showDialog(self._jPanel, "Python EXE")
if returnedFile == swing.JFileChooser.APPROVE_OPTION:
file = selectFile.getSelectedFile()
self.pythonfile = file.getPath()
print 'Selected Python at ' + file.getPath()
self._jLabelPython.setText('Python set to: ' + file.getPath())
def setTamper(self, e):
selectFile = swing.JFileChooser()
filter = swing.filechooser.FileNameExtensionFilter("python files", ["py"])
selectFile.addChoosableFileFilter(filter)
returnedFile = selectFile.showDialog(self._jPanel, "Tamper")
if returnedFile == swing.JFileChooser.APPROVE_OPTION:
file = selectFile.getSelectedFile()
self.tamperfile = file.getPath()
self._jLabelTamper.setText('Tamper Script: ' + file.getPath())
def startAPI(self, button):
try:
print 'Calling: ' + self.pythonfile + ' ' + self.apifile + ' -s -H ' + self._jTextFieldIPListen.getText() + ' -p ' + self._jTextFieldPortListen.getText() + '\n'
sqlmapdir = ''
if re.search('^[a-zA-Z]\:', self.apifile) is not None:
sqlmapdir = self.apifile.rsplit('\\', 1)[0]
else:
sqlmapdir = self.apifile.rsplit('/', 1)[0]
self.sqlmapapi = subprocess.Popen(self.pythonfile + ' ' + self.apifile + ' -s -H ' + self._jTextFieldIPListen.getText() + ' -p ' + self._jTextFieldPortListen.getText(), cwd=sqlmapdir, stdout=subprocess.PIPE)
self._jLabelScanAPI.setText('API Listening on: ' + self._jTextFieldIPListen.getText() + ':' + self._jTextFieldPortListen.getText())
self._jTextFieldScanIPListen.setText(self._jTextFieldIPListen.getText())
self._jTextFieldScanPortListen.setText(self._jTextFieldPortListen.getText())
for x in range(0, 4):
print self.sqlmapapi.stdout.readline().rstrip()
print '\n'
except:
print 'Failed to start the SQLMap API\n'
def startScan(self, button):
hpp = ''
cu = ''
cdb = ''
hostname = ''
isdba = ''
lusers = ''
lpswds = ''
lprivs = ''
lroles = ''
ldbs = ''
textonly = ''
postdata = None
datacmd = ''
cookiedata = None
cookiecmd = ''
uadata = None
uacmd = ''
headerdata = None
headercmd = ''
refererdata = None
referercmd = ''
proxy = None
proxycmd = ''
dbms = None
dbmscmd = ''
os = None
oscmd = ''
tampercmd = ''
tamperdata = None
paramcmd = ''
paramdata = None
if self._jCheckTO.isSelected():
textonly = ' --text-only'
textonlystatus = True
else:
textonlystatus = False
if self._jCheckHPP.isSelected():
hpp = ' --hpp'
hppstatus = True
else:
hppstatus = False
if self._jCheckCU.isSelected():
cu = ' --current-user'
custatus = True
else:
custatus = False
if self._jCheckDB.isSelected():
cdb = ' --current-db'
cdbstatus = True
else:
cdbstatus = False
if self._jCheckHost.isSelected():
hostname = ' --hostname'
hostnamestatus = True
else:
hostnamestatus = False
if self._jCheckDBA.isSelected():
isdba = ' --is-dba'
isdbastatus = True
else:
isdbastatus = False
if self._jCheckUsers.isSelected():
lusers = ' --users'
lusersstatus = True
else:
lusersstatus = False
if self._jCheckPswds.isSelected():
lpswds = ' --passwords'
lpswdsstatus = True
else:
lpswdsstatus = False
if self._jCheckPrivs.isSelected():
lprivs = ' --privileges'
lprivsstatus = True
else:
lprivsstatus = False
if self._jCheckRoles.isSelected():
lroles = ' --roles'
lrolesstatus = True
else:
lrolesstatus = False
if self._jCheckDBs.isSelected():
ldbs = ' --dbs'
ldbsstatus = True
else:
ldbsstatus = False
if re.search('(http|https)\://', self._jTextFieldProxy.getText()) is not None:
proxy = self._jTextFieldProxy.getText()
proxycmd = ' --proxy=' + self._jTextFieldProxy.getText()
if re.search('^Any$', self._jComboDBMS.getSelectedItem()) is None:
dbms = self._jComboDBMS.getSelectedItem()
dbmscmd = ' --dbms=' + self._jComboDBMS.getSelectedItem()
if re.search('^Any$', self._jComboOS.getSelectedItem()) is None:
os = self._jComboOS.getSelectedItem()
oscmd = ' --os=' + self._jComboOS.getSelectedItem()
if re.search('[a-zA-Z0-9]', self.tamperfile) is not None:
tampercmd = ' --tamper=' + self.tamperfile
tamperdata = self.tamperfile
if re.search('[a-zA-Z0-9]', self._jTextData.getText()) is not None:
postdata = self._jTextData.getText()
datacmd = ' --data=\'' + self._jTextData.getText() + '\''
if re.search('[a-zA-Z0-9]', self._jTextFieldCookie.getText()) is not None:
cookiedata = self._jTextFieldCookie.getText()
cookiecmd = ' --cookie=\'' + self._jTextFieldCookie.getText() + '\''
if re.search('[a-zA-Z0-9]', self._jTextFieldUA.getText()) is not None:
uadata = self._jTextFieldUA.getText()
uacmd = ' --user-agent=\'' + self._jTextFieldUA.getText() + '\''
if re.search('[a-zA-Z0-9]', self._jTextFieldReferer.getText()) is not None:
refererdata = self._jTextFieldReferer.getText()
referercmd = ' --referer=\'' + self._jTextFieldReferer.getText() + '\''
if re.search('[a-zA-Z0-9]', self._jTextFieldParam.getText()) is not None:
paramdata = self._jTextFieldParam.getText()
paramcmd = ' -p \'' + self._jTextFieldParam.getText() + '\''
try:
print 'SQLMap Command: -u \'' + self._jTextFieldURL.getText() + '\'' + datacmd + cookiecmd + uacmd + referercmd + proxycmd + ' --delay=' + str(self._jComboDelay.getSelectedItem()) + ' --timeout=' + str(self._jComboTimeout.getSelectedItem()) + ' --retries=' + str(self._jComboRetry.getSelectedItem()) + paramcmd + dbmscmd + oscmd + tampercmd + ' --level=' + str(self._jComboLevel.getSelectedItem()) + ' --risk=' + str(self._jComboRisk.getSelectedItem()) + textonly + hpp + ' --threads=' + str(self._jComboThreads.getSelectedItem()) + ' -b' + cu + cdb + hostname + isdba + lusers + lpswds + lprivs + lroles + ldbs + ' --batch --answers="crack=N,dict=N"\n'
req = urllib2.Request('http://' + self._jTextFieldScanIPListen.getText() + ':' + self._jTextFieldScanPortListen.getText() + '/task/new')
resp = json.load(urllib2.urlopen(req))
if resp['success'] == True:
sqlitask = resp['taskid']
sqliopts = {'getUsers': lusersstatus, 'getPasswordHashes': lpswdsstatus, 'delay': self._jComboDelay.getSelectedItem(), 'isDba': isdbastatus, 'risk': self._jComboRisk.getSelectedItem(), 'getCurrentUser': custatus, 'getRoles': lrolesstatus, 'getPrivileges': lprivsstatus, 'testParameter': paramdata, 'timeout': self._jComboTimeout.getSelectedItem(), 'level': self._jComboLevel.getSelectedItem(), 'getCurrentDb': cdbstatus, 'answers': 'crack=N,dict=N', 'cookie': cookiedata, 'proxy': proxy, 'os': os, 'threads': self._jComboThreads.getSelectedItem(), 'url': self._jTextFieldURL.getText(), 'getDbs': ldbsstatus, 'referer': refererdata, 'retries': self._jComboRetry.getSelectedItem(), 'getHostname': hostnamestatus, 'agent': uadata, 'dbms': dbms, 'tamper': tamperdata, 'hpp': hppstatus, 'getBanner': 'true', 'data': postdata, 'textOnly': textonlystatus}
print 'Created SQLMap Task: ' + sqlitask + '\n'
try:
req = urllib2.Request('http://' + self._jTextFieldScanIPListen.getText() + ':' + self._jTextFieldScanPortListen.getText() + '/option/' + sqlitask + '/set')
req.add_header('Content-Type', 'application/json')
resp = json.load(urllib2.urlopen(req, json.dumps(sqliopts)))
if resp['success'] == True:
print 'SQLMap options set on Task ' + sqlitask + ': ' + json.dumps(sqliopts) + '\n'
sqliopts = {'url': self._jTextFieldURL.getText()}
try:
checkreq = urllib2.Request('http://' + self._jTextFieldScanIPListen.getText() + ':' + self._jTextFieldScanPortListen.getText() + '/option/' + sqlitask + '/list')
checkresp = json.load(urllib2.urlopen(checkreq))
print 'SQLMap options returned: ' + json.dumps(checkresp) + '\n'
except:
print 'Failed to get list of options from SQLMap API\n'
try:
req = urllib2.Request('http://' + self._jTextFieldScanIPListen.getText() + ':' + self._jTextFieldScanPortListen.getText() + '/scan/' + sqlitask + '/start')
req.add_header('Content-Type', 'application/json')
resp = json.load(urllib2.urlopen(req, json.dumps(sqliopts)))
if resp['success'] == True:
findings = ThreadExtender(self, self._jTextFieldScanIPListen.getText(), self._jTextFieldScanPortListen.getText(), sqlitask, self.scanUrl, self.scanMessage, self._callbacks)
t = threading.Thread(target=findings.checkResults)
self.threads.append(t)
t.start()
print 'Started SQLMap Scan on Task ' + sqlitask +' with Engine ID: ' + str(resp['engineid']) + ' - ' + self._jTextFieldURL.getText() + '\n'
else:
print 'Failed to start SQLMap Scan for Task: ' + sqlitask + '\n'
except:
print 'Failed to start SQLMap Scan for Task: ' + sqlitask + '\n'
else:
print 'Failed to set options on SQLMap Task: ' + sqlitask + '\n'
except:
print 'Failed to set options on SQLMap Task: ' + sqlitask + '\n'
else:
print 'SQLMap task creation failed\n'
except:
print 'SQLMap task creation failed\n'
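# --- Illustrative sketch, not part of the original SQLiPy extension ---
# The scan started above is monitored by ThreadExtender.checkResults (defined
# earlier in this extension). A minimal stand-alone poller for the SQLMap API
# could look like the function below; it assumes the usual sqlmapapi.py REST
# endpoints /scan/<taskid>/status and /scan/<taskid>/data, and the poll
# interval and function name are only examples.
import json, time, urllib2
def poll_sqlmap_task(api_ip, api_port, taskid, interval=30):
    base = 'http://' + api_ip + ':' + api_port
    while True:
        status = json.load(urllib2.urlopen(urllib2.Request(base + '/scan/' + taskid + '/status')))
        if status.get('status') == 'terminated':
            break
        time.sleep(interval)
    # Once the scan has terminated, fetch whatever findings SQLMap recorded for the task
    return json.load(urllib2.urlopen(urllib2.Request(base + '/scan/' + taskid + '/data')))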
|
app.py
|
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
import os, sys
from soundspider import SoundSpider
from time import sleep
import threading
class Handler:
def onDestroy(self, *args):
try:
download_thread._stop()
except:
pass
Gtk.main_quit()
def onToggleDownload(self, button):
status = "Downloading..."
builder.get_object('label4').set_text(status)
button.set_sensitive(False)
builder.get_object("folder_label").set_sensitive(False)
builder.get_object("url_label").set_sensitive(False)
## verbose?
# verbose = True
verbose = False
params = (builder.get_object("url_label").get_text(),builder.get_object("folder_label").get_text(),verbose, builder.get_object('label4'), button,builder.get_object("url_label"),builder.get_object("folder_label"))
download_thread = threading.Thread(target=SoundSpider.convert, args=params)
download_thread.start()
return
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
# """ Get absolute path to resource, works for dev and for PyInstaller """
# base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))
# return os.path.join(base_path, relative_path)
download_thread = threading.Thread()
builder = Gtk.Builder()
builder.add_from_file(resource_path("ui.glade"))
builder.connect_signals(Handler())
window = builder.get_object("window1")
window.show_all()
Gtk.main()
|
bench_req_rep_raw.py
|
import time
from multiprocessing import Process
from nanoservice import Service, Client
import util
def start_service(addr, n):
""" Start a service """
s = Service(addr)
started = time.time()
for _ in range(n):
msg = s.socket.recv()
s.socket.send(msg)
s.socket.close()
duration = time.time() - started
print('Raw REP service stats:')
util.print_stats(n, duration)
return
def bench(client, n):
""" Benchmark n requests """
items = list(range(n))
# Time client publish operations
# ------------------------------
started = time.time()
msg = b'x'
for i in items:
client.socket.send(msg)
res = client.socket.recv()
assert msg == res
duration = time.time() - started
print('Raw REQ client stats:')
util.print_stats(n, duration)
def run(N, addr):
# Fork service
service_process = Process(target=start_service, args=(addr, N))
service_process.start()
time.sleep(0.1) # Wait for service connect
# Create client and make reqs
c = Client(addr)
bench(c, N)
c.socket.close()
time.sleep(0.2)
service_process.terminate()
if __name__ == '__main__':
N = 50000
print('')
print('Req-Rep over IPC (raw)')
print('-----------------------------')
run(N, 'ipc:///tmp/bench-raw-reqrep-ipc.sock')
print('')
print('Req-Rep over TCP (raw)')
print('-----------------------------')
run(N, 'tcp://127.0.0.1:5052')
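# --- Illustrative sketch, not part of the original benchmark ---
# The local `util` module used above is not shown in this file. A helper
# compatible with the util.print_stats(n, duration) calls could look like the
# hypothetical reimplementation below; only the two-argument signature is
# taken from the calls above.
def _print_stats_sketch(n, duration):
    """Report throughput and average latency for n operations over `duration` seconds."""
    ops_per_sec = n / duration
    avg_ms = (duration / n) * 1000.0
    print('    %d ops in %.3f s: %.0f ops/sec, %.4f ms/op' % (n, duration, ops_per_sec, avg_ms))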
|
httpclient_test.py
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
import base64
import binascii
from contextlib import closing
import copy
import functools
import sys
import threading
import datetime
from io import BytesIO
from tornado.escape import utf8, native_str
from tornado import gen
from tornado.httpclient import HTTPRequest, HTTPResponse, _RequestProxy, HTTPError, HTTPClient
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado.log import gen_log
from tornado import netutil
from tornado.stack_context import ExceptionStackContext, NullContext
from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test, ExpectLog
from tornado.test.util import unittest, skipOnTravis
from tornado.web import Application, RequestHandler, url
from tornado.httputil import format_timestamp, HTTPHeaders
class HelloWorldHandler(RequestHandler):
def get(self):
name = self.get_argument("name", "world")
self.set_header("Content-Type", "text/plain")
self.finish("Hello %s!" % name)
class PostHandler(RequestHandler):
def post(self):
self.finish("Post arg1: %s, arg2: %s" % (
self.get_argument("arg1"), self.get_argument("arg2")))
class PutHandler(RequestHandler):
def put(self):
self.write("Put body: ")
self.write(self.request.body)
class RedirectHandler(RequestHandler):
def prepare(self):
self.write('redirects can have bodies too')
self.redirect(self.get_argument("url"),
status=int(self.get_argument("status", "302")))
class ChunkHandler(RequestHandler):
@gen.coroutine
def get(self):
self.write("asdf")
self.flush()
# Wait a bit to ensure the chunks are sent and received separately.
yield gen.sleep(0.01)
self.write("qwer")
class AuthHandler(RequestHandler):
def get(self):
self.finish(self.request.headers["Authorization"])
class CountdownHandler(RequestHandler):
def get(self, count):
count = int(count)
if count > 0:
self.redirect(self.reverse_url("countdown", count - 1))
else:
self.write("Zero")
class EchoPostHandler(RequestHandler):
def post(self):
self.write(self.request.body)
class UserAgentHandler(RequestHandler):
def get(self):
self.write(self.request.headers.get('User-Agent', 'User agent not set'))
class ContentLength304Handler(RequestHandler):
def get(self):
self.set_status(304)
self.set_header('Content-Length', 42)
def _clear_headers_for_304(self):
# Tornado strips content-length from 304 responses, but here we
# want to simulate servers that include the headers anyway.
pass
class PatchHandler(RequestHandler):
def patch(self):
"Return the request payload - so we can check it is being kept"
self.write(self.request.body)
class AllMethodsHandler(RequestHandler):
SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('OTHER',)
def method(self):
self.write(self.request.method)
get = post = put = delete = options = patch = other = method
class SetHeaderHandler(RequestHandler):
def get(self):
# Use get_arguments for keys to get strings, but
# request.arguments for values to get bytes.
for k, v in zip(self.get_arguments('k'),
self.request.arguments['v']):
self.set_header(k, v)
# These tests end up getting run redundantly: once here with the default
# HTTPClient implementation, and then again in each implementation's own
# test suite.
class HTTPClientCommonTestCase(AsyncHTTPTestCase):
def get_app(self):
return Application([
url("/hello", HelloWorldHandler),
url("/post", PostHandler),
url("/put", PutHandler),
url("/redirect", RedirectHandler),
url("/chunk", ChunkHandler),
url("/auth", AuthHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/echopost", EchoPostHandler),
url("/user_agent", UserAgentHandler),
url("/304_with_content_length", ContentLength304Handler),
url("/all_methods", AllMethodsHandler),
url('/patch', PatchHandler),
url('/set_header', SetHeaderHandler),
], gzip=True)
def test_patch_receives_payload(self):
body = b"some patch data"
response = self.fetch("/patch", method='PATCH', body=body)
self.assertEqual(response.code, 200)
self.assertEqual(response.body, body)
@skipOnTravis
def test_hello_world(self):
response = self.fetch("/hello")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["Content-Type"], "text/plain")
self.assertEqual(response.body, b"Hello world!")
self.assertEqual(int(response.request_time), 0)
response = self.fetch("/hello?name=Ben")
self.assertEqual(response.body, b"Hello Ben!")
def test_streaming_callback(self):
# streaming_callback is also tested in test_chunked
chunks = []
response = self.fetch("/hello",
streaming_callback=chunks.append)
# with streaming_callback, data goes to the callback and not response.body
self.assertEqual(chunks, [b"Hello world!"])
self.assertFalse(response.body)
def test_post(self):
response = self.fetch("/post", method="POST",
body="arg1=foo&arg2=bar")
self.assertEqual(response.code, 200)
self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
def test_chunked(self):
response = self.fetch("/chunk")
self.assertEqual(response.body, b"asdfqwer")
chunks = []
response = self.fetch("/chunk",
streaming_callback=chunks.append)
self.assertEqual(chunks, [b"asdf", b"qwer"])
self.assertFalse(response.body)
def test_chunked_close(self):
# test case in which chunks spread read-callback processing
# over several ioloop iterations, but the connection is already closed.
sock, port = bind_unused_port()
with closing(sock):
def write_response(stream, request_data):
if b"HTTP/1." not in request_data:
self.skipTest("requires HTTP/1.x")
stream.write(b"""\
HTTP/1.1 200 OK
Transfer-Encoding: chunked
1
1
1
2
0
""".replace(b"\n", b"\r\n"), callback=stream.close)
def accept_callback(conn, address):
# fake an HTTP server using chunked encoding where the final chunks
# and connection close all happen at once
stream = IOStream(conn, io_loop=self.io_loop)
stream.read_until(b"\r\n\r\n",
functools.partial(write_response, stream))
netutil.add_accept_handler(sock, accept_callback, self.io_loop)
self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
resp = self.wait()
resp.rethrow()
self.assertEqual(resp.body, b"12")
self.io_loop.remove_handler(sock.fileno())
def test_streaming_stack_context(self):
chunks = []
exc_info = []
def error_handler(typ, value, tb):
exc_info.append((typ, value, tb))
return True
def streaming_cb(chunk):
chunks.append(chunk)
if chunk == b'qwer':
1 / 0
with ExceptionStackContext(error_handler):
self.fetch('/chunk', streaming_callback=streaming_cb)
self.assertEqual(chunks, [b'asdf', b'qwer'])
self.assertEqual(1, len(exc_info))
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_basic_auth(self):
self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
def test_basic_auth_explicit_mode(self):
self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame",
auth_mode="basic").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
def test_unsupported_auth_mode(self):
# curl and simple clients handle errors a bit differently; the
# important thing is that they don't fall back to basic auth
# on an unknown mode.
with ExpectLog(gen_log, "uncaught exception", required=False):
with self.assertRaises((ValueError, HTTPError)):
response = self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame",
auth_mode="asdf")
response.rethrow()
def test_follow_redirect(self):
response = self.fetch("/countdown/2", follow_redirects=False)
self.assertEqual(302, response.code)
self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
response = self.fetch("/countdown/2")
self.assertEqual(200, response.code)
self.assertTrue(response.effective_url.endswith("/countdown/0"))
self.assertEqual(b"Zero", response.body)
def test_credentials_in_url(self):
url = self.get_url("/auth").replace("http://", "http://me:secret@")
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertEqual(b"Basic " + base64.b64encode(b"me:secret"),
response.body)
def test_body_encoding(self):
unicode_body = u"\xe9"
byte_body = binascii.a2b_hex(b"e9")
# unicode string in body gets converted to utf8
response = self.fetch("/echopost", method="POST", body=unicode_body,
headers={"Content-Type": "application/blah"})
self.assertEqual(response.headers["Content-Length"], "2")
self.assertEqual(response.body, utf8(unicode_body))
# byte strings pass through directly
response = self.fetch("/echopost", method="POST",
body=byte_body,
headers={"Content-Type": "application/blah"})
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
# Mixing unicode in headers and byte string bodies shouldn't
# break anything
response = self.fetch("/echopost", method="POST", body=byte_body,
headers={"Content-Type": "application/blah"},
user_agent=u"foo")
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
def test_types(self):
response = self.fetch("/hello")
self.assertEqual(type(response.body), bytes)
self.assertEqual(type(response.headers["Content-Type"]), str)
self.assertEqual(type(response.code), int)
self.assertEqual(type(response.effective_url), str)
def test_header_callback(self):
first_line = []
headers = {}
chunks = []
def header_callback(header_line):
if header_line.startswith('HTTP/1.1 101'):
# Upgrading to HTTP/2
pass
elif header_line.startswith('HTTP/'):
first_line.append(header_line)
elif header_line != '\r\n':
k, v = header_line.split(':', 1)
headers[k.lower()] = v.strip()
def streaming_callback(chunk):
# All header callbacks are run before any streaming callbacks,
# so the header data is available to process the data as it
# comes in.
self.assertEqual(headers['content-type'], 'text/html; charset=UTF-8')
chunks.append(chunk)
self.fetch('/chunk', header_callback=header_callback,
streaming_callback=streaming_callback)
self.assertEqual(len(first_line), 1, first_line)
self.assertRegexpMatches(first_line[0], 'HTTP/[0-9]\\.[0-9] 200.*\r\n')
self.assertEqual(chunks, [b'asdf', b'qwer'])
def test_header_callback_stack_context(self):
exc_info = []
def error_handler(typ, value, tb):
exc_info.append((typ, value, tb))
return True
def header_callback(header_line):
if header_line.lower().startswith('content-type:'):
1 / 0
with ExceptionStackContext(error_handler):
self.fetch('/chunk', header_callback=header_callback)
self.assertEqual(len(exc_info), 1)
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_configure_defaults(self):
defaults = dict(user_agent='TestDefaultUserAgent', allow_ipv6=False)
# Construct a new instance of the configured client class
client = self.http_client.__class__(self.io_loop, force_instance=True,
defaults=defaults)
try:
client.fetch(self.get_url('/user_agent'), callback=self.stop)
response = self.wait()
self.assertEqual(response.body, b'TestDefaultUserAgent')
finally:
client.close()
def test_header_types(self):
# Header values may be passed as character or utf8 byte strings,
# in a plain dictionary or an HTTPHeaders object.
# Keys must always be the native str type.
# All combinations should have the same results on the wire.
for value in [u"MyUserAgent", b"MyUserAgent"]:
for container in [dict, HTTPHeaders]:
headers = container()
headers['User-Agent'] = value
resp = self.fetch('/user_agent', headers=headers)
self.assertEqual(
resp.body, b"MyUserAgent",
"response=%r, value=%r, container=%r" %
(resp.body, value, container))
def test_multi_line_headers(self):
# Multi-line http headers are rare but rfc-allowed
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
sock, port = bind_unused_port()
with closing(sock):
def write_response(stream, request_data):
if b"HTTP/1." not in request_data:
self.skipTest("requires HTTP/1.x")
stream.write(b"""\
HTTP/1.1 200 OK
X-XSS-Protection: 1;
\tmode=block
""".replace(b"\n", b"\r\n"), callback=stream.close)
def accept_callback(conn, address):
stream = IOStream(conn, io_loop=self.io_loop)
stream.read_until(b"\r\n\r\n",
functools.partial(write_response, stream))
netutil.add_accept_handler(sock, accept_callback, self.io_loop)
self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
resp = self.wait()
resp.rethrow()
self.assertEqual(resp.headers['X-XSS-Protection'], "1; mode=block")
self.io_loop.remove_handler(sock.fileno())
def test_304_with_content_length(self):
# According to the spec 304 responses SHOULD NOT include
# Content-Length or other entity headers, but some servers do it
# anyway.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
response = self.fetch('/304_with_content_length')
self.assertEqual(response.code, 304)
self.assertEqual(response.headers['Content-Length'], '42')
def test_final_callback_stack_context(self):
# The final callback should be run outside of the httpclient's
# stack_context. We want to ensure that there is not stack_context
# between the user's callback and the IOLoop, so monkey-patch
# IOLoop.handle_callback_exception and disable the test harness's
# context with a NullContext.
# Note that this does not apply to secondary callbacks (header
# and streaming_callback), as errors there must be seen as errors
# by the http client so it can clean up the connection.
exc_info = []
def handle_callback_exception(callback):
exc_info.append(sys.exc_info())
self.stop()
self.io_loop.handle_callback_exception = handle_callback_exception
with NullContext():
self.http_client.fetch(self.get_url('/hello'),
lambda response: 1 / 0)
self.wait()
self.assertEqual(exc_info[0][0], ZeroDivisionError)
@gen_test
def test_future_interface(self):
response = yield self.http_client.fetch(self.get_url('/hello'))
self.assertEqual(response.body, b'Hello world!')
@gen_test
def test_future_http_error(self):
with self.assertRaises(HTTPError) as context:
yield self.http_client.fetch(self.get_url('/notfound'))
self.assertEqual(context.exception.code, 404)
self.assertEqual(context.exception.response.code, 404)
@gen_test
def test_future_http_error_no_raise(self):
response = yield self.http_client.fetch(self.get_url('/notfound'), raise_error=False)
self.assertEqual(response.code, 404)
@gen_test
def test_reuse_request_from_response(self):
# The response.request attribute should be an HTTPRequest, not
# a _RequestProxy.
# This test uses self.http_client.fetch because self.fetch calls
# self.get_url on the input unconditionally.
url = self.get_url('/hello')
response = yield self.http_client.fetch(url)
self.assertEqual(response.request.url, url)
self.assertTrue(isinstance(response.request, HTTPRequest))
response2 = yield self.http_client.fetch(response.request)
self.assertEqual(response2.body, b'Hello world!')
def test_all_methods(self):
for method in ['GET', 'DELETE', 'OPTIONS']:
response = self.fetch('/all_methods', method=method)
self.assertEqual(response.body, utf8(method))
for method in ['POST', 'PUT', 'PATCH']:
response = self.fetch('/all_methods', method=method, body=b'')
self.assertEqual(response.body, utf8(method))
response = self.fetch('/all_methods', method='HEAD')
self.assertEqual(response.body, b'')
response = self.fetch('/all_methods', method='OTHER',
allow_nonstandard_methods=True)
self.assertEqual(response.body, b'OTHER')
def test_body_sanity_checks(self):
# These methods require a body.
for method in ('POST', 'PUT', 'PATCH'):
with self.assertRaises(ValueError) as context:
resp = self.fetch('/all_methods', method=method)
resp.rethrow()
self.assertIn('must not be None', str(context.exception))
resp = self.fetch('/all_methods', method=method,
allow_nonstandard_methods=True)
self.assertEqual(resp.code, 200)
# These methods don't allow a body.
for method in ('GET', 'DELETE', 'OPTIONS'):
with self.assertRaises(ValueError) as context:
resp = self.fetch('/all_methods', method=method, body=b'asdf')
resp.rethrow()
self.assertIn('must be None', str(context.exception))
# In most cases this can be overridden, but curl_httpclient
# does not allow body with a GET at all.
if method != 'GET':
resp = self.fetch('/all_methods', method=method, body=b'asdf',
allow_nonstandard_methods=True)
resp.rethrow()
self.assertEqual(resp.code, 200)
# This test causes odd failures with the combination of
# curl_httpclient (at least with the version of libcurl available
# on ubuntu 12.04), TwistedIOLoop, and epoll. For POST (but not PUT),
# curl decides the response came back too soon and closes the connection
# to start again. It does this *before* telling the socket callback to
# unregister the FD. Some IOLoop implementations have special kernel
# integration to discover this immediately. Tornado's IOLoops
# ignore errors on remove_handler to accommodate this behavior, but
# Twisted's reactor does not. The removeReader call fails and so
# do all future removeAll calls (which our tests do at cleanup).
#
# def test_post_307(self):
# response = self.fetch("/redirect?status=307&url=/post",
# method="POST", body=b"arg1=foo&arg2=bar")
# self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
def test_put_307(self):
response = self.fetch("/redirect?status=307&url=/put",
method="PUT", body=b"hello")
response.rethrow()
self.assertEqual(response.body, b"Put body: hello")
def test_non_ascii_header(self):
# Non-ascii headers are sent as latin1.
response = self.fetch("/set_header?k=foo&v=%E9")
response.rethrow()
self.assertEqual(response.headers["Foo"], native_str(u"\u00e9"))
class RequestProxyTest(unittest.TestCase):
def test_request_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/',
user_agent='foo'),
dict())
self.assertEqual(proxy.user_agent, 'foo')
def test_default_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict(network_interface='foo'))
self.assertEqual(proxy.network_interface, 'foo')
def test_both_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/',
proxy_host='foo'),
dict(proxy_host='bar'))
self.assertEqual(proxy.proxy_host, 'foo')
def test_neither_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict())
self.assertIs(proxy.auth_username, None)
def test_bad_attribute(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict())
with self.assertRaises(AttributeError):
proxy.foo
def test_defaults_none(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'), None)
self.assertIs(proxy.auth_username, None)
class HTTPResponseTestCase(unittest.TestCase):
def test_str(self):
response = HTTPResponse(HTTPRequest('http://example.com'),
200, headers={}, buffer=BytesIO())
s = str(response)
self.assertTrue(s.startswith('HTTPResponse('))
self.assertIn('code=200', s)
class SyncHTTPClientTest(unittest.TestCase):
def setUp(self):
if IOLoop.configured_class().__name__ in ('TwistedIOLoop',
'AsyncIOMainLoop'):
# TwistedIOLoop only supports the global reactor, so we can't have
# separate IOLoops for client and server threads.
# AsyncIOMainLoop doesn't work with the default policy
# (although it could with some tweaks to this test and a
# policy that created loops for non-main threads).
raise unittest.SkipTest(
'Sync HTTPClient not compatible with TwistedIOLoop or '
'AsyncIOMainLoop')
self.server_ioloop = IOLoop()
sock, self.port = bind_unused_port()
app = Application([('/', HelloWorldHandler)])
self.server = HTTPServer(app, io_loop=self.server_ioloop)
self.server.add_socket(sock)
self.server_thread = threading.Thread(target=self.server_ioloop.start)
self.server_thread.start()
self.http_client = HTTPClient()
def tearDown(self):
def stop_server():
self.server.stop()
# Delay the shutdown of the IOLoop by one iteration because
# the server may still have some cleanup work left when
# the client finishes with the response (this is noticeable
# with http/2, which leaves a Future with an unexamined
# StreamClosedError on the loop).
self.server_ioloop.add_callback(self.server_ioloop.stop)
self.server_ioloop.add_callback(stop_server)
self.server_thread.join()
self.http_client.close()
self.server_ioloop.close(all_fds=True)
def get_url(self, path):
return 'http://127.0.0.1:%d%s' % (self.port, path)
def test_sync_client(self):
response = self.http_client.fetch(self.get_url('/'))
self.assertEqual(b'Hello world!', response.body)
def test_sync_client_error(self):
# Synchronous HTTPClient raises errors directly; no need for
# response.rethrow()
with self.assertRaises(HTTPError) as assertion:
self.http_client.fetch(self.get_url('/notfound'))
self.assertEqual(assertion.exception.code, 404)
class HTTPRequestTestCase(unittest.TestCase):
def test_headers(self):
request = HTTPRequest('http://example.com', headers={'foo': 'bar'})
self.assertEqual(request.headers, {'foo': 'bar'})
def test_headers_setter(self):
request = HTTPRequest('http://example.com')
request.headers = {'bar': 'baz'}
self.assertEqual(request.headers, {'bar': 'baz'})
def test_null_headers_setter(self):
request = HTTPRequest('http://example.com')
request.headers = None
self.assertEqual(request.headers, {})
def test_body(self):
request = HTTPRequest('http://example.com', body='foo')
self.assertEqual(request.body, utf8('foo'))
def test_body_setter(self):
request = HTTPRequest('http://example.com')
request.body = 'foo'
self.assertEqual(request.body, utf8('foo'))
def test_if_modified_since(self):
http_date = datetime.datetime.utcnow()
request = HTTPRequest('http://example.com', if_modified_since=http_date)
self.assertEqual(request.headers,
{'If-Modified-Since': format_timestamp(http_date)})
class HTTPErrorTestCase(unittest.TestCase):
def test_copy(self):
e = HTTPError(403)
e2 = copy.copy(e)
self.assertIsNot(e, e2)
self.assertEqual(e.code, e2.code)
def test_plain_error(self):
e = HTTPError(403)
self.assertEqual(str(e), "HTTP 403: Forbidden")
self.assertEqual(repr(e), "HTTP 403: Forbidden")
def test_error_with_response(self):
resp = HTTPResponse(HTTPRequest('http://example.com/'), 403)
with self.assertRaises(HTTPError) as cm:
resp.rethrow()
e = cm.exception
self.assertEqual(str(e), "HTTP 403: Forbidden")
self.assertEqual(repr(e), "HTTP 403: Forbidden")
|
fake_request.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from core.brain.main import Brain
from core.listen import listen
from core.config.settings import logger
from threading import Timer
from output import say
import sys
from multiprocessing import Process, Queue, Pipe
def testing_jabber():
"""docstring for testing_jabber"""
from core.brain.jabber_listener import jabber_listener
this_server, brain_jabber_listener = Pipe()
jproc = Process( target=jabber_listener, args=( brain_jabber_listener, ) )
jproc.start()
msg = {'body':'smarty'}
this_server.send( msg )
try:
response = this_server.recv()
except EOFError as exc:
logger.exception(exc)
print( response )
jproc.join()
#prepare fifo
def testing_sound_sensors():
"""docstring for testing_sound_sensors"""
from core.device.head.sensor.sound.source_direction import read_sensor_data
q = Queue()
p = Process(target=read_sensor_data, args=(q,))
p.start()
print q.get()
p.join()
if __name__ == '__main__':
#test sound source detection
#try:
#while 1:
#testing_sound_sensors()
#except KeyboardInterrupt:
#sys.exit(1)
#path = 'somedir/somedir'
#import os
#from config import settings
#os.makedirs(os.path.join( settings.APP_DIRS['brain_modules_dir'], path ))
#check reaction
#b = Brain()
#brain.react_on('what is your name')
#import sys
#if brain.continue_dialog():
#print 'continue'
#else:
#print('stop dialog')
#sys.exit()
#testing_jabber()
#import sys
#sys.exit(0)
brain = Brain()
#brain.react_on('what is broadcasting')
#brain.react_on('what is kjhdvlkjhasd')
#brain.react_on('start broadcast')
#brain.react_on('send email to vs@webdirect.md this is the body text')
# testing neck motor
#brain.react_on('turn head to the right')
#brain.react_on('turn head to the left')
#brain.react_on('turn head up')
#brain.react_on('turn head down')
#brain.react_on('say what time is it now')
#brain.react_on('update')
#brain.react_on('show')
#brain.react_on('play music')
#brain.react_on('Hi')
#brain.react_on('what is google')
#brain.react_on('play music')
brain.react_on('hi')
#brain.react_on('say hello')
#brain.react_on('who are you')
#import time
#time.sleep(2)
#brain.react_on('what time is it')
#time.sleep(2)
#brain.react_on('what is your name')
#time.sleep(2)
#brain.react_on("tell me please what's your name")
#time.sleep(2)
#brain.react_on("your name")
#time.sleep(2)
#brain.react_on("name")
#time.sleep(2)
#brain.react_on('could you please put the music')
sys.exit()
#logging.debug('Start listening...')
#while(1):
#listen()
#request_received = recognize_by_google()
#if request_received:
##react on request
#brain = Brain()
##first request can be greeting with answer in one file
##result can be None or the rest part of first request (greeings from beggining cut)
#rest = brain.react_on(request_received)
##in future I need dialog history
#if not brain.request_processed:
#start_dialog(rest)
|
EasyNMT.py
|
import os
import torch
from .util import http_get, import_from_string, fullname
import json
from . import __DOWNLOAD_SERVER__
from typing import List, Union, Dict, FrozenSet, Set, Iterable
import numpy as np
import tqdm
import nltk
import torch.multiprocessing as mp
import queue
import math
import re
import logging
import time
import os
logger = logging.getLogger(__name__)
class EasyNMT:
def __init__(self, model_name: str = None, cache_folder: str = None, translator=None, load_translator: bool = True, device=None, max_length: int = None, **kwargs):
"""
Easy-to-use, state-of-the-art machine translation
:param model_name: Model name (see Readme for available models)
:param cache_folder: Which folder should be used for caching models. Can also be set via the EASYNMT_CACHE env. variable
:param translator: Translator object. Set to None to automatically load the model via the model name.
:param load_translator: If set to False, only the config is loaded, not the translation engine
:param device: CPU / GPU device for PyTorch
:param max_length: Max number of tokens per sentence for translation. Longer text will be truncated
:param kwargs: Further optional parameters for the different models
"""
self._model_name = model_name
self._fasttext_lang_id = None
self._lang_detectors = [self.language_detection_fasttext, self.language_detection_langid, self.language_detection_langdetect]
self._lang_pairs = frozenset()
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.device = device
self.config = None
if cache_folder is None:
if 'EASYNMT_CACHE' in os.environ:
cache_folder = os.environ['EASYNMT_CACHE']
else:
cache_folder = os.path.join(torch.hub._get_torch_home(), 'easynmt_v2')
self._cache_folder = cache_folder
if translator is not None:
self.translator = translator
else:
if os.path.exists(model_name) and os.path.isdir(model_name):
model_path = model_name
else:
model_name = model_name.lower()
model_path = os.path.join(cache_folder, model_name)
if not os.path.exists(model_path) or not os.listdir(model_path):
logger.info("Downloading EasyNMT model {} and saving it at {}".format(model_name, model_path))
model_path_tmp = model_path.rstrip("/").rstrip("\\") + "_part"
os.makedirs(model_path_tmp, exist_ok=True)
#Download easynmt.json
config_url = __DOWNLOAD_SERVER__+"/{}/easynmt.json".format(model_name)
config_path = os.path.join(model_path_tmp, 'easynmt.json')
http_get(config_url, config_path)
with open(config_path) as fIn:
downloaded_config = json.load(fIn)
if 'files' in downloaded_config:
for filename, url in downloaded_config['files'].items():
logger.info("Download {} from {}".format(filename, url))
http_get(url, os.path.join(model_path_tmp, filename))
##Rename tmp path
try:
os.rename(model_path_tmp, model_path)
except Exception:
pass
with open(os.path.join(model_path, 'easynmt.json')) as fIn:
self.config = json.load(fIn)
if 'lang_pairs' in self.config:
self._lang_pairs = frozenset(self.config['lang_pairs'])
if load_translator:
module_class = import_from_string(self.config['model_class'])
self.translator = module_class(easynmt_path=model_path, **self.config['model_args'])
self.translator.max_length = max_length
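    # Usage sketch, not part of the library source: the cache directory resolved
    # above can be overridden before constructing a model (the path and model
    # name below are only examples; see the README for available models):
    #
    #   os.environ['EASYNMT_CACHE'] = '/data/easynmt_models'
    #   model = EasyNMT('opus-mt')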
def translate(self, documents: Union[str, List[str]], target_lang: str, source_lang: str = None,
show_progress_bar: bool = False, beam_size: int = 5, batch_size: int = 16,
perform_sentence_splitting: bool = True, paragraph_split: str = "\n", sentence_splitter=None, document_language_detection: bool = True,
**kwargs):
"""
This method translates the given set of documents
:param documents: If documents is a string, returns the translated document as string. If documents is a list of strings, translates all documents and returns a list.
:param target_lang: Target language for the translation
:param source_lang: Source language for all documents. If None, determines the source languages automatically.
:param show_progress_bar: If True, show a progress bar while translating
:param beam_size: Size for beam search
:param batch_size: Number of sentences to translate at the same time
:param perform_sentence_splitting: Longer documents are broken down into sentences, which are translated individually
:param paragraph_split: Split symbol for paragraphs. No sentences can go across the paragraph_split symbol.
:param sentence_splitter: Method used to split sentences. If None, uses the default self.sentence_splitting method
:param document_language_detection: Perform language detection on document level
:param kwargs: Optional arguments for the translator model
:return: Returns a string or a list of string with the translated documents
"""
#Method_args will store all passed arguments to method
method_args = locals()
del method_args['self']
del method_args['kwargs']
method_args.update(kwargs)
if source_lang == target_lang:
return documents
is_single_doc = False
if isinstance(documents, str):
documents = [documents]
is_single_doc = True
if source_lang is None and document_language_detection:
src_langs = [self.language_detection(doc) for doc in documents]
# Group by languages
lang2id = {}
for idx, lng in enumerate(src_langs):
if lng not in lang2id:
lang2id[lng] = []
lang2id[lng].append(idx)
# Translate language wise
output = [None] * len(documents)
for lng, ids in lang2id.items():
logger.info("Translate documents of language: {}".format(lng))
try:
method_args['documents'] = [documents[idx] for idx in ids]
method_args['source_lang'] = lng
translated = self.translate(**method_args)
for idx, translated_sentences in zip(ids, translated):
output[idx] = translated_sentences
except Exception as e:
logger.warning("Exception: "+str(e))
raise e
if is_single_doc and len(output) == 1:
output = output[0]
return output
if perform_sentence_splitting:
if sentence_splitter is None:
sentence_splitter = self.sentence_splitting
# Split document into sentences
start_time = time.time()
splitted_sentences = []
sent2doc = []
for doc in documents:
paragraphs = doc.split(paragraph_split) if paragraph_split is not None else [doc]
for para in paragraphs:
for sent in sentence_splitter(para.strip(), source_lang):
sent = sent.strip()
if len(sent) > 0:
splitted_sentences.append(sent)
sent2doc.append(len(splitted_sentences))
#logger.info("Sentence splitting done after: {:.2f} sec".format(time.time() - start_time))
#logger.info("Translate {} sentences".format(len(splitted_sentences)))
translated_sentences = self.translate_sentences(splitted_sentences, target_lang=target_lang, source_lang=source_lang, show_progress_bar=show_progress_bar, beam_size=beam_size, batch_size=batch_size, **kwargs)
# Merge sentences back to documents
start_time = time.time()
translated_docs = []
for doc_idx in range(len(documents)):
start_idx = sent2doc[doc_idx - 1] if doc_idx > 0 else 0
end_idx = sent2doc[doc_idx]
translated_docs.append(self._reconstruct_document(documents[doc_idx], splitted_sentences[start_idx:end_idx], translated_sentences[start_idx:end_idx]))
#logger.info("Document reconstruction done after: {:.2f} sec".format(time.time() - start_time))
else:
translated_docs = self.translate_sentences(documents, target_lang=target_lang, source_lang=source_lang, show_progress_bar=show_progress_bar, beam_size=beam_size, batch_size=batch_size, **kwargs)
if is_single_doc:
translated_docs = translated_docs[0]
return translated_docs
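    # Usage sketch, not part of the library source: translating documents with
    # automatic source-language detection. The model name 'opus-mt' is only an
    # example; see the EasyNMT README for the models that are actually available.
    #
    #   model = EasyNMT('opus-mt')
    #   print(model.translate('Dies ist ein Satz.', target_lang='en'))
    #   print(model.translate(['Un texte en français.', 'Ein deutscher Text.'],
    #                         target_lang='en', show_progress_bar=True))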
@staticmethod
def _reconstruct_document(doc, org_sent, translated_sent):
"""
This method reconstructs the translated document and
preserves the whitespace at the beginning / end of each sentence.
"""
sent_idx = 0
char_idx = 0
translated_doc = ""
while char_idx < len(doc):
if sent_idx < len(org_sent) and doc[char_idx] == org_sent[sent_idx][0]:
translated_doc += translated_sent[sent_idx]
char_idx += len(org_sent[sent_idx])
sent_idx += 1
else:
translated_doc += doc[char_idx]
char_idx += 1
return translated_doc
def translate_sentences(self, sentences: Union[str, List[str]], target_lang: str, source_lang: str = None,
show_progress_bar: bool = False, beam_size: int = 5, batch_size: int = 32, **kwargs):
"""
This method translates individual sentences.
:param sentences: A single sentence or a list of sentences to be translated
:param source_lang: Source language for all sentences. If None, the source language is determined automatically
:param target_lang: Target language for the translation
:param show_progress_bar: Show a progress bar
:param beam_size: Size for beam search
:param batch_size: Mini batch size
:return: List of translated sentences
"""
if source_lang == target_lang:
return sentences
is_single_sentence = False
if isinstance(sentences, str):
sentences = [sentences]
is_single_sentence = True
output = []
if source_lang is None:
#Determine languages for sentences
src_langs = [self.language_detection(sent) for sent in sentences]
logger.info("Detected languages: {}".format(set(src_langs)))
#Group by languages
lang2id = {}
for idx, lng in enumerate(src_langs):
if lng not in lang2id:
lang2id[lng] = []
lang2id[lng].append(idx)
#Translate language wise
output = [None] * len(sentences)
for lng, ids in lang2id.items():
logger.info("Translate sentences of language: {}".format(lng))
try:
grouped_sentences = [sentences[idx] for idx in ids]
translated = self.translate_sentences(grouped_sentences, source_lang=lng, target_lang=target_lang, show_progress_bar=show_progress_bar, beam_size=beam_size, batch_size=batch_size, **kwargs)
for idx, translated_sentences in zip(ids, translated):
output[idx] = translated_sentences
except Exception as e:
logger.warning("Exception: "+str(e))
raise e
else:
#Sort by length to speed up processing
length_sorted_idx = np.argsort([-len(sen) for sen in sentences])
sentences_sorted = [sentences[idx] for idx in length_sorted_idx]
iterator = range(0, len(sentences_sorted), batch_size)
if show_progress_bar:
scale = min(batch_size, len(sentences))
iterator = tqdm.tqdm(iterator, total=len(sentences)/scale, unit_scale=scale, smoothing=0)
for start_idx in iterator:
output.extend(self.translator.translate_sentences(sentences_sorted[start_idx:start_idx+batch_size], source_lang=source_lang, target_lang=target_lang, beam_size=beam_size, device=self.device, **kwargs))
#Restore original sorting of sentences
output = [output[idx] for idx in np.argsort(length_sorted_idx)]
if is_single_sentence:
output = output[0]
return output
def start_multi_process_pool(self, target_devices: List[str] = None):
"""
Starts several independent worker processes for translation.
This method is recommended if you want to translate on multiple GPUs. It is advised
to start only one process per GPU. This method works together with translate_multi_process
:param target_devices: PyTorch target devices, e.g. cuda:0, cuda:1... If None, all available CUDA devices will be used
:return: Returns a dict with the target processes, an input queue and an output queue.
"""
if target_devices is None:
if torch.cuda.is_available():
target_devices = ['cuda:{}'.format(i) for i in range(torch.cuda.device_count())]
else:
logger.info("CUDA is not available. Start 4 CPU worker")
target_devices = ['cpu'] * 4
logger.info("Start multi-process pool on devices: {}".format(', '.join(map(str, target_devices))))
ctx = mp.get_context('spawn')
input_queue = ctx.Queue()
output_queue = ctx.Queue()
processes = []
for cuda_id in target_devices:
p = ctx.Process(target=EasyNMT._encode_multi_process_worker, args=(cuda_id, self, input_queue, output_queue), daemon=True)
p.start()
processes.append(p)
return {'input': input_queue, 'output': output_queue, 'processes': processes}
def translate_multi_process(self, pool: Dict[str, object], documents: List[str], show_progress_bar: bool = True, chunk_size: int = None, **kwargs) -> List[str]:
"""
This method allows to run translate() on multiple GPUs. The documents are chunked into smaller packages
and sent to individual processes, which translate them on the different GPUs. This method is only suitable
for translating large sets of documents
:param pool: A pool of workers started with SentenceTransformer.start_multi_process_pool
:param chunk_size: Sentences are chunked and sent to the individual processes. If None, a sensible chunk size is determined automatically.
"""
if chunk_size is None:
chunk_size = min(math.ceil(len(documents) / len(pool["processes"]) / 10), 1000)
logger.info("Chunk data into packages of size {}".format(chunk_size))
input_queue = pool['input']
last_chunk_id = 0
for start_idx in range(0, len(documents), chunk_size):
input_queue.put([last_chunk_id, documents[start_idx:start_idx+chunk_size], kwargs])
last_chunk_id += 1
output_queue = pool['output']
results_list = sorted([output_queue.get() for _ in tqdm.tqdm(range(last_chunk_id), total=last_chunk_id, unit_scale=chunk_size, smoothing=0, disable=not show_progress_bar)], key=lambda chunk: chunk[0])
translated = []
for chunk in results_list:
translated.extend(chunk[1])
return translated
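# Sketch of the multi-process workflow (illustrative only; `model` and the
# `documents` list are assumptions, not part of this module):
#
#   pool = model.start_multi_process_pool()             # one process per GPU
#   translated = model.translate_multi_process(
#       pool, documents, source_lang="de", target_lang="en")
#   model.stop_multi_process_pool(pool)                  # terminate worker processes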
def translate_stream(self, stream: Iterable[str], show_progress_bar: bool = True, chunk_size: int = 128, **kwargs) -> List[str]:
batch = []
for doc in tqdm.tqdm(stream, smoothing=0.0, disable=not show_progress_bar):
batch.append(doc)
if len(batch) >= chunk_size:
translated = self.translate(batch, show_progress_bar=False, **kwargs)
for trans_doc in translated:
yield trans_doc
batch = []
if len(batch) > 0:
translated = self.translate(batch, show_progress_bar=False, **kwargs)
for trans_doc in translated:
yield trans_doc
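# translate_stream is a generator, so it can be consumed lazily, e.g. while
# reading documents line by line from a file (sketch; `model` and the input
# file path are assumptions):
#
#   with open("docs.txt") as fIn:
#       for translated_doc in model.translate_stream(
#               (line.strip() for line in fIn), chunk_size=64, target_lang="en"):
#           print(translated_doc)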
@staticmethod
def stop_multi_process_pool(pool):
"""
Stops all processes started with start_multi_process_pool
"""
for p in pool['processes']:
p.terminate()
for p in pool['processes']:
p.join()
p.close()
pool['input'].close()
pool['output'].close()
@staticmethod
def _encode_multi_process_worker(target_device: str, model, input_queue, results_queue):
"""
Internal working process to encode sentences in multi-process setup
"""
model.device = target_device
while True:
try:
id, documents, kwargs = input_queue.get()
translated = model.translate(documents, **kwargs)
results_queue.put([id, translated])
except queue.Empty:
break
def language_detection(self, text: Union[str, List[str]]) -> str:
"""
Given a text, detects the language code and returns the ISO language code.
It tries different language detectors, based on what is available:
fastText, langid, langdetect.
You can change the language detector order by changing model._lang_detectors
:param text: Text or a List of Texts for which we want to determine the language
:return: ISO language code (or a list of codes if a list of texts was passed)
"""
if isinstance(text, list):
return [self.language_detection(doc) for doc in text]
for lang_detector in self._lang_detectors:
try:
return lang_detector(text)
except:
pass
raise Exception("No method for automatic language detection was found. Please install at least one of the following: fasttext (pip install fasttext), langid (pip install langid), or langdetect (pip install langdetect)")
def language_detection_fasttext(self, text: str) -> str:
"""
Given a text, detects the language code and returns the ISO language code. It supports 176 languages. Uses
the fasttext model for language detection:
https://fasttext.cc/blog/2017/10/02/blog-post.html
https://fasttext.cc/docs/en/language-identification.html
"""
if self._fasttext_lang_id is None:
import fasttext
fasttext.FastText.eprint = lambda x: None #Silence useless warning: https://github.com/facebookresearch/fastText/issues/1067
model_path = os.path.join(self._cache_folder, 'lid.176.ftz')
if not os.path.exists(model_path):
http_get('https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.ftz', model_path)
self._fasttext_lang_id = fasttext.load_model(model_path)
return self._fasttext_lang_id.predict(text.lower().replace("\r\n", " ").replace("\n", " ").strip())[0][0].split('__')[-1]
def language_detection_langid(self, text: str) -> str:
import langid
return langid.classify(text.lower().replace("\r\n", " ").replace("\n", " ").strip())[0]
def language_detection_langdetect(self, text: str) -> str:
import langdetect
return langdetect.detect(text.lower().replace("\r\n", " ").replace("\n", " ").strip()).split("-")[0]
def sentence_splitting(self, text: str, lang: str = None):
if lang == 'th':
from thai_segmenter import sentence_segment
sentences = [str(sent) for sent in sentence_segment(text)]
elif lang in ['ar', 'jp', 'ko', 'zh']:
sentences = list(re.findall(u'[^!?。\.]+[!?。\.]*', text, flags=re.U))
else:
try:
nltk.data.find('tokenizers/punkt')
except LookupError:
nltk.download('punkt')
sentences = nltk.sent_tokenize(text)
return sentences
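# Example (sketch; `model` is an assumed instance). For most languages NLTK's
# punkt tokenizer is used, while Thai, Arabic and CJK texts get special handling:
#
#   model.sentence_splitting("First sentence. Second sentence.", lang="en")
#   # -> ["First sentence.", "Second sentence."]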
@property
def lang_pairs(self) -> FrozenSet[str]:
"""
Returns all allowed languages directions for the loaded model
"""
return self._lang_pairs
def get_languages(self, source_lang: str = None, target_lang: str = None) -> List[str]:
"""
Returns all available languages supported by the model
:param source_lang: If not None, then returns all languages to which we can translate for the given source_lang
:param target_lang: If not None, then returns all languages from which we can translate for the given target_lang
:return: Sorted list with the determined languages
"""
langs = set()
for lang_pair in self.lang_pairs:
source, target = lang_pair.split("-")
if source_lang is None and target_lang is None:
langs.add(source)
langs.add(target)
elif target_lang is not None and target == target_lang:
langs.add(source)
elif source_lang is not None and source == source_lang:
langs.add(target)
return sorted(list(langs))
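# Example (sketch; the output depends on the loaded model's lang_pairs):
#
#   model.get_languages()                      # all languages in any direction
#   model.get_languages(source_lang="de")      # targets reachable from German
#   model.get_languages(target_lang="en")      # sources translatable to English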
def save(self, output_path):
os.makedirs(output_path, exist_ok=True)
filepath = os.path.join(output_path, 'easynmt.json')
config = {
'model_class': fullname(self.translator),
'lang_pairs': list(self.lang_pairs),
'model_args': self.translator.save(output_path)
}
with open(filepath, 'w') as fOut:
json.dump(config, fOut)
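# Saving writes an easynmt.json config plus the translator's own files into
# output_path (sketch; the path is an assumption):
#
#   model.save("./my-easynmt-model")
#   # The directory is intended to be loadable again via the EasyNMT constructor.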
|
servers.py
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for managing server processes required by Oppia."""
from __future__ import absolute_import
from __future__ import unicode_literals
import contextlib
import logging
import os
import re
import shutil
import signal
import subprocess
import sys
import threading
from core import feconf
from core import python_utils
from scripts import common
@contextlib.contextmanager
def managed_process(
command_args, human_readable_name='Process', shell=False,
timeout_secs=60, **popen_kwargs):
"""Context manager for starting and stopping a process gracefully.
Args:
command_args: list(int|str). A sequence of program arguments, where the
program to execute is the first item. Ints are allowed in order to
accommodate e.g. port numbers.
human_readable_name: str. The human-readable name of the process. Used
by the function's logging logic to improve readability.
shell: bool. Whether the command should be run inside of its own shell.
WARNING: Executing shell commands that incorporate unsanitized input
from an untrusted source makes a program vulnerable to
[shell injection](https://w.wiki/_Ac2), a serious security flaw
which can result in arbitrary command execution. For this reason,
the use of `shell=True` is **strongly discouraged** in cases where
the command string is constructed from external input.
timeout_secs: int. The time allotted for the managed process and its
descendants to terminate themselves. After the timeout, any
remaining processes will be killed abruptly.
**popen_kwargs: dict(str: *). Same kwargs as `subprocess.Popen`.
Yields:
psutil.Process. The process managed by the context manager.
"""
# TODO(#11549): Move this to top of the file.
if common.PSUTIL_DIR not in sys.path:
sys.path.insert(1, common.PSUTIL_DIR)
import psutil
get_proc_info = lambda p: (
'%s(name="%s", pid=%d)' % (human_readable_name, p.name(), p.pid)
if p.is_running() else '%s(pid=%d)' % (human_readable_name, p.pid))
stripped_args = (('%s' % arg).strip() for arg in command_args)
non_empty_args = (s for s in stripped_args if s)
command = ' '.join(non_empty_args) if shell else list(non_empty_args)
human_readable_command = command if shell else ' '.join(command)
msg = 'Starting new %s: %s' % (human_readable_name, human_readable_command)
python_utils.PRINT(msg)
popen_proc = psutil.Popen(command, shell=shell, **popen_kwargs)
try:
yield popen_proc
finally:
python_utils.PRINT('Stopping %s...' % get_proc_info(popen_proc))
procs_still_alive = [popen_proc]
try:
if popen_proc.is_running():
# Children must be terminated before the parent, otherwise they
# may become zombie processes.
procs_still_alive = (
popen_proc.children(recursive=True) + [popen_proc])
procs_to_kill = []
for proc in procs_still_alive:
if proc.is_running():
logging.info('Terminating %s...' % get_proc_info(proc))
proc.terminate()
procs_to_kill.append(proc)
else:
logging.info('%s has already ended.' % get_proc_info(proc))
procs_gone, procs_still_alive = (
psutil.wait_procs(procs_to_kill, timeout=timeout_secs))
for proc in procs_still_alive:
logging.warning('Forced to kill %s!' % get_proc_info(proc))
proc.kill()
for proc in procs_gone:
logging.info('%s has already ended.' % get_proc_info(proc))
except Exception:
# NOTE: Raising an exception while exiting a context manager is bad
# practice, so we log and suppress exceptions instead.
logging.exception(
'Failed to stop %s gracefully!' % get_proc_info(popen_proc))
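# A minimal usage sketch (illustrative only; the command and the placeholder
# helper `do_something_with` are assumptions, not part of this module):
#
#   with managed_process(['sleep', 60], human_readable_name='Sleeper') as proc:
#       do_something_with(proc.pid)
#   # On exiting the block, children are terminated before the parent, and
#   # anything still alive after timeout_secs is killed.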
@contextlib.contextmanager
def managed_dev_appserver(
app_yaml_path, env=None, log_level='info',
host='0.0.0.0', port=8080, admin_host='0.0.0.0', admin_port=8000,
enable_host_checking=True, automatic_restart=False,
skip_sdk_update_check=False):
"""Returns a context manager to start up and shut down a GAE dev appserver.
Args:
app_yaml_path: str. Path to the app.yaml file which defines the
structure of the server.
env: dict(str: str) or None. Defines the environment variables for the
new process.
log_level: str. The lowest log level generated by the application code
and the development server. Expected values are: debug, info,
warning, error, critical.
host: str. The host name to which the app server should bind.
port: int. The lowest port to which application modules should bind.
admin_host: str. The host name to which the admin server should bind.
admin_port: int. The port to which the admin server should bind.
enable_host_checking: bool. Whether to enforce HTTP Host checking for
application modules, API server, and admin server. Host checking
protects against DNS rebinding attacks, so only disable after
understanding the security implications.
automatic_restart: bool. Whether to restart instances automatically when
files relevant to their module are changed.
skip_sdk_update_check: bool. Whether to skip checking for SDK updates.
If false, uses .appcfg_nag to decide.
Yields:
psutil.Process. The dev_appserver process.
"""
dev_appserver_args = [
common.CURRENT_PYTHON_BIN,
common.DEV_APPSERVER_PATH,
'--host', host,
'--port', port,
'--admin_host', admin_host,
'--admin_port', admin_port,
'--enable_host_checking', 'true' if enable_host_checking else 'false',
'--automatic_restart', 'true' if automatic_restart else 'false',
'--skip_sdk_update_check', 'true' if skip_sdk_update_check else 'false',
'--log_level', log_level,
'--dev_appserver_log_level', log_level,
app_yaml_path
]
# OK to use shell=True here because we are not passing anything that came
# from an untrusted user, only other callers of the script, so there's no
# risk of shell-injection attacks.
with python_utils.ExitStack() as stack:
proc = stack.enter_context(managed_process(
dev_appserver_args, human_readable_name='GAE Development Server',
shell=True, env=env))
common.wait_for_port_to_be_in_use(port)
yield proc
@contextlib.contextmanager
def managed_firebase_auth_emulator(recover_users=False):
"""Returns a context manager to manage the Firebase auth emulator.
Args:
recover_users: bool. Whether to recover users created by the previous
instance of the Firebase auth emulator.
Yields:
psutil.Process. The Firebase emulator process.
"""
emulator_args = [
common.FIREBASE_PATH, 'emulators:start', '--only', 'auth',
'--project', feconf.OPPIA_PROJECT_ID,
'--config', feconf.FIREBASE_EMULATOR_CONFIG_PATH,
]
emulator_args.extend(
['--import', common.FIREBASE_EMULATOR_CACHE_DIR, '--export-on-exit']
if recover_users else
['--export-on-exit', common.FIREBASE_EMULATOR_CACHE_DIR])
# OK to use shell=True here because we are passing string literals and
# constants, so there is no risk of a shell-injection attack.
proc_context = managed_process(
emulator_args, human_readable_name='Firebase Emulator', shell=True)
with proc_context as proc:
common.wait_for_port_to_be_in_use(feconf.FIREBASE_EMULATOR_PORT)
yield proc
@contextlib.contextmanager
def managed_elasticsearch_dev_server():
"""Returns a context manager for ElasticSearch server for running tests
in development mode and running a local dev server. This is only required
in a development environment.
Yields:
psutil.Process. The ElasticSearch server process.
"""
# Clear previous data stored in the local cluster.
if os.path.exists(common.ES_PATH_DATA_DIR):
shutil.rmtree(common.ES_PATH_DATA_DIR)
# -q is the quiet flag.
es_args = ['%s/bin/elasticsearch' % common.ES_PATH, '-q']
# Override the default path to ElasticSearch config files.
es_env = {'ES_PATH_CONF': common.ES_PATH_CONFIG_DIR}
# OK to use shell=True here because we are passing string literals and
# constants, so there is no risk of a shell-injection attack.
proc_context = managed_process(
es_args, human_readable_name='ElasticSearch Server', env=es_env,
shell=True)
with proc_context as proc:
common.wait_for_port_to_be_in_use(feconf.ES_LOCALHOST_PORT)
yield proc
@contextlib.contextmanager
def managed_cloud_datastore_emulator(clear_datastore=False):
"""Returns a context manager for the Cloud Datastore emulator.
Args:
clear_datastore: bool. Whether to delete the datastore's config and data
before starting the emulator.
Yields:
psutil.Process. The emulator process.
"""
emulator_hostport = '%s:%d' % (
feconf.CLOUD_DATASTORE_EMULATOR_HOST,
feconf.CLOUD_DATASTORE_EMULATOR_PORT)
emulator_args = [
common.GCLOUD_PATH, 'beta', 'emulators', 'datastore', 'start',
'--project', feconf.OPPIA_PROJECT_ID,
'--data-dir', common.CLOUD_DATASTORE_EMULATOR_DATA_DIR,
'--host-port', emulator_hostport,
'--no-store-on-disk', '--consistency=1.0', '--quiet',
]
with python_utils.ExitStack() as stack:
data_dir_exists = os.path.exists(
common.CLOUD_DATASTORE_EMULATOR_DATA_DIR)
if clear_datastore and data_dir_exists:
# Replace it with an empty directory.
shutil.rmtree(common.CLOUD_DATASTORE_EMULATOR_DATA_DIR)
os.makedirs(common.CLOUD_DATASTORE_EMULATOR_DATA_DIR)
elif not data_dir_exists:
os.makedirs(common.CLOUD_DATASTORE_EMULATOR_DATA_DIR)
# OK to use shell=True here because we are passing string literals and
# constants, so there is no risk of a shell-injection attack.
proc = stack.enter_context(managed_process(
emulator_args, human_readable_name='Cloud Datastore Emulator',
shell=True))
common.wait_for_port_to_be_in_use(feconf.CLOUD_DATASTORE_EMULATOR_PORT)
# Environment variables required to communicate with the emulator.
stack.enter_context(common.swap_env(
'DATASTORE_DATASET', feconf.OPPIA_PROJECT_ID))
stack.enter_context(common.swap_env(
'DATASTORE_EMULATOR_HOST', emulator_hostport))
stack.enter_context(common.swap_env(
'DATASTORE_EMULATOR_HOST_PATH', '%s/datastore' % emulator_hostport))
stack.enter_context(common.swap_env(
'DATASTORE_HOST', 'http://%s' % emulator_hostport))
stack.enter_context(common.swap_env(
'DATASTORE_PROJECT_ID', feconf.OPPIA_PROJECT_ID))
stack.enter_context(common.swap_env(
'DATASTORE_USE_PROJECT_ID_AS_APP_ID', 'true'))
stack.enter_context(common.swap_env(
'GOOGLE_CLOUD_PROJECT', feconf.OPPIA_PROJECT_ID))
yield proc
@contextlib.contextmanager
def managed_redis_server():
"""Run the redis server within a context manager that ends it gracefully."""
if common.is_windows_os():
raise Exception(
'The redis command line interface is not installed because your '
'machine is on the Windows operating system. The redis server '
'cannot start.')
# Check if a redis dump file currently exists. This file contains residual
# data from a previous run of the redis server. If it exists, remove the
# dump file so that the redis server starts with a clean slate.
if os.path.exists(common.REDIS_DUMP_PATH):
os.remove(common.REDIS_DUMP_PATH)
# OK to use shell=True here because we are passing string literals and
# constants, so there is no risk of a shell-injection attack.
proc_context = managed_process(
[common.REDIS_SERVER_PATH, common.REDIS_CONF_PATH],
human_readable_name='Redis Server', shell=True)
with proc_context as proc:
common.wait_for_port_to_be_in_use(feconf.REDISPORT)
try:
yield proc
finally:
subprocess.check_call([common.REDIS_CLI_PATH, 'shutdown', 'nosave'])
def create_managed_web_browser(port):
"""Returns a context manager for a web browser targeting the given port on
localhost. If a web browser cannot be opened on the current system by Oppia,
then returns None instead.
Args:
port: int. The port number to open in the web browser.
Returns:
context manager|None. The context manager to a web browser window, or
None if the current operating system does not support web browsers.
"""
url = 'http://localhost:%s/' % port
human_readable_name = 'Web Browser'
if common.is_linux_os():
if any(re.match('.*VBOX.*', d) for d in os.listdir('/dev/disk/by-id/')):
return None
else:
return managed_process(
['xdg-open', url], human_readable_name=human_readable_name)
elif common.is_mac_os():
return managed_process(
['open', url], human_readable_name=human_readable_name)
else:
return None
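# Unlike the managed_* context managers above, this helper returns a context
# manager or None, so callers should guard against the None case (sketch):
#
#   managed_browser = create_managed_web_browser(8181)
#   if managed_browser is None:
#       python_utils.PRINT('Please open http://localhost:8181/ manually.')
#   else:
#       with managed_browser:
#           pass  # keep serving while the browser window is open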
@contextlib.contextmanager
def managed_webpack_compiler(
config_path=None, use_prod_env=False, use_source_maps=False,
watch_mode=False, max_old_space_size=None):
"""Returns context manager to start/stop the webpack compiler gracefully.
Args:
config_path: str|None. Path to an explicit webpack config, or None to
determine it from the other args.
use_prod_env: bool. Whether to compile for use in production. Only
respected if config_path is None.
use_source_maps: bool. Whether to compile with source maps. Only
respected if config_path is None.
watch_mode: bool. Run the compiler in watch mode, which rebuilds on file
change.
max_old_space_size: int|None. Sets the max memory size of the compiler's
"old memory" section. As memory consumption approaches the limit,
the compiler will spend more time on garbage collection in an effort
to free unused memory.
Yields:
psutil.Process. The Webpack compiler process.
"""
if config_path is not None:
pass
elif use_prod_env:
config_path = (
common.WEBPACK_PROD_SOURCE_MAPS_CONFIG if use_source_maps else
common.WEBPACK_PROD_CONFIG)
else:
config_path = (
common.WEBPACK_DEV_SOURCE_MAPS_CONFIG if use_source_maps else
common.WEBPACK_DEV_CONFIG)
compiler_args = [
common.NODE_BIN_PATH, common.WEBPACK_BIN_PATH, '--config', config_path,
]
if max_old_space_size:
# NOTE: --max-old-space-size is a flag for Node.js, not the Webpack
# compiler, so we insert it immediately after NODE_BIN_PATH.
compiler_args.insert(1, '--max-old-space-size=%d' % max_old_space_size)
if watch_mode:
compiler_args.extend(['--color', '--watch', '--progress'])
with python_utils.ExitStack() as exit_stack:
# OK to use shell=True here because we are passing string literals and
# constants, so there is no risk of a shell-injection attack.
proc = exit_stack.enter_context(managed_process(
compiler_args, human_readable_name='Webpack Compiler', shell=True,
# Capture compiler's output to detect when builds have completed.
stdout=subprocess.PIPE))
if watch_mode:
for line in iter(lambda: proc.stdout.readline() or None, None):
common.write_stdout_safe(line)
# Message printed when a compilation has succeeded. We break
# after the first one to ensure the site is ready to be visited.
if b'Built at: ' in line:
break
else:
# If none of the lines contained the string 'Built at',
# raise an error because a build hasn't finished successfully.
raise IOError('First build never completed')
def print_proc_output():
"""Prints the proc's output until it is exhausted."""
for line in iter(lambda: proc.stdout.readline() or None, None):
common.write_stdout_safe(line)
# Start a thread to print the rest of the compiler's output to stdout.
printer_thread = threading.Thread(target=print_proc_output)
printer_thread.start()
exit_stack.callback(printer_thread.join)
yield proc
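# Sketch of combining the compiler with other managed servers (illustrative
# only; the chosen servers, port and app_dev.yaml path are assumptions):
#
#   with python_utils.ExitStack() as stack:
#       stack.enter_context(managed_redis_server())
#       stack.enter_context(managed_webpack_compiler(watch_mode=True))
#       stack.enter_context(managed_dev_appserver('app_dev.yaml', port=8181))
#       ...  # block here until the developer stops the servers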
@contextlib.contextmanager
def managed_portserver():
"""Returns context manager to start/stop the portserver gracefully.
The portserver listens at PORTSERVER_SOCKET_FILEPATH and allocates free
ports to clients. This prevents race conditions when two clients request
ports in quick succession. The local Google App Engine server that we use to
serve the development version of Oppia uses python_portpicker, which is
compatible with the portserver this function starts, to request ports.
By "compatible" we mean that python_portpicker requests a port by sending a
request consisting of the PID of the requesting process and expects a
response consisting of the allocated port number. This is the interface
provided by this portserver.
Yields:
psutil.Popen. The Popen subprocess object.
"""
# TODO(#11549): Move this to top of the file.
if common.PSUTIL_DIR not in sys.path:
# Our unit tests already configure sys.path correctly, but the
# standalone scripts do not. Because of this, the following line cannot
# be covered. This is fine since we want to cleanup this code anyway in
# #11549.
sys.path.insert(1, common.PSUTIL_DIR) # pragma: nocover
import psutil
# Check if a socket file exists. This file can exist when a previous instance
# of the portserver did not close properly. We need to remove it, as otherwise
# the portserver will fail to start.
if os.path.exists(common.PORTSERVER_SOCKET_FILEPATH):
os.remove(common.PORTSERVER_SOCKET_FILEPATH)
portserver_args = [
'python', '-m', 'scripts.run_portserver',
'--portserver_unix_socket_address', common.PORTSERVER_SOCKET_FILEPATH,
]
# OK to use shell=True here because we are passing string literals and
# constants, so there is no risk of a shell-injection attack.
proc_context = managed_process(
portserver_args, human_readable_name='Portserver', shell=True)
with proc_context as proc:
try:
yield proc
finally:
# Before exiting the proc_context, try to end the process with
# SIGINT. The portserver is configured to shut down cleanly upon
# receiving this signal.
try:
proc.send_signal(signal.SIGINT)
except OSError:
# Raised when the process has already shut down, in which case we
# can just return immediately.
return # pylint: disable=lost-exception
else:
# Otherwise, give the portserver 10 seconds to shut down after
# sending CTRL-C (SIGINT).
try:
proc.wait(timeout=10)
except psutil.TimeoutExpired:
# If the server fails to shut down, allow proc_context to
# end it by calling terminate() and/or kill().
pass
@contextlib.contextmanager
def managed_webdriver_server(chrome_version=None):
"""Returns context manager to start/stop the Webdriver server gracefully.
This context manager updates Google Chrome before starting the server.
Args:
chrome_version: str|None. The version of Google Chrome to run the tests
on. If None, then the currently-installed version of Google Chrome
is used instead.
Yields:
psutil.Process. The Webdriver process.
"""
if chrome_version is None:
# Although there are spaces between Google and Chrome in the path, we
# don't need to escape them for Popen (as opposed to on the terminal, in
# which case we would need to escape them for the command to run).
chrome_command = (
'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'
if common.is_mac_os() else 'google-chrome')
try:
output = subprocess.check_output([chrome_command, '--version'])
except OSError:
# For the error message on macOS, we need to add the backslashes in.
# This is because it is likely that a user will try to run the
# command on their terminal and, as mentioned above, the macOS
# chrome version command has spaces in the path which need to be
# escaped for successful terminal use.
raise Exception(
'Failed to execute "%s --version" command. This is used to '
'determine the chromedriver version to use. Please set the '
'chromedriver version manually using --chrome_driver_version '
'flag. To determine the chromedriver version to be used, '
'please follow the instructions mentioned in the following '
'URL:\n'
'https://chromedriver.chromium.org/downloads/version-selection'
% chrome_command.replace(' ', r'\ '))
installed_version_parts = b''.join(re.findall(rb'[0-9.]', output))
installed_version = '.'.join(
installed_version_parts.decode('utf-8').split('.')[:-1])
response = python_utils.url_open(
'https://chromedriver.storage.googleapis.com/LATEST_RELEASE_%s' % (
installed_version))
chrome_version = response.read().decode('utf-8')
python_utils.PRINT('\n\nCHROME VERSION: %s' % chrome_version)
subprocess.check_call([
common.NODE_BIN_PATH, common.WEBDRIVER_MANAGER_BIN_PATH, 'update',
'--versions.chrome', chrome_version,
])
with python_utils.ExitStack() as exit_stack:
if common.is_windows_os():
# NOTE: webdriver-manager (version 13.0.0) uses `os.arch()` to
# determine the architecture of the operating system, however, this
# function can only be used to determine the architecture of the
# machine that compiled `node`. In the case of Windows, we are using
# the portable version, which was compiled on `ia32` machine so that
# is the value returned by this `os.arch` function. Unfortunately,
# webdriver-manager seems to assume that Windows wouldn't run on the
# ia32 architecture, so its helper function for determining the download
# link returns null, which means that the application has no idea where
# to download the correct version from.
#
# https://github.com/angular/webdriver-manager/blob/b7539a5a3897a8a76abae7245f0de8175718b142/lib/provider/chromedriver.ts#L16
# https://github.com/angular/webdriver-manager/blob/b7539a5a3897a8a76abae7245f0de8175718b142/lib/provider/geckodriver.ts#L21
# https://github.com/angular/webdriver-manager/blob/b7539a5a3897a8a76abae7245f0de8175718b142/lib/provider/chromedriver.ts#L167
# https://github.com/nodejs/node/issues/17036
regex_pattern = re.escape('this.osArch = os.arch();')
arch = 'x64' if common.is_x64_architecture() else 'x86'
replacement_string = 'this.osArch = "%s";' % arch
exit_stack.enter_context(common.inplace_replace_file_context(
common.CHROME_PROVIDER_FILE_PATH, regex_pattern,
replacement_string))
exit_stack.enter_context(common.inplace_replace_file_context(
common.GECKO_PROVIDER_FILE_PATH, regex_pattern,
replacement_string))
# OK to use shell=True here because we are passing string literals and
# constants, so there is no risk of a shell-injection attack.
proc = exit_stack.enter_context(managed_process([
common.NODE_BIN_PATH, common.WEBDRIVER_MANAGER_BIN_PATH, 'start',
'--versions.chrome', chrome_version, '--quiet', '--standalone',
], human_readable_name='Webdriver manager', shell=True))
common.wait_for_port_to_be_in_use(4444)
yield proc
@contextlib.contextmanager
def managed_protractor_server(
suite_name='full', dev_mode=True, debug_mode=False,
sharding_instances=1, **kwargs):
"""Returns context manager to start/stop the Protractor server gracefully.
Args:
suite_name: str. The suite name whose tests should be run. If the value
is `full`, all tests will run.
dev_mode: bool. Whether the test is running on dev_mode.
debug_mode: bool. Whether to run the protractor tests in debugging mode.
Read the following instructions to learn how to run e2e tests in
debugging mode:
https://www.protractortest.org/#/debugging#disabled-control-flow.
sharding_instances: int. How many sharding instances to be running.
**kwargs: dict(str: *). Keyword arguments passed to psutil.Popen.
Yields:
psutil.Process. The protractor process.
"""
if sharding_instances <= 0:
raise ValueError('sharding_instances should be larger than 0')
protractor_args = [
common.NODE_BIN_PATH,
# This flag ensures tests fail if the `waitFor()` calls time out.
'--unhandled-rejections=strict',
common.PROTRACTOR_BIN_PATH, common.PROTRACTOR_CONFIG_FILE_PATH,
'--params.devMode=%s' % dev_mode,
'--suite', suite_name,
]
if debug_mode:
# NOTE: This is a flag for Node.js, not Protractor, so we insert it
# immediately after NODE_BIN_PATH.
protractor_args.insert(1, '--inspect-brk')
if sharding_instances > 1:
protractor_args.extend([
'--capabilities.shardTestFiles=True',
'--capabilities.maxInstances=%d' % sharding_instances,
])
# OK to use shell=True here because we are passing string literals and
# constants, so there is no risk of a shell-injection attack.
managed_protractor_proc = managed_process(
protractor_args, human_readable_name='Protractor Server', shell=True,
**kwargs)
with managed_protractor_proc as proc:
yield proc
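# End-to-end sketch of wiring these managers together for an e2e test run
# (illustrative only; the order, port and suite name are assumptions):
#
#   with python_utils.ExitStack() as stack:
#       stack.enter_context(managed_portserver())
#       stack.enter_context(managed_webdriver_server())
#       stack.enter_context(managed_dev_appserver('app_dev.yaml', port=8181))
#       proc = stack.enter_context(managed_protractor_server(suite_name='full'))
#       proc.wait()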
|
extract.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
" Extract 10000 episodes from SC2 "
__author__ = "Ruo-Ze Liu"
debug = True
USED_DEVICES = "-1"
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = USED_DEVICES
import sys
import threading
import time
import tensorflow as tf
from absl import app
from absl import flags
from pysc2 import maps
from pysc2.lib import stopwatch
import lib.config as C
import param as P
import mini_source_agent as mini_source_agent
# from pysc2.env import sc2_env
from lib import my_sc2_env as sc2_env
from lib.replay_buffer import Buffer
from mini_network import MiniNetwork
from strategy.terran_agent import DummyTerran
from strategy_env import SimulatePlatform
import unit.protoss_unit as P
import unit.terran_unit as T
from datetime import datetime
import multiprocessing as mp
import numpy as np
from logging import warning as logging
FLAGS = flags.FLAGS
flags.DEFINE_bool("training", False, "Whether to train agents.")
flags.DEFINE_bool("on_server", True, "Whether is running on server.")
flags.DEFINE_integer("num_for_update", 100, "Number of episodes for each train.")
flags.DEFINE_string("log_path", "./logs/", "Path for log.")
flags.DEFINE_string("device", USED_DEVICES, "Device for training.")
# Simple64
flags.DEFINE_string("map", "Simple64", "Name of a map to use.")
flags.DEFINE_bool("render", False, "Whether to render with pygame.")
flags.DEFINE_integer("screen_resolution", 64, "Resolution for screen feature layers.")
flags.DEFINE_integer("minimap_resolution", 64, "Resolution for minimap feature layers.")
flags.DEFINE_enum("agent_race", "P", sc2_env.races.keys(), "Agent's race.")
flags.DEFINE_enum("bot_race", "T", sc2_env.races.keys(), "Bot's race.")
flags.DEFINE_enum("difficulty", "7", sc2_env.difficulties.keys(), "Bot's strength.")
flags.DEFINE_integer("max_agent_steps", 18000 * 2, "Total agent steps.")
flags.DEFINE_integer("step_mul", 8, "Game steps per agent step.")
flags.DEFINE_bool("profile", False, "Whether to turn on code profiling.")
flags.DEFINE_bool("trace", False, "Whether to trace the code execution.")
flags.DEFINE_bool("save_replay", False, "Whether to replays_save a replay at the end.")
flags.DEFINE_string("replay_dir", "multi-agent/", "dir of replay to replays_save.")
flags.DEFINE_string("restore_model_path", "./model/20190122-215114_source/", "path for restore model")
flags.DEFINE_bool("restore_model", False, "Whether to restore old model")
flags.DEFINE_integer("parallel", 10, "How many processes to run in parallel.")
flags.DEFINE_integer("thread_num", 5, "How many thread to run in the process.")
flags.DEFINE_integer("port_num", 4370, "the start port to create distribute tf")
flags.DEFINE_integer("max_iters", 100, "the rl agent max run iters")
flags.DEFINE_string("game_version", None, "game version of SC2")
FLAGS(sys.argv)
# set the play map
play_map = C.get_map_class('lib.config.' + FLAGS.map)
C.my_sub_pos = play_map.my_sub_pos
C.enemy_sub_pos = play_map.enemy_sub_pos
C.enemy_main_pos = play_map.enemy_main_pos
C.base_camera_pos = play_map.base_camera_pos
if not FLAGS.on_server:
PARALLEL = 1
THREAD_NUM = 1
MAX_AGENT_STEPS = 18000
DEVICE = ['/gpu:0']
NUM_FOR_UPDATE = 2
TRAIN_ITERS = 1
PORT_NUM = FLAGS.port_num
else:
PARALLEL = FLAGS.parallel
THREAD_NUM = FLAGS.thread_num
MAX_AGENT_STEPS = FLAGS.max_agent_steps
#DEVICE = ['/gpu:' + dev for dev in FLAGS.device.split(',')]
DEVICE = ['/cpu:0']
NUM_FOR_UPDATE = FLAGS.num_for_update
TRAIN_ITERS = FLAGS.max_iters
PORT_NUM = FLAGS.port_num
LOG = FLAGS.log_path
if not os.path.exists(LOG):
os.makedirs(LOG)
SERVER_DICT = {"worker": [], "ps": []}
# define some global variables
UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event()
Counter = 0
Waiting_Counter = 0
Update_Counter = 0
Result_List = []
def run_thread(agent, game_num, Synchronizer, difficulty):
global UPDATE_EVENT, ROLLING_EVENT, Counter, Waiting_Counter, Update_Counter, Result_List
num = 0
all_num = 0
proc_name = mp.current_process().name
C._FPS = 22.4 / FLAGS.step_mul  # e.g. 2.8 when step_mul is 8
step_mul = FLAGS.step_mul
C.difficulty = difficulty
with sc2_env.SC2Env(
map_name=FLAGS.map,
agent_race=FLAGS.agent_race,
bot_race=FLAGS.bot_race,
difficulty=difficulty,
step_mul=step_mul,
score_index=-1,
game_steps_per_episode=MAX_AGENT_STEPS,
screen_size_px=(FLAGS.screen_resolution, FLAGS.screen_resolution),
minimap_size_px=(FLAGS.minimap_resolution, FLAGS.minimap_resolution),
visualize=False,
game_version=FLAGS.game_version) as env:
# env = available_actions_printer.AvailableActionsPrinter(env)
agent.set_env(env)
while all_num != game_num * TRAIN_ITERS:
agent.play()
if FLAGS.training:
# check if the num of episodes is enough to update
num += 1
all_num += 1
reward = agent.result['reward']
Counter += 1
Result_List.append(reward)
logging("(diff: %d) %d epoch: %s get %d/%d episodes! return: %d!" %
(int(difficulty), Update_Counter, proc_name, len(Result_List), game_num * THREAD_NUM, reward))
# time for update
if num == game_num:
num = 0
ROLLING_EVENT.clear()
# worker stops rolling, wait for update
if agent.index != 0 and THREAD_NUM > 1:
Waiting_Counter += 1
if Waiting_Counter == THREAD_NUM - 1: # wait for all the workers stop
UPDATE_EVENT.set()
ROLLING_EVENT.wait()
# update!
else:
if THREAD_NUM > 1:
UPDATE_EVENT.wait()
Synchronizer.wait() # wait for other processes to update
agent.update_network(Result_List)
Result_List.clear()
agent.global_buffer.reset()
Synchronizer.wait()
Update_Counter += 1
# finish update
UPDATE_EVENT.clear()
Waiting_Counter = 0
ROLLING_EVENT.set()
if FLAGS.save_replay:
env.save_replay(FLAGS.replay_dir)
agent.reset()
def Worker(index, update_game_num, Synchronizer, cluster, model_path):
config = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False,
)
config.gpu_options.allow_growth = True
worker = tf.train.Server(cluster, job_name="worker", task_index=index, config=config)
sess = tf.Session(target=worker.target, config=config)
Net = MiniNetwork(sess=sess, summary_writer=None, rl_training=FLAGS.training,
cluster=cluster, index=index, device=DEVICE[index % len(DEVICE)],
ppo_load_path=FLAGS.restore_model_path, ppo_save_path=model_path)
global_buffer = Buffer()
agents = []
for i in range(THREAD_NUM):
agent = mini_source_agent.MiniSourceAgent(index=i, global_buffer=global_buffer, net=Net,
restore_model=FLAGS.restore_model, rl_training=FLAGS.training,
strategy_agent=None)
agents.append(agent)
print("Worker %d: waiting for cluster connection..." % index)
sess.run(tf.report_uninitialized_variables())
print("Worker %d: cluster ready!" % index)
while len(sess.run(tf.report_uninitialized_variables())):
print("Worker %d: waiting for variable initialization..." % index)
time.sleep(1)
print("Worker %d: variables initialized" % index)
game_num = np.ceil(update_game_num / THREAD_NUM)  # ceil of true division so no episodes are dropped
UPDATE_EVENT.clear()
ROLLING_EVENT.set()
# Run threads
threads = []
for i in range(THREAD_NUM - 1):
t = threading.Thread(target=run_thread, args=(agents[i], game_num, Synchronizer, FLAGS.difficulty))
threads.append(t)
t.daemon = True
t.start()
time.sleep(3)
run_thread(agents[-1], game_num, Synchronizer, FLAGS.difficulty)
for t in threads:
t.join()
def Parameter_Server(Synchronizer, cluster, log_path, model_path, procs):
config = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False,
)
config.gpu_options.allow_growth = True
server = tf.train.Server(cluster, job_name="ps", task_index=0, config=config)
sess = tf.Session(target=server.target, config=config)
summary_writer = tf.summary.FileWriter(log_path)
Net = MiniNetwork(sess=sess, summary_writer=summary_writer, rl_training=FLAGS.training,
cluster=cluster, index=0, device=DEVICE[0 % len(DEVICE)],
ppo_load_path=FLAGS.restore_model_path, ppo_save_path=model_path)
agent = mini_source_agent.MiniSourceAgent(index=-1, net=Net, restore_model=FLAGS.restore_model, rl_training=FLAGS.training)
print("Parameter server: waiting for cluster connection...")
sess.run(tf.report_uninitialized_variables())
print("Parameter server: cluster ready!")
print("Parameter server: initializing variables...")
agent.init_network()
print("Parameter server: variables initialized")
update_counter = 0
max_win_rate = 0.
while update_counter < TRAIN_ITERS:
agent.reset_old_network()
# wait for update
Synchronizer.wait()
logging("Update Network!")
# TODO: measure the update time and compare CPU vs. GPU
time.sleep(1)
# update finish
Synchronizer.wait()
logging("Update Network finished!")
steps, win_rate = agent.update_summary(update_counter)
logging("Steps: %d, win rate: %f" % (steps, win_rate))
update_counter += 1
if win_rate >= max_win_rate:
agent.save_model()
max_win_rate = win_rate
return max_win_rate
def _main(unused_argv):
# create distribute tf cluster
start_port = PORT_NUM
SERVER_DICT["ps"].append("localhost:%d" % start_port)
for i in range(PARALLEL):
SERVER_DICT["worker"].append("localhost:%d" % (start_port + 1 + i))
Cluster = tf.train.ClusterSpec(SERVER_DICT)
now = datetime.now()
model_path = "./model/" + now.strftime("%Y%m%d-%H%M%S") + "_source/"
if not os.path.exists(model_path):
os.makedirs(model_path)
log_path = "./logs/" + now.strftime("%Y%m%d-%H%M%S") + "_source/"
UPDATE_GAME_NUM = NUM_FOR_UPDATE
per_update_num = np.ceil(UPDATE_GAME_NUM / PARALLEL)
Synchronizer = mp.Barrier(PARALLEL + 1)
# Run parallel process
procs = []
for index in range(PARALLEL):
p = mp.Process(name="Worker_%d" % index, target=Worker, args=(index, per_update_num, Synchronizer, Cluster, model_path))
procs.append(p)
p.daemon = True
p.start()
time.sleep(1)
win_rate = Parameter_Server(Synchronizer, Cluster, log_path, model_path, procs)
print('#######################')
print('Final Win_rate:', win_rate)
print('#######################')
for p in procs:
p.join()
'''
if FLAGS.profile:
print(stopwatch.sw)
'''
if __name__ == "__main__":
app.run(_main)
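# Example invocation (illustrative only; all flag values are assumptions):
#
#   python extract.py --on_server=True --parallel=10 --thread_num=5 \
#       --map=Simple64 --difficulty=7 --max_iters=100
#
# This starts one parameter server plus `parallel` worker processes, each of
# which runs `thread_num` SC2 environment threads and writes models under ./model/.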
|
SimulatedDevice.py
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
import random
import time
import threading
# Using the Python Device SDK for IoT Hub:
# https://github.com/Azure/azure-iot-sdk-python
# The sample connects to a device-specific MQTT endpoint on your IoT Hub.
from azure.iot.device import IoTHubDeviceClient, Message
# The device connection string to authenticate the device with your IoT hub.
# Using the Azure CLI:
# az iot hub device-identity show-connection-string --hub-name {YourIoTHubName} --device-id MyNodeDevice --output table
# CONNECTION_STRING = "HostName=hub-test1.azure-devices.net;DeviceId=simulate1;SharedAccessKey=FmjOA1axWH37RaQ24Qx9qcakk/pXuxmGhoELlZElv40="
CONNECTION_STRING = "HostName=thingspro-IoTHub-newTwin.azure-devices.net;DeviceId=andrew-simulate2;SharedAccessKey=vNBsge34JTrkFKndzqhI7OlotbfhgDcHoWqBm8F70VY="
# Define the JSON message to send to IoT Hub.
TEMPERATURE = 20.0
HUMIDITY = 60
MSG_TXT = '{{"temperature": {temperature},"humidity": {humidity}}}'
NAME = "simulate1"
def iothub_client_init():
# Create an IoT Hub client
client = IoTHubDeviceClient.create_from_connection_string(CONNECTION_STRING)
return client
def twin_update_listener(client):
global NAME
while True:
patch = client.receive_twin_desired_properties_patch() # blocking call
print("Twin patch received:")
NAME=patch["Name"]
print("name level is set to",NAME)
reported_patch = {"Name":NAME}
client.patch_twin_reported_properties(reported_patch)
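# The listener above expects a desired-properties patch that contains a "Name"
# key (sketch of the JSON shape set on the device twin in IoT Hub):
#
#   {"Name": "simulate-updated"}
#
# After applying it, the same value is echoed back as a reported property.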
def iothub_client_telemetry_sample_run():
try:
client = iothub_client_init()
print ( "IoT Hub device sending periodic messages, press Ctrl-C to exit" )
twin_update_thread = threading.Thread(target=twin_update_listener, args=(client,))
twin_update_thread.daemon = True
twin_update_thread.start()
while True:
# Build the message with simulated telemetry values.
temperature = TEMPERATURE + (random.random() * 15)
humidity = HUMIDITY + (random.random() * 20)
msg_txt_formatted = MSG_TXT.format(temperature=temperature, humidity=humidity)
message = Message(msg_txt_formatted)
# Add a custom application property to the message.
# An IoT hub can filter on these properties without access to the message body.
if temperature > 30:
message.custom_properties["temperatureAlert"] = "true"
else:
message.custom_properties["temperatureAlert"] = "false"
# Send the message.
print( "Sending message: {}".format(message) )
client.send_message(message)
print ( "Message successfully sent" )
time.sleep(1)
except KeyboardInterrupt:
print ( "IoTHubClient sample stopped" )
if __name__ == '__main__':
print ( "IoT Hub Quickstart #1 - Simulated device" )
print ( "Press Ctrl-C to exit" )
iothub_client_telemetry_sample_run()
|
proxy_test.py
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Tests for proxy app.
import threading
import logging
import BaseHTTPServer
import StringIO
from nose.tools import assert_true, assert_false
from django.test.client import Client
from desktop.lib.django_test_util import make_logged_in_client
from proxy.views import _rewrite_links
import proxy.conf
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
"""
To avoid mocking out urllib, we setup a web server
that does very little, and test proxying against it.
"""
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "text/html; charset=utf8")
self.end_headers()
self.wfile.write("Hello there.")
self.wfile.write("You requested: " + self.path + ".")
self.wfile.write("Image: <img src='/foo.jpg'>")
self.wfile.write("Link: <a href='/baz?with=parameter'>link</a>")
def do_POST(self):
self.send_response(200)
self.send_header("Content-type", "text/html; charset=utf8")
self.end_headers()
self.wfile.write("Hello there.")
self.wfile.write("You requested: " + self.path + ".")
# Somehow in this architecture read() blocks, so we read the exact
# number of bytes the test sends.
self.wfile.write("Data: " + self.rfile.read(16))
def log_message(self, fmt, *args):
logging.debug("%s - - [%s] %s" %
(self.address_string(),
self.log_date_time_string(),
fmt % args))
def run_test_server():
"""
Returns the server, and a method to close it out.
"""
# We need to proxy a server, so we go ahead and create one.
httpd = BaseHTTPServer.HTTPServer(("127.0.0.1", 0), Handler)
# Spawn a thread that serves exactly one request.
thread = threading.Thread(target=httpd.handle_request)
thread.daemon = True
thread.start()
def finish():
# Make sure the server thread is done.
print "Closing thread " + str(thread)
thread.join(10.0) # Wait at most 10 seconds
assert_false(thread.isAlive())
return httpd, finish
run_test_server.__test__ = False
def test_proxy_get():
"""
Proxying test.
"""
client = Client()
# All apps require login.
client.login(username="test", password="test")
httpd, finish = run_test_server()
try:
# Test the proxying
finish_conf = proxy.conf.WHITELIST.set_for_testing(r"127\.0\.0\.1:\d*")
try:
response_get = client.get('/proxy/127.0.0.1/%s/' % httpd.server_port, dict(foo="bar"))
finally:
finish_conf()
assert_true("Hello there" in response_get.content)
assert_true("You requested: /?foo=bar." in response_get.content)
assert_true("/proxy/127.0.0.1/%s/foo.jpg" % httpd.server_port in response_get.content)
assert_true("/proxy/127.0.0.1/%s/baz?with=parameter" % httpd.server_port in response_get.content)
finally:
finish()
def test_proxy_post():
"""
Proxying test, using POST.
"""
client = Client()
# All apps require login.
client.login(username="test", password="test")
httpd, finish = run_test_server()
try:
# Test the proxying
finish_conf = proxy.conf.WHITELIST.set_for_testing(r"127\.0\.0\.1:\d*")
try:
response_post = client.post('/proxy/127.0.0.1/%s/' % httpd.server_port, dict(foo="bar", foo2="bar"))
finally:
finish_conf()
assert_true("Hello there" in response_post.content)
assert_true("You requested: /." in response_post.content)
assert_true("foo=bar" in response_post.content)
assert_true("foo2=bar" in response_post.content)
finally:
finish()
def test_blacklist():
client = make_logged_in_client('test')
finish_confs = [
proxy.conf.WHITELIST.set_for_testing(r"localhost:\d*"),
proxy.conf.BLACKLIST.set_for_testing(r"localhost:\d*/(foo|bar)/fred/"),
]
try:
# Request 1: Hit the blacklist
resp = client.get('/proxy/localhost/1234//foo//fred/')
assert_true("is blocked" in resp.content)
# Request 2: This is not a match
httpd, finish = run_test_server()
try:
resp = client.get('/proxy/localhost/%s//foo//fred_ok' % (httpd.server_port,))
assert_true("Hello there" in resp.content)
finally:
finish()
finally:
for fin in finish_confs:
fin()
class UrlLibFileWrapper(StringIO.StringIO):
"""
urllib2.urlopen returns a file-like object; we fake it here.
"""
def __init__(self, buf, url):
StringIO.StringIO.__init__(self, buf)
self.url = url
def geturl(self):
"""URL we were initialized with."""
return self.url
def test_rewriting():
"""
Tests that simple re-writing is working.
"""
html = "<a href='foo'>bar</a><a href='http://alpha.com'>baz</a>"
assert_true('<a href="/proxy/abc.com/80/sub/foo">bar</a>' in _rewrite_links(UrlLibFileWrapper(html, "http://abc.com/sub/")),
msg="Relative links")
assert_true('<a href="/proxy/alpha.com/80/">baz</a>' in _rewrite_links(UrlLibFileWrapper(html, "http://abc.com/sub/")),
msg="Absolute links")
# Test url with port and invalid port
html = "<a href='http://alpha.com:1234/bar'>bar</a><a href='http://alpha.com:-1/baz'>baz</a>"
assert_true('<a href="/proxy/alpha.com/1234/bar">bar</a><a>baz</a>' in
_rewrite_links(UrlLibFileWrapper(html, "http://abc.com/sub/")),
msg="URL with invalid port")
html = """
<img src="/static/hadoop-logo.jpg"/><br>
"""
rewritten = _rewrite_links(UrlLibFileWrapper(html, "http://abc.com/sub/"))
assert_true('<img src="/proxy/abc.com/80/static/hadoop-logo.jpg">' in
rewritten,
msg="Rewrite images")
|
test_postgresql.py
|
import mock # for the mock.call method, importing it without a namespace breaks python3
import os
import psutil
import psycopg2
import re
import subprocess
import time
from mock import Mock, MagicMock, PropertyMock, patch, mock_open
from patroni.async_executor import CriticalTask
from patroni.dcs import Cluster, ClusterConfig, Member, RemoteMember, SyncState
from patroni.exceptions import PostgresConnectionException, PatroniException
from patroni.postgresql import Postgresql, STATE_REJECT, STATE_NO_RESPONSE
from patroni.postgresql.postmaster import PostmasterProcess
from patroni.postgresql.slots import SlotsHandler
from patroni.utils import RetryFailedError
from six.moves import builtins
from threading import Thread, current_thread
from . import BaseTestPostgresql, MockCursor, MockPostmaster, psycopg2_connect
mtime_ret = {}
def mock_mtime(filename):
if filename not in mtime_ret:
mtime_ret[filename] = time.time()
else:
mtime_ret[filename] += 1
return mtime_ret[filename]
def pg_controldata_string(*args, **kwargs):
return b"""
pg_control version number: 942
Catalog version number: 201509161
Database system identifier: 6200971513092291716
Database cluster state: shut down in recovery
pg_control last modified: Fri Oct 2 10:57:06 2015
Latest checkpoint location: 0/30000C8
Prior checkpoint location: 0/2000060
Latest checkpoint's REDO location: 0/3000090
Latest checkpoint's REDO WAL file: 000000020000000000000003
Latest checkpoint's TimeLineID: 2
Latest checkpoint's PrevTimeLineID: 2
Latest checkpoint's full_page_writes: on
Latest checkpoint's NextXID: 0/943
Latest checkpoint's NextOID: 24576
Latest checkpoint's NextMultiXactId: 1
Latest checkpoint's NextMultiOffset: 0
Latest checkpoint's oldestXID: 931
Latest checkpoint's oldestXID's DB: 1
Latest checkpoint's oldestActiveXID: 943
Latest checkpoint's oldestMultiXid: 1
Latest checkpoint's oldestMulti's DB: 1
Latest checkpoint's oldestCommitTs: 0
Latest checkpoint's newestCommitTs: 0
Time of latest checkpoint: Fri Oct 2 10:56:54 2015
Fake LSN counter for unlogged rels: 0/1
Minimum recovery ending location: 0/30241F8
Min recovery ending loc's timeline: 2
Backup start location: 0/0
Backup end location: 0/0
End-of-backup record required: no
wal_level setting: hot_standby
Current wal_log_hints setting: on
Current max_connections setting: 100
Current max_worker_processes setting: 8
Current max_prepared_xacts setting: 0
Current max_locks_per_xact setting: 64
Current track_commit_timestamp setting: off
Maximum data alignment: 8
Database block size: 8192
Blocks per segment of large relation: 131072
WAL block size: 8192
Bytes per WAL segment: 16777216
Maximum length of identifiers: 64
Maximum columns in an index: 32
Maximum size of a TOAST chunk: 1996
Size of a large-object chunk: 2048
Date/time type storage: 64-bit integers
Float4 argument passing: by value
Float8 argument passing: by value
Data page checksum version: 0
"""
@patch('subprocess.call', Mock(return_value=0))
@patch('psycopg2.connect', psycopg2_connect)
class TestPostgresql(BaseTestPostgresql):
@patch('subprocess.call', Mock(return_value=0))
@patch('os.rename', Mock())
@patch('patroni.postgresql.CallbackExecutor', Mock())
@patch.object(Postgresql, 'get_major_version', Mock(return_value=130000))
@patch.object(Postgresql, 'is_running', Mock(return_value=True))
def setUp(self):
super(TestPostgresql, self).setUp()
self.p.config.write_postgresql_conf()
@patch('subprocess.Popen')
@patch.object(Postgresql, 'wait_for_startup')
@patch.object(Postgresql, 'wait_for_port_open')
@patch.object(Postgresql, 'is_running')
@patch.object(Postgresql, 'controldata', Mock())
def test_start(self, mock_is_running, mock_wait_for_port_open, mock_wait_for_startup, mock_popen):
mock_is_running.return_value = MockPostmaster()
mock_wait_for_port_open.return_value = True
mock_wait_for_startup.return_value = False
mock_popen.return_value.stdout.readline.return_value = '123'
self.assertTrue(self.p.start())
mock_is_running.return_value = None
mock_postmaster = MockPostmaster()
with patch.object(PostmasterProcess, 'start', return_value=mock_postmaster):
pg_conf = os.path.join(self.p.data_dir, 'postgresql.conf')
open(pg_conf, 'w').close()
self.assertFalse(self.p.start(task=CriticalTask()))
with open(pg_conf) as f:
lines = f.readlines()
self.assertTrue("f.oo = 'bar'\n" in lines)
mock_wait_for_startup.return_value = None
self.assertFalse(self.p.start(10))
self.assertIsNone(self.p.start())
mock_wait_for_port_open.return_value = False
self.assertFalse(self.p.start())
task = CriticalTask()
task.cancel()
self.assertFalse(self.p.start(task=task))
self.p.cancellable.cancel()
self.assertFalse(self.p.start())
with patch('patroni.postgresql.config.ConfigHandler.effective_configuration',
PropertyMock(side_effect=Exception)):
self.assertIsNone(self.p.start())
@patch.object(Postgresql, 'pg_isready')
@patch('patroni.postgresql.polling_loop', Mock(return_value=range(1)))
def test_wait_for_port_open(self, mock_pg_isready):
mock_pg_isready.return_value = STATE_NO_RESPONSE
mock_postmaster = MockPostmaster(is_running=False)
# No pid file and postmaster death
self.assertFalse(self.p.wait_for_port_open(mock_postmaster, 1))
mock_postmaster.is_running.return_value = True
# timeout
self.assertFalse(self.p.wait_for_port_open(mock_postmaster, 1))
# pg_isready failure
mock_pg_isready.return_value = 'garbage'
self.assertTrue(self.p.wait_for_port_open(mock_postmaster, 1))
# cancelled
self.p.cancellable.cancel()
self.assertFalse(self.p.wait_for_port_open(mock_postmaster, 1))
@patch('time.sleep', Mock())
@patch.object(Postgresql, 'is_running')
@patch.object(Postgresql, '_wait_for_connection_close', Mock())
def test_stop(self, mock_is_running):
# Postmaster is not running
mock_callback = Mock()
mock_is_running.return_value = None
self.assertTrue(self.p.stop(on_safepoint=mock_callback))
mock_callback.assert_called()
# Is running, stopped successfully
mock_is_running.return_value = mock_postmaster = MockPostmaster()
mock_callback.reset_mock()
self.assertTrue(self.p.stop(on_safepoint=mock_callback))
mock_callback.assert_called()
mock_postmaster.signal_stop.assert_called()
# Timed out waiting for fast shutdown triggers immediate shutdown
mock_postmaster.wait.side_effect = [psutil.TimeoutExpired(30), psutil.TimeoutExpired(30), Mock()]
mock_callback.reset_mock()
self.assertTrue(self.p.stop(on_safepoint=mock_callback, stop_timeout=30))
mock_callback.assert_called()
mock_postmaster.signal_stop.assert_called()
# Immediate shutdown succeeded
mock_postmaster.wait.side_effect = [psutil.TimeoutExpired(30), Mock()]
self.assertTrue(self.p.stop(on_safepoint=mock_callback, stop_timeout=30))
# Stop signal failed
mock_postmaster.signal_stop.return_value = False
self.assertFalse(self.p.stop())
# Stop signal failed to find process
mock_postmaster.signal_stop.return_value = True
mock_callback.reset_mock()
self.assertTrue(self.p.stop(on_safepoint=mock_callback))
mock_callback.assert_called()
# Fast shutdown is timed out but when immediate postmaster is already gone
mock_postmaster.wait.side_effect = [psutil.TimeoutExpired(30), Mock()]
mock_postmaster.signal_stop.side_effect = [None, True]
self.assertTrue(self.p.stop(on_safepoint=mock_callback, stop_timeout=30))
def test_restart(self):
self.p.start = Mock(return_value=False)
self.assertFalse(self.p.restart())
self.assertEqual(self.p.state, 'restart failed (restarting)')
@patch('os.chmod', Mock())
@patch.object(builtins, 'open', MagicMock())
def test_write_pgpass(self):
self.p.config.write_pgpass({'host': 'localhost', 'port': '5432', 'user': 'foo'})
self.p.config.write_pgpass({'host': 'localhost', 'port': '5432', 'user': 'foo', 'password': 'bar'})
def test_checkpoint(self):
with patch.object(MockCursor, 'fetchone', Mock(return_value=(True, ))):
self.assertEqual(self.p.checkpoint({'user': 'postgres'}), 'is_in_recovery=true')
with patch.object(MockCursor, 'execute', Mock(return_value=None)):
self.assertIsNone(self.p.checkpoint())
self.assertEqual(self.p.checkpoint(timeout=10), 'not accessible or not healty')
@patch('patroni.postgresql.config.mtime', mock_mtime)
@patch('patroni.postgresql.config.ConfigHandler._get_pg_settings')
def test_check_recovery_conf(self, mock_get_pg_settings):
mock_get_pg_settings.return_value = {
'primary_conninfo': ['primary_conninfo', 'foo=', None, 'string', 'postmaster', self.p.config._auto_conf],
'recovery_min_apply_delay': ['recovery_min_apply_delay', '0', 'ms', 'integer', 'sighup', 'foo']
}
self.assertEqual(self.p.config.check_recovery_conf(None), (True, True))
self.p.config.write_recovery_conf({'standby_mode': 'on'})
self.assertEqual(self.p.config.check_recovery_conf(None), (True, True))
mock_get_pg_settings.return_value['primary_conninfo'][1] = ''
mock_get_pg_settings.return_value['recovery_min_apply_delay'][1] = '1'
self.assertEqual(self.p.config.check_recovery_conf(None), (False, False))
mock_get_pg_settings.return_value['recovery_min_apply_delay'][5] = self.p.config._auto_conf
self.assertEqual(self.p.config.check_recovery_conf(None), (True, False))
mock_get_pg_settings.return_value['recovery_min_apply_delay'][1] = '0'
self.assertEqual(self.p.config.check_recovery_conf(None), (False, False))
conninfo = {'host': '1', 'password': 'bar'}
with patch('patroni.postgresql.config.ConfigHandler.primary_conninfo_params', Mock(return_value=conninfo)):
mock_get_pg_settings.return_value['recovery_min_apply_delay'][1] = '1'
self.assertEqual(self.p.config.check_recovery_conf(None), (True, True))
mock_get_pg_settings.return_value['primary_conninfo'][1] = 'host=1 passfile='\
+ re.sub(r'([\'\\ ])', r'\\\1', self.p.config._pgpass)
mock_get_pg_settings.return_value['recovery_min_apply_delay'][1] = '0'
self.assertEqual(self.p.config.check_recovery_conf(None), (True, True))
self.p.config.write_recovery_conf({'standby_mode': 'on', 'primary_conninfo': conninfo.copy()})
self.p.config.write_postgresql_conf()
self.assertEqual(self.p.config.check_recovery_conf(None), (False, False))
with patch.object(Postgresql, 'primary_conninfo', Mock(return_value='host=1')):
mock_get_pg_settings.return_value['primary_slot_name'] = [
'primary_slot_name', '', '', 'string', 'postmaster', self.p.config._postgresql_conf]
self.assertEqual(self.p.config.check_recovery_conf(None), (True, True))
@patch.object(Postgresql, 'major_version', PropertyMock(return_value=120000))
@patch.object(Postgresql, 'is_running', MockPostmaster)
@patch.object(MockPostmaster, 'create_time', Mock(return_value=1234567), create=True)
@patch('patroni.postgresql.config.ConfigHandler._get_pg_settings')
def test__read_recovery_params(self, mock_get_pg_settings):
mock_get_pg_settings.return_value = {'primary_conninfo': ['primary_conninfo', '', None, 'string',
'postmaster', self.p.config._postgresql_conf]}
self.p.config.write_recovery_conf({'standby_mode': 'on', 'primary_conninfo': {'password': 'foo'}})
self.p.config.write_postgresql_conf()
self.assertEqual(self.p.config.check_recovery_conf(None), (False, False))
self.assertEqual(self.p.config.check_recovery_conf(None), (False, False))
mock_get_pg_settings.side_effect = Exception
with patch('patroni.postgresql.config.mtime', mock_mtime):
self.assertEqual(self.p.config.check_recovery_conf(None), (True, True))
@patch.object(Postgresql, 'major_version', PropertyMock(return_value=100000))
@patch.object(Postgresql, 'primary_conninfo', Mock(return_value='host=1'))
def test__read_recovery_params_pre_v12(self):
self.p.config.write_recovery_conf({'standby_mode': 'off', 'primary_conninfo': {'password': 'foo'}})
self.assertEqual(self.p.config.check_recovery_conf(None), (True, True))
self.assertEqual(self.p.config.check_recovery_conf(None), (True, True))
self.p.config.write_recovery_conf({'restore_command': '\n'})
with patch('patroni.postgresql.config.mtime', mock_mtime):
self.assertEqual(self.p.config.check_recovery_conf(None), (True, True))
def test_write_postgresql_and_sanitize_auto_conf(self):
read_data = 'primary_conninfo = foo\nfoo = bar\n'
with open(os.path.join(self.p.data_dir, 'postgresql.auto.conf'), 'w') as f:
f.write(read_data)
mock_read_auto = mock_open(read_data=read_data)
mock_read_auto.return_value.__iter__ = lambda o: iter(o.readline, '')
with patch.object(builtins, 'open', Mock(side_effect=[mock_open()(), mock_read_auto(), IOError])),\
patch('os.chmod', Mock()):
self.p.config.write_postgresql_conf()
with patch.object(builtins, 'open', Mock(side_effect=[mock_open()(), IOError])), patch('os.chmod', Mock()):
self.p.config.write_postgresql_conf()
self.p.config.write_recovery_conf({'foo': 'bar'})
self.p.config.write_postgresql_conf()
@patch.object(Postgresql, 'is_running', Mock(return_value=False))
@patch.object(Postgresql, 'start', Mock())
def test_follow(self):
self.p.call_nowait('on_start')
m = RemoteMember('1', {'restore_command': '2', 'primary_slot_name': 'foo', 'conn_kwargs': {'host': 'bar'}})
self.p.follow(m)
@patch.object(Postgresql, 'is_running', Mock(return_value=True))
def test_sync_replication_slots(self):
self.p.start()
config = ClusterConfig(1, {'slots': {'test_3': {'database': 'a', 'plugin': 'b'},
'A': 0, 'ls': 0, 'b': {'type': 'logical', 'plugin': '1'}},
'ignore_slots': [{'name': 'blabla'}]}, 1)
cluster = Cluster(True, config, self.leader, 0, [self.me, self.other, self.leadermem], None, None, None)
with mock.patch('patroni.postgresql.Postgresql._query', Mock(side_effect=psycopg2.OperationalError)):
self.p.slots_handler.sync_replication_slots(cluster)
self.p.slots_handler.sync_replication_slots(cluster)
with mock.patch('patroni.postgresql.Postgresql.role', new_callable=PropertyMock(return_value='replica')):
self.p.slots_handler.sync_replication_slots(cluster)
with patch.object(SlotsHandler, 'drop_replication_slot', Mock(return_value=True)),\
patch('patroni.dcs.logger.error', new_callable=Mock()) as errorlog_mock:
alias1 = Member(0, 'test-3', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5436/postgres'})
alias2 = Member(0, 'test.3', 28, {'conn_url': 'postgres://replicator:rep-pass@127.0.0.1:5436/postgres'})
cluster.members.extend([alias1, alias2])
self.p.slots_handler.sync_replication_slots(cluster)
self.assertEqual(errorlog_mock.call_count, 5)
ca = errorlog_mock.call_args_list[0][0][1]
self.assertTrue("test-3" in ca, "non matching {0}".format(ca))
self.assertTrue("test.3" in ca, "non matching {0}".format(ca))
@patch.object(MockCursor, 'execute', Mock(side_effect=psycopg2.OperationalError))
def test__query(self):
self.assertRaises(PostgresConnectionException, self.p._query, 'blabla')
self.p._state = 'restarting'
self.assertRaises(RetryFailedError, self.p._query, 'blabla')
def test_query(self):
self.p.query('select 1')
self.assertRaises(PostgresConnectionException, self.p.query, 'RetryFailedError')
self.assertRaises(psycopg2.ProgrammingError, self.p.query, 'blabla')
@patch.object(Postgresql, 'pg_isready', Mock(return_value=STATE_REJECT))
def test_is_leader(self):
self.assertTrue(self.p.is_leader())
self.p.reset_cluster_info_state()
with patch.object(Postgresql, '_query', Mock(side_effect=RetryFailedError(''))):
self.assertRaises(PostgresConnectionException, self.p.is_leader)
@patch.object(Postgresql, 'controldata',
Mock(return_value={'Database cluster state': 'shut down', 'Latest checkpoint location': 'X/678'}))
def test_latest_checkpoint_location(self):
self.assertIsNone(self.p.latest_checkpoint_location())
def test_reload(self):
self.assertTrue(self.p.reload())
@patch.object(Postgresql, 'is_running')
def test_is_healthy(self, mock_is_running):
mock_is_running.return_value = True
self.assertTrue(self.p.is_healthy())
mock_is_running.return_value = False
self.assertFalse(self.p.is_healthy())
@patch('psutil.Popen')
def test_promote(self, mock_popen):
mock_popen.return_value.wait.return_value = 0
task = CriticalTask()
self.assertTrue(self.p.promote(0, task))
self.p.set_role('replica')
self.p.config._config['pre_promote'] = 'test'
with patch('patroni.postgresql.cancellable.CancellableSubprocess.is_cancelled', PropertyMock(return_value=1)):
self.assertFalse(self.p.promote(0, task))
mock_popen.side_effect = Exception
self.assertFalse(self.p.promote(0, task))
task.reset()
task.cancel()
self.assertFalse(self.p.promote(0, task))
def test_timeline_wal_position(self):
self.assertEqual(self.p.timeline_wal_position(), (1, 2, 1))
Thread(target=self.p.timeline_wal_position).start()
@patch.object(PostmasterProcess, 'from_pidfile')
def test_is_running(self, mock_frompidfile):
# Cached postmaster running
mock_postmaster = self.p._postmaster_proc = MockPostmaster()
self.assertEqual(self.p.is_running(), mock_postmaster)
# Cached postmaster not running, no postmaster running
mock_postmaster.is_running.return_value = False
mock_frompidfile.return_value = None
self.assertEqual(self.p.is_running(), None)
self.assertEqual(self.p._postmaster_proc, None)
# No cached postmaster, postmaster running
mock_frompidfile.return_value = mock_postmaster2 = MockPostmaster()
self.assertEqual(self.p.is_running(), mock_postmaster2)
self.assertEqual(self.p._postmaster_proc, mock_postmaster2)
@patch('shlex.split', Mock(side_effect=OSError))
def test_call_nowait(self):
self.p.set_role('replica')
self.assertIsNone(self.p.call_nowait('on_start'))
self.p.bootstrapping = True
self.assertIsNone(self.p.call_nowait('on_start'))
def test_non_existing_callback(self):
self.assertFalse(self.p.call_nowait('foobar'))
@patch.object(Postgresql, 'is_running', Mock(return_value=MockPostmaster()))
def test_is_leader_exception(self):
self.p.start()
self.p.query = Mock(side_effect=psycopg2.OperationalError("not supported"))
self.assertTrue(self.p.stop())
@patch('os.rename', Mock())
@patch('os.path.isdir', Mock(return_value=True))
@patch('os.unlink', Mock())
@patch('os.symlink', Mock())
@patch('patroni.postgresql.Postgresql.pg_wal_realpath', Mock(return_value={'pg_wal': '/mnt/pg_wal'}))
@patch('patroni.postgresql.Postgresql.pg_tblspc_realpaths', Mock(return_value={'42': '/mnt/tablespaces/archive'}))
def test_move_data_directory(self):
self.p.move_data_directory()
with patch('os.rename', Mock(side_effect=OSError)):
self.p.move_data_directory()
@patch('os.listdir', Mock(return_value=['recovery.conf']))
@patch('os.path.exists', Mock(return_value=True))
@patch.object(Postgresql, 'controldata', Mock())
def test_get_postgres_role_from_data_directory(self):
self.assertEqual(self.p.get_postgres_role_from_data_directory(), 'replica')
def test_remove_data_directory(self):
def _symlink(src, dst):
            if os.name != 'nt':  # os.symlink needs admin rights on Windows, so skip it there
os.symlink(src, dst)
os.makedirs(os.path.join(self.p.data_dir, 'foo'))
_symlink('foo', os.path.join(self.p.data_dir, 'pg_wal'))
os.makedirs(os.path.join(self.p.data_dir, 'foo_tsp'))
pg_tblspc = os.path.join(self.p.data_dir, 'pg_tblspc')
os.makedirs(pg_tblspc)
_symlink('../foo_tsp', os.path.join(pg_tblspc, '12345'))
self.p.remove_data_directory()
open(self.p.data_dir, 'w').close()
self.p.remove_data_directory()
_symlink('unexisting', self.p.data_dir)
with patch('os.unlink', Mock(side_effect=OSError)):
self.p.remove_data_directory()
self.p.remove_data_directory()
@patch('patroni.postgresql.Postgresql._version_file_exists', Mock(return_value=True))
def test_controldata(self):
with patch('subprocess.check_output', Mock(return_value=0, side_effect=pg_controldata_string)):
data = self.p.controldata()
self.assertEqual(len(data), 50)
self.assertEqual(data['Database cluster state'], 'shut down in recovery')
self.assertEqual(data['wal_log_hints setting'], 'on')
self.assertEqual(int(data['Database block size']), 8192)
with patch('subprocess.check_output', Mock(side_effect=subprocess.CalledProcessError(1, ''))):
self.assertEqual(self.p.controldata(), {})
@patch('patroni.postgresql.Postgresql._version_file_exists', Mock(return_value=True))
@patch('subprocess.check_output', MagicMock(return_value=0, side_effect=pg_controldata_string))
def test_sysid(self):
self.assertEqual(self.p.sysid, "6200971513092291716")
@patch('os.path.isfile', Mock(return_value=True))
@patch('shutil.copy', Mock(side_effect=IOError))
def test_save_configuration_files(self):
self.p.config.save_configuration_files()
@patch('os.path.isfile', Mock(side_effect=[False, True]))
@patch('shutil.copy', Mock(side_effect=IOError))
def test_restore_configuration_files(self):
self.p.config.restore_configuration_files()
def test_can_create_replica_without_replication_connection(self):
self.p.config._config['create_replica_method'] = []
self.assertFalse(self.p.can_create_replica_without_replication_connection())
self.p.config._config['create_replica_method'] = ['wale', 'basebackup']
self.p.config._config['wale'] = {'command': 'foo', 'no_master': 1}
self.assertTrue(self.p.can_create_replica_without_replication_connection())
def test_replica_method_can_work_without_replication_connection(self):
self.assertFalse(self.p.replica_method_can_work_without_replication_connection('basebackup'))
self.assertFalse(self.p.replica_method_can_work_without_replication_connection('foobar'))
self.p.config._config['foo'] = {'command': 'bar', 'no_master': 1}
self.assertTrue(self.p.replica_method_can_work_without_replication_connection('foo'))
self.p.config._config['foo'] = {'command': 'bar'}
self.assertFalse(self.p.replica_method_can_work_without_replication_connection('foo'))
@patch('time.sleep', Mock())
@patch.object(Postgresql, 'is_running', Mock(return_value=True))
@patch.object(MockCursor, 'fetchone')
def test_reload_config(self, mock_fetchone):
mock_fetchone.return_value = (1,)
parameters = self._PARAMETERS.copy()
parameters.pop('f.oo')
parameters['wal_buffers'] = '512'
config = {'pg_hba': [''], 'pg_ident': [''], 'use_unix_socket': True, 'authentication': {},
'retry_timeout': 10, 'listen': '*', 'krbsrvname': 'postgres', 'parameters': parameters}
self.p.reload_config(config)
mock_fetchone.side_effect = Exception
parameters['b.ar'] = 'bar'
self.p.reload_config(config)
parameters['autovacuum'] = 'on'
self.p.reload_config(config)
parameters['autovacuum'] = 'off'
parameters.pop('search_path')
config['listen'] = '*:5433'
self.p.reload_config(config)
parameters['unix_socket_directories'] = '.'
self.p.reload_config(config)
self.p.config.resolve_connection_addresses()
@patch.object(Postgresql, '_version_file_exists', Mock(return_value=True))
def test_get_major_version(self):
with patch.object(builtins, 'open', mock_open(read_data='9.4')):
self.assertEqual(self.p.get_major_version(), 90400)
with patch.object(builtins, 'open', Mock(side_effect=Exception)):
self.assertEqual(self.p.get_major_version(), 0)
def test_postmaster_start_time(self):
with patch.object(MockCursor, "fetchone", Mock(return_value=('foo', True, '', '', '', '', False))):
self.assertEqual(self.p.postmaster_start_time(), 'foo')
t = Thread(target=self.p.postmaster_start_time)
t.start()
t.join()
with patch.object(MockCursor, "execute", side_effect=psycopg2.Error):
self.assertIsNone(self.p.postmaster_start_time())
def test_check_for_startup(self):
with patch('subprocess.call', return_value=0):
self.p._state = 'starting'
self.assertFalse(self.p.check_for_startup())
self.assertEqual(self.p.state, 'running')
with patch('subprocess.call', return_value=1):
self.p._state = 'starting'
self.assertTrue(self.p.check_for_startup())
self.assertEqual(self.p.state, 'starting')
with patch('subprocess.call', return_value=2):
self.p._state = 'starting'
self.assertFalse(self.p.check_for_startup())
self.assertEqual(self.p.state, 'start failed')
with patch('subprocess.call', return_value=0):
self.p._state = 'running'
self.assertFalse(self.p.check_for_startup())
self.assertEqual(self.p.state, 'running')
with patch('subprocess.call', return_value=127):
self.p._state = 'running'
self.assertFalse(self.p.check_for_startup())
self.assertEqual(self.p.state, 'running')
self.p._state = 'starting'
self.assertFalse(self.p.check_for_startup())
self.assertEqual(self.p.state, 'running')
def test_wait_for_startup(self):
state = {'sleeps': 0, 'num_rejects': 0, 'final_return': 0}
self.__thread_ident = current_thread().ident
def increment_sleeps(*args):
if current_thread().ident == self.__thread_ident:
print("Sleep")
state['sleeps'] += 1
def isready_return(*args):
ret = 1 if state['sleeps'] < state['num_rejects'] else state['final_return']
print("Isready {0} {1}".format(ret, state))
return ret
def time_in_state(*args):
return state['sleeps']
with patch('subprocess.call', side_effect=isready_return):
with patch('time.sleep', side_effect=increment_sleeps):
self.p.time_in_state = Mock(side_effect=time_in_state)
self.p._state = 'stopped'
self.assertTrue(self.p.wait_for_startup())
self.assertEqual(state['sleeps'], 0)
self.p._state = 'starting'
state['num_rejects'] = 5
self.assertTrue(self.p.wait_for_startup())
self.assertEqual(state['sleeps'], 5)
self.p._state = 'starting'
state['sleeps'] = 0
state['final_return'] = 2
self.assertFalse(self.p.wait_for_startup())
self.p._state = 'starting'
state['sleeps'] = 0
state['final_return'] = 0
self.assertFalse(self.p.wait_for_startup(timeout=2))
self.assertEqual(state['sleeps'], 3)
with patch.object(Postgresql, 'check_startup_state_changed', Mock(return_value=False)):
self.p.cancellable.cancel()
self.p._state = 'starting'
self.assertIsNone(self.p.wait_for_startup())
def test_pick_sync_standby(self):
cluster = Cluster(True, None, self.leader, 0, [self.me, self.other, self.leadermem], None,
SyncState(0, self.me.name, self.leadermem.name), None)
mock_cursor = Mock()
mock_cursor.fetchone.return_value = ('remote_apply',)
with patch.object(Postgresql, "query", side_effect=[
mock_cursor,
[(self.leadermem.name, 'streaming', 'sync'),
(self.me.name, 'streaming', 'async'),
(self.other.name, 'streaming', 'async')]
]):
self.assertEqual(self.p.pick_synchronous_standby(cluster), ([self.leadermem.name], [self.leadermem.name]))
with patch.object(Postgresql, "query", side_effect=[
mock_cursor,
[(self.leadermem.name, 'streaming', 'potential'),
(self.me.name, 'streaming', 'async'),
(self.other.name, 'streaming', 'async')]
]):
self.assertEqual(self.p.pick_synchronous_standby(cluster), ([self.leadermem.name], []))
with patch.object(Postgresql, "query", side_effect=[
mock_cursor,
[(self.me.name, 'streaming', 'async'),
(self.other.name, 'streaming', 'async')]
]):
self.assertEqual(self.p.pick_synchronous_standby(cluster), ([self.me.name], []))
with patch.object(Postgresql, "query", side_effect=[
mock_cursor,
[('missing', 'streaming', 'sync'),
(self.me.name, 'streaming', 'async'),
(self.other.name, 'streaming', 'async')]
]):
self.assertEqual(self.p.pick_synchronous_standby(cluster), ([self.me.name], []))
with patch.object(Postgresql, "query", side_effect=[mock_cursor, []]):
self.p._major_version = 90400
self.assertEqual(self.p.pick_synchronous_standby(cluster), ([], []))
def test_set_sync_standby(self):
def value_in_conf():
with open(os.path.join(self.p.data_dir, 'postgresql.conf')) as f:
for line in f:
if line.startswith('synchronous_standby_names'):
return line.strip()
mock_reload = self.p.reload = Mock()
self.p.config.set_synchronous_standby(['n1'])
self.assertEqual(value_in_conf(), "synchronous_standby_names = 'n1'")
mock_reload.assert_called()
mock_reload.reset_mock()
self.p.config.set_synchronous_standby(['n1'])
mock_reload.assert_not_called()
self.assertEqual(value_in_conf(), "synchronous_standby_names = 'n1'")
self.p.config.set_synchronous_standby(['n1', 'n2'])
mock_reload.assert_called()
self.assertEqual(value_in_conf(), "synchronous_standby_names = '2 (n1,n2)'")
mock_reload.reset_mock()
self.p.config.set_synchronous_standby([])
mock_reload.assert_called()
self.assertEqual(value_in_conf(), None)
def test_get_server_parameters(self):
config = {'synchronous_mode': True, 'parameters': {'wal_level': 'hot_standby'}, 'listen': '0'}
self.p.config.get_server_parameters(config)
config['synchronous_mode_strict'] = True
self.p.config.get_server_parameters(config)
self.p.config.set_synchronous_standby('foo')
self.assertTrue(str(self.p.config.get_server_parameters(config)).startswith('{'))
@patch('time.sleep', Mock())
def test__wait_for_connection_close(self):
mock_postmaster = MockPostmaster()
with patch.object(Postgresql, 'is_running', Mock(return_value=mock_postmaster)):
mock_postmaster.is_running.side_effect = [True, False, False]
mock_callback = Mock()
self.p.stop(on_safepoint=mock_callback)
mock_postmaster.is_running.side_effect = [True, False, False]
with patch.object(MockCursor, "execute", Mock(side_effect=psycopg2.Error)):
self.p.stop(on_safepoint=mock_callback)
def test_terminate_starting_postmaster(self):
mock_postmaster = MockPostmaster()
self.p.terminate_starting_postmaster(mock_postmaster)
mock_postmaster.signal_stop.assert_called()
mock_postmaster.wait.assert_called()
def test_replica_cached_timeline(self):
self.assertEqual(self.p.replica_cached_timeline(2), 3)
def test_get_master_timeline(self):
self.assertEqual(self.p.get_master_timeline(), 1)
@patch.object(Postgresql, 'get_postgres_role_from_data_directory', Mock(return_value='replica'))
def test__build_effective_configuration(self):
with patch.object(Postgresql, 'controldata',
Mock(return_value={'max_connections setting': '200',
'max_worker_processes setting': '20',
'max_locks_per_xact setting': '100',
'max_wal_senders setting': 10})):
self.p.cancellable.cancel()
self.assertFalse(self.p.start())
self.assertTrue(self.p.pending_restart)
@patch('os.path.exists', Mock(return_value=True))
@patch('os.path.isfile', Mock(return_value=False))
def test_pgpass_is_dir(self):
self.assertRaises(PatroniException, self.setUp)
@patch.object(Postgresql, '_query', Mock(side_effect=RetryFailedError('')))
def test_received_timeline(self):
self.p.set_role('standby_leader')
self.p.reset_cluster_info_state()
self.assertRaises(PostgresConnectionException, self.p.received_timeline)
def test__write_recovery_params(self):
self.p.config._write_recovery_params(Mock(), {'pause_at_recovery_target': 'false'})
with patch.object(Postgresql, 'major_version', PropertyMock(return_value=90400)):
self.p.config._write_recovery_params(Mock(), {'recovery_target_action': 'PROMOTE'})
|
engine.py
|
"""
All gateways live in the self.gateways dict, which backs the "Connect" menu of the vnpy UI.
Subscribe flow:
1. add_gateway populates the self.gateways dict
2. get_gateway looks up the gateway instance (e.g. CtpGateway)
3. the gateway instance's subscribe method is called
4. the underlying API subscribes using the symbol/exchange pair
"""
import logging
import smtplib
import os
from abc import ABC
from datetime import datetime
from email.message import EmailMessage
from queue import Empty, Queue
from threading import Thread
from typing import Any, Sequence, Type
from vnpy.event import Event, EventEngine
from .app import BaseApp
from .event import (
EVENT_TICK,
EVENT_ORDER,
EVENT_TRADE,
EVENT_POSITION,
EVENT_ACCOUNT,
EVENT_CONTRACT,
EVENT_LOG
)
from .gateway import BaseGateway
from .object import (
CancelRequest,
LogData,
OrderRequest,
SubscribeRequest,
HistoryRequest
)
from .setting import SETTINGS
from .utility import get_folder_path, TRADER_DIR
class MainEngine:
"""
    Main engine. Acts as the central dispatcher for gateways, apps and the underlying APIs.
"""
def __init__(self, event_engine: EventEngine = None):
""""""
if event_engine:
self.event_engine = event_engine
else:
            self.event_engine = EventEngine()  # create an event engine if none was provided
        self.event_engine.start()  # start the event-driven engine (its queue get blocks for 1 second by default)
        self.gateways = {}   # gateway instances, keyed by gateway_name
        self.engines = {}    # function engines, keyed by engine_name
        self.apps = {}       # apps, keyed by app_name
        self.exchanges = []  # exchanges supported by the added gateways
        os.chdir(TRADER_DIR)  # Change working directory to the .vntrader folder under the user's home directory
        self.init_engines()  # Initialize function engines
def add_engine(self, engine_class: Any):
"""
        Add function engine.
"""
engine = engine_class(self, self.event_engine)
self.engines[engine.engine_name] = engine
return engine
def add_gateway(self, gateway_class: Type[BaseGateway]):
"""
Add gateway.
        Register a trading interface (the low-level layer): instantiate the given
        gateway class (e.g. CtpGateway) with the event engine, store the instance
        in self.gateways and return it.
"""
        # Instantiate the concrete gateway class (e.g. CtpGateway, not BaseGateway)
        # with the event engine that was passed when MainEngine was created,
        # and use the instance's gateway_name attribute as the dict key.
gateway = gateway_class(self.event_engine)
        # Store it in self.gateways so that get_gateway() below can retrieve it.
self.gateways[gateway.gateway_name] = gateway
# Add gateway supported exchanges into engine
        # gateway.exchanges is a class attribute (a list), not an instance attribute.
for exchange in gateway.exchanges:
            # Append any exchange that this engine does not know about yet.
if exchange not in self.exchanges:
self.exchanges.append(exchange)
        return gateway  # return the gateway instance (e.g. CtpGateway)
def add_app(self, app_class: Type[BaseApp]):
"""
Add app.
        Register an upper-level application and add its engine.
"""
app = app_class()
self.apps[app.app_name] = app
engine = self.add_engine(app.engine_class)
return engine
def init_engines(self):
"""
Init all engines.
"""
self.add_engine(LogEngine)
self.add_engine(OmsEngine)
self.add_engine(EmailEngine)
def write_log(self, msg: str, source: str = ""):
"""
        Put log event with specific message.
"""
        # LogData inherits from BaseData, which defines gateway_name, so the source can be passed here.
log = LogData(msg=msg, gateway_name=source)
event = Event(EVENT_LOG, log)
self.event_engine.put(event)
def get_gateway(self, gateway_name: str):
"""
Return gateway object by name.
        Look the gateway instance up in self.gateways by name and return it.
"""
gateway = self.gateways.get(gateway_name, None)
if not gateway:
self.write_log(f"找不到底层接口:{gateway_name}")
return gateway
def get_engine(self, engine_name: str):
"""
Return engine object by name.
"""
engine = self.engines.get(engine_name, None)
if not engine:
self.write_log(f"找不到引擎:{engine_name}")
return engine
def get_default_setting(self, gateway_name: str):
"""
Get default setting dict of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.get_default_setting()
return None
def get_all_gateway_names(self):
"""
        Get all names of gateways added in the main engine.
"""
return list(self.gateways.keys())
def get_all_apps(self):
"""
Get all app objects.
"""
return list(self.apps.values())
def get_all_exchanges(self):
"""
Get all exchanges.
"""
return self.exchanges
def connect(self, setting: dict, gateway_name: str):
"""
Start connection of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.connect(setting)
def subscribe(self, req: SubscribeRequest, gateway_name: str):
"""
Subscribe tick data update of a specific gateway.
        Looks up the gateway instance by name and forwards the subscribe request to it.
"""
        # Retrieve the gateway instance (e.g. CtpGateway)
gateway = self.get_gateway(gateway_name)
        # Call the gateway's subscribe method, which delegates to the underlying market-data
        # API (self.md_api.subscribe(req)); the request identifies the contract as {symbol}.{exchange.value}.
if gateway:
gateway.subscribe(req)
def send_order(self, req: OrderRequest, gateway_name: str):
"""
Send new order request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.send_order(req)
else:
return ""
def cancel_order(self, req: CancelRequest, gateway_name: str):
"""
Send cancel order request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.cancel_order(req)
def send_orders(self, reqs: Sequence[OrderRequest], gateway_name: str):
"""
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.send_orders(reqs)
else:
return ["" for req in reqs]
def cancel_orders(self, reqs: Sequence[CancelRequest], gateway_name: str):
"""
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.cancel_orders(reqs)
def query_history(self, req: HistoryRequest, gateway_name: str):
"""
        Query history data from a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.query_history(req)
else:
return None
def close(self):
"""
Make sure every gateway and app is closed properly before
programme exit.
"""
# Stop event engine first to prevent new timer event.
self.event_engine.stop()
for engine in self.engines.values():
engine.close()
for gateway in self.gateways.values():
gateway.close()
class BaseEngine(ABC):
"""
    Abstract class for implementing a function engine.
"""
def __init__(
self,
main_engine: MainEngine,
event_engine: EventEngine,
engine_name: str,
):
""""""
self.main_engine = main_engine
self.event_engine = event_engine
self.engine_name = engine_name
def close(self):
""""""
pass
class LogEngine(BaseEngine):
"""
Processes log event and output with logging module.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(LogEngine, self).__init__(main_engine, event_engine, "log")
if not SETTINGS["log.active"]:
return
self.level = SETTINGS["log.level"]
self.logger = logging.getLogger("VN Trader")
self.logger.setLevel(self.level)
self.formatter = logging.Formatter(
"%(asctime)s %(levelname)s: %(message)s"
)
self.add_null_handler()
if SETTINGS["log.console"]:
self.add_console_handler()
if SETTINGS["log.file"]:
self.add_file_handler()
self.register_event()
def add_null_handler(self):
"""
Add null handler for logger.
"""
null_handler = logging.NullHandler()
self.logger.addHandler(null_handler)
def add_console_handler(self):
"""
Add console output of log.
"""
console_handler = logging.StreamHandler()
console_handler.setLevel(self.level)
console_handler.setFormatter(self.formatter)
self.logger.addHandler(console_handler)
def add_file_handler(self):
"""
Add file output of log.
"""
today_date = datetime.now().strftime("%Y%m%d")
filename = f"vt_{today_date}.log"
log_path = get_folder_path("log")
file_path = log_path.joinpath(filename)
file_handler = logging.FileHandler(
file_path, mode="a", encoding="utf8"
)
file_handler.setLevel(self.level)
file_handler.setFormatter(self.formatter)
self.logger.addHandler(file_handler)
def register_event(self):
""""""
self.event_engine.register(EVENT_LOG, self.process_log_event)
def process_log_event(self, event: Event):
"""
Process log event.
"""
log = event.data
self.logger.log(log.level, log.msg)
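    # Illustrative global settings consumed by this engine (keys taken from the code
    # above; the values shown are assumptions, not defaults of this module):
    #
    #   SETTINGS["log.active"]  = True
    #   SETTINGS["log.level"]   = logging.INFO
    #   SETTINGS["log.console"] = True   # mirror log records to stdout
    #   SETTINGS["log.file"]    = True   # write vt_YYYYMMDD.log under the log folder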
class OmsEngine(BaseEngine):
"""
Provides order management system function for VN Trader.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(OmsEngine, self).__init__(main_engine, event_engine, "oms")
self.ticks = {}
self.orders = {}
self.trades = {}
self.positions = {}
self.accounts = {}
self.contracts = {}
self.active_orders = {}
self.add_function()
self.register_event()
def add_function(self):
"""Add query function to main engine."""
self.main_engine.get_tick = self.get_tick
self.main_engine.get_order = self.get_order
self.main_engine.get_trade = self.get_trade
self.main_engine.get_position = self.get_position
self.main_engine.get_account = self.get_account
self.main_engine.get_contract = self.get_contract
self.main_engine.get_all_ticks = self.get_all_ticks
self.main_engine.get_all_orders = self.get_all_orders
self.main_engine.get_all_trades = self.get_all_trades
self.main_engine.get_all_positions = self.get_all_positions
self.main_engine.get_all_accounts = self.get_all_accounts
self.main_engine.get_all_contracts = self.get_all_contracts
self.main_engine.get_all_active_orders = self.get_all_active_orders
def register_event(self):
""""""
self.event_engine.register(EVENT_TICK, self.process_tick_event)
self.event_engine.register(EVENT_ORDER, self.process_order_event)
self.event_engine.register(EVENT_TRADE, self.process_trade_event)
self.event_engine.register(EVENT_POSITION, self.process_position_event)
self.event_engine.register(EVENT_ACCOUNT, self.process_account_event)
self.event_engine.register(EVENT_CONTRACT, self.process_contract_event)
def process_tick_event(self, event: Event):
""""""
tick = event.data
self.ticks[tick.vt_symbol] = tick
def process_order_event(self, event: Event):
""""""
order = event.data
self.orders[order.vt_orderid] = order
# If order is active, then update data in dict.
if order.is_active():
self.active_orders[order.vt_orderid] = order
        # Otherwise, pop the inactive order from the dict.
elif order.vt_orderid in self.active_orders:
self.active_orders.pop(order.vt_orderid)
def process_trade_event(self, event: Event):
""""""
trade = event.data
self.trades[trade.vt_tradeid] = trade
def process_position_event(self, event: Event):
""""""
position = event.data
self.positions[position.vt_positionid] = position
def process_account_event(self, event: Event):
""""""
account = event.data
self.accounts[account.vt_accountid] = account
def process_contract_event(self, event: Event):
""""""
contract = event.data
self.contracts[contract.vt_symbol] = contract
def get_tick(self, vt_symbol):
"""
Get latest market tick data by vt_symbol.
"""
return self.ticks.get(vt_symbol, None)
def get_order(self, vt_orderid):
"""
Get latest order data by vt_orderid.
"""
return self.orders.get(vt_orderid, None)
def get_trade(self, vt_tradeid):
"""
Get trade data by vt_tradeid.
"""
return self.trades.get(vt_tradeid, None)
def get_position(self, vt_positionid):
"""
Get latest position data by vt_positionid.
"""
return self.positions.get(vt_positionid, None)
def get_account(self, vt_accountid):
"""
Get latest account data by vt_accountid.
"""
return self.accounts.get(vt_accountid, None)
def get_contract(self, vt_symbol):
"""
Get contract data by vt_symbol.
"""
return self.contracts.get(vt_symbol, None)
def get_all_ticks(self):
"""
Get all tick data.
"""
return list(self.ticks.values())
def get_all_orders(self):
"""
Get all order data.
"""
return list(self.orders.values())
def get_all_trades(self):
"""
Get all trade data.
"""
return list(self.trades.values())
def get_all_positions(self):
"""
Get all position data.
"""
return list(self.positions.values())
def get_all_accounts(self):
"""
Get all account data.
"""
return list(self.accounts.values())
def get_all_contracts(self):
"""
Get all contract data.
"""
return list(self.contracts.values())
def get_all_active_orders(self, vt_symbol: str = ""):
"""
Get all active orders by vt_symbol.
If vt_symbol is empty, return all active orders.
"""
if not vt_symbol:
return list(self.active_orders.values())
else:
active_orders = [
order
for order in self.active_orders.values()
if order.vt_symbol == vt_symbol
]
return active_orders
class EmailEngine(BaseEngine):
"""
Provides email sending function for VN Trader.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(EmailEngine, self).__init__(main_engine, event_engine, "email")
self.thread = Thread(target=self.run)
self.queue = Queue()
self.active = False
self.main_engine.send_email = self.send_email
def send_email(self, subject: str, content: str, receiver: str = ""):
""""""
# Start email engine when sending first email.
if not self.active:
self.start()
# Use default receiver if not specified.
if not receiver:
receiver = SETTINGS["email.receiver"]
msg = EmailMessage()
msg["From"] = SETTINGS["email.sender"]
msg["To"] = SETTINGS["email.receiver"]
msg["Subject"] = subject
msg.set_content(content)
self.queue.put(msg)
def run(self):
""""""
while self.active:
try:
msg = self.queue.get(block=True, timeout=1)
with smtplib.SMTP_SSL(
SETTINGS["email.server"], SETTINGS["email.port"]
) as smtp:
smtp.login(
SETTINGS["email.username"], SETTINGS["email.password"]
)
smtp.send_message(msg)
except Empty:
pass
def start(self):
""""""
self.active = True
self.thread.start()
def close(self):
""""""
if not self.active:
return
self.active = False
self.thread.join()
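    # Usage sketch (illustrative): once an EmailEngine has been added by init_engines(),
    # other components can send mail through the main engine; the SMTP credentials come
    # from SETTINGS["email.server"/"email.port"/"email.username"/"email.password"]:
    #
    #   main_engine.send_email("Order rejected", "details...", "ops@example.com")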
|
test_fetcher.py
|
import tempfile
import os.path as op
import sys
import os
import numpy.testing as npt
from nibabel.tmpdirs import TemporaryDirectory
import dipy.data.fetcher as fetcher
from dipy.data import SPHERE_FILES
from threading import Thread
if sys.version_info[0] < 3:
from SimpleHTTPServer import SimpleHTTPRequestHandler # Python 2
from SocketServer import TCPServer as HTTPServer
from urllib import pathname2url
else:
    from http.server import HTTPServer, SimpleHTTPRequestHandler  # Python 3
    from urllib.request import pathname2url
    from importlib import reload  # reload() is a builtin only in Python 2
def test_check_md5():
fd, fname = tempfile.mkstemp()
stored_md5 = fetcher._get_file_md5(fname)
# If all is well, this shouldn't return anything:
npt.assert_equal(fetcher.check_md5(fname, stored_md5), None)
# If None is provided as input, it should silently not check either:
npt.assert_equal(fetcher.check_md5(fname, None), None)
# Otherwise, it will raise its exception class:
npt.assert_raises(fetcher.FetcherError, fetcher.check_md5, fname, 'foo')
def test_make_fetcher():
symmetric362 = SPHERE_FILES['symmetric362']
with TemporaryDirectory() as tmpdir:
stored_md5 = fetcher._get_file_md5(symmetric362)
# create local HTTP Server
testfile_folder = op.split(symmetric362)[0] + os.sep
testfile_url = 'file:' + pathname2url(testfile_folder)
test_server_url = "http://127.0.0.1:8000/"
print(testfile_url)
print(symmetric362)
current_dir = os.getcwd()
# change pwd to directory containing testfile.
os.chdir(testfile_folder)
server = HTTPServer(('localhost', 8000), SimpleHTTPRequestHandler)
server_thread = Thread(target=server.serve_forever)
        server_thread.daemon = True
server_thread.start()
# test make_fetcher
sphere_fetcher = fetcher._make_fetcher("sphere_fetcher",
tmpdir, testfile_url,
[op.sep +
op.split(symmetric362)[-1]],
["sphere_name"],
md5_list=[stored_md5])
try:
sphere_fetcher()
except Exception as e:
print(e)
# stop local HTTP Server
server.shutdown()
assert op.isfile(op.join(tmpdir, "sphere_name"))
npt.assert_equal(fetcher._get_file_md5(op.join(tmpdir, "sphere_name")),
stored_md5)
# stop local HTTP Server
server.shutdown()
# change to original working directory
os.chdir(current_dir)
def test_fetch_data():
symmetric362 = SPHERE_FILES['symmetric362']
with TemporaryDirectory() as tmpdir:
md5 = fetcher._get_file_md5(symmetric362)
bad_md5 = '8' * len(md5)
newfile = op.join(tmpdir, "testfile.txt")
# Test that the fetcher can get a file
testfile_url = symmetric362
print(testfile_url)
testfile_dir, testfile_name = op.split(testfile_url)
# create local HTTP Server
test_server_url = "http://127.0.0.1:8001/" + testfile_name
current_dir = os.getcwd()
# change pwd to directory containing testfile.
os.chdir(testfile_dir + os.sep)
# use different port as shutdown() takes time to release socket.
server = HTTPServer(('localhost', 8001), SimpleHTTPRequestHandler)
server_thread = Thread(target=server.serve_forever)
        server_thread.daemon = True
server_thread.start()
files = {"testfile.txt": (test_server_url, md5)}
try:
fetcher.fetch_data(files, tmpdir)
except Exception as e:
print(e)
# stop local HTTP Server
server.shutdown()
npt.assert_(op.exists(newfile))
# Test that the file is replaced when the md5 doesn't match
with open(newfile, 'a') as f:
f.write("some junk")
try:
fetcher.fetch_data(files, tmpdir)
except Exception as e:
print(e)
# stop local HTTP Server
server.shutdown()
npt.assert_(op.exists(newfile))
npt.assert_equal(fetcher._get_file_md5(newfile), md5)
# Test that an error is raised when the md5 checksum of the download
# file does not match the expected value
files = {"testfile.txt": (test_server_url, bad_md5)}
npt.assert_raises(fetcher.FetcherError,
fetcher.fetch_data, files, tmpdir)
# stop local HTTP Server
server.shutdown()
# change to original working directory
os.chdir(current_dir)
def test_dipy_home():
test_path = 'TEST_PATH'
if 'DIPY_HOME' in os.environ:
old_home = os.environ['DIPY_HOME']
del os.environ['DIPY_HOME']
else:
old_home = None
reload(fetcher)
npt.assert_string_equal(fetcher.dipy_home,
op.join(os.path.expanduser('~'), '.dipy'))
os.environ['DIPY_HOME'] = test_path
reload(fetcher)
npt.assert_string_equal(fetcher.dipy_home, test_path)
# return to previous state
if old_home:
os.environ['DIPY_HOME'] = old_home
|
connections.py
|
import socket
import queue
import threading
import logging
import binascii
from abc import ABC, abstractmethod
from udsoncan.Request import Request
from udsoncan.Response import Response
from udsoncan.exceptions import TimeoutException
class BaseConnection(ABC):
def __init__(self, name=None):
if name is None:
self.name = 'Connection'
else:
self.name = 'Connection[%s]' % (name)
self.logger = logging.getLogger(self.name)
def send(self, data):
"""Sends data to the underlying transport protocol
:param data: The data or object to send. If a Request or Response is given, the value returned by get_payload() will be sent.
:type data: bytes, Request, Response
:returns: None
"""
if isinstance(data, Request) or isinstance(data, Response):
payload = data.get_payload()
else :
payload = data
self.logger.debug('Sending %d bytes : [%s]' % (len(payload), binascii.hexlify(payload) ))
self.specific_send(payload)
def wait_frame(self, timeout=2, exception=False):
"""Waits for the reception of a frame of data from the underlying transport protocol
        :param timeout: The maximum amount of time to wait before giving up, in seconds
        :type timeout: int
:param exception: Boolean value indicating if this function may return exceptions.
When ``True``, all exceptions may be raised, including ``TimeoutException``
When ``False``, all exceptions will be logged as ``DEBUG`` and ``None`` will be returned.
:type exception: bool
:returns: Received data
:rtype: bytes or None
"""
try:
frame = self.specific_wait_frame(timeout=timeout)
except Exception as e:
self.logger.debug('No data received: [%s] - %s ' % (e.__class__.__name__, str(e)))
if exception == True:
raise
else:
frame = None
if frame is not None:
self.logger.debug('Received %d bytes : [%s]' % (len(frame), binascii.hexlify(frame) ))
return frame
def __enter__(self):
return self
@abstractmethod
def specific_send(self, payload):
"""The implementation of the send method.
:param payload: Data to send
:type payload: bytes
:returns: None
"""
pass
@abstractmethod
def specific_wait_frame(self, timeout=2):
"""The implementation of the ``wait_frame`` method.
:param timeout: The maximum amount of time to wait before giving up
:type timeout: int
:returns: Received data
:rtype: bytes or None
"""
pass
@abstractmethod
def open(self):
""" Set up the connection object.
:returns: None
"""
pass
@abstractmethod
def close(self):
""" Close the connection object
:returns: None
"""
pass
@abstractmethod
def empty_rxqueue(self):
""" Empty all unread data in the reception buffer.
:returns: None
"""
pass
def __exit__(self, type, value, traceback):
pass
class DoIPConnection(BaseConnection):
    """
    Placeholder for a Diagnostics-over-IP (DoIP) connection; every method is a no-op stub.
    """
    def __init__(self):
        pass
def open(self):
return self
def close(self):
pass
def empty_rxqueue(self):
pass
def specific_send(self, payload):
pass
def specific_wait_frame(self, timeout=2):
pass
class SocketConnection(BaseConnection):
"""
Sends and receives data through a socket.
:param sock: The socket to use. This socket must be bound and ready to use. Only ``send()`` and ``recv()`` will be called by this Connection
:type sock: socket.socket
:param bufsize: Maximum buffer size of the socket, this value is passed to ``recv()``
:type bufsize: int
:param name: This name is included in the logger name so that its output can be redirected. The logger name will be ``Connection[<name>]``
:type name: string
"""
def __init__(self, sock, bufsize=4095, name=None):
BaseConnection.__init__(self, name)
self.rxqueue = queue.Queue()
self.exit_requested = False
self.opened = False
self.rxthread = threading.Thread(target=self.rxthread_task)
self.sock = sock
self.sock.settimeout(0.1) # for recv
self.bufsize=bufsize
def open(self):
self.exit_requested = False
self.rxthread.start()
self.opened = True
self.logger.info('Connection opened')
return self
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def is_open(self):
return self.opened
def rxthread_task(self):
while not self.exit_requested:
try:
data = self.sock.recv(self.bufsize)
if data is not None:
self.rxqueue.put(data)
except socket.timeout:
pass
except Exception:
self.exit_requested = True
def close(self):
self.exit_requested = True
self.opened = False
self.logger.info('Connection closed')
def specific_send(self, payload):
self.sock.send(payload)
def specific_wait_frame(self, timeout=2):
if not self.opened:
raise RuntimeError("Connection is not open")
timedout = False
frame = None
try:
frame = self.rxqueue.get(block=True, timeout=timeout)
except queue.Empty:
timedout = True
if timedout:
raise TimeoutException("Did not received frame in time (timeout=%s sec)" % timeout)
return frame
def empty_rxqueue(self):
while not self.rxqueue.empty():
self.rxqueue.get()
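    # Usage sketch (illustrative; `s` is assumed to be an already-connected socket):
    #
    #   conn = SocketConnection(s, bufsize=4095, name='ecu')
    #   conn.open()
    #   conn.send(b'\x10\x03')               # raw bytes, or a Request/Response object
    #   frame = conn.wait_frame(timeout=2)   # None on timeout unless exception=True
    #   conn.close()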
class IsoTPConnection(BaseConnection):
"""
Sends and receives data through an ISO-TP socket. Makes cleaner code than SocketConnection but offers no additional functionality.
The `isotp module <https://github.com/pylessard/python-can-isotp>`_ must be installed in order to use this connection
:param interface: The can interface to use (example: `can0`)
:type interface: string
:param rxid: The reception CAN id
:type rxid: int
:param txid: The transmission CAN id
:type txid: int
:param name: This name is included in the logger name so that its output can be redirected. The logger name will be ``Connection[<name>]``
:type name: string
:param tpsock: An optional ISO-TP socket to use instead of creating one.
:type tpsock: isotp.socket
"""
def __init__(self, interface, rxid, txid, name=None, tpsock=None):
import isotp
BaseConnection.__init__(self, name)
self.interface=interface
self.rxid=rxid
self.txid=txid
self.rxqueue = queue.Queue()
self.exit_requested = False
self.opened = False
self.rxthread = threading.Thread(target=self.rxthread_task)
self.tpsock = isotp.socket(timeout=0.1) if tpsock is None else tpsock
def open(self):
self.tpsock.bind(self.interface, rxid=self.rxid, txid=self.txid)
self.exit_requested = False
self.rxthread.start()
self.opened = True
self.logger.info('Connection opened')
return self
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def is_open(self):
return self.tpsock.bound
def rxthread_task(self):
while not self.exit_requested:
try:
data = self.tpsock.recv()
if data is not None:
self.rxqueue.put(data)
except socket.timeout:
pass
except Exception:
self.exit_requested = True
def close(self):
self.exit_requested = True
self.tpsock.close()
self.opened = False
self.logger.info('Connection closed')
def specific_send(self, payload):
self.tpsock.send(payload)
def specific_wait_frame(self, timeout=2):
if not self.opened:
raise RuntimeError("Connection is not open")
timedout = False
frame = None
try:
frame = self.rxqueue.get(block=True, timeout=timeout)
except queue.Empty:
timedout = True
if timedout:
raise TimeoutException("Did not received ISOTP frame in time (timeout=%s sec)" % timeout)
return frame
def empty_rxqueue(self):
while not self.rxqueue.empty():
self.rxqueue.get()
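    # Usage sketch (illustrative; requires the `isotp` module, a CAN interface and
    # CAN ids that match the target ECU):
    #
    #   conn = IsoTPConnection('can0', rxid=0x7E8, txid=0x7E0, name='uds')
    #   conn.open()
    #   conn.send(b'\x3E\x00')               # TesterPresent, for example
    #   response = conn.wait_frame(timeout=2)
    #   conn.close()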
class QueueConnection(BaseConnection):
"""
Sends and receives data using 2 Python native queues.
- ``MyConnection.fromuserqueue`` : Data read from this queue when ``wait_frame`` is called
- ``MyConnection.touserqueue`` : Data written to this queue when ``send`` is called
:param mtu: Optional maximum frame size. Messages will be truncated to this size
:type mtu: int
:param name: This name is included in the logger name so that its output can be redirected. The logger name will be ``Connection[<name>]``
:type name: string
"""
def __init__(self, name=None, mtu=4095):
BaseConnection.__init__(self, name)
self.fromuserqueue = queue.Queue() # Client reads from this queue. Other end is simulated
self.touserqueue = queue.Queue() # Client writes to this queue. Other end is simulated
self.opened = False
self.mtu = mtu
def open(self):
self.opened = True
self.logger.info('Connection opened')
return self
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def is_open(self):
return self.opened
def close(self):
self.empty_rxqueue()
self.empty_txqueue()
self.opened = False
self.logger.info('Connection closed')
def specific_send(self, payload):
if self.mtu is not None:
if len(payload) > self.mtu:
self.logger.warning("Truncating payload to be set to a length of %d" % (self.mtu))
payload = payload[0:self.mtu]
self.touserqueue.put(payload)
def specific_wait_frame(self, timeout=2):
if not self.opened:
            raise RuntimeError("Connection is not open")
timedout = False
frame = None
try:
frame = self.fromuserqueue.get(block=True, timeout=timeout)
except queue.Empty:
timedout = True
if timedout:
raise TimeoutException("Did not receive frame from user queue in time (timeout=%s sec)" % timeout)
if self.mtu is not None:
if frame is not None and len(frame) > self.mtu:
self.logger.warning("Truncating received payload to a length of %d" % (self.mtu))
frame = frame[0:self.mtu]
return frame
def empty_rxqueue(self):
while not self.fromuserqueue.empty():
self.fromuserqueue.get()
def empty_txqueue(self):
while not self.touserqueue.empty():
self.touserqueue.get()
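    # Usage sketch (illustrative): the simulated/test side reads requests from
    # `touserqueue` and pushes responses into `fromuserqueue`:
    #
    #   conn = QueueConnection(name='stub')
    #   conn.open()
    #   conn.send(b'\x10\x01')
    #   request = conn.touserqueue.get(timeout=1)   # what the client just sent
    #   conn.fromuserqueue.put(b'\x50\x01')         # simulated ECU reply
    #   reply = conn.wait_frame(timeout=1)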
|
ydl.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from flask_restful import Resource, fields, marshal, reqparse, inputs
from auth import basic_auth
import os
import shutil
import tempfile
import youtube_dl
from pathlib import Path
from os import listdir, stat
from os.path import isfile, join, relpath, dirname
from queue import Queue
from threading import Thread
# input
parser = reqparse.RequestParser()
parser.add_argument('url', type=str, required=True, help='url to be downloaded')
parser.add_argument('audio', type=inputs.boolean, default=False, help='audio only')
parser.add_argument(
'acodec', choices=('aac', 'flac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav'),
default='mp3', help='preferred audio codec when extracting'
)
# output
fields_on_success = {
'success': fields.Boolean,
'queue': fields.List(fields.String),
'completed': fields.List(fields.String),
'incomplete': fields.List(fields.String),
}
fields_on_failed = {
'success': fields.Boolean,
'message': fields.String,
}
class YoutubeDLAPI(Resource):
decorators = [basic_auth.login_required]
def get(self):
try:
all_files = [str(f) for f in Path(dl_path).glob('**/*') if isfile(f)]
completed = [f for f in all_files if 'incomplete_' not in f]
incomplete = [f for f in all_files if 'incomplete_' in f]
mtime = lambda f: stat(f).st_mtime
return marshal({
'success': True,
'queue': [q['url'] for q in list(dl_q.queue)],
'completed': [relpath(f, dl_path) for f in sorted(completed, key=mtime, reverse=True)],
'incomplete': [relpath(f, dl_path) for f in sorted(incomplete, key=mtime, reverse=True)],
}, fields_on_success), 200
except Exception as e:
return marshal({
'success': False,
'message': str(e),
}, fields_on_failed), 200
def post(self):
args = parser.parse_args()
dl_q.put(args)
return self.get()
def dl_worker():
while not done:
args = dl_q.get()
download(args)
dl_q.task_done()
def parse_request_args(args):
# format
if args.get('audio', False):
ydl_format = 'bestaudio/best'
else:
ydl_format = os.getenv('YTBDL_F', 'bestvideo[ext=mp4][height<=1080]+bestaudio[ext=m4a]/best[ext=mp4]/best')
# postprocessor
postprocessor = []
if args.get('audio', False):
postprocessor.append({
'key': 'FFmpegExtractAudio',
'preferredcodec': args['acodec'],
})
# output template
outtmpl = os.getenv('YTBDL_O', '%(uploader)s/%(title)s.%(ext)s')
return args['url'], {
'format': ydl_format,
'postprocessors': postprocessor,
'outtmpl': outtmpl,
'ignoreerrors': os.getenv('YTBDL_I','false').lower() == 'true',
}
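# Illustrative environment overrides read above (the values shown are examples only,
# not the fallbacks hard-coded in parse_request_args):
#
#   YTBDL_F='bestvideo+bestaudio/best'   # youtube-dl format selector
#   YTBDL_O='%(title)s.%(ext)s'          # output template, created under the temp dir
#   YTBDL_I='true'                       # ignore youtube-dl download errors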
def download(args):
tmpdir = tempfile.mkdtemp(prefix='incomplete_', dir=dl_path)
url, ydl_options = parse_request_args(args)
ydl_options.update({'outtmpl': join(tmpdir, ydl_options['outtmpl'])})
with youtube_dl.YoutubeDL(ydl_options) as ytb_dl:
try:
ytb_dl.download([url])
for f in Path(tmpdir).glob('**/*'):
subdir = join(dl_path, dirname(relpath(f, tmpdir)))
os.makedirs(subdir, exist_ok=True)
shutil.move(f, join(dl_path, relpath(f, tmpdir)))
except Exception as e:
print(e)
print('Consider setting "YTBDL_I=true" to ignore youtube-dl errors')
print('Cleaning incomplete directory {}'.format(tmpdir))
try:
os.rmdir(tmpdir)
except Exception as e:
print('Failed to delete incomplete directory: {}'.format(e))
dl_path = '/youtube-dl'
dl_q = Queue()
done = False
dl_thread = Thread(target=dl_worker)
dl_thread.start()
|
user.py
|
from util.util import *
from flask import Blueprint
user_bp = Blueprint('user_bp', __name__)
@user_bp.route('/home')
def home():
return getUUID()
@user_bp.route('/register', methods=["POST"])
def user_register():
try:
client_id = request.json["client_id"]
key = request.json["api_key"]
name = request.json["name"]
email = request.json["email"]
password = request.json["password"]
res = org_api_verifiy(client_id, key)
if res:
cur().execute(
"select * from users where org_id=%s and email=%s", (client_id, email))
res = cur().fetchall()
if len(res) != 0:
return jsonify(error="Email Already Registered"), 400
u_id = getUUID()
passd = bcrypt.generate_password_hash(password).decode('utf-8')
cur().execute("insert into users values(%s,%s,%s,%s,%s,%s)",
(client_id, u_id, name, email, passd, "false"))
conn().commit()
return jsonify(message="USER REGISTERED", email_verify="true"), 200
else:
return jsonify(error="unauthorized access"), 401
except psycopg2.errors.UniqueViolation as e:
return jsonify(error="Email Already Registered"), 400
except KeyError as e:
return jsonify(error="missing data"), 400
except Exception as e:
print(e)
conn().rollback()
return jsonify(error="malformed data"), 400
@user_bp.route('/login', methods=["POST"])
def user_login():
try:
client_id = request.json["client_id"]
key = request.json["api_key"]
email = request.json["email"]
password = request.json["password"]
res = org_api_verifiy(client_id, key)
if res:
cur().execute(
"select * from users where org_id=%s and email=%s", (client_id, email))
res = cur().fetchall()
if len(res) == 0:
return jsonify(error="incorrect credentials"), 401
res = res[0]
if bcrypt.check_password_hash(res[4], password):
# print(res)
if not res[6]:
return jsonify(error="EMAIL NOT VERIFIED", email=email), 403
if res[5]:
cur().execute(
"select * from user_2_factor where org_id=%s and u_id=%s", (client_id, res[1]))
res1 = cur().fetchall()
if len(res1) != 0:
res1 = res1[0]
if float(time.time())-float(res1[4]) > 60:
cur().execute(
"delete from user_2_factor where org_id=%s and u_id=%s", (client_id, res[1]))
conn().commit()
else:
return jsonify(error="2FA has been already initialized please complete it first or wait for it to expire"), 400
tim = time.time()
cur().execute("insert into user_2_factor values(%s,%s,%s,%s,%s)",
(client_id, res[1], "LOG", "", tim))
conn().commit()
jwt_token = jwt.encode({"id": res[1], "name": res[2], "email": res[3],
"issue_time": tim, "complete": False}, secret(), algorithm="HS256").decode('utf-8')
return jsonify(token=jwt_token, two_factor=True, valid_period="60"), 200
else:
jwt_token = jwt.encode({"id": res[1], "name": res[2], "email": res[3], "issue_time": time.time(
), "complete": True}, secret(), algorithm="HS256").decode('utf-8')
return jsonify(token=jwt_token, valid_period="3600"), 200
else:
return jsonify(error="incorrect credentials"), 401
else:
return jsonify(error="unauthorized access"), 401
except psycopg2.errors.UniqueViolation as e:
return jsonify(error="2FA has been already initialized please complete it first or wait for it to expire"), 400
except KeyError as e:
return jsonify(error="missing data"), 400
except Exception as e:
print(e)
conn().rollback()
return jsonify(error="malformed data"), 400
@user_bp.route('/init_email_verify', methods=["POST"])
def init_email_verify():
try:
client_id = request.json["client_id"]
key = request.json["api_key"]
email = request.json["email"]
res1 = org_api_verifiy(client_id, key)
if res1:
cur().execute(
"select * from users where org_id=%s and email=%s", (client_id, email))
res = cur().fetchall()
if len(res) == 0:
return jsonify(error="email doesn't exist"), 400
cur().execute("delete from code_verify where org_id=%s and email=%s and type=%s",
(client_id, email, "EMAIL"))
conn().commit()
code = getUUID()[:7]
tim = time.time()
cur().execute("insert into code_verify values(%s,%s,%s,%s,%s)",
(client_id, email, code, tim, "EMAIL"))
conn().commit()
# print(res1)
@copy_current_request_context
def work():
sendMail(res1[3], "EMAIL VERIFICATION", "CODE : " +
code+"\nCode Valid for 5 mins", [email])
thread1 = threading.Thread(target=work)
thread1.start()
return jsonify(message="VERIFICATION CODE SENT CHECK EMAIL", valid_period="300"), 200
else:
return jsonify(error="unauthorized access"), 401
except KeyError as e:
return jsonify(error="missing data"), 400
except Exception as e:
print(e)
conn().rollback()
return jsonify(error="malformed data"), 400
@user_bp.route('/email_verify', methods=["POST"])
def email_verify():
try:
client_id = request.json["client_id"]
key = request.json["api_key"]
code = request.json["code"]
email = request.json["email"]
res1 = org_api_verifiy(client_id, key)
if res1:
cur().execute("select * from code_verify where org_id=%s and code=%s and email=%s and type=%s",
(client_id, code, email, "EMAIL"))
res = cur().fetchall()
if len(res) == 0:
return jsonify(error="wrong code"), 400
res = res[0]
cur().execute(
"delete from code_verify where org_id=%s and code=%s and type=%s", (client_id, code, "EMAIL"))
if float(time.time())-float(res[3]) > 360:
conn().commit()
return jsonify(error="code expired"), 400
cur().execute(
"update users set verified=%s where org_id=%s and email=%s", (True, client_id, email))
conn().commit()
return jsonify(message="success"), 200
else:
return jsonify(error="unauthorized access"), 401
except KeyError as e:
return jsonify(error="missing data"), 400
except Exception as e:
print(e)
conn().rollback()
return jsonify(error="malformed data"), 400
@user_bp.route('/forgot_password', methods=["POST"])
def forgot_password():
try:
client_id = request.json["client_id"]
key = request.json["api_key"]
email = request.json["email"]
res1 = org_api_verifiy(client_id, key)
if res1:
cur().execute(
"select * from users where org_id=%s and email=%s", (client_id, email))
res = cur().fetchall()
if len(res) == 0:
return jsonify(error="email doesn't exist"), 400
res = res[0]
if not res[6]:
return jsonify(error="EMAIL NOT VERIFIED", email=email), 403
cur().execute(
"delete from code_verify where org_id=%s and email=%s and type=%s", (client_id, email, "PASS"))
conn().commit()
code = getUUID()[:7]
tim = time.time()
cur().execute("insert into code_verify values(%s,%s,%s,%s,%s)",
(client_id, email, code, tim, "PASS"))
conn().commit()
# print(res1)
@copy_current_request_context
def work():
sendMail(res1[3], "PASSWORD RESET", "CODE : " +
code+"\nCode Valid for 5 mins", [email])
thread1 = threading.Thread(target=work)
thread1.start()
return jsonify(message="RESET CODE SENT CHECK EMAIL", valid_period="300"), 200
else:
return jsonify(error="unauthorized access"), 401
except KeyError as e:
return jsonify(error="missing data"), 400
except Exception as e:
print(e)
conn().rollback()
return jsonify(error="malformed data"), 400
@user_bp.route('/reset_password', methods=["POST"])
def reset_password():
try:
client_id = request.json["client_id"]
key = request.json["api_key"]
code = request.json["code"]
email = request.json["email"]
password = request.json["password"]
res1 = org_api_verifiy(client_id, key)
if res1:
cur().execute("select * from code_verify where org_id=%s and code=%s and email=%s and type=%s",
(client_id, code, email, "PASS"))
res = cur().fetchall()
if len(res) == 0:
return jsonify(error="wrong code"), 400
res = res[0]
cur().execute(
"delete from code_verify where org_id=%s and code=%s and type=%s", (client_id, code, "PASS"))
if float(time.time())-float(res[3]) > 360:
conn().commit()
return jsonify(error="code expired"), 400
passd = bcrypt.generate_password_hash(password).decode('utf-8')
cur().execute(
"update users set password=%s where org_id=%s and email=%s", (passd, client_id, email))
conn().commit()
return jsonify(message="success"), 200
else:
return jsonify(error="unauthorized access"), 401
except KeyError as e:
return jsonify(error="missing data"), 400
except Exception as e:
print(e)
conn().rollback()
return jsonify(error="malformed data"), 400
@user_bp.route('/self_reset_password', methods=["POST"])
def user_self_reset_password():
try:
old_password = request.json["old_password"]
token = request.json["token"]
new_password = request.json["new_password"]
client_id = request.json["client_id"]
key = request.json["api_key"]
res = org_api_verifiy(client_id, key)
res2 = token_verify(token, 3600)
if res:
if not res2:
return jsonify(error="token invalid"), 401
cur().execute(
"select * from users where org_id=%s and email=%s", (client_id, res2["email"]))
res = cur().fetchall()
if len(res) == 0:
return jsonify(error="incorrect credentials"), 401
res = res[0]
if bcrypt.check_password_hash(res[4], old_password):
passd = bcrypt.generate_password_hash(
new_password).decode('utf-8')
cur().execute("update users set password=%s where org_id=%s and email=%s",
(passd, client_id, res2["email"]))
conn().commit()
return jsonify(message="success"), 200
else:
return jsonify(error="incorrect credentials"), 401
else:
return jsonify(error="unauthorized access"), 401
except KeyError as e:
return jsonify(error="missing data"), 400
except Exception as e:
print(e)
conn().rollback()
return jsonify(error="malformed data"), 400
@user_bp.route('/token_verify', methods=["POST"])
def user_token_verify():
try:
client_id = request.json["client_id"]
key = request.json["api_key"]
token = request.json["token"]
res = org_api_verifiy(client_id, key)
res2 = token_verify(token, 3600)
if res:
if not res2:
return jsonify(error="token invalid"), 401
else:
return jsonify(message="valid token"), 200
else:
return jsonify(error="unauthorized access"), 401
except KeyError as e:
print(e)
return jsonify(error="missing data"), 400
except Exception as e:
print(e)
return jsonify(error="malformed data"), 400
|
httpd.py
|
#!/usr/bin/env python
"""
Copyright (c) 2014-2019 Maltrail developers (https://github.com/stamparm/maltrail/)
See the file 'LICENSE' for copying permission
"""
from __future__ import print_function
import datetime
import glob
import gzip
import hashlib
import io
import json
import mimetypes
import os
import re
import socket
import subprocess
import threading
import time
import traceback
from core.addr import addr_to_int
from core.addr import int_to_addr
from core.addr import make_mask
from core.attribdict import AttribDict
from core.common import get_regex
from core.common import ipcat_lookup
from core.common import worst_asns
from core.compat import xrange
from core.enums import HTTP_HEADER
from core.settings import config
from core.settings import CONTENT_EXTENSIONS_EXCLUSIONS
from core.settings import DATE_FORMAT
from core.settings import DISABLED_CONTENT_EXTENSIONS
from core.settings import DISPOSED_NONCES
from core.settings import HTML_DIR
from core.settings import HTTP_TIME_FORMAT
from core.settings import IS_WIN
from core.settings import MAX_NOFILE
from core.settings import NAME
from core.settings import PING_RESPONSE
from core.settings import SERVER_HEADER
from core.settings import SESSION_COOKIE_NAME
from core.settings import SESSION_COOKIE_FLAG_SAMESITE
from core.settings import SESSION_EXPIRATION_HOURS
from core.settings import SESSION_ID_LENGTH
from core.settings import SESSIONS
from core.settings import UNAUTHORIZED_SLEEP_TIME
from core.settings import UNICODE_ENCODING
from core.settings import VERSION
from thirdparty import six
from thirdparty.six.moves import BaseHTTPServer as _BaseHTTPServer
from thirdparty.six.moves import http_client as _http_client
from thirdparty.six.moves import socketserver as _socketserver
from thirdparty.six.moves import urllib as _urllib
try:
# Reference: https://bugs.python.org/issue7980
# Reference: http://code-trick.com/python-bug-attribute-error-_strptime/
import _strptime
except ImportError:
pass
try:
import resource
resource.setrlimit(resource.RLIMIT_NOFILE, (MAX_NOFILE, MAX_NOFILE))
except:
pass
def start_httpd(address=None, port=None, join=False, pem=None):
"""
Starts HTTP server
"""
class ThreadingServer(_socketserver.ThreadingMixIn, _BaseHTTPServer.HTTPServer):
def server_bind(self):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_BaseHTTPServer.HTTPServer.server_bind(self)
def finish_request(self, *args, **kwargs):
try:
_BaseHTTPServer.HTTPServer.finish_request(self, *args, **kwargs)
except:
if config.SHOW_DEBUG:
traceback.print_exc()
class SSLThreadingServer(ThreadingServer):
def __init__(self, server_address, pem, HandlerClass):
import OpenSSL # python-openssl
ThreadingServer.__init__(self, server_address, HandlerClass)
ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_2_METHOD)
ctx.use_privatekey_file(pem)
ctx.use_certificate_file(pem)
self.socket = OpenSSL.SSL.Connection(ctx, socket.socket(self.address_family, self.socket_type))
self.server_bind()
self.server_activate()
def shutdown_request(self, request):
try:
request.shutdown()
except:
pass
class ReqHandler(_BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
path, query = self.path.split('?', 1) if '?' in self.path else (self.path, "")
params = {}
content = None
skip = False
if hasattr(self, "data"):
params.update(_urllib.parse.parse_qs(self.data))
if query:
params.update(_urllib.parse.parse_qs(query))
for key in params:
if params[key]:
params[key] = params[key][-1]
if path == '/':
path = "index.html"
path = path.strip('/')
extension = os.path.splitext(path)[-1].lower()
if hasattr(self, "_%s" % path):
content = getattr(self, "_%s" % path)(params)
else:
path = path.replace('/', os.path.sep)
path = os.path.abspath(os.path.join(HTML_DIR, path)).strip()
if not os.path.isfile(path) and os.path.isfile("%s.html" % path):
path = "%s.html" % path
if any((config.IP_ALIASES,)) and self.path.split('?')[0] == "/js/main.js":
content = open(path, "rb").read()
content = re.sub(r"\bvar IP_ALIASES =.+", "var IP_ALIASES = {%s};" % ", ".join('"%s": "%s"' % (_.split(':', 1)[0].strip(), _.split(':', 1)[-1].strip()) for _ in config.IP_ALIASES), content)
self.send_response(_http_client.OK)
elif ".." not in os.path.relpath(path, HTML_DIR) and os.path.isfile(path) and (extension not in DISABLED_CONTENT_EXTENSIONS or os.path.split(path)[-1] in CONTENT_EXTENSIONS_EXCLUSIONS):
mtime = time.gmtime(os.path.getmtime(path))
if_modified_since = self.headers.get(HTTP_HEADER.IF_MODIFIED_SINCE)
if if_modified_since and extension not in (".htm", ".html"):
if_modified_since = [_ for _ in if_modified_since.split(';') if _.upper().endswith("GMT")][0]
if time.mktime(mtime) <= time.mktime(time.strptime(if_modified_since, HTTP_TIME_FORMAT)):
self.send_response(_http_client.NOT_MODIFIED)
self.send_header(HTTP_HEADER.CONNECTION, "close")
skip = True
if not skip:
content = open(path, "rb").read()
last_modified = time.strftime(HTTP_TIME_FORMAT, mtime)
self.send_response(_http_client.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, mimetypes.guess_type(path)[0] or "application/octet-stream")
self.send_header(HTTP_HEADER.LAST_MODIFIED, last_modified)
# For CSP policy directives see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/
self.send_header(HTTP_HEADER.CONTENT_SECURITY_POLICY, "default-src 'self'; style-src 'self' 'unsafe-inline'; img-src *; " +
"script-src 'self' 'unsafe-eval' https://stat.ripe.net; " +
"frame-src *; object-src 'none'; block-all-mixed-content;")
if extension not in (".htm", ".html"):
self.send_header(HTTP_HEADER.EXPIRES, "Sun, 17-Jan-2038 19:14:07 GMT") # Reference: http://blog.httpwatch.com/2007/12/10/two-simple-rules-for-http-caching/
self.send_header(HTTP_HEADER.CACHE_CONTROL, "max-age=3600, must-revalidate") # Reference: http://stackoverflow.com/a/5084555
else:
self.send_header(HTTP_HEADER.CACHE_CONTROL, "no-cache")
else:
self.send_response(_http_client.NOT_FOUND)
self.send_header(HTTP_HEADER.CONNECTION, "close")
                    content = '<!DOCTYPE html><html lang="en"><head><title>404 Not Found</title></head><body><h1>Not Found</h1><p>The requested URL %s was not found on this server.</p></body></html>' % self.path.split('?')[0]
if content is not None:
if isinstance(content, six.text_type):
content = content.encode(UNICODE_ENCODING)
for match in re.finditer(b"<\\!(\\w+)\\!>", content):
name = match.group(1).decode(UNICODE_ENCODING)
_ = getattr(self, "_%s" % name.lower(), None)
if _:
content = self._format(content, **{ name: _() })
if "gzip" in self.headers.get(HTTP_HEADER.ACCEPT_ENCODING):
self.send_header(HTTP_HEADER.CONTENT_ENCODING, "gzip")
_ = six.BytesIO()
compress = gzip.GzipFile("", "w+b", 9, _)
compress._stream = _
compress.write(content)
compress.flush()
compress.close()
content = compress._stream.getvalue()
self.send_header(HTTP_HEADER.CONTENT_LENGTH, str(len(content)))
self.end_headers()
try:
if content:
self.wfile.write(content)
self.wfile.flush()
except:
pass
def do_POST(self):
length = self.headers.get(HTTP_HEADER.CONTENT_LENGTH)
data = self.rfile.read(int(length)).decode(UNICODE_ENCODING)
data = _urllib.parse.unquote_plus(data)
self.data = data
self.do_GET()
def get_session(self):
retval = None
cookie = self.headers.get(HTTP_HEADER.COOKIE)
if cookie:
match = re.search(r"%s\s*=\s*([^;]+)" % SESSION_COOKIE_NAME, cookie)
if match:
session = match.group(1)
if session in SESSIONS:
if SESSIONS[session].client_ip != self.client_address[0]:
pass
elif SESSIONS[session].expiration > time.time():
retval = SESSIONS[session]
else:
del SESSIONS[session]
if retval is None and not config.USERS:
retval = AttribDict({"username": "?"})
return retval
def delete_session(self):
cookie = self.headers.get(HTTP_HEADER.COOKIE)
if cookie:
match = re.search(r"%s=(.+)" % SESSION_COOKIE_NAME, cookie)
if match:
session = match.group(1)
if session in SESSIONS:
del SESSIONS[session]
def version_string(self):
return SERVER_HEADER
def end_headers(self):
if not hasattr(self, "_headers_ended"):
_BaseHTTPServer.BaseHTTPRequestHandler.end_headers(self)
self._headers_ended = True
def log_message(self, format, *args):
return
def finish(self):
try:
_BaseHTTPServer.BaseHTTPRequestHandler.finish(self)
except:
if config.SHOW_DEBUG:
traceback.print_exc()
def _version(self):
return VERSION
def _logo(self):
if config.HEADER_LOGO:
retval = config.HEADER_LOGO
else:
retval = '<img src="images/mlogo.png" style="width: 25px">altrail'
return retval
def _format(self, content, **params):
if content:
for key, value in params.items():
content = content.replace(b"<!%s!>" % key.encode(UNICODE_ENCODING), value.encode(UNICODE_ENCODING))
return content
def _login(self, params):
valid = False
if params.get("username") and params.get("hash") and params.get("nonce"):
if params.get("nonce") not in DISPOSED_NONCES:
DISPOSED_NONCES.add(params.get("nonce"))
for entry in (config.USERS or []):
entry = re.sub(r"\s", "", entry)
username, stored_hash, uid, netfilter = entry.split(':')
try:
uid = int(uid)
except ValueError:
uid = None
if username == params.get("username"):
try:
if params.get("hash") == hashlib.sha256((stored_hash.strip() + params.get("nonce")).encode(UNICODE_ENCODING)).hexdigest():
valid = True
break
except:
if config.SHOW_DEBUG:
traceback.print_exc()
if valid:
_ = os.urandom(SESSION_ID_LENGTH)
session_id = _.hex() if hasattr(_, "hex") else _.encode("hex")
expiration = time.time() + 3600 * SESSION_EXPIRATION_HOURS
self.send_response(_http_client.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
cookie = "%s=%s; expires=%s; path=/; HttpOnly" % (SESSION_COOKIE_NAME, session_id, time.strftime(HTTP_TIME_FORMAT, time.gmtime(expiration)))
if config.USE_SSL:
cookie += "; Secure"
if SESSION_COOKIE_FLAG_SAMESITE:
cookie += "; SameSite=strict"
self.send_header(HTTP_HEADER.SET_COOKIE, cookie)
if netfilter in ("", '*', "::", "0.0.0.0/0"):
netfilters = None
else:
addresses = set()
netmasks = set()
for item in set(re.split(r"[;,]", netfilter)):
item = item.strip()
if '/' in item:
_ = item.split('/')[-1]
if _.isdigit() and int(_) >= 16:
lower = addr_to_int(item.split('/')[0])
mask = make_mask(int(_))
upper = lower | (0xffffffff ^ mask)
while lower <= upper:
addresses.add(int_to_addr(lower))
lower += 1
else:
netmasks.add(item)
elif '-' in item:
_ = item.split('-')
lower, upper = addr_to_int(_[0]), addr_to_int(_[1])
while lower <= upper:
addresses.add(int_to_addr(lower))
lower += 1
elif re.search(r"\d+\.\d+\.\d+\.\d+", item):
addresses.add(item)
netfilters = netmasks
if addresses:
netfilters.add(get_regex(addresses))
SESSIONS[session_id] = AttribDict({"username": username, "uid": uid, "netfilters": netfilters, "mask_custom": config.ENABLE_MASK_CUSTOM and uid >= 1000, "expiration": expiration, "client_ip": self.client_address[0]})
else:
time.sleep(UNAUTHORIZED_SLEEP_TIME)
self.send_response(_http_client.UNAUTHORIZED)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
content = "Login %s" % ("success" if valid else "failed")
if not IS_WIN:
try:
subprocess.check_output("logger -p auth.info -t \"%s[%d]\" \"%s password for %s from %s port %s\"" % (NAME.lower(), os.getpid(), "Accepted" if valid else "Failed", params.get("username"), self.client_address[0], self.client_address[1]), stderr=subprocess.STDOUT, shell=True)
except Exception:
if config.SHOW_DEBUG:
traceback.print_exc()
return content
def _logout(self, params):
self.delete_session()
self.send_response(_http_client.FOUND)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.LOCATION, "/")
def _whoami(self, params):
session = self.get_session()
username = session.username if session else ""
self.send_response(_http_client.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
return username
def _check_ip(self, params):
session = self.get_session()
if session is None:
self.send_response(_http_client.UNAUTHORIZED)
self.send_header(HTTP_HEADER.CONNECTION, "close")
return None
self.send_response(_http_client.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
try:
result_worst = worst_asns(params.get("address"))
if result_worst:
result_ipcat = result_worst
else:
_ = (ipcat_lookup(params.get("address")) or "").lower().split(' ')
result_ipcat = _[1] if _[0] == 'the' else _[0]
return ("%s" if not params.get("callback") else "%s(%%s)" % params.get("callback")) % json.dumps({"ipcat": result_ipcat, "worst_asns": str(result_worst is not None).lower()})
except:
if config.SHOW_DEBUG:
traceback.print_exc()
def _trails(self, params):
self.send_response(_http_client.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
return open(config.TRAILS_FILE, "rb").read()
def _ping(self, params):
self.send_response(_http_client.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
return PING_RESPONSE
def _events(self, params):
session = self.get_session()
if session is None:
self.send_response(_http_client.UNAUTHORIZED)
self.send_header(HTTP_HEADER.CONNECTION, "close")
return None
start, end, size, total = None, None, -1, None
content = None
log_exists = False
dates = params.get("date", "")
if ".." in dates:
pass
elif '_' not in dates:
try:
date = datetime.datetime.strptime(dates, "%Y-%m-%d").strftime("%Y-%m-%d")
event_log_path = os.path.join(config.LOG_DIR, "%s.log" % date)
if os.path.exists(event_log_path):
range_handle = open(event_log_path, "rb")
log_exists = True
except ValueError:
print("[!] invalid date format in request")
log_exists = False
else:
logs_data = ""
date_interval = dates.split("_", 1)
try:
start_date = datetime.datetime.strptime(date_interval[0], "%Y-%m-%d").date()
end_date = datetime.datetime.strptime(date_interval[1], "%Y-%m-%d").date()
for i in xrange(int((end_date - start_date).days) + 1):
date = start_date + datetime.timedelta(i)
event_log_path = os.path.join(config.LOG_DIR, "%s.log" % date.strftime("%Y-%m-%d"))
if os.path.exists(event_log_path):
log_handle = open(event_log_path, "rb")
logs_data += log_handle.read()
log_handle.close()
range_handle = io.BytesIO(logs_data)
log_exists = True
except ValueError:
print("[!] invalid date format in request")
log_exists = False
if log_exists:
range_handle.seek(0, 2)
total = range_handle.tell()
range_handle.seek(0)
if self.headers.get(HTTP_HEADER.RANGE):
match = re.search(r"bytes=(\d+)-(\d+)", self.headers[HTTP_HEADER.RANGE])
if match:
start, end = int(match.group(1)), int(match.group(2))
max_size = end - start + 1
end = min(total - 1, end)
size = end - start + 1
if start == 0 or not session.range_handle:
session.range_handle = range_handle
if session.netfilters is None and not session.mask_custom:
session.range_handle.seek(start)
self.send_response(_http_client.PARTIAL_CONTENT)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
self.send_header(HTTP_HEADER.CONTENT_RANGE, "bytes %d-%d/%d" % (start, end, total))
content = session.range_handle.read(size)
else:
self.send_response(_http_client.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
buffer, addresses, netmasks, regex = io.StringIO(), set(), [], ""
for netfilter in session.netfilters or []:
if not netfilter:
continue
if '/' in netfilter:
netmasks.append(netfilter)
elif re.search(r"\A[\d.]+\Z", netfilter):
addresses.add(netfilter)
elif "\\." in netfilter:
regex = r"\b(%s)\b" % netfilter
else:
print("[!] invalid network filter '%s'" % netfilter)
return
for line in session.range_handle:
display = session.netfilters is None
ip = None
line = line.decode(UNICODE_ENCODING, "ignore")
if regex:
match = re.search(regex, line)
if match:
ip = match.group(1)
display = True
if not display and (addresses or netmasks):
for match in re.finditer(r"\b(\d+\.\d+\.\d+\.\d+)\b", line):
if not display:
ip = match.group(1)
else:
break
if ip in addresses:
display = True
break
elif netmasks:
for _ in netmasks:
prefix, mask = _.split('/')
if addr_to_int(ip) & make_mask(int(mask)) == addr_to_int(prefix):
addresses.add(ip)
display = True
break
if session.mask_custom and "(custom)" in line:
line = re.sub(r'("[^"]+"|[^ ]+) \(custom\)', "- (custom)", line)
if display:
if ",%s" % ip in line or "%s," % ip in line:
line = re.sub(r" ([\d.,]+,)?%s(,[\d.,]+)? " % re.escape(ip), " %s " % ip, line)
buffer.write(line)
if buffer.tell() >= max_size:
break
content = buffer.getvalue()
end = start + len(content) - 1
self.send_header(HTTP_HEADER.CONTENT_RANGE, "bytes %d-%d/%d" % (start, end, end + 1 + max_size * (len(content) >= max_size)))
if len(content) < max_size:
session.range_handle.close()
session.range_handle = None
if size == -1:
self.send_response(_http_client.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
self.end_headers()
with range_handle as f:
while True:
data = f.read(io.DEFAULT_BUFFER_SIZE)
if not data:
break
else:
self.wfile.write(data)
else:
self.send_response(_http_client.OK) # instead of _http_client.NO_CONTENT (compatibility reasons)
self.send_header(HTTP_HEADER.CONNECTION, "close")
if self.headers.get(HTTP_HEADER.RANGE):
self.send_header(HTTP_HEADER.CONTENT_RANGE, "bytes 0-0/0")
return content
def _counts(self, params):
counts = {}
session = self.get_session()
if session is None:
self.send_response(_http_client.UNAUTHORIZED)
self.send_header(HTTP_HEADER.CONNECTION, "close")
return None
self.send_response(_http_client.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "application/json")
match = re.search(r"\d+\-\d+\-\d+", params.get("from", ""))
if match:
min_ = datetime.datetime.strptime(match.group(0), DATE_FORMAT)
else:
min_ = datetime.datetime.fromtimestamp(0)
match = re.search(r"\d+\-\d+\-\d+", params.get("to", ""))
if match:
max_ = datetime.datetime.strptime(match.group(0), DATE_FORMAT)
else:
max_ = datetime.datetime.now()
min_ = min_.replace(hour=0, minute=0, second=0, microsecond=0)
max_ = max_.replace(hour=23, minute=59, second=59, microsecond=999999)
for filepath in sorted(glob.glob(os.path.join(config.LOG_DIR, "*.log"))):
filename = os.path.basename(filepath)
if not re.search(r"\A\d{4}-\d{2}-\d{2}\.log\Z", filename):
continue
try:
current = datetime.datetime.strptime(os.path.splitext(filename)[0], DATE_FORMAT)
except:
if config.SHOW_DEBUG:
traceback.print_exc()
else:
if min_ <= current <= max_:
timestamp = int(time.mktime(current.timetuple()))
size = os.path.getsize(filepath)
with open(filepath, "rb") as f:
content = f.read(io.DEFAULT_BUFFER_SIZE)
if size >= io.DEFAULT_BUFFER_SIZE:
total = 1.0 * content.count(b'\n') * size / io.DEFAULT_BUFFER_SIZE
counts[timestamp] = int(round(total / 100) * 100)
else:
counts[timestamp] = content.count(b'\n')
return json.dumps(counts)
class SSLReqHandler(ReqHandler):
def setup(self):
self.connection = self.request
self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
# IPv6 support
if ':' in (address or ""):
address = address.strip("[]")
_BaseHTTPServer.HTTPServer.address_family = socket.AF_INET6
# Reference: https://github.com/squeaky-pl/zenchmarks/blob/master/vendor/twisted/internet/tcp.py
_AI_NUMERICSERV = getattr(socket, "AI_NUMERICSERV", 0)
_NUMERIC_ONLY = socket.AI_NUMERICHOST | _AI_NUMERICSERV
_address = socket.getaddrinfo(address, int(port) if str(port or "").isdigit() else 0, 0, 0, 0, _NUMERIC_ONLY)[0][4]
else:
_address = (address or '', int(port) if str(port or "").isdigit() else 0)
try:
if pem:
server = SSLThreadingServer(_address, pem, SSLReqHandler)
else:
server = ThreadingServer(_address, ReqHandler)
except Exception as ex:
if "Address already in use" in str(ex):
exit("[!] another instance already running")
elif "Name or service not known" in str(ex):
exit("[!] invalid configuration value for 'HTTP_ADDRESS' ('%s')" % config.HTTP_ADDRESS)
elif "Cannot assign requested address" in str(ex):
exit("[!] can't use configuration value for 'HTTP_ADDRESS' ('%s')" % config.HTTP_ADDRESS)
else:
raise
print("[i] starting HTTP%s server at 'http%s://%s:%d/'" % ('S' if pem else "", 's' if pem else "", server.server_address[0], server.server_address[1]))
print("[o] running...")
if join:
server.serve_forever()
else:
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
|
AVR_Miner.py
|
#!/usr/bin/env python3
##########################################
# Duino-Coin Python AVR Miner (v2.2)
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © Duino-Coin Community 2019-2021
##########################################
import socket, threading, time, re, subprocess, configparser, sys, datetime, os # Import libraries
from pathlib import Path
from signal import signal, SIGINT
import locale, json
def install(package):
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
os.execl(sys.executable, sys.executable, *sys.argv)
def now():
return datetime.datetime.now()
try: # Check if pyserial is installed
import serial
import serial.tools.list_ports
except:
print(
now().strftime("%H:%M:%S ")
+ 'Pyserial is not installed. Miner will try to install it. If it fails, please manually install "pyserial" python3 package.\nIf you can\'t install it, use the Minimal-PC_Miner.'
)
install("pyserial")
try: # Check if colorama is installed
from colorama import init, Fore, Back, Style
except:
print(
now().strftime("%H:%M:%S ")
+ 'Colorama is not installed. Miner will try to install it. If it fails, please manually install "colorama" python3 package.\nIf you can\'t install it, use the Minimal-PC_Miner.'
)
install("colorama")
try: # Check if requests is installed
import requests
except:
print(
now().strftime("%H:%M:%S ")
+ 'Requests is not installed. Miner will try to install it. If it fails, please manually install "requests" python3 package.\nIf you can\'t install it, use the Minimal-PC_Miner.'
)
install("requests")
try:
from pypresence import Presence
except:
print(
        'Pypresence is not installed. Miner will try to install it. If it fails, please manually install "pypresence" python3 package.'
)
install("pypresence")
# Global variables
minerVersion = "2.2" # Version number
timeout = 30 # Socket timeout
resourcesFolder = "AVRMiner_" + str(minerVersion) + "_resources"
shares = [0, 0]
diff = 0
donatorrunning = False
job = ""
debug = "n"
rigIdentifier = "None"
serveripfile = "https://raw.githubusercontent.com/revoxhere/duino-coin/gh-pages/serverip.txt" # Serverip file
config = configparser.ConfigParser()
donationlevel = 0
hashrate = 0
connectionMessageShown = False
if not os.path.exists(resourcesFolder):
os.mkdir(resourcesFolder) # Create resources folder if it doesn't exist
if not Path( # Check if languages file exists
resourcesFolder + "/langs.json"
).is_file(): # Initial miner executable section
url = "https://raw.githubusercontent.com/revoxhere/duino-coin/master/Resources/AVR_Miner_langs.json"
r = requests.get(url)
with open(resourcesFolder + "/langs.json", "wb") as f:
f.write(r.content)
with open(f"{resourcesFolder}/langs.json", "r", encoding="utf8") as lang_file:
lang_file = json.load(lang_file)
if not Path( # Check if miner is configured, if it isn't, autodetect language
resourcesFolder + "/Miner_config.cfg"
).is_file():
locale = locale.getdefaultlocale()[0]
if locale.startswith("es"):
lang = "spanish"
elif locale.startswith("sk"):
lang = "slovak"
elif locale.startswith("ru"):
lang = "russian"
elif locale.startswith("pl"):
lang = "polish"
elif locale.startswith("fr"):
lang = "french"
else:
lang = "english"
else:
try: # Read language from configfile
config.read(resourcesFolder + "/Miner_config.cfg")
lang = config["arduminer"]["language"]
except: # If it fails, fallback to english
lang = "english"
def getString(string_name):
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file["english"]:
return lang_file["english"][string_name]
else:
return "String not found: " + string_name
def debugOutput(text):
if debug == "y":
print(
now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S.%f ")
+ "DEBUG: "
+ text
)
def title(title):
if os.name == "nt":
os.system("title " + title)
else:
print("\33]0;" + title + "\a", end="")
sys.stdout.flush()
def handler(
signal_received, frame
): # If CTRL+C or SIGINT received, send CLOSE request to server in order to exit gracefully.
print(
now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ Back.GREEN
+ Fore.WHITE
+ " sys0 "
+ Back.RESET
+ Fore.YELLOW
+ getString("sigint_detected")
+ Style.NORMAL
+ Fore.WHITE
+ getString("goodbye")
)
try:
soc.close()
except:
pass
os._exit(0)
signal(SIGINT, handler) # Enable signal handler
def loadConfig(): # Config loading section
global pool_address, pool_port, username, donationlevel, avrport, debug, requestedDiff, rigIdentifier
if not Path(
str(resourcesFolder) + "/Miner_config.cfg"
).is_file(): # Initial configuration section
print(
Style.BRIGHT
+ getString("basic_config_tool")
+ resourcesFolder
+ getString("edit_config_file_warning")
)
print(
Style.RESET_ALL
+ getString("dont_have_account")
+ Fore.YELLOW
+ getString("wallet")
+ Fore.WHITE
+ getString("register_warning")
)
username = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_username")
+ Fore.WHITE
+ Style.BRIGHT
)
print(Style.RESET_ALL + Fore.YELLOW + getString("ports_message"))
portlist = serial.tools.list_ports.comports()
for port in portlist:
print(Style.RESET_ALL + Style.BRIGHT + Fore.WHITE + " " + str(port))
print(Style.RESET_ALL + Fore.YELLOW + getString("ports_notice"))
avrport = ""
while True:
avrport += input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_avrport")
+ Fore.WHITE
+ Style.BRIGHT
)
confirmation = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_anotherport")
+ Fore.WHITE
+ Style.BRIGHT
)
if confirmation == "y" or confirmation == "Y":
avrport += ","
else:
break
requestedDiffSelection = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_higherdiff")
+ Fore.WHITE
+ Style.BRIGHT
)
if requestedDiffSelection == "y" or requestedDiffSelection == "Y":
requestedDiff = "ESP32"
else:
requestedDiff = "AVR"
rigIdentifier = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_rig_identifier")
+ Fore.WHITE
+ Style.BRIGHT
)
if rigIdentifier == "y" or rigIdentifier == "Y":
rigIdentifier = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_rig_name")
+ Fore.WHITE
+ Style.BRIGHT
)
else:
rigIdentifier = "None"
donationlevel = "0"
if os.name == "nt" or os.name == "posix":
donationlevel = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_donation_level")
+ Fore.WHITE
+ Style.BRIGHT
)
            donationlevel = re.sub(
                r"\D", "", donationlevel
            )  # Check whether donationlevel is correct
if float(donationlevel) > int(5):
donationlevel = 5
if float(donationlevel) < int(0):
donationlevel = 0
config["arduminer"] = { # Format data
"username": username,
"avrport": avrport,
"donate": donationlevel,
"language": lang,
"identifier": rigIdentifier,
"difficulty": requestedDiff,
"debug": "n",
}
with open(
str(resourcesFolder) + "/Miner_config.cfg", "w"
) as configfile: # Write data to file
config.write(configfile)
avrport = avrport.split(",")
print(Style.RESET_ALL + getString("config_saved"))
else: # If config already exists, load from it
config.read(str(resourcesFolder) + "/Miner_config.cfg")
username = config["arduminer"]["username"]
avrport = config["arduminer"]["avrport"]
avrport = avrport.split(",")
donationlevel = config["arduminer"]["donate"]
debug = config["arduminer"]["debug"]
rigIdentifier = config["arduminer"]["identifier"]
requestedDiff = config["arduminer"]["difficulty"]
def Greeting(): # Greeting message depending on time
global greeting
print(Style.RESET_ALL)
current_hour = time.strptime(time.ctime(time.time())).tm_hour
if current_hour < 12:
greeting = getString("greeting_morning")
elif current_hour == 12:
greeting = getString("greeting_noon")
elif current_hour > 12 and current_hour < 18:
greeting = getString("greeting_afternoon")
elif current_hour >= 18:
greeting = getString("greeting_evening")
else:
greeting = getString("greeting_back")
print(
" > "
+ Fore.YELLOW
+ Style.BRIGHT
+ getString("banner")
+ Style.RESET_ALL
+ Fore.WHITE
+ " (v"
+ str(minerVersion)
+ ") 2019-2021"
) # Startup message
print(" > " + Fore.YELLOW + "https://github.com/revoxhere/duino-coin")
print(
" > "
+ Fore.WHITE
+ getString("avr_on_port")
+ Style.BRIGHT
+ Fore.YELLOW
+ " ".join(avrport)
)
if os.name == "nt" or os.name == "posix":
print(
" > "
+ Fore.WHITE
+ getString("donation_level")
+ Style.BRIGHT
+ Fore.YELLOW
+ str(donationlevel)
)
print(
" > "
+ Fore.WHITE
+ getString("algorithm")
+ Style.BRIGHT
+ Fore.YELLOW
+ "DUCO-S1A @ "
+ str(requestedDiff)
+ " diff"
)
print(
Style.RESET_ALL
+ " > "
+ Fore.WHITE
+ getString("rig_identifier")
+ Style.BRIGHT
+ Fore.YELLOW
+ rigIdentifier
)
print(
" > "
+ Fore.WHITE
+ str(greeting)
+ ", "
+ Style.BRIGHT
+ Fore.YELLOW
+ str(username)
+ "!\n"
)
if os.name == "nt":
if not Path(
resourcesFolder + "/Donate_executable.exe"
).is_file(): # Initial miner executable section
debugOutput("OS is Windows, downloading developer donation executable")
url = "https://github.com/revoxhere/duino-coin/blob/useful-tools/DonateExecutableWindows.exe?raw=true"
r = requests.get(url)
with open(resourcesFolder + "/Donate_executable.exe", "wb") as f:
f.write(r.content)
elif os.name == "posix":
if not Path(
resourcesFolder + "/Donate_executable"
).is_file(): # Initial miner executable section
debugOutput("OS is Windows, downloading developer donation executable")
url = "https://github.com/revoxhere/duino-coin/blob/useful-tools/DonateExecutableLinux?raw=true"
r = requests.get(url)
with open(resourcesFolder + "/Donate_executable", "wb") as f:
f.write(r.content)
def Donate():
global donationlevel, donatorrunning, donateExecutable
if os.name == "nt":
cmd = (
"cd "
+ resourcesFolder
+ "& Donate_executable.exe -o stratum+tcp://blockmasters.co:6033 -u 9RTb3ikRrWExsF6fis85g7vKqU1tQYVFuR -p AVRmW,c=XMG,d=16 -s 4 -e "
)
elif os.name == "posix":
cmd = (
"cd "
+ resourcesFolder
+ "&& chmod +x Donate_executable && ./Donate_executable -o stratum+tcp://blockmasters.co:6033 -u 9RTb3ikRrWExsF6fis85g7vKqU1tQYVFuR -p AVRmL,c=XMG,d=16 -s 4 -e "
)
if int(donationlevel) <= 0:
print(
now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.GREEN
+ Fore.WHITE
+ " sys0 "
+ Back.RESET
+ Fore.YELLOW
+ getString("free_network_warning")
+ Style.BRIGHT
+ Fore.YELLOW
+ getString("donate_warning")
+ Style.RESET_ALL
+ Fore.GREEN
+ "https://duinocoin.com/donate"
+ Style.BRIGHT
+ Fore.YELLOW
+ getString("learn_more_donate")
+ Style.RESET_ALL
)
time.sleep(10)
if donatorrunning == False:
if int(donationlevel) == 5:
cmd += "100"
elif int(donationlevel) == 4:
cmd += "85"
elif int(donationlevel) == 3:
cmd += "60"
elif int(donationlevel) == 2:
cmd += "30"
elif int(donationlevel) == 1:
cmd += "15"
if int(donationlevel) > 0: # Launch CMD as subprocess
debugOutput(getString("starting_donation"))
donatorrunning = True
donateExecutable = subprocess.Popen(
cmd, shell=True, stderr=subprocess.DEVNULL
)
print(
now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.GREEN
+ Fore.WHITE
+ " sys0 "
+ Back.RESET
+ Fore.RED
+ getString("thanks_donation")
+ Style.RESET_ALL
)
def initRichPresence():
global RPC
try:
RPC = Presence(808056068113563701)
RPC.connect()
except: # Discord not launched
pass
def updateRichPresence():
startTime = int(time.time())
while True:
try:
RPC.update(
details="Hashrate: " + str(hashrate) + " H/s",
start=startTime,
state="Acc. shares: "
+ str(shares[0])
+ "/"
+ str(shares[0] + shares[1]),
large_image="ducol",
large_text="Duino-Coin, a coin that can be mined with almost everything, including AVR boards",
buttons=[
{"label": "Learn more", "url": "https://duinocoin.com"},
{"label": "Discord Server", "url": "https://discord.gg/k48Ht5y"},
],
)
except: # Discord not launched
pass
time.sleep(15) # 15 seconds to respect discord's rate limit
def AVRMine(com): # Mining section
global hash_count, connectionMessageShown, hashrate
while True:
while True:
try:
res = requests.get(
serveripfile, data=None
) # Use request to grab data from raw github file
if res.status_code == 200: # Check for response
content = (
res.content.decode().splitlines()
) # Read content and split into lines
masterServer_address = content[0] # Line 1 = pool address
masterServer_port = content[1] # Line 2 = pool port
debugOutput(
"Retrieved pool IP: "
+ masterServer_address
+ ":"
+ str(masterServer_port)
)
break
except: # If it wasn't, display a message
print(
now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ Back.BLUE
+ Fore.WHITE
+ " net"
+ str(com[-1:].lower())
+ " "
+ Back.RESET
+ Fore.RED
+ getString("data_error")
)
if debug == "y":
raise
time.sleep(10)
while True: # This section connects to the server
try:
socId = socket.socket()
socId.connect(
(str(masterServer_address), int(masterServer_port))
) # Connect to the server
serverVersion = socId.recv(3).decode() # Get server version
debugOutput("Server version: " + serverVersion)
if (
float(serverVersion) <= float(minerVersion)
and len(serverVersion) == 3
and connectionMessageShown != True
): # If miner is up-to-date, display a message and continue
connectionMessageShown = True
print(
now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ Back.BLUE
+ Fore.WHITE
+ " net0 "
+ Back.RESET
+ Fore.YELLOW
+ getString("connected")
+ Style.RESET_ALL
+ Fore.WHITE
+ getString("connected_server")
+ str(serverVersion)
+ ")"
)
elif connectionMessageShown != True:
print(
now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ Back.GREEN
+ Fore.WHITE
+ " sys0 "
+ Back.RESET
+ Fore.RED
+ " Miner is outdated (v"
+ minerVersion
+ "),"
+ Style.RESET_ALL
+ Fore.RED
+ getString("server_is_on_version")
+ serverVersion
+ getString("update_warning")
)
break
except:
print(
now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.BLUE
+ Fore.WHITE
+ " net0 "
+ Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RED
+ getString("connecting_error")
+ Style.RESET_ALL
)
if debug == "y":
raise
time.sleep(10)
while True:
try: # Close previous serial connections (if any)
com.close()
except:
pass
try:
comConnection = serial.Serial(
com,
115200,
timeout=3,
write_timeout=3,
inter_byte_timeout=1,
)
print(
now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.MAGENTA
+ Fore.WHITE
+ " "
+ str(com[-4:].lower())
+ " "
+ Style.RESET_ALL
+ Style.BRIGHT
+ Fore.GREEN
+ getString("board_on_port")
+ str(com[-4:])
+ getString("board_is_connected")
+ Style.RESET_ALL
)
break
except:
debugOutput("Error connecting to AVR")
if debug == "y":
raise
print(
now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.MAGENTA
+ Fore.WHITE
+ " "
+ str(com[-4:].lower())
+ " "
+ Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RED
+ getString("board_connection_error")
+ str(com[-4:])
+ getString("board_connection_error2")
+ Style.RESET_ALL
)
time.sleep(10)
first_share = True
avr_not_initialized = True
while avr_not_initialized:
try:
ready = comConnection.readline().decode() # AVR will send ready signal
debugOutput("Received start word (" + str(ready) + ")")
print(
now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.GREEN
+ Fore.WHITE
+ " sys"
+ str(com[-1:])
+ " "
+ Back.RESET
+ Fore.YELLOW
+ getString("mining_start")
+ Style.RESET_ALL
+ Fore.WHITE
+ getString("mining_algorithm")
+ str(com)
+ ")"
)
avr_not_initialized = False
except:
print(
now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.MAGENTA
+ Fore.WHITE
+ " "
                    + str(com[-4:].lower())
+ " "
+ Back.RESET
+ Fore.RED
+ getString("mining_avr_connection_error")
)
time.sleep(5)
avr_not_initialized = True
while True:
while True:
try:
job_not_received = True
while job_not_received:
socId.send(
bytes(
"JOB," + str(username) + "," + str(requestedDiff),
encoding="utf8",
)
) # Send job request
try:
job = socId.recv(1024).decode() # Retrieves work from pool
debugOutput("Received job")
job_not_received = False
except:
break
job = job.split(",") # Split received data to job and difficulty
try:
if job[0] and job[1] and job[2]:
debugOutput("Job received: " + str(job))
diff = job[2]
break # If job received, continue
except IndexError:
debugOutput("IndexError, retrying")
except:
if debug == "y":
raise
break
try: # Write data to AVR board
try:
comConnection.write(bytes("start\n", encoding="utf8")) # start word
debugOutput("Written start word")
comConnection.write(
bytes(
str(job[0] + "\n" + job[1] + "\n" + job[2] + "\n"),
encoding="utf8",
)
) # hash
debugOutput("Send job to arduino")
except:
ConnectToAVR()
continue
wrong_avr_result = True
wrong_results = 0
while wrong_avr_result:
result = comConnection.readline().decode() # Read the result
debugOutput(str("result: ") + str(result))
if result == "":
wrong_avr_result = True
wrong_results = wrong_results + 1
if first_share or wrong_results > 5:
wrong_avr_result = False
print(
now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.MAGENTA
+ Fore.WHITE
+ " avr "
+ Back.RESET
+ Fore.RED
+ getString("mining_avr_not_responding")
)
else:
wrong_avr_result = False
first_share = False
wrong_results = 0
if first_share or wrong_results > 5:
continue
result = result.split(",")
try:
debugOutput("Received result (" + str(result[0]) + ")")
debugOutput("Received time (" + str(result[1]) + ")")
computetime = round(
int(result[1]) / 1000000, 3
) # Convert AVR time to s
hashrate = round(int(result[0]) / int(result[1]) * 1000000, 2)
debugOutput("Calculated hashrate (" + str(hashrate) + ")")
except:
break
try:
socId.send(
bytes(
str(result[0])
+ ","
+ str(hashrate)
+ ",Official AVR Miner v"
+ str(minerVersion)
+ ","
+ str(rigIdentifier),
encoding="utf8",
)
) # Send result back to the server
except:
break
except:
break
while True:
responsetimetart = now()
feedback_not_received = True
while feedback_not_received:
try:
feedback = socId.recv(64).decode() # Get feedback
except socket.timeout:
feedback_not_received = True
debugOutput("Timeout while getting feedback, retrying")
except ConnectionResetError:
debugOutput("Connection was reset, reconnecting")
feedback_not_received = True
break
except ConnectionAbortedError:
debugOutput("Connection was aborted, reconnecting")
feedback_not_received = True
break
else:
responsetimestop = now() # Measure server ping
ping = responsetimestop - responsetimetart # Calculate ping
ping = str(int(ping.microseconds / 1000)) # Convert to ms
feedback_not_received = False
debugOutput("Successfully retrieved feedback")
if feedback == "GOOD": # If result was good
shares[0] = (
shares[0] + 1
) # Share accepted = increment correct shares counter by 1
title(
getString("duco_avr_miner")
+ str(minerVersion)
+ ") - "
+ str(shares[0])
+ "/"
+ str(shares[0] + shares[1])
+ getString("accepted_shares")
)
print(
now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.MAGENTA
+ Fore.WHITE
+ " "
+ str(com[-4:].lower())
+ " "
+ Back.RESET
+ Fore.GREEN
+ getString("accepted")
+ Fore.WHITE
+ str(int(shares[0]))
+ "/"
+ str(int(shares[0] + shares[1]))
+ Back.RESET
+ Fore.YELLOW
+ " ("
+ str(int((shares[0] / (shares[0] + shares[1]) * 100)))
+ "%)"
+ Style.NORMAL
+ Fore.WHITE
+ " ∙ "
+ str(f"%01.3f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(f"%05.1f" % float(hashrate))
+ " H/s"
+ Style.NORMAL
+ Fore.WHITE
+ " @ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str(f"%02.0f" % int(ping))
+ "ms"
)
break # Repeat
elif feedback == "BLOCK": # If result was good
shares[0] = (
shares[0] + 1
) # Share accepted = increment correct shares counter by 1
title(
getString("duco_avr_miner")
+ str(minerVersion)
+ ") - "
+ str(shares[0])
+ "/"
+ str(shares[0] + shares[1])
+ getString("accepted_shares")
)
print(
now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.MAGENTA
+ Fore.WHITE
+ " "
+ str(com[-4:].lower())
+ " "
+ Back.RESET
+ Fore.CYAN
+ getString("block_found")
+ Fore.WHITE
+ str(int(shares[0]))
+ "/"
+ str(int(shares[0] + shares[1]))
+ Back.RESET
+ Fore.YELLOW
+ " ("
+ str(int((shares[0] / (shares[0] + shares[1]) * 100)))
+ "%)"
+ Style.NORMAL
+ Fore.WHITE
+ " ∙ "
+ str(f"%01.3f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(f"%05.1f" % float(hashrate))
+ " H/s"
+ Style.NORMAL
+ Fore.WHITE
+ " @ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str(f"%02.0f" % int(ping))
+ "ms"
)
break
elif feedback == "INVU": # If user doesn't exist
print(
now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.BLUE
+ Fore.WHITE
+ " net"
+ str(com[-1:])
+ " "
+ Back.RESET
+ Fore.RED
+ getString("mining_user")
+ str(username)
+ getString("mining_not_exist")
+ Style.RESET_ALL
+ Fore.RED
+ getString("mining_not_exist_warning")
)
time.sleep(10)
elif feedback == "ERR": # If server says that it encountered an error
print(
now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.BLUE
+ Fore.WHITE
+ " net"
+ str(com[-1:])
+ " "
+ Back.RESET
+ Fore.RED
+ getString("internal_server_error")
+ Style.RESET_ALL
+ Fore.RED
+ getString("retrying")
)
time.sleep(10)
else: # If result was bad
shares[1] = (
shares[1] + 1
) # Share rejected = increment bad shares counter by 1
title(
getString("duco_avr_miner")
+ str(minerVersion)
+ ") - "
+ str(shares[0])
+ "/"
+ str(shares[0] + shares[1])
+ getString("accepted_shares")
)
print(
now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.MAGENTA
+ Fore.WHITE
+ " "
+ str(com[-4:].lower())
+ " "
+ Back.RESET
+ Fore.RED
+ getString("rejected")
+ Fore.WHITE
+ str(int(shares[0]))
+ "/"
+ str(int(shares[0] + shares[1]))
+ Back.RESET
+ Fore.YELLOW
+ " ("
+ str(int((shares[0] / (shares[0] + shares[1]) * 100)))
+ "%)"
+ Style.NORMAL
+ Fore.WHITE
+ " ∙ "
+ str(f"%01.3f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(f"%05.1f" % float(hashrate))
+ " H/s"
+ Style.NORMAL
+ Fore.WHITE
+ " @ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str(f"%02.0f" % int(ping))
+ "ms"
)
break # Repeat
if __name__ == "__main__":
init(autoreset=True) # Enable colorama
title(getString("duco_avr_miner") + str(minerVersion) + ")")
try:
loadConfig() # Load config file or create new one
debugOutput("Config file loaded")
except:
print(
now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.GREEN
+ Fore.WHITE
+ " sys0 "
+ Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RED
+ getString("load_config_error")
+ resourcesFolder
+ getString("load_config_error_warning")
+ Style.RESET_ALL
)
if debug == "y":
raise
time.sleep(10)
os._exit(1)
try:
Greeting() # Display greeting message
debugOutput("Greeting displayed")
except:
if debug == "y":
raise
try:
Donate() # Start donation thread
except:
if debug == "y":
raise
try:
for port in avrport:
threading.Thread(
target=AVRMine, args=(port,)
).start() # Launch avr duco mining threads
except:
if debug == "y":
raise
initRichPresence()
threading.Thread(target=updateRichPresence).start()
|
__init__.py
|
from __future__ import print_function
from builtins import input
import sys
import time
import cancat
import struct
import threading
import cancat.iso_tp as cisotp
# In 11-bit CAN, an OBD2 tester typically sends requests with an ID of 7DF, and
# can accept response messages on IDs 7E8 to 7EF, requests to a specific ECU can
# be sent from ID 7E0 to 7E7. So the non-OBD2 range normally ends at 7D7,
# although I can't find a specific "standard" for this.
#
# In 29-bit CAN an OBD2 tester typically sends requests with an ID of 0x18DB33F1
# where 0x18DBxxxx indicates this is an OBD2 message, 0x33 indicates this
# message is for the OBD2 ECU(s), and 0xF1 is the tester. Normal UDS messages
# use a prefix of 0x18DAxxxx.
# 0xF1 is used as a tester address in normal UDS messaging as well.
ARBID_CONSTS = {
'11bit': {
'prefix': 0x700,
'prefix_mask': 0xF00,
'resp_offset': 8, # rxid is normally the txid + 8
'max_req_id': 0xF7,
'obd2_broadcast': 0x7DF,
},
'29bit': {
'prefix': 0x18DA0000,
'prefix_mask': 0xFFFF0000,
'destid_mask': 0x0000FF00,
'destid_shift': 8,
'srcid_mask': 0x000000FF,
'tester': 0xF1,
'obd2_broadcast': 0x18DA33F1,
}
}
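# Illustrative sketch, not part of the cancat API: assuming 29-bit physical
# addressing with the tester at 0xF1 (as in ARBID_CONSTS above), the request and
# response arbitration IDs for a hypothetical target ECU address can be derived
# like this.
def _example_29bit_arbids(ecu_addr):
    consts = ARBID_CONSTS['29bit']
    # Request: 0x18DA | target | source, e.g. ecu_addr=0x10 -> 0x18DA10F1
    tx_arbid = consts['prefix'] | (ecu_addr << consts['destid_shift']) | consts['tester']
    # Response: target and source swap, e.g. 0x18DAF110
    rx_arbid = consts['prefix'] | (consts['tester'] << consts['destid_shift']) | ecu_addr
    return tx_arbid, rx_arbid
# For 11-bit addressing the response ID is conventionally the request ID plus
# ARBID_CONSTS['11bit']['resp_offset'], e.g. 0x7E0 -> 0x7E8.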
ISO_14229_DIDS = {
0xF180: 'bootSoftwareIdentificationDataIdentifier',
0xF181: 'applicationSoftwareIdentificationDataIdentifier',
0xF182: 'applicationDataIdentificationDataIdentifier',
0xF183: 'bootSoftwareFingerprintDataIdentifier',
0xF184: 'applicationSoftwareFingerprintDataIdentifier',
0xF185: 'applicationDataFingerprintDataIdentifier',
0xF186: 'activeDiagnosticSessionDataIdentifier',
0xF187: 'vehicleManufacturerSparePartNumberDataIdentifier',
0xF188: 'vehicleManufacturerECUSoftwareNumberDataIdentifier',
0xF189: 'vehicleManufacturerECUSoftwareVersionNumberDataIdentifier',
0xF18A: 'systemSupplierIdentifierDataIdentifier',
0xF18B: 'ECUManufacturingDateDataIdentifier',
0xF18C: 'ECUSerialNumberDataIdentifier',
0xF18D: 'supportedFunctionalUnitsDataIdentifier',
0xF18E: 'vehicleManufacturerKitAssemblyPartNumberDataIdentifier',
0xF190: 'VINDataIdentifier',
0xF191: 'vehicleManufacturerECUHardwareNumberDataIdentifier',
0xF192: 'systemSupplierECUHardwareNumberDataIdentifier',
0xF193: 'systemSupplierECUHardwareVersionNumberDataIdentifier',
0xF194: 'systemSupplierECUSoftwareNumberDataIdentifier',
0xF195: 'systemSupplierECUSoftwareVersionNumberDataIdentifier',
0xF196: 'exhaustRegulationOrTypeApprovalNumberDataIdentifier',
0xF197: 'systemNameOrEngineTypeDataIdentifier',
0xF198: 'repairShopCodeOrTesterSerialNumberDataIdentifier',
0xF199: 'programmingDateDataIdentifier',
0xF19A: 'calibrationRepairShopCodeOrCalibrationEquipmentSerialNumberData-',
0xF19B: 'calibrationDateDataIdentifier',
0xF19C: 'calibrationEquipmentSoftwareNumberDataIdentifier',
0xF19D: 'ECUInstallationDateDataIdentifier',
0xF19E: 'ODXFileDataIdentifier',
0xF19F: 'entityDataIdentifier',
}
NEG_RESP_CODES = {
0x10:'GeneralReject',
0x11:'ServiceNotSupported',
0x12:'SubFunctionNotSupported',
    0x13:'IncorrectMessageLengthOrInvalidFormat',
0x14:'ResponseTooLong',
0x21:'BusyRepeatRequest',
0x22:'ConditionsNotCorrect',
0x24:'RequestSequenceError',
0x25:'NoResponseFromSubnetComponent',
0x26:'FailurePreventsExecutionOfRequestedAction',
0x31:'RequestOutOfRange',
0x33:'SecurityAccessDenied',
0x35:'InvalidKey',
0x36:'ExceedNumberOfAttempts',
0x37:'RequiredTimeDelayNotExpired',
0x70:'UploadDownloadNotAccepted',
0x71:'TransferDataSuspended',
0x72:'GeneralProgrammingFailure',
0x73:'WrongBlockSequenceCounter',
0x78:'RequestCorrectlyReceived-ResponsePending',
0x7e:'SubFunctionNotSupportedInActiveSession',
0x7f:'ServiceNotSupportedInActiveSession',
0x81:'RpmTooHigh',
0x82:'RpmTooLow',
0x83:'EngineIsRunning',
0x84:'EngineIsNotRunning',
0x85:'EngineRunTimeTooLow',
0x86:'TemperatureTooHigh',
0x87:'TemperatureTooLow',
0x88:'VehicleSpeedTooHigh',
0x89:'VehicleSpeedTooLow',
0x8a:'ThrottlePedalTooHigh',
0x8b:'ThrottlePedalTooLow',
0x8c:'TransmissionRangeNotInNeutral',
0x8d:'TransmissionRangeNotInGear',
0x8f:'BrakeSwitchsNotClosed',
0x90:'ShifterLeverNotInPark',
0x91:'TorqueConverterClutchLocked',
0x92:'VoltageTooHigh',
0x93:'VoltageTooLow',
}
SVC_DIAGNOSTICS_SESSION_CONTROL = 0x10
SVC_ECU_RESET = 0x11
SVC_CLEAR_DIAGNOSTICS_INFORMATION = 0x14
SVC_READ_DTC_INFORMATION = 0x19
SVC_READ_DATA_BY_IDENTIFIER = 0x22
SVC_READ_MEMORY_BY_ADDRESS = 0x23
SVC_SECURITY_ACCESS = 0x27
SVC_READ_DATA_BY_PERIODIC_IDENTIFIER = 0x2a
SVC_DYNAMICALLY_DEFINE_DATA_IDENTIFIER = 0x2c
SVC_WRITE_DATA_BY_IDENTIFIER = 0x2e
SVC_INPUT_OUTPUT_CONTROL_BY_IDENTIFIER = 0x2f
SVC_ROUTINE_CONTROL = 0x31
SVC_REQUEST_DOWNLOAD = 0x34
SVC_REQUEST_UPLOAD = 0x35
SVC_TRANSFER_DATA = 0x36
SVC_REQUEST_TRANSFER_EXIT = 0x37
SVC_WRITE_MEMORY_BY_ADDRESS = 0x3d
SVC_TESTER_PRESENT = 0x3e
SVC_NEGATIVE_RESPONSE = 0x7f
SVC_CONTROL_DTC_SETTING = 0x85
UDS_SVCS = { v:k for k,v in globals().items() if k.startswith('SVC_') }
POS_RESP_CODES = { (k|0x40) : "OK_" + v.lower() for k,v in UDS_SVCS.items() }
POS_RESP_CODES[0] = 'Success'
NEG_RESP_REPR = {}
for k,v in NEG_RESP_CODES.items():
NEG_RESP_REPR[k] = 'ERR_' + v
RESP_CODES = {}
RESP_CODES.update(NEG_RESP_REPR)
RESP_CODES.update(POS_RESP_CODES)
class NegativeResponseException(Exception):
def __init__(self, neg_code, svc, msg):
self.neg_code = neg_code
self.msg = msg
self.svc = svc
def __repr__(self):
negresprepr = NEG_RESP_CODES.get(self.neg_code)
return "NEGATIVE RESPONSE to 0x%x (%s): ERROR 0x%x: %s \tmsg: %s" % \
(self.svc, UDS_SVCS.get(self.svc), self.neg_code, negresprepr, self.msg)
def __str__(self):
negresprepr = NEG_RESP_CODES.get(self.neg_code)
return "NEGATIVE RESPONSE to 0x%x (%s): ERROR 0x%x: %s \tmsg: %s" % \
(self.svc, UDS_SVCS.get(self.svc), self.neg_code, negresprepr, self.msg)
class UDS(object):
def __init__(self, c, tx_arbid, rx_arbid=None, verbose=True, extflag=0, timeout=3.0):
self.c = c
self.t = None
self.verbose = verbose
self.extflag = extflag
self.timeout = timeout
if rx_arbid == None:
rx_arbid = tx_arbid + 8 # by UDS spec
self.tx_arbid = tx_arbid
self.rx_arbid = rx_arbid
def xmit_recv(self, data, extflag=0, count=1, service=None):
msg, idx = self.c.ISOTPxmit_recv(self.tx_arbid, self.rx_arbid, data, extflag, self.timeout, count, service)
# check if the response is something we know about and can help out
if msg != None and len(msg):
svc = data[0]
svc_resp = msg[0]
errcode = 0
if len(msg) >= 3:
errcode = msg[2]
if svc_resp == svc + 0x40:
if self.verbose:
print("Positive Response!")
negresprepr = NEG_RESP_CODES.get(errcode)
if negresprepr != None and svc_resp != svc + 0x40:
if self.verbose > 1:
print(negresprepr + "\n")
# TODO: Implement getting final message if ResponseCorrectlyReceivedResponsePending is received
if errcode != 0x78: # Don't throw an exception for ResponseCorrectlyReceivedResponsePending
raise NegativeResponseException(errcode, svc, msg)
else:
# Try again but increment the start index
msg, idx = self.c._isotp_get_msg(self.rx_arbid, start_index = idx+1, service = service, timeout = self.timeout)
return msg
def _do_Function(self, func, data=None, subfunc=None, service=None):
if subfunc != None:
omsg = struct.pack('>BB', func, subfunc)
else:
omsg = struct.pack('>B', func)
if data != None:
omsg += data
msg = self.xmit_recv(omsg, extflag=self.extflag, service=service)
return msg
def SendTesterPresent(self):
while self.TesterPresent is True:
if self.TesterPresentRequestsResponse:
self.c.CANxmit(self.tx_arbid, b"\x02\x3E\x00\x00\x00\x00\x00\x00")
else:
self.c.CANxmit(self.tx_arbid, b"\x02\x3E\x80\x00\x00\x00\x00\x00")
time.sleep(2.0)
def StartTesterPresent(self, request_response=True):
self.TesterPresent = True
self.TesterPresentRequestsResponse=request_response
self.t = threading.Thread(target = self.SendTesterPresent)
self.t.setDaemon(True)
self.t.start()
def StopTesterPresent(self):
self.TesterPresent = False
if self.t is not None:
self.t.join(5.0)
if self.t.is_alive():
if self.verbose:
print("Error killing Tester Present thread")
self.t = None
def DiagnosticSessionControl(self, session):
currIdx = self.c.getCanMsgCount()
return self._do_Function(SVC_DIAGNOSTICS_SESSION_CONTROL, data=struct.pack('>B', session), service=0x50)
def ReadMemoryByAddress(self, address, size):
currIdx = self.c.getCanMsgCount()
return self._do_Function(SVC_READ_MEMORY_BY_ADDRESS, subfunc=0x24, data=struct.pack(">IH", address, size), service = 0x63)
#return self.xmit_recv("\x23\x24" + struct.pack(">I", address) + struct.pack(">H", size), service = 0x63)
def ReadDID(self, did):
'''
Read the Data Identifier specified from the ECU.
Returns: The response ISO-TP message as a string
'''
msg = self._do_Function(SVC_READ_DATA_BY_IDENTIFIER, struct.pack('>H', did), service=0x62)
#msg = self.xmit_recv("22".decode('hex') + struct.pack('>H', did), service=0x62)
return msg
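    # Hypothetical usage sketch (the UDS instance and CAN interface `canif` are
    # assumptions, not defined in this module): per the ISO_14229_DIDS table above,
    # DID 0xF190 is the VIN, so it could be read with something like:
    #
    #   u = UDS(canif, 0x7E0, 0x7E8)
    #   vin_msg = u.ReadDID(0xF190)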
def WriteDID(self, did, data):
'''
        Write the specified Data Identifier to the ECU.
Returns: The response ISO-TP message as a string
'''
msg = self._do_Function(SVC_WRITE_DATA_BY_IDENTIFIER,struct.pack('>H', did) + data, service=0x62)
#msg = self.xmit_recv("22".decode('hex') + struct.pack('>H', did), service=0x62)
return msg
def RequestDownload(self, addr, data, data_format = 0x00, addr_format = 0x44):
'''
Assumes correct Diagnostics Session and SecurityAccess
'''
# Figure out the right address and data length formats
pack_fmt_str = ">BB"
try:
pack_fmt_str += {1:"B", 2:"H", 4:"I"}.get(addr_format >> 4) + {1:"B", 2:"H", 4:"I"}.get(addr_format & 0xf)
except TypeError:
print("Cannot parse addressAndLengthFormatIdentifier", hex(addr_format))
return None
msg = self.xmit_recv(b"\x34" + struct.pack(pack_fmt_str, data_format, addr_format, addr, len(data)), extflag=self.extflag, service = 0x74)
# Parse the response
if msg[0] != 0x74:
print("Error received: {}".format(msg.encode('hex')))
return msg
        max_txfr_num_bytes = msg[1] >> 4 # number of bytes in the max transfer length parameter
max_txfr_len = 0
for i in range(2,2+max_txfr_num_bytes):
max_txfr_len <<= 8
max_txfr_len += msg[i]
# Transfer data
data_idx = 0
block_idx = 1
while data_idx < len(data):
msg = self.xmit_recv(b"\x36" + struct.pack('>B', block_idx) + data[data_idx:data_idx+max_txfr_len-2], extflag=self.extflag, service = 0x76)
data_idx += max_txfr_len - 2
block_idx += 1
if block_idx > 0xff:
block_idx = 0
# error checking
if msg is not None and msg[0] == 0x7f and msg[2] != 0x78:
print("Error sending data: {}".format(msg.encode('hex')))
return None
if msg is None:
print("Didn't get a response?")
data_idx -= max_txfr_len - 2
block_idx -= 1
if block_idx == 0:
block_idx = 0xff
# TODO: need to figure out how to get 2nd isotp message to verify that this worked
# Send RequestTransferExit
self._do_Function(SVC_REQUEST_TRANSFER_EXIT, service = 0x77)
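    # Rough usage sketch (hypothetical address): push an image to the ECU once
    # the required session and SecurityAccess level have been entered:
    #   with open('fw.bin', 'rb') as f:
    #       u.RequestDownload(0x08000000, f.read())
    # The default addr_format of 0x44 keeps 4-byte address and length fields,
    # matching the struct packing above.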
def readMemoryByAddress(self, address, length, lenlen=1, addrlen=4):
'''
Work in progress!
'''
if lenlen == 1:
lfmt = "B"
else:
lfmt = "H"
lenlenbyte = (lenlen << 4) | addrlen
msg = self._do_Function(SVC_READ_MEMORY_BY_ADDRESS, data=struct.pack('<BI' + lfmt, lenlenbyte, address, length), service=0x63)
return msg
    def writeMemoryByAddress(self, address, data, lenlen=1, addrlen=4):
        '''
        Work in progress!
        Writes data to the given address on the ECU (WriteMemoryByAddress, 0x3D).
        '''
        if lenlen == 1:
            lfmt = "B"
        else:
            lfmt = "H"
        lenlenbyte = (lenlen << 4) | addrlen
        # addressAndLengthFormatIdentifier, address and size, followed by the data itself
        payload = struct.pack('<BI' + lfmt, lenlenbyte, address, len(data)) + data
        msg = self._do_Function(SVC_WRITE_MEMORY_BY_ADDRESS, data=payload, service=0x7D)
        return msg
    def RequestUpload(self, addr, length, data_format = 0x00, addr_format = 0x44):
        '''
        Work in progress!
        '''
        msg = self._do_Function(SVC_REQUEST_UPLOAD, subfunc=data_format, data=struct.pack('>B', addr_format) + struct.pack('>I', addr)[1:] + struct.pack('>I', length)[1:])
        sid, lfmtid, maxnumblocks = struct.unpack('>BBH', msg[:4])
        output = []
        for loop in range(maxnumblocks):
            # blockSequenceCounter starts at 1 and wraps to 0 after 0xff
            msg = self._do_Function(SVC_TRANSFER_DATA, subfunc=(loop + 1) & 0xff)
            output.append(msg)
            if len(msg) and msg[0] != 0x76:
                print("FAILURE TO DOWNLOAD ALL. Returning what we have so far (including error message)")
                return output
        msg = self._do_Function(SVC_REQUEST_TRANSFER_EXIT)
        if len(msg) and msg[0] != 0x77:
            print("FAILURE TO EXIT CLEANLY. Returning what we received.")
        return output
def EcuReset(self, rst_type=0x1):
return self._do_Function(SVC_ECU_RESET, subfunc=rst_type)
def ClearDiagnosticInformation(self):
pass
def ReadDTCInfomation(self):
pass
def ReadDataByPeriodicIdentifier(self, pdid):
pass
def DynamicallyDefineDataIdentifier(self):
pass
def InputOutputControlByIdentifier(self, iodid):
pass
def TransferData(self, did):
pass
def RequestTransferExit(self):
pass
def ControlDTCSetting(self):
pass
def RoutineControl(self, action, routine, *args):
"""
        action: RoutineControl sub-function (0x01 start, 0x02 stop, 0x03 request results)
routine: 2 byte value for which routine to call
*args: any additional arguments (must already be bytes)
"""
# Extra data for routine control is initially just the routine, but
# accepts additional bytes
data = struct.pack('>H', routine)
for arg in args:
data += arg
return self._do_Function(SVC_ROUTINE_CONTROL, subfunc=action, data=data)
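    # Hypothetical example: start routine 0x0203 with one option byte, then
    # stop it again (sub-function values per ISO 14229):
    #   u.RoutineControl(0x01, 0x0203, b'\x01')
    #   u.RoutineControl(0x02, 0x0203)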
def ScanDIDs(self, start=0, end=0x10000, delay=0):
success = []
try:
for x in range(start, end):
try:
if self.verbose:
sys.stderr.write(' %x ' % x)
val = self.ReadDID(x)
success.append((x, val))
except KeyboardInterrupt:
raise
except Exception as e:
if self.verbose > 1:
print(e)
time.sleep(delay)
except KeyboardInterrupt:
print("Stopping Scan during DID 0x%x " % x)
return success
return success
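    # Example sweep (slow on a real bus): enumerate the first 4K identifiers
    # with a short delay between requests so the ECU is not flooded:
    #   found = u.ScanDIDs(start=0x0000, end=0x1000, delay=0.01)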
def SecurityAccess(self, level, secret = ""):
"""Send and receive the UDS messages to switch SecurityAccess levels.
@level = the SecurityAccess level to switch to
@secret = a SecurityAccess algorithm specific secret used to generate the key
"""
msg = self._do_Function(SVC_SECURITY_ACCESS, subfunc=level, service = 0x67)
if msg is None:
return msg
if msg[0] == 0x7f:
            print("Error getting seed:", msg.hex())
else:
seed = msg[2:]
if isinstance(secret, str):
# If key is a string convert it to bytes
key = bytes(self._key_from_seed(seed, bytes.fromhex(secret.replace(' ', ''))))
else:
key = bytes(self._key_from_seed(seed, secret))
msg = self._do_Function(SVC_SECURITY_ACCESS, subfunc=level+1, data=key, service = 0x67)
return msg
def _key_from_seed(self, seed, secret):
"""Generates the key for a specific SecurityAccess seed request.
@seed = the SecurityAccess seed received from the ECU. Formatted
as a hex string with spaces between each seed byte.
@secret = a SecurityAccess algorithm specific key
Returns the key, as a string of key bytes.
"""
print("Not implemented in this class")
return []
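# The key derivation for SecurityAccess is OEM specific, so _key_from_seed()
# is intended to be overridden in a subclass.  A purely illustrative (made-up)
# XOR scheme might look like this, where UDSClient stands in for the client
# class defined above:
#
#   class XorKeyClient(UDSClient):
#       def _key_from_seed(self, seed, secret):
#           return bytes(s ^ k for s, k in zip(seed, secret))
#
#   # client.SecurityAccess(0x01, secret=b'\x11\x22\x33\x44')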
def printUDSSession(c, tx_arbid, rx_arbid=None, paginate=45):
    if rx_arbid is None:
rx_arbid = tx_arbid + 8 # by UDS spec
msgs = [msg for msg in c.genCanMsgs(arbids=[tx_arbid, rx_arbid])]
msgs_idx = 0
linect = 1
while msgs_idx < len(msgs):
arbid, isotpmsg, count = cisotp.msg_decode(msgs, msgs_idx)
#print("Message: (%s:%s) \t %s" % (count, msgs_idx, isotpmsg.encode('hex')))
svc = isotpmsg[0]
mtype = (RESP_CODES, UDS_SVCS)[arbid==tx_arbid].get(svc, '')
        print("Message: (%s:%s) \t %-30s %s" % (count, msgs_idx, isotpmsg.hex(), mtype))
msgs_idx += count
if paginate:
if (linect % paginate)==0:
input("%x) PRESS ENTER" % linect)
linect += 1
|
Task.py
|
import time
from tqdm import tqdm
from .common import npu_print, NEURO_AI_STR, get_response, get
from .web.urls import TASK_STATUS_URL
from threading import Thread
FAILURE = "FAILURE"
PENDING = "PENDING"
COMPLETE = "COMPLETE"
STOPPED = "STOPPED"
TASK_DONE_LIST = (FAILURE, COMPLETE, STOPPED)
bar_suffix = NEURO_AI_STR + " {desc}: {percentage:.1f}%|{bar}| remaining: {remaining} || elapsed: {elapsed} "
dash_str = "Started {0}. View status at https://dashboard.getneuro.ai/task?id={1}"
one_hundred_percent = 100
class Task:
def __init__(self, task_id, callback=None, show=True):
self.task_id = task_id
self.url = TASK_STATUS_URL + self.task_id
self.task_result = ""
self.task_type = ""
self.progress = 0
self.task_state = PENDING
self.callback = callback
self.cache = None
self._logs = {}
self.prints = []
self.params = {"include_result": False}
if self.callback:
t = Thread(target=self.callback_thread)
t.start()
if show:
self.update()
npu_print(dash_str.format(self.task_type.lower(), task_id))
def wait(self):
with tqdm(desc=self.task_type, total=one_hundred_percent,
bar_format=bar_suffix) as bar:
while not self.finished():
time.sleep(0.1)
                for log in self.prints:
                    bar.write(log)
self.prints = []
bar.n = self.progress * one_hundred_percent
bar.refresh()
if self.task_state == FAILURE:
self.update(include_result=True)
npu_print(f"Error for task {self.task_id}: {self.task_result}", level="ERROR")
            raise Exception(f"Task {self.task_id} failed: {self.task_result}")
# exit(1)
if self.task_state == STOPPED:
npu_print("Task has been stopped.")
return
bar.n = one_hundred_percent
def callback_thread(self):
self.get_result()
self.callback(self)
def get_result(self):
self.wait()
self.update(include_result=True)
return self.task_result
def update(self, include_result=False):
self.params["include_result"] = include_result
response = get(self.url, params=self.params)
response = get_response(response)
self.task_state = response["state"]
self.task_type = response["taskType"]
self.progress = response["progress"]
if "result" in response:
self.task_result = response["result"]
if "metrics" in response:
self._logs = response["metrics"]
if "print" in response:
self.prints = response["print"]
def __str__(self):
return str(self.get_result())
def finished(self):
self.update()
return self.task_state in TASK_DONE_LIST
def logs(self):
self.get_result()
return self._logs
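# Minimal usage sketch (assumes a task id previously issued by the backend):
#   task = Task("<task-id>", show=False)
#   result = task.get_result()   # polls and blocks until the task finishes
#   metrics = task.logs()        # metrics reported while the task ran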
|
webfrontend.py
|
from flask import Flask
from flask import request, send_file, render_template, send_from_directory
import json
import coding
import time
from server_config import SERVER
import keyfilelib
from multiprocessing import Pipe
from threading import Thread
from Queue import Queue, Empty
from werkzeug import secure_filename
from cStringIO import StringIO
app = Flask(__name__)
keyfile_name = keyfilelib.create_new_keyfile()
plength, clength = Pipe()
pname, cname = Pipe()
current_name = ""
current_size = 0
@app.route("/fp")
def upload():
return render_template("upload.html")
@app.route("/")
def index():
global current_name
global current_size
global keyfile
with open(keyfile_name, 'r') as keyfile_handle:
keyfile = json.loads(keyfile_handle.read())
if pname.poll():
current_name = pname.recv()
current_size = plength.recv()
return render_template('index.html', current_upload=current_name, upload_size=current_size,
num_pending=upload_jobs.qsize(), keys=sorted(keyfile.keys()))
@app.route("/upload", methods=['POST'])
def posted():
if request.method == 'POST':
ufile = request.files['file']
if ufile:
filename = secure_filename(ufile.filename)
data = ufile.read()
upload_jobs.put((filename, data))
return render_template("queued.html")
@app.route("/download/<filename>", methods=['GET'])
def download(filename):
with open(keyfile_name, 'r') as keyfile_handle:
keyfile = json.loads(keyfile_handle.read())
metadata = keyfile[filename]
data = coding.get_chunks(metadata)
strIO = StringIO()
strIO.write(data)
strIO.seek(0)
return send_file(strIO,
attachment_filename=filename,
as_attachment=True)
@app.route("/assets/css/<filename>", methods=['GET'])
def css(filename):
return send_from_directory(app.static_folder + "/css", filename)
@app.route("/assets/js/<filename>", methods=['GET'])
def js(filename):
return send_from_directory(app.static_folder + "/js", filename)
@app.route("/demo")
def demo():
return render_template("index2.html")
def consume(input_queue, done_jobs, length_pipe, name_pipe, keyfile_name):
while True:
try:
filename, data = input_queue.get()
name_pipe.send(filename)
length_pipe.send(len(data))
metadata = coding.send_chunks_get_metadata(data)
print "Got chunks"
# Save it back
with open(keyfile_name, 'r') as keyfile_handle:
keyfile = json.loads(keyfile_handle.read())
keyfile[filename] = metadata
keyfilelib.save(keyfile_name, keyfile)
print "Done saving to keyfile"
        except Empty:
time.sleep(1)
if __name__ == "__main__":
upload_jobs = Queue()
done_jobs = Queue()
app.debug = True
consumer = Thread(target=consume, args=(upload_jobs, done_jobs, clength, cname, keyfile_name))
consumer.start()
app.run(host=SERVER)
consumer.join()
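# Quick manual check (assumes the app is reachable at SERVER on Flask's
# default port 5000 and that coding/keyfilelib are configured):
#   curl -F "file=@example.bin" http://<SERVER>:5000/upload
#   curl -O http://<SERVER>:5000/download/example.bin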
|
test.py
|
# -*- coding: utf-8 -*-
import redis
import unittest
from hotels import hotels
import random
import time
from RLTest import Env
from includes import *
def testAdd(env):
if env.is_cluster():
raise unittest.SkipTest()
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text', 'body', 'text'))
env.assertTrue(r.exists('idx:idx'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
'title', 'hello world',
'body', 'lorem ist ipsum'))
for _ in r.retry_with_rdb_reload():
prefix = 'ft'
env.assertExists(prefix + ':idx/hello')
env.assertExists(prefix + ':idx/world')
env.assertExists(prefix + ':idx/lorem')
def testAddErrors(env):
env.expect('ft.create idx schema foo text bar numeric sortable').equal('OK')
env.expect('ft.add idx doc1 1 redis 4').error().contains('Unknown keyword')
env.expect('ft.add idx doc1').error().contains("wrong number of arguments")
env.expect('ft.add idx doc1 42').error().contains("Score must be between 0 and 1")
env.expect('ft.add idx doc1 1.0').error().contains("No field list found")
env.expect('ft.add fake_idx doc1 1.0 fields foo bar').error().contains("Unknown index name")
def assertEqualIgnoreCluster(env, val1, val2):
    # todo: each test that uses this function should be switched back to env.assertEqual
    # once the issues on the coordinator are fixed
if env.isCluster():
return
env.assertEqual(val1, val2)
def testConditionalUpdate(env):
env.assertOk(env.cmd(
'ft.create', 'idx', 'schema', 'foo', 'text', 'bar', 'numeric', 'sortable'))
env.assertOk(env.cmd('ft.add', 'idx', '1', '1',
'fields', 'foo', 'hello', 'bar', '123'))
env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'if',
'@foo == "hello"', 'fields', 'foo', 'world', 'bar', '123'))
env.assertEqual('NOADD', env.cmd('ft.add', 'idx', '1', '1', 'replace',
'if', '@foo == "hello"', 'fields', 'foo', 'world', 'bar', '123'))
env.assertEqual('NOADD', env.cmd('ft.add', 'idx', '1', '1', 'replace',
'if', '1 == 2', 'fields', 'foo', 'world', 'bar', '123'))
env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'partial', 'if',
'@foo == "world"', 'fields', 'bar', '234'))
env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'if',
'@bar == 234', 'fields', 'foo', 'hello', 'bar', '123'))
# Ensure that conditionals are ignored if the document doesn't exist
env.assertOk(env.cmd('FT.ADD', 'idx', '666', '1',
'IF', '@bar > 42', 'FIELDS', 'bar', '15'))
# Ensure that it fails if we try again, because it already exists
env.assertEqual('NOADD', env.cmd('FT.ADD', 'idx', '666', '1',
'REPLACE', 'IF', '@bar > 42', 'FIELDS', 'bar', '15'))
# Ensure that it fails because we're not using 'REPLACE'
with env.assertResponseError():
env.assertOk(env.cmd('FT.ADD', 'idx', '666', '1',
'IF', '@bar > 42', 'FIELDS', 'bar', '15'))
def testUnionIdList(env):
# Regression test for https://github.com/RediSearch/RediSearch/issues/306
r = env
N = 100
env.assertOk(r.execute_command(
"ft.create", "test", "SCHEMA", "tags", "TAG", "waypoint", "GEO"))
env.assertOk(r.execute_command(
"ft.add", "test", "1", "1", "FIELDS", "tags", "alberta", "waypoint", "-113.524,53.5244"))
env.assertOk(r.execute_command(
"ft.add", "test", "2", "1", "FIELDS", "tags", "ontario", "waypoint", "-79.395,43.661667"))
r.cmd('ft.search', 'test', '@tags:{ontario}')
res = r.execute_command(
'ft.search', 'test', "@waypoint:[-113.52 53.52 20 mi]|@tags:{ontario}", 'nocontent')
env.assertEqual(res, [2, '2', '1'])
def testAttributes(env):
env.assertOk(env.cmd('ft.create', 'idx', 'schema',
'title', 'text', 'body', 'text'))
env.assertOk(env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields',
'title', 't1 t2', 'body', 't3 t4 t5'))
env.assertOk(env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields',
'body', 't1 t2', 'title', 't3 t5'))
res = env.cmd(
'ft.search', 'idx', '(@title:(t1 t2) => {$weight: 0.2}) |(@body:(t1 t2) => {$weight: 0.5})', 'nocontent')
env.assertListEqual([2L, 'doc2', 'doc1'], res)
res = env.cmd(
'ft.search', 'idx', '(@title:(t1 t2) => {$weight: 2.5}) |(@body:(t1 t2) => {$weight: 0.5})', 'nocontent')
env.assertListEqual([2L, 'doc1', 'doc2'], res)
res = env.cmd(
'ft.search', 'idx', '(t3 t5) => {$slop: 4}', 'nocontent')
env.assertListEqual([2L, 'doc2', 'doc1'], res)
res = env.cmd(
'ft.search', 'idx', '(t5 t3) => {$slop: 0}', 'nocontent')
env.assertListEqual([1L, 'doc2'], res)
res = env.cmd(
'ft.search', 'idx', '(t5 t3) => {$slop: 0; $inorder:true}', 'nocontent')
env.assertListEqual([0], res)
def testUnion(env):
N = 100
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'f', 'text'))
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world' if i % 2 == 0 else 'hallo werld'))
for _ in r.retry_with_rdb_reload():
res = r.execute_command(
'ft.search', 'idx', 'hello|hallo', 'nocontent', 'limit', '0', '100')
env.assertEqual(N + 1, len(res))
env.assertEqual(N, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello|world', 'nocontent', 'limit', '0', '100')
env.assertEqual(51, len(res))
env.assertEqual(50, res[0])
res = r.execute_command('ft.search', 'idx', '(hello|hello)(world|world)',
'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(51, len(res))
env.assertEqual(50, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hello|hallo)(werld|world)', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(101, len(res))
env.assertEqual(100, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hallo|hello)(world|werld)', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(101, len(res))
env.assertEqual(100, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hello|werld)(hallo|world)', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(101, len(res))
env.assertEqual(100, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hello|hallo) world', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(51, len(res))
env.assertEqual(50, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hello world)|((hello world)|(hallo world|werld) | hello world werld)', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(101, len(res))
env.assertEqual(100, res[0])
def testSearch(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text', 'weight', 10.0, 'body', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello world',
'body', 'lorem ist ipsum'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'title', 'hello another world',
'body', 'lorem ist ipsum lorem lorem'))
for _ in r.retry_with_rdb_reload():
res = r.execute_command('ft.search', 'idx', 'hello')
env.assertTrue(len(res) == 5)
env.assertEqual(res[0], 2L)
env.assertEqual(res[1], "doc2")
env.assertTrue(isinstance(res[2], list))
env.assertTrue('title' in res[2])
env.assertTrue('hello another world' in res[2])
env.assertEqual(res[3], "doc1")
env.assertTrue('hello world' in res[4])
# Test empty query
res = r.execute_command('ft.search', 'idx', '')
env.assertListEqual([0], res)
# Test searching with no content
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent')
env.assertTrue(len(res) == 3)
env.assertEqual(res[0], 2L)
env.assertEqual(res[1], "doc2")
env.assertEqual(res[2], "doc1")
# Test searching WITHSCORES
res = r.execute_command(
'ft.search', 'idx', 'hello', 'WITHSCORES')
env.assertEqual(len(res), 7)
env.assertEqual(res[0], 2L)
env.assertEqual(res[1], "doc2")
env.assertTrue(float(res[2]) > 0)
env.assertEqual(res[4], "doc1")
env.assertTrue(float(res[5]) > 0)
# Test searching WITHSCORES NOCONTENT
res = r.execute_command(
'ft.search', 'idx', 'hello', 'WITHSCORES', 'NOCONTENT')
env.assertEqual(len(res), 5)
env.assertEqual(res[0], 2L)
env.assertEqual(res[1], "doc2")
env.assertTrue(float(res[2]) > 0)
env.assertEqual(res[3], "doc1")
env.assertTrue(float(res[4]) > 0)
def testSearchNosave(env):
# Check to see what happens when we try to return unsaved documents
env.cmd('ft.create', 'idx', 'SCHEMA', 'f1', 'text')
# Add 3 documents
for x in range(3):
env.cmd('ft.add', 'idx', 'doc{}'.format(x),
1.0, 'NOSAVE', 'FIELDS', 'f1', 'value')
# Now query the results
res = env.cmd('ft.search', 'idx', 'value')
env.assertEqual(3, res[0])
for content in res[2::2]:
env.assertEqual([], content)
def testGet(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text', 'bar', 'text'))
env.expect('ft.get').error().contains("wrong number of arguments")
env.expect('ft.get', 'idx').error().contains("wrong number of arguments")
env.expect('ft.get', 'idx', 'foo', 'bar').error().contains("wrong number of arguments")
env.expect('ft.mget').error().contains("wrong number of arguments")
env.expect('ft.mget', 'idx').error().contains("wrong number of arguments")
env.expect('ft.mget', 'fake_idx').error().contains("wrong number of arguments")
env.expect('ft.get fake_idx foo').error().contains("Unknown Index name")
env.expect('ft.mget fake_idx foo').error().contains("Unknown Index name")
for i in range(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'hello world', 'bar', 'wat wat'))
for i in range(100):
res = r.execute_command('ft.get', 'idx', 'doc%d' % i)
env.assertIsNotNone(res)
env.assertListEqual(
['foo', 'hello world', 'bar', 'wat wat'], res)
env.assertIsNone(r.execute_command(
'ft.get', 'idx', 'doc%dsdfsd' % i))
env.expect('ft.get', 'no_idx', 'doc0').error().contains("Unknown Index name")
rr = r.execute_command(
'ft.mget', 'idx', *('doc%d' % i for i in range(100)))
env.assertEqual(len(rr), 100)
for res in rr:
env.assertIsNotNone(res)
env.assertListEqual(
['foo', 'hello world', 'bar', 'wat wat'], res)
rr = r.execute_command(
'ft.mget', 'idx', *('doc-%d' % i for i in range(100)))
env.assertEqual(len(rr), 100)
for res in rr:
env.assertIsNone(res)
# Verify that when a document is deleted, GET returns NULL
r.cmd('ft.del', 'idx', 'doc10') # But we still keep the document
r.cmd('ft.del', 'idx', 'doc11')
r.cmd('ft.del', 'idx', 'coverage')
res = r.cmd('ft.get', 'idx', 'doc10')
r.assertEqual(None, res)
res = r.cmd('ft.mget', 'idx', 'doc10')
r.assertEqual([None], res)
res = r.cmd('ft.mget', 'idx', 'doc10', 'doc11', 'doc12')
r.assertIsNone(res[0])
r.assertIsNone(res[1])
r.assertTrue(not not res[2])
def testDelete(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'f', 'text'))
for i in range(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world'))
env.expect('ft.del', 'fake_idx', 'doc1').error()
for i in range(100):
# the doc hash should exist now
r.expect('ft.get', 'idx', 'doc%d' % i).notRaiseError()
# Delete the actual docs only half of the time
env.assertEqual(1, r.execute_command(
'ft.del', 'idx', 'doc%d' % i, 'DD' if i % 2 == 0 else ''))
# second delete should return 0
env.assertEqual(0, r.execute_command(
'ft.del', 'idx', 'doc%d' % i))
# After del with DD the doc hash should not exist
if i % 2 == 0:
env.assertFalse(r.exists('doc%d' % i))
else:
r.expect('ft.get', 'idx', 'doc%d' % i).notRaiseError()
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 100)
env.assertNotIn('doc%d' % i, res)
env.assertEqual(res[0], 100 - i - 1)
env.assertEqual(len(res), 100 - i)
# test reinsertion
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world'))
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 100)
env.assertIn('doc%d' % i, res)
env.assertEqual(1, r.execute_command(
'ft.del', 'idx', 'doc%d' % i))
for _ in r.retry_with_rdb_reload():
did = 'rrrr'
env.assertOk(r.execute_command('ft.add', 'idx', did, 1, 'fields',
'f', 'hello world'))
env.assertEqual(1, r.execute_command('ft.del', 'idx', did))
env.assertEqual(0, r.execute_command('ft.del', 'idx', did))
env.assertOk(r.execute_command('ft.add', 'idx', did, 1, 'fields',
'f', 'hello world'))
env.assertEqual(1, r.execute_command('ft.del', 'idx', did))
env.assertEqual(0, r.execute_command('ft.del', 'idx', did))
def testReplace(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'f', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
'f', 'hello world'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'f', 'hello world'))
res = r.execute_command(
'ft.search', 'idx', 'hello world')
env.assertEqual(2, res[0])
with env.assertResponseError():
# make sure we can't insert a doc twice
res = r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
'f', 'hello world')
# now replace doc1 with a different content
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'replace', 'fields',
'f', 'goodbye universe'))
for _ in r.retry_with_rdb_reload():
# make sure the query for hello world does not return the replaced
# document
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent')
env.assertEqual(1, res[0])
env.assertEqual('doc2', res[1])
# search for the doc's new content
res = r.execute_command(
'ft.search', 'idx', 'goodbye universe', 'nocontent')
env.assertEqual(1, res[0])
env.assertEqual('doc1', res[1])
def testDrop(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo'))
for i in range(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world', 'n', 666, 't', 'foo bar',
'g', '19.04,47.497'))
keys = r.keys('*')
env.assertGreaterEqual(len(keys), 100)
env.assertOk(r.execute_command('ft.drop', 'idx'))
keys = r.keys('*')
env.assertEqual(0, len(keys))
# Now do the same with KEEPDOCS
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo'))
for i in range(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world', 'n', 666, 't', 'foo bar',
'g', '19.04,47.497'))
keys = r.keys('*')
env.assertGreaterEqual(len(keys), 100)
if not env.is_cluster():
env.assertOk(r.execute_command('ft.drop', 'idx', 'KEEPDOCS'))
keys = r.keys('*')
env.assertListEqual(['doc0', 'doc1', 'doc10', 'doc11', 'doc12', 'doc13', 'doc14', 'doc15', 'doc16', 'doc17', 'doc18', 'doc19', 'doc2', 'doc20', 'doc21', 'doc22', 'doc23', 'doc24', 'doc25', 'doc26', 'doc27', 'doc28', 'doc29', 'doc3', 'doc30', 'doc31', 'doc32', 'doc33', 'doc34', 'doc35', 'doc36', 'doc37', 'doc38', 'doc39', 'doc4', 'doc40', 'doc41', 'doc42', 'doc43', 'doc44', 'doc45', 'doc46', 'doc47', 'doc48', 'doc49', 'doc5', 'doc50', 'doc51', 'doc52', 'doc53',
'doc54', 'doc55', 'doc56', 'doc57', 'doc58', 'doc59', 'doc6', 'doc60', 'doc61', 'doc62', 'doc63', 'doc64', 'doc65', 'doc66', 'doc67', 'doc68', 'doc69', 'doc7', 'doc70', 'doc71', 'doc72', 'doc73', 'doc74', 'doc75', 'doc76', 'doc77', 'doc78', 'doc79', 'doc8', 'doc80', 'doc81', 'doc82', 'doc83', 'doc84', 'doc85', 'doc86', 'doc87', 'doc88', 'doc89', 'doc9', 'doc90', 'doc91', 'doc92', 'doc93', 'doc94', 'doc95', 'doc96', 'doc97', 'doc98', 'doc99'], sorted(keys))
env.expect('FT.DROP', 'idx', 'KEEPDOCS', '666').error().contains("wrong number of arguments")
def testCustomStopwords(env):
r = env
# Index with default stopwords
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text'))
# Index with custom stopwords
env.assertOk(r.execute_command('ft.create', 'idx2', 'stopwords', 2, 'hello', 'world',
'schema', 'foo', 'text'))
    if not env.isCluster():
res = env.cmd('ft.info', 'idx2')
env.assertEqual(res[39], ['hello', 'world'])
# Index with NO stopwords
env.assertOk(r.execute_command('ft.create', 'idx3', 'stopwords', 0,
'schema', 'foo', 'text'))
    if not env.isCluster():
res = env.cmd('ft.info', 'idx3')
env.assertEqual(res[39], [])
for idx in ('idx', 'idx2', 'idx3'):
env.assertOk(r.execute_command(
'ft.add', idx, 'doc1', 1.0, 'fields', 'foo', 'hello world'))
env.assertOk(r.execute_command(
'ft.add', idx, 'doc2', 1.0, 'fields', 'foo', 'to be or not to be'))
for _ in r.retry_with_rdb_reload():
# Normal index should return results just for 'hello world'
env.assertEqual([1, 'doc1'], r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent'))
env.assertEqual([0], r.execute_command(
'ft.search', 'idx', 'to be or not', 'nocontent'))
# Custom SW index should return results just for 'to be or not'
env.assertEqual([0], r.execute_command(
'ft.search', 'idx2', 'hello world', 'nocontent'))
env.assertEqual([1, 'doc2'], r.execute_command(
'ft.search', 'idx2', 'to be or not', 'nocontent'))
# No SW index should return results for both
env.assertEqual([1, 'doc1'], r.execute_command(
'ft.search', 'idx3', 'hello world', 'nocontent'))
env.assertEqual([1, 'doc2'], r.execute_command(
'ft.search', 'idx3', 'to be or not', 'nocontent'))
def testStopwords(env):
# This test was taken from Python's tests, and failed due to some changes
# made earlier
env.cmd('ft.create', 'idx', 'stopwords', 3, 'foo',
'bar', 'baz', 'schema', 'txt', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'txt', 'foo bar')
env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields', 'txt', 'hello world')
r1 = env.cmd('ft.search', 'idx', 'foo bar', 'nocontent')
r2 = env.cmd('ft.search', 'idx', 'foo bar hello world', 'nocontent')
env.assertEqual(0, r1[0])
env.assertEqual(1, r2[0])
def testNoStopwords(env):
# This test taken from Java's test suite
env.cmd('ft.create', 'idx', 'schema', 'title', 'text')
for i in range(100):
env.cmd('ft.add', 'idx', 'doc{}'.format(i), 1.0, 'fields',
'title', 'hello world' if i % 2 == 0 else 'hello worlds')
res = env.cmd('ft.search', 'idx', 'hello a world', 'NOCONTENT')
env.assertEqual(100, res[0])
res = env.cmd('ft.search', 'idx', 'hello a world',
'VERBATIM', 'NOCONTENT')
env.assertEqual(50, res[0])
res = env.cmd('ft.search', 'idx', 'hello a world', 'NOSTOPWORDS')
env.assertEqual(0, res[0])
def testOptional(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx',
'doc1', 1.0, 'fields', 'foo', 'hello wat woot'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2',
1.0, 'fields', 'foo', 'hello world woot'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc3',
1.0, 'fields', 'foo', 'hello world werld'))
res = r.execute_command('ft.search', 'idx', 'hello', 'nocontent')
env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent', 'scorer', 'DISMAX')
env.assertEqual([2L, 'doc3', 'doc2'], res)
res = r.execute_command(
'ft.search', 'idx', 'hello ~world', 'nocontent', 'scorer', 'DISMAX')
env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
res = r.execute_command(
'ft.search', 'idx', 'hello ~world ~werld', 'nocontent', 'scorer', 'DISMAX')
env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
res = r.execute_command(
'ft.search', 'idx', '~world ~werld hello', 'nocontent', 'scorer', 'DISMAX')
env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
def testExplain(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text', 'bar', 'numeric', 'sortable'))
q = '(hello world) "what what" hello|world @bar:[10 100]|@bar:[200 300]'
res = r.execute_command('ft.explain', 'idx', q)
# print res.replace('\n', '\\n')
# expected = """INTERSECT {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n EXACT {\n what\n what\n }\n UNION {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n }\n UNION {\n NUMERIC {10.000000 <= @bar <= 100.000000}\n NUMERIC {200.000000 <= @bar <= 300.000000}\n }\n}\n"""
# expected = """INTERSECT {\n UNION {\n hello\n <HL(expanded)\n +hello(expanded)\n }\n UNION {\n world\n <ARLT(expanded)\n +world(expanded)\n }\n EXACT {\n what\n what\n }\n UNION {\n UNION {\n hello\n <HL(expanded)\n +hello(expanded)\n }\n UNION {\n world\n <ARLT(expanded)\n +world(expanded)\n }\n }\n UNION {\n NUMERIC {10.000000 <= @bar <= 100.000000}\n NUMERIC {200.000000 <= @bar <= 300.000000}\n }\n}\n"""
expected = """INTERSECT {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n EXACT {\n what\n what\n }\n UNION {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n }\n UNION {\n NUMERIC {10.000000 <= @bar <= 100.000000}\n NUMERIC {200.000000 <= @bar <= 300.000000}\n }\n}\n"""
env.assertEqual(res, expected)
# expected = ['INTERSECT {', ' UNION {', ' hello', ' <HL(expanded)', ' +hello(expanded)', ' }', ' UNION {', ' world', ' <ARLT(expanded)', ' +world(expanded)', ' }', ' EXACT {', ' what', ' what', ' }', ' UNION {', ' UNION {', ' hello', ' <HL(expanded)', ' +hello(expanded)', ' }', ' UNION {', ' world', ' <ARLT(expanded)', ' +world(expanded)', ' }', ' }', ' UNION {', ' NUMERIC {10.000000 <= @bar <= 100.000000}', ' NUMERIC {200.000000 <= @bar <= 300.000000}', ' }', '}', '']
if env.is_cluster():
raise unittest.SkipTest()
res = env.cmd('ft.explainCli', 'idx', q)
expected = ['INTERSECT {', ' UNION {', ' hello', ' +hello(expanded)', ' }', ' UNION {', ' world', ' +world(expanded)', ' }', ' EXACT {', ' what', ' what', ' }', ' UNION {', ' UNION {', ' hello', ' +hello(expanded)', ' }', ' UNION {', ' world', ' +world(expanded)', ' }', ' }', ' UNION {', ' NUMERIC {10.000000 <= @bar <= 100.000000}', ' NUMERIC {200.000000 <= @bar <= 300.000000}', ' }', '}', '']
env.assertEqual(expected, res)
def testNoIndex(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema',
'foo', 'text',
'num', 'numeric', 'sortable', 'noindex',
'extra', 'text', 'noindex', 'sortable'))
if not env.isCluster():
# to specific check on cluster, todo : change it to be generic enough
res = env.cmd('ft.info', 'idx')
env.assertEqual(res[5][1][4], 'NOINDEX')
env.assertEqual(res[5][2][6], 'NOINDEX')
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'fields',
'foo', 'hello world', 'num', 1, 'extra', 'hello lorem ipsum'))
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent')
env.assertListEqual([1, 'doc1'], res)
res = r.execute_command(
'ft.search', 'idx', 'lorem ipsum', 'nocontent')
env.assertListEqual([0], res)
res = r.execute_command(
'ft.search', 'idx', '@extra:hello', 'nocontent')
env.assertListEqual([0], res)
res = r.execute_command(
'ft.search', 'idx', '@num:[1 1]', 'nocontent')
env.assertListEqual([0], res)
def testPartial(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema',
'foo', 'text',
'num', 'numeric', 'sortable', 'noindex',
'extra', 'text', 'noindex'))
# print r.execute_command('ft.info', 'idx')
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'fields',
'foo', 'hello world', 'num', 1, 'extra', 'lorem ipsum'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', '0.1', 'fields',
'foo', 'hello world', 'num', 2, 'extra', 'abba'))
res = r.execute_command('ft.search', 'idx', 'hello world',
'sortby', 'num', 'asc', 'nocontent', 'withsortkeys')
env.assertListEqual([2L, 'doc1', '#1', 'doc2', '#2'], res)
res = r.execute_command('ft.search', 'idx', 'hello world',
'sortby', 'num', 'desc', 'nocontent', 'withsortkeys')
env.assertListEqual([2L, 'doc2', '#2', 'doc1', '#1'], res)
# Updating non indexed fields doesn't affect search results
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'replace', 'partial',
'fields', 'num', 3, 'extra', 'jorem gipsum'))
env.expect('ft.add', 'idx', 'doc12', '0.1', 'replace', 'partial',
'fields', 'num1', 'redis').equal('OK')
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'sortby', 'num', 'desc',)
assertResultsEqual(env, [2L, 'doc1', ['foo', 'hello world', 'num', '3', 'extra', 'jorem gipsum'],
'doc2', ['foo', 'hello world', 'num', '2', 'extra', 'abba']], res)
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'withscores')
# Updating only indexed field affects search results
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'replace', 'partial',
'fields', 'foo', 'wat wet'))
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent')
env.assertListEqual([1L, 'doc2'], res)
res = r.execute_command('ft.search', 'idx', 'wat', 'nocontent')
env.assertListEqual([1L, 'doc1'], res)
# Test updating of score and no fields
res = r.execute_command(
'ft.search', 'idx', 'wat', 'nocontent', 'withscores')
env.assertLess(float(res[2]), 1)
# env.assertListEqual([1L, 'doc1'], res)
env.assertOk(r.execute_command('ft.add', 'idx',
'doc1', '1.0', 'replace', 'partial', 'fields'))
res = r.execute_command(
'ft.search', 'idx', 'wat', 'nocontent', 'withscores')
env.assertGreater(float(res[2]), 1)
# Test updating payloads
res = r.execute_command(
'ft.search', 'idx', 'wat', 'nocontent', 'withpayloads')
env.assertIsNone(res[2])
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '1.0',
'replace', 'partial', 'payload', 'foobar', 'fields'))
res = r.execute_command(
'ft.search', 'idx', 'wat', 'nocontent', 'withpayloads')
env.assertEqual('foobar', res[2])
def testPaging(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text', 'bar', 'numeric', 'sortable'))
N = 100
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', '%d' % i, 1, 'fields',
'foo', 'hello', 'bar', i))
chunk = 7
offset = 0
while True:
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'sortby', 'bar', 'desc', 'limit', offset, chunk)
env.assertEqual(res[0], N)
if offset + chunk > N:
env.assertTrue(len(res) - 1 <= chunk)
break
env.assertEqual(len(res), chunk + 1)
for n, id in enumerate(res[1:]):
env.assertEqual(int(id), N - 1 - (offset + n))
offset += chunk
chunk = random.randrange(1, 10)
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'sortby', 'bar', 'asc', 'limit', N, 10)
env.assertEqual(res[0], N)
env.assertEqual(len(res), 1)
with env.assertResponseError():
r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, -1)
with env.assertResponseError():
r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', -1, 10)
with env.assertResponseError():
r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 2000000)
def testPrefix(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text'))
N = 100
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'constant term%d' % (random.randrange(0, 5))))
for _ in r.retry_with_rdb_reload():
res = r.execute_command(
'ft.search', 'idx', 'constant term', 'nocontent')
env.assertEqual([0], res)
res = r.execute_command(
'ft.search', 'idx', 'constant term*', 'nocontent')
env.assertEqual(N, res[0])
res = r.execute_command(
'ft.search', 'idx', 'const* term*', 'nocontent')
env.assertEqual(N, res[0])
res = r.execute_command(
'ft.search', 'idx', 'constant term1*', 'nocontent')
env.assertGreater(res[0], 2)
res = r.execute_command(
'ft.search', 'idx', 'const* -term*', 'nocontent')
env.assertEqual([0], res)
res = r.execute_command(
'ft.search', 'idx', 'constant term9*', 'nocontent')
env.assertEqual([0], res)
def testSortBy(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text', 'sortable', 'bar', 'numeric', 'sortable'))
N = 100
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'hello%03d world' % i, 'bar', 100 - i))
for _ in r.retry_with_rdb_reload():
res = r.execute_command(
'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'foo')
env.assertEqual([100L, 'doc0', 'doc1', 'doc2', 'doc3',
'doc4', 'doc5', 'doc6', 'doc7', 'doc8', 'doc9'], res)
res = r.execute_command(
'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'foo', 'desc')
env.assertEqual([100L, 'doc99', 'doc98', 'doc97', 'doc96',
'doc95', 'doc94', 'doc93', 'doc92', 'doc91', 'doc90'], res)
res = r.execute_command(
'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'desc')
env.assertEqual([100L, 'doc0', 'doc1', 'doc2', 'doc3',
'doc4', 'doc5', 'doc6', 'doc7', 'doc8', 'doc9'], res)
res = r.execute_command(
'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'asc')
env.assertEqual([100L, 'doc99', 'doc98', 'doc97', 'doc96',
'doc95', 'doc94', 'doc93', 'doc92', 'doc91', 'doc90'], res)
res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
'sortby', 'bar', 'desc', 'withscores', 'limit', '2', '5')
env.assertEqual(
[100L, 'doc2', '0', 'doc3', '0', 'doc4', '0', 'doc5', '0', 'doc6', '0'], res)
res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
'sortby', 'bar', 'desc', 'withsortkeys', 'limit', 0, 5)
env.assertListEqual(
[100L, 'doc0', '#100', 'doc1', '#99', 'doc2', '#98', 'doc3', '#97', 'doc4', '#96'], res)
res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
'sortby', 'foo', 'desc', 'withsortkeys', 'limit', 0, 5)
env.assertListEqual([100L, 'doc99', '$hello099 world', 'doc98', '$hello098 world', 'doc97', '$hello097 world', 'doc96',
'$hello096 world', 'doc95', '$hello095 world'], res)
def testNot(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text'))
N = 10
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'constant term%d' % (random.randrange(0, 5))))
for i in range(5):
inclusive = r.execute_command(
'ft.search', 'idx', 'constant term%d' % i, 'nocontent', 'limit', 0, N)
exclusive = r.execute_command(
'ft.search', 'idx', 'constant -term%d' % i, 'nocontent', 'limit', 0, N)
exclusive2 = r.execute_command(
'ft.search', 'idx', '-(term%d)' % i, 'nocontent', 'limit', 0, N)
exclusive3 = r.execute_command(
'ft.search', 'idx', '(-term%d) (constant)' % i, 'nocontent', 'limit', 0, N)
env.assertNotEqual(inclusive[0], N)
env.assertEqual(inclusive[0] + exclusive[0], N)
env.assertEqual(exclusive3[0], exclusive2[0])
env.assertEqual(exclusive3[0], exclusive[0])
s1, s2, s3, s4 = set(inclusive[1:]), set(
exclusive[1:]), set(exclusive2[1:]), set(exclusive3[1:])
env.assertTrue(s1.difference(s2) == s1)
env.assertTrue(s1.difference(s3) == s1)
env.assertTrue(s1.difference(s4) == s1)
env.assertTrue(s2 == s3)
env.assertTrue(s2 == s4)
env.assertTrue(s2.intersection(s1) == set())
env.assertTrue(s3.intersection(s1) == set())
env.assertTrue(s4.intersection(s1) == set())
# NOT on a non existing term
env.assertEqual(r.execute_command(
'ft.search', 'idx', 'constant -dasdfasdf', 'nocontent')[0], N)
    # NOT on the same term
env.assertEqual(r.execute_command(
'ft.search', 'idx', 'constant -constant', 'nocontent'), [0])
env.assertEqual(r.execute_command(
'ft.search', 'idx', 'constant -(term0|term1|term2|term3|term4|nothing)', 'nocontent'), [0])
# env.assertEqual(r.execute_command('ft.search', 'idx', 'constant -(term1 term2)', 'nocontent')[0], N)
def testNestedIntersection(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'a', 'text', 'b', 'text', 'c', 'text', 'd', 'text'))
for i in range(20):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'a', 'foo', 'b', 'bar', 'c', 'baz', 'd', 'gaz'))
res = [
r.execute_command('ft.search', 'idx',
'foo bar baz gaz', 'nocontent'),
r.execute_command('ft.search', 'idx',
'@a:foo @b:bar @c:baz @d:gaz', 'nocontent'),
r.execute_command('ft.search', 'idx',
'@b:bar @a:foo @c:baz @d:gaz', 'nocontent'),
r.execute_command('ft.search', 'idx',
'@c:baz @b:bar @a:foo @d:gaz', 'nocontent'),
r.execute_command('ft.search', 'idx',
'@d:gaz @c:baz @b:bar @a:foo', 'nocontent'),
r.execute_command(
'ft.search', 'idx', '@a:foo (@b:bar (@c:baz @d:gaz))', 'nocontent'),
r.execute_command(
'ft.search', 'idx', '@c:baz (@a:foo (@b:bar (@c:baz @d:gaz)))', 'nocontent'),
r.execute_command(
'ft.search', 'idx', '@b:bar (@a:foo (@c:baz @d:gaz))', 'nocontent'),
r.execute_command(
'ft.search', 'idx', '@d:gaz (@a:foo (@c:baz @b:bar))', 'nocontent'),
r.execute_command('ft.search', 'idx',
'foo (bar baz gaz)', 'nocontent'),
r.execute_command('ft.search', 'idx',
'foo (bar (baz gaz))', 'nocontent'),
r.execute_command('ft.search', 'idx',
'foo (bar (foo bar) (foo bar))', 'nocontent'),
r.execute_command('ft.search', 'idx',
'foo (foo (bar baz (gaz)))', 'nocontent'),
r.execute_command('ft.search', 'idx', 'foo (foo (bar (baz (gaz (foo bar (gaz))))))', 'nocontent')]
for i, r in enumerate(res):
# print i, res[0], r
env.assertListEqual(res[0], r)
def testInKeys(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text'))
for i in range(200):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'hello world'))
for _ in r.retry_with_rdb_reload():
for keys in (
['doc%d' % i for i in range(10)], ['doc%d' % i for i in range(0, 30, 2)], [
'doc%d' % i for i in range(99, 0, -5)]
):
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'NOCONTENT', 'LIMIT', 0, 100, 'INKEYS', len(keys), *keys)
env.assertEqual(len(keys), res[0])
env.assertTrue(all((k in res for k in keys)))
env.assertEqual(0, r.execute_command(
'ft.search', 'idx', 'hello world', 'NOCONTENT', 'LIMIT', 0, 100, 'INKEYS', 3, 'foo', 'bar', 'baz')[0])
with env.assertResponseError():
env.cmd('ft.search', 'idx', 'hello', 'INKEYS', 99)
with env.assertResponseError():
env.cmd('ft.search', 'idx', 'hello', 'INKEYS', -1)
with env.assertResponseError():
env.cmd('ft.search', 'idx', 'hello', 'inkeys', 4, 'foo')
def testSlopInOrder(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1, 'fields',
'title', 't1 t2'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1, 'fields',
'title', 't1 t3 t2'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc3', 1, 'fields',
'title', 't1 t3 t4 t2'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc4', 1, 'fields',
'title', 't1 t3 t4 t5 t2'))
res = r.execute_command(
'ft.search', 'idx', 't1|t4 t3|t2', 'slop', '0', 'inorder', 'nocontent')
env.assertEqual({'doc3', 'doc4', 'doc2', 'doc1'}, set(res[1:]))
res = r.execute_command(
'ft.search', 'idx', 't2 t1', 'slop', '0', 'nocontent')
env.assertEqual(1, res[0])
env.assertEqual('doc1', res[1])
env.assertEqual(0, r.execute_command(
'ft.search', 'idx', 't2 t1', 'slop', '0', 'inorder')[0])
env.assertEqual(1, r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '0', 'inorder')[0])
env.assertEqual(2, r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '1', 'inorder')[0])
env.assertEqual(3, r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '2', 'inorder')[0])
env.assertEqual(4, r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '3', 'inorder')[0])
env.assertEqual(4, r.execute_command(
'ft.search', 'idx', 't1 t2', 'inorder')[0])
env.assertEqual(0, r.execute_command(
'ft.search', 'idx', 't t1', 'inorder')[0])
env.assertEqual(2, r.execute_command(
'ft.search', 'idx', 't1 t2 t3 t4')[0])
env.assertEqual(0, r.execute_command(
'ft.search', 'idx', 't1 t2 t3 t4', 'inorder')[0])
def testExact(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text', 'weight', 10.0, 'body', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello world',
'body', 'lorem ist ipsum'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'title', 'hello another world',
'body', 'lorem ist ipsum lorem lorem'))
res = r.execute_command(
'ft.search', 'idx', '"hello world"', 'verbatim')
env.assertEqual(3, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc1", res[1])
res = r.execute_command(
'ft.search', 'idx', "hello \"another world\"", 'verbatim')
env.assertEqual(3, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
def testGeoErrors(env):
env.expect('flushall')
env.expect('ft.create idx schema name text location geo').equal('OK')
env.expect('ft.add idx hotel 1.0 fields name hill location -0.1757,51.5156').equal('OK')
env.expect('ft.search idx hilton geofilter location -0.1757 51.5156 1 km').equal([0L])
# Insert error
env.expect('ft.add', 'idx', 'hotel1', 1, 'fields', 'name', '_hotel1', 'location', '1, 1').error() \
.contains('Could not index geo value')
# Query errors
env.expect('ft.search idx hilton geofilter location lon 51.5156 1 km').error() \
.contains('Bad arguments for <lon>: Could not convert argument to expected type')
env.expect('ft.search idx hilton geofilter location 51.5156 lat 1 km').error() \
.contains('Bad arguments for <lat>: Could not convert argument to expected type')
env.expect('ft.search idx hilton geofilter location -0.1757 51.5156 radius km').error() \
.contains('Bad arguments for <radius>: Could not convert argument to expected type')
env.expect('ft.search idx hilton geofilter location -0.1757 51.5156 1 fake').error() \
.contains('Unknown distance unit fake')
env.expect('ft.search idx hilton geofilter location -0.1757 51.5156 1').error() \
.contains('GEOFILTER requires 5 arguments')
if not env.isCluster():
env.expect('flushall')
env.expect('set geo:idx/location foo').equal('OK')
env.expect('ft.create idx schema name text location geo').equal('OK')
env.expect('ft.add idx hotel 1.0 fields name hill location -0.1757,51.5156').error() \
.contains('Could not index geo value')
def testGeo(env):
r = env
gsearch = lambda query, lon, lat, dist, unit='km': r.execute_command(
'ft.search', 'idx', query, 'geofilter', 'location', lon, lat, dist, unit)
gsearch_inline = lambda query, lon, lat, dist, unit='km': r.execute_command(
'ft.search', 'idx', '{} @location:[{} {} {} {}]'.format(query, lon, lat, dist, unit))
env.assertOk(r.execute_command('ft.create', 'idx',
'schema', 'name', 'text', 'location', 'geo'))
for i, hotel in enumerate(hotels):
env.assertOk(r.execute_command('ft.add', 'idx', 'hotel{}'.format(i), 1.0, 'fields', 'name',
hotel[0], 'location', '{},{}'.format(hotel[2], hotel[1])))
for _ in r.retry_with_rdb_reload():
res = r.execute_command('ft.search', 'idx', 'hilton')
env.assertEqual(len(hotels), res[0])
res = gsearch('hilton', "-0.1757", "51.5156", '1')
print res
env.assertEqual(3, res[0])
env.assertEqual('hotel2', res[5])
env.assertEqual('hotel21', res[3])
env.assertEqual('hotel79', res[1])
res2 = gsearch_inline('hilton', "-0.1757", "51.5156", '1')
env.assertListEqual(res, res2)
res = gsearch('hilton', "-0.1757", "51.5156", '10')
env.assertEqual(14, res[0])
env.assertEqual('hotel93', res[1])
env.assertEqual('hotel92', res[3])
env.assertEqual('hotel79', res[5])
res2 = gsearch('hilton', "-0.1757", "51.5156", '10000', 'm')
env.assertListEqual(res, res2)
res2 = gsearch_inline('hilton', "-0.1757", "51.5156", '10')
env.assertListEqual(res, res2)
res = gsearch('heathrow', -0.44155, 51.45865, '10', 'm')
env.assertEqual(1, res[0])
env.assertEqual('hotel94', res[1])
res2 = gsearch_inline(
'heathrow', -0.44155, 51.45865, '10', 'm')
env.assertListEqual(res, res2)
res = gsearch('heathrow', -0.44155, 51.45865, '10', 'km')
env.assertEqual(5, res[0])
env.assertIn('hotel94', res)
res2 = gsearch_inline(
'heathrow', -0.44155, 51.45865, '10', 'km')
env.assertListEqual(res, res2)
res = gsearch('heathrow', -0.44155, 51.45865, '5', 'km')
env.assertEqual(3, res[0])
env.assertIn('hotel94', res)
res2 = gsearch_inline(
'heathrow', -0.44155, 51.45865, '5', 'km')
env.assertListEqual(res, res2)
def testTagErrors(env):
env.expect("ft.create", "test", "SCHEMA", "tags", "TAG").equal('OK')
env.expect("ft.add", "test", "1", "1", "FIELDS", "tags", "alberta").equal('OK')
env.expect("ft.add", "test", "2", "1", "FIELDS", "tags", "ontario. alberta").equal('OK')
def testGeoDeletion(env):
    if env.is_cluster():
        # Can't properly test if deleted on cluster
        raise unittest.SkipTest()
env.cmd('ft.create', 'idx', 'schema',
'g1', 'geo', 'g2', 'geo', 't1', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields',
'g1', "-0.1757,51.5156",
'g2', "-0.1757,51.5156",
't1', "hello")
env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields',
'g1', "-0.1757,51.5156",
'g2', "-0.1757,51.5156",
't1', "hello")
# keys are: "geo:idx/g1" and "geo:idx/g2"
env.assertEqual(2, env.cmd('zcard', 'geo:idx/g1'))
env.assertEqual(2, env.cmd('zcard', 'geo:idx/g2'))
# Remove the first doc
env.cmd('ft.del', 'idx', 'doc1')
env.assertEqual(1, env.cmd('zcard', 'geo:idx/g1'))
env.assertEqual(1, env.cmd('zcard', 'geo:idx/g2'))
# Replace the other one:
env.cmd('ft.add', 'idx', 'doc2', 1.0,
'replace', 'fields',
't1', 'just text here')
env.assertEqual(0, env.cmd('zcard', 'geo:idx/g1'))
env.assertEqual(0, env.cmd('zcard', 'geo:idx/g2'))
def testAddHash(env):
if env.is_cluster():
raise unittest.SkipTest()
r = env
env.assertOk(r.execute_command('ft.create', 'idx', 'schema',
'title', 'text', 'weight', 10.0, 'body', 'text', 'price', 'numeric'))
env.assertTrue(
r.hmset('doc1', {"title": "hello world", "body": "lorem ipsum", "price": 2}))
env.assertTrue(
r.hmset('doc2', {"title": "hello werld", "body": "lorem ipsum", "price": 5}))
env.assertOk(r.execute_command('ft.addhash', 'idx', 'doc1', 1.0))
env.assertOk(r.execute_command('ft.addhash', 'idx', 'doc2', 1.0))
env.expect('ft.addhash', 'idx', 'doc3', 1.0, 1.0).error().contains('Unknown keyword: `1.0`')
res = r.execute_command('ft.search', 'idx', "hello", "nocontent")
env.assertEqual(3, len(res))
env.assertEqual(2, res[0])
env.assertEqual("doc1", res[2])
env.assertEqual("doc2", res[1])
res = r.execute_command(
'ft.search', 'idx',
"hello",
"filter", "price", "0", "3"
)
env.assertEqual(3, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc1", res[1])
env.assertListEqual(
['body', 'lorem ipsum', 'price', '2', 'title', 'hello world'], res[2])
res = r.execute_command(
'ft.search', 'idx', "hello werld", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
def testSafeAddHash(env):
if env.is_cluster():
raise unittest.SkipTest()
r = env
env.assertOk(r.execute_command('ft.create', 'idx', 'schema',
'title', 'text', 'weight', 10.0, 'body', 'text', 'price', 'numeric'))
env.assertTrue(
r.hmset('doc1', {"title": "hello world", "body": "lorem ipsum", "price": 2}))
env.assertTrue(
r.hmset('doc2', {"title": "hello werld", "body": "lorem ipsum", "price": 5}))
env.expect('ft.safeaddhash idx doc1 1.0').equal('OK')
env.expect('ft.safeaddhash idx doc2 1.0').equal('OK')
env.expect('ft.safeaddhash idx').error().contains("wrong number of arguments for 'ft.safeaddhash' command")
env.expect('ft.safeaddhash idx doc3 2.0').error().contains('Score must be between 0 and 1')
env.expect('ft.safeaddhash idx doc3 -2.0').error().contains('Score must be between 0 and 1')
env.expect('ft.safeaddhash idx doc3 1.0 1.0').error().contains('Unknown keyword: `1.0`')
env.expect('ft.safeaddhash idx doc3 not_a_number').error().contains('Could not parse document score')
env.expect('ft.safeaddhash idx doc3 1.0 LANGUAGE RediSearch').error().contains('Unknown language: `RediSearch`')
env.expect('ft.safeaddhash idx doc3 1.0 LANGUAGE RediSearch not_an_arg').error().contains("Unknown keyword: `not_an_arg`")
#env.expect('ft.safeaddhash', 'idx', 'doc3', '1.0', 'LANGUAGE', 'RediSearch, ""').error().contains("Error parsing arguments for `%s`: %s")
env.expect('ft.safeaddhash not_idx doc3 1.0').error().contains('Unknown Index name')
res = r.execute_command('ft.search', 'idx', "hello", "nocontent")
env.assertEqual(3, len(res))
env.assertEqual(2, res[0])
env.assertEqual("doc1", res[2])
env.assertEqual("doc2", res[1])
res = r.execute_command(
'ft.search', 'idx',
"hello",
"filter", "price", "0", "3"
)
env.assertEqual(3, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc1", res[1])
env.assertListEqual(
['body', 'lorem ipsum', 'price', '2', 'title', 'hello world'], res[2])
res = r.execute_command(
'ft.search', 'idx', "hello werld", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
def testInfields(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text', 'weight', 10.0, 'body', 'text', 'weight', 1.0))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello world',
'body', 'lorem ipsum'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'title', 'hello world lorem ipsum',
'body', 'hello world'))
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'verbatim', "infields", 1, "title", "nocontent")
env.assertEqual(3, len(res))
env.assertEqual(2, res[0])
env.assertEqual("doc2", res[1])
env.assertEqual("doc1", res[2])
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'verbatim', "infields", 1, "body", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
res = r.execute_command(
'ft.search', 'idx', 'hello', 'verbatim', "infields", 1, "body", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
res = r.execute_command(
'ft.search', 'idx', '\"hello world\"', 'verbatim', "infields", 1, "body", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
res = r.execute_command(
'ft.search', 'idx', '\"lorem ipsum\"', 'verbatim', "infields", 1, "body", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc1", res[1])
res = r.execute_command(
'ft.search', 'idx', 'lorem ipsum', "infields", 2, "body", "title", "nocontent")
env.assertEqual(3, len(res))
env.assertEqual(2, res[0])
env.assertEqual("doc2", res[1])
env.assertEqual("doc1", res[2])
def testScorerSelection(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text', 'body', 'text'))
# this is the default scorer
res = r.execute_command(
'ft.search', 'idx', 'foo', 'scorer', 'TFIDF')
env.assertEqual(res, [0])
with env.assertResponseError():
res = r.execute_command(
'ft.search', 'idx', 'foo', 'scorer', 'NOSUCHSCORER')
def testFieldSelectors(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'TiTle', 'text', 'BoDy', 'text', "יוניקוד", 'text', 'field.with,punct', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1, 'fields',
'title', 'hello world', 'body', 'foo bar', 'יוניקוד', 'unicode', 'field.with,punct', 'punt'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 0.5, 'fields',
'body', 'hello world', 'title', 'foo bar', 'יוניקוד', 'unicode', 'field.with,punct', 'punt'))
res = r.execute_command(
'ft.search', 'idx', '@title:hello world', 'nocontent')
env.assertEqual(res, [1, 'doc1'])
res = r.execute_command(
'ft.search', 'idx', '@body:hello world', 'nocontent')
env.assertEqual(res, [1, 'doc2'])
res = r.execute_command(
'ft.search', 'idx', '@body:hello @title:world', 'nocontent')
env.assertEqual(res, [0])
res = r.execute_command(
'ft.search', 'idx', '@body:hello world @title:world', 'nocontent')
env.assertEqual(res, [0])
res = r.execute_command(
'ft.search', 'idx', '@BoDy:(hello|foo) @Title:(world|bar)', 'nocontent')
env.assertEqual(res, [2, 'doc1', 'doc2'])
res = r.execute_command(
'ft.search', 'idx', '@body:(hello|foo world|bar)', 'nocontent')
env.assertEqual(res, [2, 'doc1', 'doc2'])
res = r.execute_command(
'ft.search', 'idx', '@body|title:(hello world)', 'nocontent')
env.assertEqual(res, [2, 'doc1', 'doc2'])
res = r.execute_command(
'ft.search', 'idx', '@יוניקוד:(unicode)', 'nocontent')
env.assertEqual(res, [2, 'doc1', 'doc2'])
res = r.execute_command(
'ft.search', 'idx', '@field\\.with\\,punct:(punt)', 'nocontent')
env.assertEqual(res, [2, 'doc1', 'doc2'])
def testStemming(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello kitty'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'title', 'hello kitties'))
res = r.execute_command(
'ft.search', 'idx', 'hello kitty', "nocontent")
env.assertEqual(3, len(res))
env.assertEqual(2, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty', "nocontent", "verbatim")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
# test for unknown language
with env.assertResponseError():
res = r.execute_command(
'ft.search', 'idx', 'hello kitty', "nocontent", "language", "foofoofian")
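# EXPANDER picks the query-expansion module. SBSTEM is the stemming expander
# used below; passing 'noexpander' leaves the query unexpanded, so it returns
# no results here, while an escaped '+' prefix queries a stem directly even
# under VERBATIM.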
def testExpander(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello kitty'))
res = r.execute_command(
'ft.search', 'idx', 'kitties',
"nocontent",
"expander", "SBSTEM"
)
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
res = r.execute_command(
'ft.search', 'idx', 'kitties', "nocontent", "expander", "noexpander")
env.assertEqual(1, len(res))
env.assertEqual(0, res[0])
res = r.execute_command(
'ft.search', 'idx', 'kitti', "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
res = r.execute_command(
'ft.search', 'idx', 'kitti', "nocontent", 'verbatim')
env.assertEqual(1, len(res))
env.assertEqual(0, res[0])
# Calling a stem directly works even with VERBATIM.
    # You need to use the escaped '+' prefix
res = r.execute_command(
'ft.search', 'idx', '\\+kitti', "nocontent", 'verbatim')
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
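# Numeric filters can be supplied as FILTER <field> <min> <max> arguments or
# inline as @field:[min max]; a '(' prefix makes a bound exclusive and
# -inf/+inf act as open bounds, as the assertions below demonstrate.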
def testNumericRange(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text', 'score', 'numeric', 'price', 'numeric'))
env.expect('ft.search', 'idx', 'hello kitty', 'filter', 'score', 5).error().contains("FILTER requires 3 arguments")
env.expect('ft.search', 'idx', 'hello kitty', 'filter', 'score', 5, 'inf').error().contains("Bad upper range: inf")
env.expect('ft.search', 'idx', 'hello kitty', 'filter', 'score', 'inf', 5).error().contains("Bad lower range: inf")
for i in xrange(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1, 'fields',
'title', 'hello kitty', 'score', i, 'price', 100 + 10 * i))
for _ in r.retry_with_rdb_reload():
res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent",
"filter", "score", 0, 100)
env.assertEqual(11, len(res))
env.assertEqual(100, res[0])
res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent",
"filter", "score", 0, 50)
env.assertEqual(51, res[0])
res = r.execute_command('ft.search', 'idx', 'hello kitty', 'verbatim', "nocontent", "limit", 0, 100,
"filter", "score", "(0", "(50")
env.assertEqual(49, res[0])
res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent",
"filter", "score", "-inf", "+inf")
env.assertEqual(100, res[0])
# test multi filters
scrange = (19, 90)
prrange = (290, 385)
res = r.execute_command('ft.search', 'idx', 'hello kitty',
"filter", "score", scrange[
0], scrange[1],
"filter", "price", prrange[0], prrange[1])
# print res
for doc in res[2::2]:
sc = int(doc[doc.index('score') + 1])
pr = int(doc[doc.index('price') + 1])
env.assertTrue(sc >= scrange[0] and sc <= scrange[1])
env.assertGreaterEqual(pr, prrange[0])
env.assertLessEqual(pr, prrange[1])
env.assertEqual(10, res[0])
res = r.execute_command('ft.search', 'idx', 'hello kitty',
"filter", "score", "19", "90",
"filter", "price", "90", "185")
env.assertEqual(0, res[0])
# Test numeric ranges as part of query syntax
res = r.execute_command(
'ft.search', 'idx', 'hello kitty @score:[0 100]', "nocontent")
env.assertEqual(11, len(res))
env.assertEqual(100, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty @score:[0 50]', "nocontent")
env.assertEqual(51, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty @score:[(0 (50]', 'verbatim', "nocontent")
env.assertEqual(49, res[0])
res = r.execute_command(
'ft.search', 'idx', '@score:[(0 (50]', 'verbatim', "nocontent")
env.assertEqual(49, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty -@score:[(0 (50]', 'verbatim', "nocontent")
env.assertEqual(51, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty @score:[-inf +inf]', "nocontent")
env.assertEqual(100, res[0])
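# Autocomplete dictionary: FT.SUGADD returns the dictionary size after the
# insert, FT.SUGGET returns matching entries (optionally FUZZY, capped with
# MAX and annotated via WITHSCORES), and FT.SUGDEL reports whether the
# deleted entry existed.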
def testSuggestions(env):
r = env
env.assertEqual(1, r.execute_command(
'ft.SUGADD', 'ac', 'hello world', 1))
env.assertEqual(1, r.execute_command(
'ft.SUGADD', 'ac', 'hello world', 1, 'INCR'))
res = r.execute_command("FT.SUGGET", "ac", "hello")
env.assertEqual(1, len(res))
env.assertEqual("hello world", res[0])
terms = ["hello werld", "hallo world",
"yellow world", "wazzup", "herp", "derp"]
sz = 2
for term in terms:
env.assertEqual(sz, r.execute_command(
'ft.SUGADD', 'ac', term, sz - 1))
sz += 1
for _ in r.retry_with_rdb_reload():
env.assertEqual(7, r.execute_command('ft.SUGLEN', 'ac'))
# search not fuzzy
env.assertEqual(["hello world", "hello werld"],
r.execute_command("ft.SUGGET", "ac", "hello"))
# print r.execute_command("ft.SUGGET", "ac", "hello", "FUZZY", "MAX", "1", "WITHSCORES")
        # search fuzzy - should yield more results
env.assertEqual(['hello world', 'hello werld', 'yellow world', 'hallo world'],
r.execute_command("ft.SUGGET", "ac", "hello", "FUZZY"))
# search fuzzy with limit of 1
env.assertEqual(['hello world'],
r.execute_command("ft.SUGGET", "ac", "hello", "FUZZY", "MAX", "1"))
        # scores should be returned when WITHSCORES is given
rc = r.execute_command(
"ft.SUGGET", "ac", "hello", "WITHSCORES")
env.assertEqual(4, len(rc))
env.assertTrue(float(rc[1]) > 0)
env.assertTrue(float(rc[3]) > 0)
rc = r.execute_command("ft.SUGDEL", "ac", "hello world")
env.assertEqual(1L, rc)
rc = r.execute_command("ft.SUGDEL", "ac", "world")
env.assertEqual(0L, rc)
rc = r.execute_command("ft.SUGGET", "ac", "hello")
env.assertEqual(['hello werld'], rc)
def testSuggestErrors(env):
env.expect('ft.SUGADD ac olah 1').equal(1)
env.expect('ft.SUGADD ac olah 1 INCR').equal(1)
env.expect('ft.SUGADD ac missing').error().contains("wrong number of arguments")
env.expect('ft.SUGADD ac olah not_a_number').error().contains("invalid score")
env.expect('ft.SUGADD ac olah 1 PAYLOAD').error().contains('Invalid payload: Expected an argument, but none provided')
env.expect('ft.SUGADD ac olah 1 REDIS PAYLOAD payload').error().contains('Unknown argument `REDIS`')
env.expect('ft.SUGGET ac olah FUZZ').error().contains("Unrecognized argument: FUZZ")
query = 'verylongquery'
for _ in range(3):
query += query
env.expect('ft.SUGGET ac', query).error().contains("Invalid query")
env.expect('ft.SUGGET ac', query + query).error().contains("Invalid query length")
def testSuggestPayload(env):
r = env
env.assertEqual(1, r.execute_command(
'ft.SUGADD', 'ac', 'hello world', 1, 'PAYLOAD', 'foo'))
env.assertEqual(2, r.execute_command(
'ft.SUGADD', 'ac', 'hello werld', 1, 'PAYLOAD', 'bar'))
env.assertEqual(3, r.execute_command(
'ft.SUGADD', 'ac', 'hello nopayload', 1, 'PAYLOAD', ''))
env.assertEqual(4, r.execute_command(
'ft.SUGADD', 'ac', 'hello nopayload2', 1))
res = r.execute_command("FT.SUGGET", "ac", "hello", 'WITHPAYLOADS')
env.assertListEqual(['hello world', 'foo', 'hello werld', 'bar', 'hello nopayload', None, 'hello nopayload2', None],
res)
res = r.execute_command("FT.SUGGET", "ac", "hello")
env.assertListEqual(['hello world', 'hello werld', 'hello nopayload', 'hello nopayload2'],
res)
res = r.execute_command(
"FT.SUGGET", "ac", "hello", 'WITHPAYLOADS', 'WITHSCORES')
    # we don't compare the scores because they may change
env.assertEqual(12, len(res))
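# Documents may carry an opaque payload set via the PAYLOAD argument of
# FT.ADD; it is only included in replies when the query specifies
# WITHPAYLOADS, interleaved with the id/fields entries as checked below.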
def testPayload(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'f', 'text'))
for i in range(10):
env.assertOk(r.execute_command('ft.add', 'idx', '%d' % i, 1.0,
'payload', 'payload %d' % i,
'fields', 'f', 'hello world'))
for x in r.retry_with_rdb_reload():
res = r.execute_command(
'ft.search', 'idx', 'hello world')
env.assertEqual(21, len(res))
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'withpayloads')
env.assertEqual(31, len(res))
env.assertEqual(10, res[0])
for i in range(1, 30, 3):
env.assertEqual(res[i + 1], 'payload %s' % res[i])
def testGarbageCollector(env):
env.skipOnCluster()
if env.moduleArgs is not None and 'GC_POLICY FORK' in env.moduleArgs[0]:
        # this test is not relevant for the fork GC because it does not clean the last block
raise unittest.SkipTest()
N = 100
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text'))
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0,
'fields', 'foo', ' '.join(('term%d' % random.randrange(0, 10) for i in range(10)))))
def get_stats(r):
res = r.execute_command('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
gc_stats = {d['gc_stats'][x]: float(
d['gc_stats'][x + 1]) for x in range(0, len(d['gc_stats']), 2)}
d['gc_stats'] = gc_stats
return d
stats = get_stats(r)
if 'current_hz' in stats['gc_stats']:
env.assertGreater(stats['gc_stats']['current_hz'], 8)
env.assertEqual(0, stats['gc_stats']['bytes_collected'])
env.assertGreater(int(stats['num_records']), 0)
initialIndexSize = float(stats['inverted_sz_mb']) * 1024 * 1024
for i in range(N):
env.assertEqual(1, r.execute_command(
'ft.del', 'idx', 'doc%d' % i))
for _ in range(100):
        # GC invocation is randomized, so run it enough times for the cleanup to happen
env.cmd('ft.debug', 'GC_FORCEINVOKE', 'idx')
stats = get_stats(r)
env.assertEqual(0, int(stats['num_docs']))
env.assertEqual(0, int(stats['num_records']))
if not env.is_cluster():
env.assertEqual(100, int(stats['max_doc_id']))
if 'current_hz' in stats['gc_stats']:
env.assertGreater(stats['gc_stats']['current_hz'], 30)
currentIndexSize = float(stats['inverted_sz_mb']) * 1024 * 1024
# print initialIndexSize, currentIndexSize,
# stats['gc_stats']['bytes_collected']
env.assertGreater(initialIndexSize, currentIndexSize)
env.assertGreater(stats['gc_stats'][
'bytes_collected'], currentIndexSize)
for i in range(10):
res = r.execute_command('ft.search', 'idx', 'term%d' % i)
env.assertEqual([0], res)
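# RETURN <n> <fields...> limits which fields are included with each hit:
# RETURN 0 returns document ids only, and asking for a non-existent field
# should not fail the query (only a wrong field count is an error).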
def testReturning(env):
env.assertCmdOk('ft.create', 'idx', 'schema',
'f1', 'text',
'f2', 'text',
'n1', 'numeric', 'sortable',
'f3', 'text')
for i in range(10):
env.assertCmdOk('ft.add', 'idx', 'DOC_{0}'.format(i), 1.0, 'fields',
'f2', 'val2', 'f1', 'val1', 'f3', 'val3',
'n1', i)
# RETURN 0. Simplest case
for x in env.retry_with_reload():
res = env.cmd('ft.search', 'idx', 'val*', 'return', '0')
env.assertEqual(11, len(res))
env.assertEqual(10, res[0])
for r in res[1:]:
env.assertTrue(r.startswith('DOC_'))
for field in ('f1', 'f2', 'f3', 'n1'):
res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, field)
env.assertEqual(21, len(res))
env.assertEqual(10, res[0])
for pair in grouper(res[1:], 2):
docname, fields = pair
env.assertEqual(2, len(fields))
env.assertEqual(field, fields[0])
env.assertTrue(docname.startswith('DOC_'))
# Test that we don't return SORTBY fields if they weren't specified
# also in RETURN
res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, 'f1',
'sortby', 'n1', 'ASC')
row = res[2]
# get the first result
env.assertEqual(['f1', 'val1'], row)
# Test when field is not found
res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, 'nonexist')
env.assertEqual(21, len(res))
env.assertEqual(10, res[0])
    # Test that we don't crash if we're given the wrong number of fields
with env.assertResponseError():
res = env.cmd('ft.search', 'idx', 'val*', 'return', 700, 'nonexist')
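# Helper for testCreationOptions: rebuilds the index with each combination of
# the NOOFFSETS / NOFIELDS / NOFREQS creation flags and checks their effect on
# ranking (term frequencies) and on @field filtering.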
def _test_create_options_real(env, *options):
options = [x for x in options if x]
has_offsets = 'NOOFFSETS' not in options
has_fields = 'NOFIELDS' not in options
has_freqs = 'NOFREQS' not in options
try:
env.cmd('ft.drop', 'idx')
except:
pass
options = ['idx'] + options + ['schema', 'f1', 'text', 'f2', 'text']
env.assertCmdOk('ft.create', *options)
for i in range(10):
env.assertCmdOk('ft.add', 'idx', 'doc{}'.format(
i), 0.5, 'fields', 'f1', 'value for {}'.format(i))
# Query
# res = env.cmd('ft.search', 'idx', "value for 3")
# if not has_offsets:
# env.assertIsNone(res)
# else:
# env.assertIsNotNone(res)
# Frequencies:
env.assertCmdOk('ft.add', 'idx', 'doc100',
1.0, 'fields', 'f1', 'foo bar')
env.assertCmdOk('ft.add', 'idx', 'doc200', 1.0,
'fields', 'f1', ('foo ' * 10) + ' bar')
res = env.cmd('ft.search', 'idx', 'foo')
env.assertEqual(2, res[0])
if has_offsets:
docname = res[1]
if has_freqs:
env.assertEqual('doc200', docname)
else:
env.assertEqual('doc100', docname)
env.assertCmdOk('ft.add', 'idx', 'doc300',
1.0, 'fields', 'f1', 'Hello')
res = env.cmd('ft.search', 'idx', '@f2:Hello')
if has_fields:
env.assertEqual(1, len(res))
else:
env.assertEqual(3, len(res))
def testCreationOptions(env):
from itertools import combinations
for x in range(1, 5):
for combo in combinations(('NOOFFSETS', 'NOFREQS', 'NOFIELDS', ''), x):
_test_create_options_real(env, *combo)
env.expect('ft.create', 'idx').error()
def testInfoCommand(env):
from itertools import combinations
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'NOFIELDS', 'schema', 'title', 'text'))
N = 50
for i in xrange(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1, 'replace', 'fields',
'title', 'hello term%d' % i))
for _ in r.retry_with_rdb_reload():
res = r.execute_command('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(d['index_name'], 'idx')
env.assertEqual(d['index_options'], ['NOFIELDS'])
env.assertListEqual(
d['fields'], [['title', 'type', 'TEXT', 'WEIGHT', '1']])
if not env.is_cluster():
env.assertEquals(int(d['num_docs']), N)
env.assertEquals(int(d['num_terms']), N + 1)
env.assertEquals(int(d['max_doc_id']), N)
env.assertEquals(int(d['records_per_doc_avg']), 2)
env.assertEquals(int(d['num_records']), N * 2)
env.assertGreater(float(d['offset_vectors_sz_mb']), 0)
env.assertGreater(float(d['key_table_size_mb']), 0)
env.assertGreater(float(d['inverted_sz_mb']), 0)
env.assertGreater(float(d['bytes_per_record_avg']), 0)
env.assertGreater(float(d['doc_table_size_mb']), 0)
for x in range(1, 5):
for combo in combinations(('NOOFFSETS', 'NOFREQS', 'NOFIELDS', ''), x):
combo = list(filter(None, combo))
options = combo + ['schema', 'f1', 'text']
try:
env.cmd('ft.drop', 'idx')
except:
pass
env.assertCmdOk('ft.create', 'idx', *options)
info = env.cmd('ft.info', 'idx')
ix = info.index('index_options')
env.assertFalse(ix == -1)
opts = info[ix + 1]
# make sure that an empty opts string returns no options in
# info
if not combo:
env.assertListEqual([], opts)
for option in filter(None, combo):
env.assertTrue(option in opts)
def testNoStem(env):
env.cmd('ft.create', 'idx', 'schema', 'body',
'text', 'name', 'text', 'nostem')
if not env.isCluster():
        # todo: make this check more generic so it also passes on cluster
res = env.cmd('ft.info', 'idx')
env.assertEqual(res[5][1][5], 'NOSTEM')
for _ in env.retry_with_reload():
try:
env.cmd('ft.del', 'idx', 'doc')
except redis.ResponseError:
pass
# Insert a document
env.assertCmdOk('ft.add', 'idx', 'doc', 1.0, 'fields',
'body', "located",
'name', "located")
# Now search for the fields
res_body = env.cmd('ft.search', 'idx', '@body:location')
res_name = env.cmd('ft.search', 'idx', '@name:location')
env.assertEqual(0, res_name[0])
env.assertEqual(1, res_body[0])
def testSearchNonexistField(env):
# GH Issue 133
env.cmd('ft.create', 'idx', 'schema', 'title', 'text',
'weight', 5.0, 'body', 'text', 'url', 'text')
env.cmd('ft.add', 'idx', 'd1', 1.0, 'nosave', 'fields', 'title',
'hello world', 'body', 'lorem dipsum', 'place', '-77.0366 38.8977')
env.cmd('ft.search', 'idx', 'Foo', 'GEOFILTER',
'place', '-77.0366', '38.8977', '1', 'km')
def testSortbyMissingField(env):
# GH Issue 131
env.cmd('ft.create', 'ix', 'schema', 'txt',
'text', 'num', 'numeric', 'sortable')
env.cmd('ft.add', 'ix', 'doc1', 1.0, 'fields', 'txt', 'foo')
env.cmd('ft.search', 'ix', 'foo', 'sortby', 'num')
def testParallelIndexing(env):
# GH Issue 207
env.cmd('ft.create', 'idx', 'schema', 'txt', 'text')
from threading import Thread
env.getConnection()
ndocs = 100
def runner(tid):
cli = env.getConnection()
for num in range(ndocs):
cli.execute_command('ft.add', 'idx', 'doc{}_{}'.format(tid, num), 1.0,
'fields', 'txt', 'hello world' * 20)
ths = []
for tid in range(10):
ths.append(Thread(target=runner, args=(tid,)))
[th.start() for th in ths]
[th.join() for th in ths]
res = env.cmd('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(1000, int(d['num_docs']))
def testDoubleAdd(env):
# Tests issue #210
env.cmd('ft.create', 'idx', 'schema', 'txt', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'txt', 'hello world')
with env.assertResponseError():
env.cmd('ft.add', 'idx', 'doc1', 1.0,
'fields', 'txt', 'goodbye world')
env.assertEqual('hello world', env.cmd('ft.get', 'idx', 'doc1')[1])
env.assertEqual(0, env.cmd('ft.search', 'idx', 'goodbye')[0])
env.assertEqual(1, env.cmd('ft.search', 'idx', 'hello')[0])
# Now with replace
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'replace',
'fields', 'txt', 'goodbye world')
env.assertEqual(1, env.cmd('ft.search', 'idx', 'goodbye')[0])
env.assertEqual(0, env.cmd('ft.search', 'idx', 'hello')[0])
env.assertEqual('goodbye world', env.cmd('ft.get', 'idx', 'doc1')[1])
def testConcurrentErrors(env):
from multiprocessing import Process
import random
env.cmd('ft.create', 'idx', 'schema', 'txt', 'text')
docs_per_thread = 100
num_threads = 50
docIds = ['doc{}'.format(x) for x in range(docs_per_thread)]
def thrfn():
myIds = docIds[::]
random.shuffle(myIds)
cli = env.getConnection()
with cli.pipeline(transaction=False) as pl:
for x in myIds:
pl.execute_command('ft.add', 'idx', x, 1.0,
'fields', 'txt', ' hello world ' * 50)
try:
pl.execute()
except Exception as e:
pass
# print e
thrs = [Process(target=thrfn) for x in range(num_threads)]
[th.start() for th in thrs]
[th.join() for th in thrs]
res = env.cmd('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(100, int(d['num_docs']))
def testBinaryKeys(env):
env.cmd('ft.create', 'idx', 'schema', 'txt', 'text')
# Insert a document
env.cmd('ft.add', 'idx', 'Hello', 1.0, 'fields', 'txt', 'NoBin match')
env.cmd('ft.add', 'idx', 'Hello\x00World', 1.0, 'fields', 'txt', 'Bin match')
for _ in env.reloading_iterator():
exp = [2L, 'Hello\x00World', ['txt', 'Bin match'], 'Hello', ['txt', 'NoBin match']]
res = env.cmd('ft.search', 'idx', 'match')
env.assertEqual(exp, res)
def testNonDefaultDb(env):
if env.is_cluster():
raise unittest.SkipTest()
# Should be ok
env.cmd('FT.CREATE', 'idx1', 'schema', 'txt', 'text')
try:
env.cmd('SELECT 1')
except redis.ResponseError:
return
# Should fail
with env.assertResponseError():
env.cmd('FT.CREATE', 'idx2', 'schema', 'txt', 'text')
def testDuplicateNonspecFields(env):
env.cmd('FT.CREATE', 'idx', 'schema', 'txt', 'text')
env.cmd('FT.ADD', 'idx', 'doc', 1.0, 'fields',
'f1', 'f1val', 'f1', 'f1val2', 'F1', 'f1Val3')
res = env.cmd('ft.get', 'idx', 'doc')
res = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertTrue(res['f1'] in ('f1val', 'f1val2'))
env.assertEqual('f1Val3', res['F1'])
def testDuplicateFields(env):
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'txt',
'TEXT', 'num', 'NUMERIC', 'SORTABLE')
for _ in env.retry_with_reload():
# Ensure the index assignment is correct after an rdb load
with env.assertResponseError():
env.cmd('FT.ADD', 'idx', 'doc', 1.0, 'FIELDS',
'txt', 'foo', 'txt', 'bar', 'txt', 'baz')
# Try add hash
env.hmset('newDoc', {'txt': 'foo', 'Txt': 'bar', 'txT': 'baz'})
# Get the actual value:
from redis import ResponseError
if not env.is_cluster():
with env.assertResponseError(contained='twice'):
env.cmd('FT.ADDHASH', 'idx', 'newDoc', 1.0)
# Try with REPLACE
with env.assertResponseError():
env.cmd('FT.ADD', 'idx', 'doc2', 1.0, 'REPLACE', 'FIELDS',
'txt', 'foo', 'txt', 'bar')
# With replace partial
env.cmd('FT.ADD', 'idx', 'doc2', 1.0, 'REPLACE',
'PARTIAL', 'FIELDS', 'num', 42)
with env.assertResponseError():
env.cmd('FT.ADD', 'idx', 'doc2', 1.0, 'REPLACE',
'PARTIAL', 'FIELDS', 'num', 42, 'num', 32)
def testDuplicateSpec(env):
with env.assertResponseError():
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'f1',
'text', 'n1', 'numeric', 'f1', 'text')
def testSortbyMissingFieldSparse(env):
    # Note: the document needs to have at least one sortable field present in
    # order for the indexer to give it a sort vector
env.cmd('ft.create', 'idx', 'SCHEMA', 'lastName', 'text',
'SORTABLE', 'firstName', 'text', 'SORTABLE')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'lastName', 'mark')
res = env.cmd('ft.search', 'idx', 'mark', 'WITHSORTKEYS', "SORTBY",
"firstName", "ASC", "limit", 0, 100)
# commented because we don't filter out exclusive sortby fields
# env.assertEqual([1L, 'doc1', None, ['lastName', 'mark']], res)
def testLuaAndMulti(env):
if env.is_cluster():
raise unittest.SkipTest()
# Ensure we can work in Lua and Multi environments without crashing
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'f1', 'text', 'n1', 'numeric')
env.cmd('HMSET', 'hashDoc', 'f1', 'v1', 'n1', 4)
env.cmd('HMSET', 'hashDoc2', 'f1', 'v1', 'n1', 5)
r = env.getConnection()
r.eval("return redis.call('ft.add', 'idx', 'doc1', 1.0, 'fields', 'f1', 'bar')", "0")
r.eval("return redis.call('ft.addhash', 'idx', 'hashDoc', 1.0)", 0)
# Try in a pipeline:
with r.pipeline(transaction=True) as pl:
pl.execute_command('ft.add', 'idx', 'doc2',
1.0, 'fields', 'f1', 'v3')
pl.execute_command('ft.add', 'idx', 'doc3',
1.0, 'fields', 'f1', 'v4')
pl.execute_command('ft.addhash', 'idx', 'hashdoc2', 1.0)
pl.execute()
def testLanguageField(env):
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'language', 'TEXT')
env.cmd('FT.ADD', 'idx', 'doc1', 1.0,
'FIELDS', 'language', 'gibberish')
res = env.cmd('FT.SEARCH', 'idx', 'gibberish')
env.assertEqual([1L, 'doc1', ['language', 'gibberish']], res)
# The only way I can verify that LANGUAGE is parsed twice is ensuring we
# provide a wrong language. This is much easier to test than trying to
# figure out how a given word is stemmed
with env.assertResponseError():
env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'LANGUAGE',
'blah', 'FIELDS', 'language', 'gibber')
def testUninitSortvector(env):
# This would previously crash
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'f1', 'TEXT')
for x in range(2000):
env.cmd('FT.ADD', 'idx', 'doc{}'.format(
x), 1.0, 'FIELDS', 'f1', 'HELLO')
env.broadcast('SAVE')
for x in range(10):
env.broadcast('DEBUG RELOAD')
def normalize_row(row):
return to_dict(row)
def assertAggrowsEqual(env, exp, got):
env.assertEqual(exp[0], got[0])
env.assertEqual(len(exp), len(got))
# and now, it's just free form:
exp = sorted(to_dict(x) for x in exp[1:])
got = sorted(to_dict(x) for x in got[1:])
env.assertEqual(exp, got)
def assertResultsEqual(env, exp, got, inorder=True):
from pprint import pprint
# pprint(exp)
# pprint(got)
env.assertEqual(exp[0], got[0])
env.assertEqual(len(exp), len(got))
exp = list(grouper(exp[1:], 2))
got = list(grouper(got[1:], 2))
for x in range(len(exp)):
exp_did, exp_fields = exp[x]
got_did, got_fields = got[x]
env.assertEqual(exp_did, got_did, message="at position {}".format(x))
got_fields = to_dict(got_fields)
exp_fields = to_dict(exp_fields)
env.assertEqual(exp_fields, got_fields, message="at position {}".format(x))
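# FT.ALTER <idx> SCHEMA ADD <name> <type> extends an existing schema in place;
# documents added afterwards are indexed on the new field (including SORTABLE
# text and NUMERIC fields), and malformed ALTER variants must return errors.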
def testAlterIndex(env):
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'f1', 'TEXT')
env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'FIELDS', 'f1', 'hello', 'f2', 'world')
env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'f2', 'TEXT')
env.cmd('FT.ADD', 'idx', 'doc2', 1.0, 'FIELDS', 'f1', 'hello', 'f2', 'world')
for _ in env.retry_with_reload():
ret = env.cmd('FT.SEARCH', 'idx', 'world')
env.assertEqual([1, 'doc2', ['f1', 'hello', 'f2', 'world']], ret)
env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'f3', 'TEXT', 'SORTABLE')
for x in range(10):
env.cmd('FT.ADD', 'idx', 'doc{}'.format(x + 3), 1.0,
'FIELDS', 'f1', 'hello', 'f3', 'val{}'.format(x))
for _ in env.retry_with_reload():
# Test that sortable works
res = env.cmd('FT.SEARCH', 'idx', 'hello', 'SORTBY', 'f3', 'DESC')
exp = [12, 'doc12', ['f1', 'hello', 'f3', 'val9'], 'doc11', ['f1', 'hello', 'f3', 'val8'], 'doc10', ['f1', 'hello', 'f3', 'val7'], 'doc9', ['f1', 'hello', 'f3', 'val6'], 'doc8', ['f1', 'hello', 'f3', 'val5'], 'doc7', [
'f1', 'hello', 'f3', 'val4'], 'doc6', ['f1', 'hello', 'f3', 'val3'], 'doc5', ['f1', 'hello', 'f3', 'val2'], 'doc4', ['f1', 'hello', 'f3', 'val1'], 'doc3', ['f1', 'hello', 'f3', 'val0']]
assertResultsEqual(env, exp, res)
# Test that we can add a numeric field
env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'n1', 'NUMERIC')
env.cmd('FT.ADD', 'idx', 'docN1', 1.0, 'FIELDS', 'n1', 50)
env.cmd('FT.ADD', 'idx', 'docN2', 1.0, 'FIELDS', 'n1', 250)
for _ in env.retry_with_reload():
res = env.cmd('FT.SEARCH', 'idx', '@n1:[0 100]')
env.assertEqual([1, 'docN1', ['n1', '50']], res)
env.expect('FT.ALTER', 'idx', 'SCHEMA', 'NOT_ADD', 'f2', 'TEXT').error()
env.expect('FT.ALTER', 'idx', 'SCHEMA', 'ADD').error()
env.expect('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'f2').error()
env.expect('FT.ALTER', 'idx', 'ADD', 'SCHEMA', 'f2', 'TEXT').error()
env.expect('FT.ALTER', 'idx', 'f2', 'TEXT').error()
def testAlterValidation(env):
    # Test the constraints of the ALTER command
env.cmd('FT.CREATE', 'idx1', 'SCHEMA', 'f0', 'TEXT')
for x in range(1, 32):
env.cmd('FT.ALTER', 'idx1', 'SCHEMA', 'ADD', 'f{}'.format(x), 'TEXT')
# OK for now.
# Should be too many indexes
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER',
'idx1', 'SCHEMA', 'ADD', 'tooBig', 'TEXT')
env.cmd('FT.CREATE', 'idx2', 'MAXTEXTFIELDS', 'SCHEMA', 'f0', 'TEXT')
# print env.cmd('FT.INFO', 'idx2')
for x in range(1, 50):
env.cmd('FT.ALTER', 'idx2', 'SCHEMA', 'ADD', 'f{}'.format(x + 1), 'TEXT')
env.cmd('FT.ADD', 'idx2', 'doc1', 1.0, 'FIELDS', 'f50', 'hello')
for _ in env.retry_with_reload():
ret = env.cmd('FT.SEARCH', 'idx2', '@f50:hello')
env.assertEqual([1, 'doc1', ['f50', 'hello']], ret)
env.cmd('FT.CREATE', 'idx3', 'SCHEMA', 'f0', 'text')
# Try to alter the index with garbage
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER', 'idx3',
'SCHEMA', 'ADD', 'f1', 'TEXT', 'f2', 'garbage')
ret = to_dict(env.cmd('ft.info', 'idx3'))
env.assertEqual(1, len(ret['fields']))
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER',
'nonExist', 'SCHEMA', 'ADD', 'f1', 'TEXT')
# test with no fields!
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER', 'idx2', 'SCHEMA', 'ADD')
def testIssue366_1(env):
if env.is_cluster():
raise unittest.SkipTest('ADDHASH unsupported!')
# Test random RDB regressions, see GH 366
env.cmd('FT.CREATE', 'idx1', 'SCHEMA', 'textfield', 'TEXT', 'numfield', 'NUMERIC')
env.hmset('foo', {'textfield': 'blah', 'numfield': 1})
env.cmd('FT.ADDHASH', 'idx1', 'foo', 1, 'replace')
env.cmd('FT.DEL', 'idx1', 'foo')
for _ in env.retry_with_reload():
pass # --just ensure it doesn't crash
def testIssue366_2(env):
# FT.CREATE atest SCHEMA textfield TEXT numfield NUMERIC
# FT.ADD atest anId 1 PAYLOAD '{"hello":"world"}' FIELDS textfield sometext numfield 1234
# FT.ADD atest anId 1 PAYLOAD '{"hello":"world2"}' REPLACE PARTIAL FIELDS numfield 1111
# shutdown
env.cmd('FT.CREATE', 'idx1', 'SCHEMA', 'textfield', 'TEXT', 'numfield', 'NUMERIC')
env.cmd('FT.ADD', 'idx1', 'doc1', 1, 'PAYLOAD', '{"hello":"world"}',
'FIELDS', 'textfield', 'sometext', 'numfield', 1234)
env.cmd('ft.add', 'idx1', 'doc1', 1,
'PAYLOAD', '{"hello":"world2"}',
'REPLACE', 'PARTIAL',
'FIELDS', 'textfield', 'sometext', 'numfield', 1111)
for _ in env.retry_with_reload():
pass #
def testIssue654(env):
# Crashes during FILTER
env.cmd('ft.create', 'idx', 'schema', 'id', 'numeric')
env.cmd('ft.add', 'idx', 1, 1, 'fields', 'id', 1)
env.cmd('ft.add', 'idx', 2, 1, 'fields', 'id', 2)
res = env.cmd('ft.search', 'idx', '*', 'filter', '@version', 0, 2)
def testReplaceReload(env):
env.cmd('FT.CREATE', 'idx2', 'SCHEMA', 'textfield', 'TEXT', 'numfield', 'NUMERIC')
# Create a document and then replace it.
env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'FIELDS', 'textfield', 's1', 'numfield', 99)
env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'REPLACE', 'PARTIAL',
'FIELDS', 'textfield', 's100', 'numfield', 990)
env.dump_and_reload()
# RDB Should still be fine
env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'REPLACE', 'PARTIAL',
'FIELDS', 'textfield', 's200', 'numfield', 1090)
doc = to_dict(env.cmd('FT.GET', 'idx2', 'doc2'))
env.assertEqual('s200', doc['textfield'])
env.assertEqual('1090', doc['numfield'])
# command = 'FT.CREATE idx SCHEMA '
# for i in range(255):
# command += 't%d NUMERIC SORTABLE ' % i
# command = command[:-1]
# r.execute_command(command)
# r.execute_command('save')
# // reload from ...
# r.execute_command('FT.ADD idx doc1 1.0 FIELDS t0 1')
def testIssue417(env):
command = ['ft.create', 'idx', 'schema']
for x in range(255):
command += ['t{}'.format(x), 'numeric', 'sortable']
command = command[:-1]
env.cmd(*command)
for _ in env.reloading_iterator():
try:
env.execute_command('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 't0', '1')
except redis.ResponseError as e:
env.assertTrue('already' in e.message.lower())
# >FT.CREATE myIdx SCHEMA title TEXT WEIGHT 5.0 body TEXT url TEXT
# >FT.ADD myIdx doc1 1.0 FIELDS title "hello world" body "lorem ipsum" url "www.google.com"
# >FT.SEARCH myIdx "no-as"
# Could not connect to Redis at 127.0.0.1:6379: Connection refused
# >FT.SEARCH myIdx "no-as"
# (error) Unknown Index name
def testIssue422(env):
env.cmd('ft.create', 'myIdx', 'schema',
'title', 'TEXT', 'WEIGHT', '5.0',
'body', 'TEXT',
'url', 'TEXT')
env.cmd('ft.add', 'myIdx', 'doc1', '1.0', 'FIELDS', 'title', 'hello world', 'bod', 'lorem ipsum', 'url', 'www.google.com')
rv = env.cmd('ft.search', 'myIdx', 'no-as')
env.assertEqual([0], rv)
def testIssue446(env):
env.cmd('ft.create', 'myIdx', 'schema',
'title', 'TEXT', 'SORTABLE')
env.cmd('ft.add', 'myIdx', 'doc1', '1.0', 'fields', 'title', 'hello world', 'body', 'lorem ipsum', 'url', '"www.google.com')
rv = env.cmd('ft.search', 'myIdx', 'hello', 'limit', '0', '0')
env.assertEqual([1], rv)
# Related - issue 635
env.cmd('ft.add', 'myIdx', 'doc2', '1.0', 'fields', 'title', 'hello')
rv = env.cmd('ft.search', 'myIdx', 'hello', 'limit', '0', '0')
env.assertEqual([2], rv)
def testTimeoutSettings(env):
env.cmd('ft.create', 'idx', 'schema', 't1', 'text')
env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'BLAHBLAH').raiseError()
env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'RETURN').notRaiseError()
env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'FAIL').notRaiseError()
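# Index aliases: FT.ALIASADD binds an extra name to an index, FT.ALIASUPDATE
# re-points an existing alias and FT.ALIASDEL removes it. Search, get and
# aggregate calls should all accept an alias wherever an index name is used.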
def testAlias(env):
env.cmd('ft.create', 'idx', 'schema', 't1', 'text')
env.cmd('ft.create', 'idx2', 'schema', 't1', 'text')
env.expect('ft.aliasAdd', 'myIndex').raiseError()
env.expect('ft.aliasupdate', 'fake_alias', 'imaginary_alias', 'Too_many_args').raiseError()
env.cmd('ft.aliasAdd', 'myIndex', 'idx')
env.cmd('ft.add', 'myIndex', 'doc1', 1.0, 'fields', 't1', 'hello')
r = env.cmd('ft.search', 'idx', 'hello')
env.assertEqual([1, 'doc1', ['t1', 'hello']], r)
r2 = env.cmd('ft.search', 'myIndex', 'hello')
env.assertEqual(r, r2)
# try to add the same alias again; should be an error
env.expect('ft.aliasAdd', 'myIndex', 'idx2').raiseError()
env.expect('ft.aliasAdd', 'alias2', 'idx').notRaiseError()
# now delete the index
env.cmd('ft.drop', 'myIndex')
# index list should be cleared now. This can be tested by trying to alias
    # the old alias to a different index
env.cmd('ft.aliasAdd', 'myIndex', 'idx2')
env.cmd('ft.aliasAdd', 'alias2', 'idx2')
env.cmd('ft.add', 'myIndex', 'doc2', 1.0, 'fields', 't1', 'hello')
r = env.cmd('ft.search', 'alias2', 'hello')
env.assertEqual([1L, 'doc2', ['t1', 'hello']], r)
# check that aliasing one alias to another returns an error. This will
# end up being confusing
env.expect('ft.aliasAdd', 'alias3', 'myIndex').raiseError()
# check that deleting the alias works as expected
env.expect('ft.aliasDel', 'myIndex').notRaiseError()
env.expect('ft.search', 'myIndex', 'foo').raiseError()
# create a new index and see if we can use the old name
env.cmd('ft.create', 'idx3', 'schema', 't1', 'text')
env.cmd('ft.add', 'idx3', 'doc3', 1.0, 'fields', 't1', 'foo')
env.cmd('ft.aliasAdd', 'myIndex', 'idx3')
# also, check that this works in rdb save
for _ in env.retry_with_rdb_reload():
r = env.cmd('ft.search', 'myIndex', 'foo')
env.assertEqual([1L, 'doc3', ['t1', 'foo']], r)
# Check that we can move an alias from one index to another
env.cmd('ft.aliasUpdate', 'myIndex', 'idx2')
r = env.cmd('ft.search', 'myIndex', "hello")
env.assertEqual([1L, 'doc2', ['t1', 'hello']], r)
# Test that things like ft.get, ft.aggregate, etc. work
r = env.cmd('ft.get', 'myIndex', 'doc2')
env.assertEqual(['t1', 'hello'], r)
r = env.cmd('ft.aggregate', 'myIndex', 'hello', 'LOAD', '1', '@t1')
env.assertEqual([1, ['t1', 'hello']], r)
# Test update
env.expect('ft.aliasAdd', 'updateIndex', 'idx3')
env.expect('ft.aliasUpdate', 'updateIndex', 'fake_idx')
r = env.cmd('ft.del', 'idx2', 'doc2')
env.assertEqual(1, r)
env.expect('ft.aliasdel').raiseError()
env.expect('ft.aliasdel', 'myIndex', 'yourIndex').raiseError()
env.expect('ft.aliasdel', 'non_existing_alias').raiseError()
def testNoCreate(env):
env.cmd('ft.create', 'idx', 'schema', 'f1', 'text')
env.expect('ft.add', 'idx', 'schema', 'f1').raiseError()
env.expect('ft.add', 'idx', 'doc1', 1, 'nocreate', 'fields', 'f1', 'hello').raiseError()
env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'nocreate', 'fields', 'f1', 'hello').raiseError()
env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'fields', 'f1', 'hello').notRaiseError()
env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'nocreate', 'fields', 'f1', 'world').notRaiseError()
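# FT.SPELLCHECK returns, for each misspelled TERM, a list of suggested
# corrections (empty here because the index holds no similar terms); with
# FULLSCOREINFO the reply is prefixed by an extra scoring-related value, as
# asserted below.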
def testSpellCheck(env):
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'report', 'TEXT')
env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'FIELDS', 'report', 'report content')
rv = env.cmd('FT.SPELLCHECK', 'idx', '111111')
env.assertEqual([['TERM', '111111', []]], rv)
if not env.isCluster():
rv = env.cmd('FT.SPELLCHECK', 'idx', '111111', 'FULLSCOREINFO')
env.assertEqual([1L, ['TERM', '111111', []]], rv)
# Standalone functionality
def testIssue484(env):
# Issue with split
# 127.0.0.1:6379> ft.drop productSearch1
# OK
# 127.0.0.1:6379> "FT.CREATE" "productSearch1" "NOSCOREIDX" "SCHEMA" "productid" "TEXT" "categoryid" "TEXT" "color" "TEXT" "timestamp" "NUMERIC"
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID1" "1.0" "REPLACE" "FIELDS" "productid" "1" "categoryid" "cars" "color" "blue" "categoryType" 0
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID2" "1.0" "REPLACE" "FIELDS" "productid" "1" "categoryid" "small cars" "color" "white" "categoryType" 0
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID3" "1.0" "REPLACE" "FIELDS" "productid" "2" "categoryid" "Big cars" "color" "white" "categoryType" 0
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID4" "1.0" "REPLACE" "FIELDS" "productid" "2" "categoryid" "Big cars" "color" "green" "categoryType" 0
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID5" "1.0" "REPLACE" "FIELDS" "productid" "3" "categoryid" "cars" "color" "blue" "categoryType" 0
# OK
# 127.0.0.1:6379> FT.AGGREGATE productSearch1 * load 2 @color @categoryid APPLY "split(format(\"%s-%s\",@color,@categoryid),\"-\")" as value GROUPBY 1 @value REDUCE COUNT 0 as value_count
env.cmd('ft.create', 'productSearch1', 'noscoreidx', 'schema', 'productid',
'text', 'categoryid', 'text', 'color', 'text', 'timestamp', 'numeric')
env.cmd('ft.add', 'productSearch1', 'GUID1', '1.0', 'REPLACE', 'FIELDS', 'productid', '1', 'categoryid', 'cars', 'color', 'blue', 'categoryType', 0)
env.cmd('ft.add', 'productSearch1', 'GUID2', '1.0', 'REPLACE', 'FIELDS', 'productid', '1', 'categoryid', 'small cars', 'color', 'white', 'categoryType', 0)
env.cmd('ft.add', 'productSearch1', 'GUID3', '1.0', 'REPLACE', 'FIELDS', 'productid', '2', 'categoryid', 'Big cars', 'color', 'white', 'categoryType', 0)
env.cmd('ft.add', 'productSearch1', 'GUID4', '1.0', 'REPLACE', 'FIELDS', 'productid', '2', 'categoryid', 'Big cars', 'color', 'green', 'categoryType', 0)
env.cmd('ft.add', 'productSearch1', 'GUID5', '1.0', 'REPLACE', 'FIELDS', 'productid', '3', 'categoryid', 'cars', 'color', 'blue', 'categoryType', 0)
res = env.cmd('FT.AGGREGATE', 'productSearch1', '*',
'load', '2', '@color', '@categoryid',
'APPLY', 'split(format("%s-%s",@color,@categoryid),"-")', 'as', 'value',
'GROUPBY', '1', '@value',
'REDUCE', 'COUNT', '0', 'as', 'value_count',
'SORTBY', '4', '@value_count', 'DESC', '@value', 'ASC')
expected = [6, ['value', 'white', 'value_count', '2'], ['value', 'cars', 'value_count', '2'], ['value', 'small cars', 'value_count', '1'], ['value', 'blue', 'value_count', '2'], ['value', 'Big cars', 'value_count', '2'], ['value', 'green', 'value_count', '1']]
assertAggrowsEqual(env, expected, res)
for var in expected:
env.assertIn(var, res)
def testIssue501(env):
env.cmd('FT.CREATE', 'incidents', 'SCHEMA', 'report', 'TEXT')
env.cmd('FT.ADD', 'incidents', 'doc1', 1.0, 'FIELDS', 'report', 'report content')
env.cmd('FT.DICTADD', 'slang', 'timmies', 'toque', 'toonie', 'serviette', 'kerfuffle', 'chesterfield')
rv = env.cmd('FT.SPELLCHECK', 'incidents', 'qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq',
'TERMS', 'INCLUDE', 'slang', 'TERMS', 'EXCLUDE', 'slang')
env.assertEqual("qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq", rv[0][1])
env.assertEqual([], rv[0][2])
env.expect('FT.SPELLCHECK', 'incidents', 'qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq',
'TERMS', 'FAKE_COMMAND', 'slang').error()
def testIssue589(env):
env.cmd('FT.CREATE', 'incidents', 'SCHEMA', 'report', 'TEXT')
env.cmd('FT.ADD', 'incidents', 'doc1', 1.0, 'FIELDS', 'report', 'report content')
env.expect('FT.SPELLCHECK', 'incidents', 'report :').error().contains("Syntax error at offset")
def testIssue621(env):
env.expect('ft.create', 'test', 'SCHEMA', 'uuid', 'TAG', 'title', 'TEXT').equal('OK')
env.expect('ft.add', 'test', 'a', '1', 'REPLACE', 'PARTIAL', 'FIELDS', 'uuid', 'foo', 'title', 'bar').equal('OK')
env.expect('ft.add', 'test', 'a', '1', 'REPLACE', 'PARTIAL', 'FIELDS', 'title', 'bar').equal('OK')
env.expect('ft.search', 'test', '@uuid:{foo}').equal([1L, 'a', ['uuid', 'foo', 'title', 'bar']])
# Server crash on doc names that conflict with index keys #666
def testIssue666(env):
# We cannot reliably determine that any error will occur in cluster mode
# because of the key name
env.skipOnCluster()
env.cmd('ft.create', 'foo', 'schema', 'bar', 'text')
env.cmd('ft.add', 'foo', 'mydoc', 1, 'fields', 'bar', 'one two three')
# crashes here
with env.assertResponseError():
env.cmd('ft.add', 'foo', 'ft:foo/two', '1', 'fields', 'bar', 'four five six')
# try with replace:
with env.assertResponseError():
env.cmd('ft.add', 'foo', 'ft:foo/two', '1', 'REPLACE',
'FIELDS', 'bar', 'four five six')
with env.assertResponseError():
env.cmd('ft.add', 'foo', 'idx:foo', '1', 'REPLACE',
'FIELDS', 'bar', 'four five six')
env.cmd('ft.add', 'foo', 'mydoc1', 1, 'fields', 'bar', 'four five six')
# 127.0.0.1:6379> flushdb
# OK
# 127.0.0.1:6379> ft.create foo SCHEMA bar text
# OK
# 127.0.0.1:6379> ft.add foo mydoc 1 FIELDS bar "one two three"
# OK
# 127.0.0.1:6379> keys *
# 1) "mydoc"
# 2) "ft:foo/one"
# 3) "idx:foo"
# 4) "ft:foo/two"
# 5) "ft:foo/three"
# 127.0.0.1:6379> ft.add foo "ft:foo/two" 1 FIELDS bar "four five six"
# Could not connect to Redis at 127.0.0.1:6379: Connection refused
def testPrefixDeletedExpansions(env):
env.skipOnCluster()
env.cmd('ft.create', 'idx', 'schema', 'txt1', 'text', 'tag1', 'tag')
    # get the configured maximum number of expansions
maxexpansions = int(env.cmd('ft.config', 'get', 'MAXEXPANSIONS')[0][1])
for x in range(maxexpansions):
env.cmd('ft.add', 'idx', 'doc{}'.format(x), 1, 'fields',
'txt1', 'term{}'.format(x), 'tag1', 'tag{}'.format(x))
for x in range(maxexpansions):
env.cmd('ft.del', 'idx', 'doc{}'.format(x))
env.cmd('ft.add', 'idx', 'doc_XXX', 1, 'fields', 'txt1', 'termZZZ', 'tag1', 'tagZZZ')
# r = env.cmd('ft.search', 'idx', 'term*')
# print(r)
# r = env.cmd('ft.search', 'idx', '@tag1:{tag*}')
# print(r)
    tmax = time.time() + 0.5 # 500ms max
iters = 0
while time.time() < tmax:
iters += 1
env.cmd('ft.debug', 'gc_forceinvoke', 'idx')
r = env.cmd('ft.search', 'idx', '@txt1:term* @tag1:{tag*}')
if r[0]:
break
print 'did {} iterations'.format(iters)
r = env.cmd('ft.search', 'idx', '@txt1:term* @tag1:{tag*}')
env.assertEqual([1, 'doc_XXX', ['txt1', 'termZZZ', 'tag1', 'tagZZZ']], r)
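# A term prefixed with '~' is optional: it does not restrict the result set
# but may influence scoring (here combined with a $weight attribute). The test
# only verifies that FT.EXPLAIN and FT.SEARCH accept this syntax.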
def testOptionalFilter(env):
env.cmd('ft.create', 'idx', 'schema', 't1', 'text')
for x in range(100):
env.cmd('ft.add', 'idx', 'doc_{}'.format(x), 1, 'fields', 't1', 'hello world word{}'.format(x))
env.cmd('ft.explain', 'idx', '(~@t1:word20)')
# print(r)
r = env.cmd('ft.search', 'idx', '~(word20 => {$weight: 2.0})')
def testIssue736(env):
# 1. create the schema, we need a tag field
env.cmd('ft.create', 'idx', 'schema', 't1', 'text', 'n2', 'numeric', 't2', 'tag')
# 2. create a single document to initialize at least one RSAddDocumentCtx
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 't1', 'hello', 't2', 'foo, bar')
# 3. create a second document with many filler fields to force a realloc:
extra_fields = []
for x in range(20):
extra_fields += ['nidx_fld{}'.format(x), 'val{}'.format(x)]
extra_fields += ['n2', 'not-a-number', 't2', 'random, junk']
with env.assertResponseError():
env.cmd('ft.add', 'idx', 'doc2', 1, 'fields', *extra_fields)
def testCriteriaTesterDeactivated():
env = Env(moduleArgs='_MAX_RESULTS_TO_UNSORTED_MODE 1')
env.cmd('ft.create', 'idx', 'schema', 't1', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 't1', 'hello1 hey hello2')
env.cmd('ft.add', 'idx', 'doc2', 1, 'fields', 't1', 'hello2 hey')
env.cmd('ft.add', 'idx', 'doc3', 1, 'fields', 't1', 'hey')
res = env.execute_command('ft.search', 'idx', '(hey hello1)|(hello2 hey)')
expected = [2L, 'doc1', ['t1', 'hello1 hey hello2'], 'doc2', ['t1', 'hello2 hey']]
env.assertEqual(sorted(res), sorted(expected))
def testIssue828(env):
env.cmd('ft.create', 'beers', 'SCHEMA',
'name', 'TEXT', 'PHONETIC', 'dm:en',
'style', 'TAG', 'SORTABLE',
'abv', 'NUMERIC', 'SORTABLE')
rv = env.cmd("FT.ADD", "beers", "802", "1.0",
"FIELDS", "index", "25", "abv", "0.049",
"name", "Hell or High Watermelon Wheat (2009)",
"style", "Fruit / Vegetable Beer")
env.assertEqual('OK', rv)
def testIssue862(env):
env.cmd('ft.create', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE')
rv = env.cmd("FT.ADD", "idx", "doc1", "1.0", "FIELDS", "test", "foo")
env.assertEqual('OK', rv)
env.cmd("FT.SEARCH", "idx", "foo", 'WITHSORTKEYS')
env.assertTrue(env.isUp())
def testIssue_884(env):
env.expect('FT.create', 'idx', 'STOPWORDS', '0', 'SCHEMA', 'title', 'text', 'weight',
'50', 'subtitle', 'text', 'weight', '10', 'author', 'text', 'weight',
'10', 'description', 'text', 'weight', '20').equal('OK')
env.expect('FT.ADD', 'idx', 'doc4', '1.0', 'FIELDS', 'title', 'mohsin conversation the conversation tahir').equal('OK')
env.expect('FT.ADD', 'idx', 'doc3', '1.0', 'FIELDS', 'title', 'Fareham Civilization Church - Sermons and conversations mohsin conversation the').equal('OK')
env.expect('FT.ADD', 'idx', 'doc2', '1.0', 'FIELDS', 'title', 'conversation the conversation - a drama about conversation, the science of conversation.').equal('OK')
env.expect('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 'title', 'mohsin conversation with the mohsin').equal('OK')
expected = [2L, 'doc2', ['title', 'conversation the conversation - a drama about conversation, the science of conversation.'], 'doc4', ['title', 'mohsin conversation the conversation tahir']]
res = env.cmd('FT.SEARCH', 'idx', '@title:(conversation) (@title:(conversation the conversation))=>{$inorder: true;$slop: 0}')
env.assertEquals(len(expected), len(res))
for v in expected:
env.assertContains(v, res)
def testIssue_866(env):
env.expect('ft.sugadd', 'sug', 'test123', '1').equal(1)
env.expect('ft.sugadd', 'sug', 'test456', '1').equal(2)
env.expect('ft.sugdel', 'sug', 'test').equal(0)
env.expect('ft.sugget', 'sug', '').equal(['test123', 'test456'])
def testIssue_848(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test1', 'TEXT', 'SORTABLE').equal('OK')
env.expect('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 'test1', 'foo').equal('OK')
env.expect('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'test2', 'TEXT', 'SORTABLE').equal('OK')
env.expect('FT.ADD', 'idx', 'doc2', '1.0', 'FIELDS', 'test1', 'foo', 'test2', 'bar').equal('OK')
env.expect('FT.SEARCH', 'idx', 'foo', 'SORTBY', 'test2', 'ASC').equal([2L, 'doc1', ['test1', 'foo'], 'doc2', ['test2', 'bar', 'test1', 'foo']])
def testMod_309(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
for i in range(100000):
env.expect('FT.ADD', 'idx', 'doc%d'%i, '1.0', 'FIELDS', 'test', 'foo').equal('OK')
res = env.cmd('FT.AGGREGATE', 'idx', 'foo')
env.assertEqual(len(res), 100001)
def testIssue_865(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', '1', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', '1', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', '1', 'foo2').equal('OK')
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', '1', 'ASC').equal([2, 'doc1', ['1', 'foo1'], 'doc2', ['1', 'foo2']])
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', '1', 'DESC').equal([2, 'doc2', ['1', 'foo2'], 'doc1', ['1', 'foo1']])
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', '1', 'bad').error()
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', 'bad', 'bad').error()
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', 'bad').error()
env.expect('ft.search', 'idx', 'foo*', 'SORTBY').error()
def testIssue_779(env):
    # FT.ADD should return NOADD and not change the doc if value < same_value; before the fix it returned OK and made the change.
# Note that "greater than" ">" does not have the same bug.
env.cmd('FT.CREATE idx2 SCHEMA ot1 TAG')
env.cmd('FT.ADD idx2 doc2 1.0 FIELDS newf CAT ot1 4001')
env.expect('FT.GET idx2 doc2').equal(["newf", "CAT", "ot1", "4001"])
    # NOADD is expected since 4001 is not < 4000, and no update to doc2 is expected as a result
    env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4000 FIELDS newf DOG ot1 4000').equal('NOADD')
env.expect('FT.GET idx2 doc2').equal(["newf", "CAT", "ot1", "4001"])
# OK is expected since 4001 < 4002 and the doc2 is updated
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4002 FIELDS newf DOG ot1 4002').equal('OK')
env.expect('FT.GET idx2 doc2').equal(["newf", "DOG", "ot1", "4002"])
# OK is NOT expected since 4002 is not < 4002
    # We expect NOADD and no update to doc2; before the fix we got OK and doc2 was updated
    # After the fix, @ot1 is implicitly converted to a number, so we expect NOADD
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4002 FIELDS newf FISH ot1 4002').equal('NOADD')
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if to_number(@ot1)<4002 FIELDS newf FISH ot1 4002').equal('NOADD')
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<to_str(4002) FIELDS newf FISH ot1 4002').equal('NOADD')
env.expect('FT.GET idx2 doc2').equal(["newf", "DOG", "ot1", "4002"])
# OK and doc2 update is expected since 4002 < 4003
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4003 FIELDS newf HORSE ot1 4003').equal('OK')
env.expect('FT.GET idx2 doc2').equal(["newf", "HORSE", "ot1", "4003"])
# Expect NOADD since 4003 is not > 4003
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1>4003 FIELDS newf COW ot1 4003').equal('NOADD')
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if 4003<@ot1 FIELDS newf COW ot1 4003').equal('NOADD')
# Expect OK and doc2 updated since 4003 > 4002
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1>4002 FIELDS newf PIG ot1 4002').equal('OK')
env.expect('FT.GET idx2 doc2').equal(["newf", "PIG", "ot1", "4002"])
# Syntax errors
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4-002 FIELDS newf DOG ot1 4002').contains('Syntax error')
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<to_number(4-002) FIELDS newf DOG ot1 4002').contains('Syntax error')
def testUnknownSymbolErrorOnConditionalAdd(env):
env.expect('FT.CREATE idx SCHEMA f1 TAG f2 NUMERIC NOINDEX f3 TAG NOINDEX').ok()
env.expect('ft.add idx doc1 1.0 REPLACE PARTIAL IF @f1<awfwaf FIELDS f1 foo f2 1 f3 boo').ok()
env.expect('ft.add idx doc1 1.0 REPLACE PARTIAL IF @f1<awfwaf FIELDS f1 foo f2 1 f3 boo').error()
def testDelIndexExternally(env):
    env.skipOnCluster() # todo: remove once fixed on the coordinator
env.expect('FT.CREATE idx SCHEMA num NUMERIC t TAG g GEO').equal('OK')
env.expect('ft.add idx doc1 1.0 FIELDS num 3 t my_tag g', "1,1").equal('OK')
env.expect('set nm:idx/num 1').equal('OK')
env.expect('ft.add idx doc2 1.0 FIELDS num 3').equal('Could not open numeric index for indexing')
env.expect('set tag:idx/t 1').equal('OK')
env.expect('ft.add idx doc3 1.0 FIELDS t 3').equal('Could not open tag index for indexing')
env.expect('set geo:idx/g 1').equal('OK')
env.expect('ft.add idx doc4 1.0 FIELDS g "1,1"').equal('Could not index geo value')
def testWrongResultsReturnedBySkipOptimization(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'f1', 'TEXT', 'f2', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'f1', 'foo', 'f2', 'bar').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'f1', 'moo', 'f2', 'foo').equal('OK')
env.expect('ft.search', 'idx', 'foo @f2:moo').debugPrint().equal([0L])
def testErrorWithApply(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo bar').equal('OK')
err = env.cmd('FT.AGGREGATE', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'split()')[1]
env.assertEqual(str(err[0]), 'Invalid number of arguments for split')
def testSummerizeWithAggregateRaiseError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.aggregate', 'idx', 'foo2', 'SUMMARIZE', 'FIELDS', '1', 'test',
'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0').error()
def testSummerizeHighlightParseError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.search', 'idx', 'foo2', 'SUMMARIZE', 'FIELDS', 'WITHSCORES').error()
env.expect('ft.search', 'idx', 'foo2', 'HIGHLIGHT', 'FIELDS', 'WITHSCORES').error()
def testCursorBadArgument(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.aggregate', 'idx', '*',
'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0',
'WITHCURSOR', 'COUNT', 'BAD').error()
def testLimitBadArgument(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.search', 'idx', '*', 'LIMIT', '1').error()
def testOnTimeoutBadArgument(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'bad').error()
def testAggregateSortByWrongArgument(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', 'bad').error()
def testAggregateSortByMaxNumberOfFields(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA',
'test1', 'TEXT', 'SORTABLE',
'test2', 'TEXT', 'SORTABLE',
'test3', 'TEXT', 'SORTABLE',
'test4', 'TEXT', 'SORTABLE',
'test5', 'TEXT', 'SORTABLE',
'test6', 'TEXT', 'SORTABLE',
'test7', 'TEXT', 'SORTABLE',
'test8', 'TEXT', 'SORTABLE',
'test9', 'TEXT', 'SORTABLE'
).equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '9', *['@test%d' % (i + 1) for i in range(9)]).error()
args = ['@test%d' % (i + 1) for i in range(8)] + ['bad']
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '9', *args).error()
args = ['@test%d' % (i + 1) for i in range(8)] + ['ASC', 'MAX', 'bad']
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '9', *args).error()
args = ['@test%d' % (i + 1) for i in range(8)] + ['ASC', 'MAX']
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '9', *args).error()
def testNumericFilterError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', 'bad', '2').error()
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', '0', 'bad').error()
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', '0').error()
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', 'bad').error()
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', '0', '2', 'FILTER', 'test', '0', 'bla').error()
def testGeoFilterError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', '1').error()
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', 'bad' , '2', '3', 'km').error()
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', '1' , 'bad', '3', 'km').error()
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', '1' , '2', 'bad', 'km').error()
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', '1' , '2', '3', 'bad').error()
def testReducerError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', 'bad').error()
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0', 'as').error()
def testGroupbyError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE').error()
    if not env.isCluster(): # todo: remove once fixed on the coordinator
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test1').error()
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'bad', '0').error()
    if not env.isCluster(): # todo: remove once fixed on the coordinator
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'SUM', '1', '@test1').error()
def testGroupbyWithSort(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.add', 'idx', 'doc3', '1.0', 'FIELDS', 'test', '2').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '2', '@test', 'ASC',
'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0', 'as', 'count').equal([2L, ['test', '2', 'count', '1'], ['test', '1', 'count', '2']])
def testApplyError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'APPLY', 'split(@test)', 'as').error()
def testLoadError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'LOAD', 'bad').error()
env.expect('ft.aggregate', 'idx', '*', 'LOAD', 'bad', 'test').error()
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '2', 'test').error()
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '2', '@test').error()
def testMissingArgsError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx').error()
def testUnexistsScorer(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.search', 'idx', '*', 'SCORER', 'bad').error()
def testHighlightWithUnknowsProperty(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'HIGHLIGHT', 'FIELDS', '1', 'test1').error()
def testBadFilterExpression(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'FILTER', 'blabla').error()
if not env.isCluster(): # todo: remove once fix on coordinator
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'FILTER', '@test1 > 1').error()
def testWithSortKeysOnNoneSortableValue(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.search', 'idx', '*', 'WITHSORTKEYS', 'SORTBY', 'test').equal([1L, 'doc1', '$foo', ['test', 'foo']])
def testWithWithRawIds(env):
env.skipOnCluster() # todo: remove once fix on coordinator
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.search', 'idx', '*', 'WITHRAWIDS').equal([1L, 'doc1', 1L, ['test', 'foo']])
def testUnkownIndex(env):
env.skipOnCluster() # todo: remove once fix on coordinator
env.expect('ft.aggregate').error()
env.expect('ft.aggregate', 'idx', '*').error()
env.expect('ft.aggregate', 'idx', '*', 'WITHCURSOR').error()
def testExplainError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('FT.EXPLAIN', 'idx', '(').error()
def testBadCursor(env):
env.expect('FT.CURSOR', 'READ', 'idx').error()
env.expect('FT.CURSOR', 'READ', 'idx', '1111').error()
env.expect('FT.CURSOR', 'READ', 'idx', 'bad').error()
env.expect('FT.CURSOR', 'DROP', 'idx', '1111').error()
env.expect('FT.CURSOR', 'bad', 'idx', '1111').error()
def testGroupByWithApplyError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
err = env.cmd('FT.AGGREGATE', 'idx', '*', 'APPLY', 'split()', 'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0', 'AS', 'count')[1]
assertEqualIgnoreCluster(env, str(err[0]), 'Invalid number of arguments for split')
def testSubStrErrors(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'matched_terms()', 'as', 'a', 'APPLY', 'substr(@a,0,4)')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test",3,-2)', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test",3,1000)', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test",-1,2)', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test")', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr(1)', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test", "test")', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test", "test", "test")', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test", "-1", "-1")', 'as', 'a')
env.assertTrue(env.isUp())
def testToUpperLower(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower(@test)', 'as', 'a').equal([1L, ['test', 'foo', 'a', 'foo']])
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower("FOO")', 'as', 'a').equal([1L, ['test', 'foo', 'a', 'foo']])
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper(@test)', 'as', 'a').equal([1L, ['test', 'foo', 'a', 'FOO']])
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper("foo")', 'as', 'a').equal([1L, ['test', 'foo', 'a', 'FOO']])
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper(1)', 'as', 'a').equal([1L, ['test', 'foo', 'a', None]])
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower(1)', 'as', 'a').equal([1L, ['test', 'foo', 'a', None]])
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper(1,2)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower(1,2)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
def testMatchedTerms(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'matched_terms()', 'as', 'a').equal([1L, ['test', 'foo', 'a', None]])
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'matched_terms()', 'as', 'a').equal([1L, ['test', 'foo', 'a', ['foo']]])
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'matched_terms(100)', 'as', 'a').equal([1L, ['test', 'foo', 'a', ['foo']]])
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'matched_terms(-100)', 'as', 'a').equal([1L, ['test', 'foo', 'a', ['foo']]])
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'matched_terms("test")', 'as', 'a').equal([1L, ['test', 'foo', 'a', ['foo']]])
def testStrFormatError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format("%s")', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format("%", "test")', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format("%b", "test")', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format(5)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'upper(1)', 'as', 'b', 'APPLY', 'format("%s", @b)', 'as', 'a').equal([1L, ['test', 'foo', 'b', None, 'a', '(null)']])
# working example
env.expect('ft.aggregate', 'idx', 'foo', 'APPLY', 'format("%%s-test", "test")', 'as', 'a').equal([1L, ['a', '%s-test']])
env.expect('ft.aggregate', 'idx', 'foo', 'APPLY', 'format("%s-test", "test")', 'as', 'a').equal([1L, ['a', 'test-test']])
def testTimeFormatError(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'NUMERIC').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK')
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
if not env.isCluster(): # todo: remove once fix on coordinator
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(@test1)', 'as', 'a').error()
env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(@test)', 'as', 'a')
env.assertTrue(env.isUp())
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(@test, 4)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt("awfawf")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(235325153152356426246246246254)', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(@test, "%s")' % ('d' * 2048), 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'hour("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'minute("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'day("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'month("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'dayofweek("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'dayofmonth("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'dayofyear("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'year("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
def testMonthOfYear(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'NUMERIC').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK')
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear(@test)', 'as', 'a').equal([1L, ['test', '12234556', 'a', '4']])
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear(@test, 112)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear("bad")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
def testParseTimeErrors(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'NUMERIC').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK')
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'parse_time()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'parse_time(11)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'parse_time(11,22)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'parse_time("%s", "%s")' % ('d' * 2048, 'd' * 2048), 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'parse_time("test", "%s")' % ('d' * 2048), 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
def testMathFunctions(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'NUMERIC').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK')
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'exp(@test)', 'as', 'a').equal([1L, ['test', '12234556', 'a', 'inf']])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'ceil(@test)', 'as', 'a').equal([1L, ['test', '12234556', 'a', '12234556']])
def testErrorOnOpperation(env):
env.expect('FT.CREATE', 'idx', 'SCHEMA', 'test', 'NUMERIC').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK')
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', '1 + split()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'split() + 1', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', '"bad" + "bad"', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'split("bad" + "bad")', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', '!(split("bad" + "bad"))', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'APPLY', '!@test', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
def testSortkeyUnsortable(env):
env.cmd('ft.create', 'idx', 'schema', 'test', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 'test', 'foo')
rv = env.cmd('ft.aggregate', 'idx', 'foo', 'withsortkeys',
'load', '1', '@test',
'sortby', '1', '@test')
env.assertEqual([1, '$foo', ['test', 'foo']], rv)
def testIssue919(env):
# This only works if the missing field has a lower sortable index
# than the present field.
env.cmd('ft.create', 'idx', 'schema', 't1', 'text', 'sortable', 'n1', 'numeric', 'sortable')
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 'n1', 42)
rv = env.cmd('ft.search', 'idx', '*', 'sortby', 't1', 'desc')
env.assertEqual([1L, 'doc1', ['n1', '42']], rv)
def testIssue1074(env):
# Ensure that sortable fields are returned in their string form from the
# document
env.cmd('ft.create', 'idx', 'schema', 't1', 'text', 'n1', 'numeric', 'sortable')
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 't1', 'hello', 'n1', 1581011976800)
rv = env.cmd('ft.search', 'idx', '*', 'sortby', 'n1')
env.assertEqual([1L, 'doc1', ['n1', '1581011976800', 't1', 'hello']], rv)
def testIssue1085(env):
env.skipOnCluster()
env.cmd('FT.CREATE issue1085 SCHEMA foo TEXT SORTABLE bar NUMERIC SORTABLE')
for i in range(1, 10):
env.cmd('FT.ADD issue1085 document_%d 1 REPLACE FIELDS foo foo%d bar %d' % (i, i, i))
env.expect('FT.SEARCH', 'issue1085', '@bar:[8 8]').equal([1L, 'document_8', ['foo', 'foo8', 'bar', '8']])
for i in range(1, 10):
env.cmd('FT.ADD issue1085 document_8 1 REPLACE FIELDS foo foo8 bar 8')
env.expect('ft.debug GC_FORCEINVOKE issue1085').equal('DONE')
env.expect('FT.SEARCH', 'issue1085', '@bar:[8 8]').equal([1, 'document_8', ['foo', 'foo8', 'bar', '8']])
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
from itertools import izip_longest
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return izip_longest(fillvalue=fillvalue, *args)
def to_dict(r):
return {r[i]: r[i + 1] for i in range(0, len(r), 2)}
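# Illustrative sketch (not part of the original test suite): how the helpers above can be
# used to post-process a flat RediSearch-style reply. The sample values are made up.
_sample_reply = ['f1', 'foo1', 'f2', '2', 'f3', 'foo2']
_sample_as_dict = to_dict(_sample_reply)  # {'f1': 'foo1', 'f2': '2', 'f3': 'foo2'}
_sample_as_chunks = list(grouper(_sample_reply, 2))  # [('f1', 'foo1'), ('f2', '2'), ('f3', 'foo2')]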
def testOptimize(env):
env.skipOnCluster()
env.cmd('ft.create', 'idx', 'SCHEMA', 'test', 'TEXT', 'SORTABLE')
env.cmd('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo')
env.assertEqual(0, env.cmd('FT.OPTIMIZE', 'idx'))
with env.assertResponseError():
env.assertOk(env.cmd('FT.OPTIMIZE', 'idx', '666'))
env.expect('FT.OPTIMIZE', 'fake_idx').error()
def testInfoError(env):
env.expect('ft.info', 'no_idx').error()
def testSetPayload(env):
env.skipOnCluster()
env.expect('flushall')
env.expect('ft.create idx schema name text').equal('OK')
env.expect('ft.add idx hotel 1.0 fields name hilton').equal('OK')
env.expect('FT.SETPAYLOAD idx hotel payload').equal('OK')
env.expect('FT.SETPAYLOAD idx hotel payload').equal('OK')
env.expect('FT.SETPAYLOAD idx fake_hotel').error() \
.contains("wrong number of arguments for 'FT.SETPAYLOAD' command")
env.expect('FT.SETPAYLOAD fake_idx hotel payload').error().contains('Unknown Index name')
env.expect('FT.SETPAYLOAD idx fake_hotel payload').error().contains('Document not in index')
def testIndexNotRemovedFromCursorListAfterRecreated(env):
env.expect('FT.CREATE idx SCHEMA f1 TEXT').ok()
env.expect('FT.AGGREGATE idx * WITHCURSOR').equal([[0], 0])
env.expect('FT.CREATE idx SCHEMA f1 TEXT').error()
env.expect('FT.AGGREGATE idx * WITHCURSOR').equal([[0], 0])
def testSearchNotExistsTagValue(env):
# this test basically makes sure we are not leaking
env.expect('FT.CREATE idx SCHEMA t TAG SORTABLE').ok()
env.expect('FT.SEARCH idx @t:{val}').equal([0])
def testUnseportedSortableTypeErrorOnTags(env):
env.skipOnCluster()
env.expect('FT.CREATE idx SCHEMA f1 TEXT SORTABLE f2 NUMERIC SORTABLE NOINDEX f3 TAG SORTABLE NOINDEX f4 TEXT SORTABLE NOINDEX').ok()
env.expect('FT.ADD idx doc1 1.0 FIELDS f1 foo1 f2 1 f3 foo1 f4 foo1').ok()
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL FIELDS f2 2 f3 foo2 f4 foo2').ok()
env.expect('HGETALL doc1').equal(['f1', 'foo1', 'f2', '2', 'f3', 'foo2', 'f4', 'foo2'])
env.expect('FT.SEARCH idx *').equal([1L, 'doc1', ['f1', 'foo1', 'f2', '2', 'f3', 'foo2', 'f4', 'foo2']])
def testIssue1158(env):
env.cmd('FT.CREATE idx SCHEMA txt1 TEXT txt2 TEXT txt3 TEXT')
env.cmd('FT.ADD idx doc1 1.0 FIELDS txt1 10 txt2 num1')
env.expect('FT.GET idx doc1').equal(['txt1', '10', 'txt2', 'num1'])
# only the 1st condition is checked (the 2nd would return an error)
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if @txt1||to_number(@txt2)<5 FIELDS txt1 5').equal('OK')
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if @txt3&&to_number(@txt2)<5 FIELDS txt1 5').equal('NOADD')
# both are checked
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if to_number(@txt1)>11||to_number(@txt1)>42 FIELDS txt2 num2').equal('NOADD')
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if to_number(@txt1)>11||to_number(@txt1)<42 FIELDS txt2 num2').equal('OK')
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if to_number(@txt1)>11&&to_number(@txt1)>42 FIELDS txt2 num2').equal('NOADD')
env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if to_number(@txt1)>11&&to_number(@txt1)<42 FIELDS txt2 num2').equal('NOADD')
env.expect('FT.GET idx doc1').equal(['txt1', '5', 'txt2', 'num2'])
def testIssue1159(env):
env.cmd('FT.CREATE idx SCHEMA f1 TAG')
for i in range(1000):
env.cmd('FT.add idx doc%d 1.0 FIELDS f1 foo' % i)
def testIssue1169(env):
env.cmd('FT.CREATE idx SCHEMA txt1 TEXT txt2 TEXT')
env.cmd('FT.ADD idx doc1 1.0 FIELDS txt1 foo')
env.expect('FT.AGGREGATE idx foo GROUPBY 1 @txt1 REDUCE FIRST_VALUE 1 @txt2 as test').equal([1L, ['txt1', 'foo', 'test', None]])
def testIssue1184(env):
env.skipOnCluster()
field_types = ['TEXT', 'NUMERIC', 'TAG']
for ft in field_types:
env.assertOk(env.execute_command('FT.CREATE idx SCHEMA field ' + ft))
res = env.execute_command('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(d['inverted_sz_mb'], '0')
env.assertEqual(d['num_records'], '0')
value = '42'
env.assertOk(env.execute_command('FT.ADD idx doc0 1 FIELD field ' + value))
doc = env.cmd('FT.SEARCH idx *')
env.assertEqual(doc, [1L, 'doc0', ['field', value]])
res = env.execute_command('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertGreater(d['inverted_sz_mb'], '0')
env.assertEqual(d['num_records'], '1')
env.assertEqual(env.execute_command('FT.DEL idx doc0'), 1)
env.cmd('ft.debug', 'GC_FORCEINVOKE', 'idx')
res = env.execute_command('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(d['inverted_sz_mb'], '0')
env.assertEqual(d['num_records'], '0')
env.cmd('FT.DROP idx')
def test_MOD_865(env):
args_list = ['FT.CREATE', 'idx', 'SCHEMA']
# We have a limit on the number of TEXT fields, so we split between TEXT and NUMERIC
for i in range(128):
args_list.extend([i, 'TEXT', 'SORTABLE'])
for i in range(128, 256):
args_list.extend([i, 'NUMERIC', 'SORTABLE'])
env.expect(*args_list).error().contains('Too many SORTABLE fields in schema')
def testIssue1208(env):
env.cmd('FT.CREATE idx SCHEMA n NUMERIC')
env.cmd('FT.ADD idx doc1 1 FIELDS n 1.0321e5')
env.cmd('FT.ADD idx doc2 1 FIELDS n 101.11')
env.cmd('FT.ADD idx doc3 1 FIELDS n 0.0011')
env.expect('FT.SEARCH', 'idx', '@n:[1.1432E3 inf]').equal([1L, 'doc1', ['n', '1.0321e5']])
env.expect('FT.SEARCH', 'idx', '@n:[-1.12E-3 1.12E-1]').equal([1L, 'doc3', ['n', '0.0011']])
res = [3L, 'doc3', ['n', '0.0011'], 'doc2', ['n', '101.11'], 'doc1', ['n', '1.0321e5']]
env.expect('FT.SEARCH', 'idx', '@n:[-inf inf]').equal(res)
env.expect('FT.ADD idx doc3 1 REPLACE PARTIAL IF @n>42e3 FIELDS n 100').equal('NOADD')
env.expect('FT.ADD idx doc3 1 REPLACE PARTIAL IF @n<42e3 FIELDS n 100').ok()
def testRED47209(env):
env.expect('FT.CREATE idx SCHEMA t TEXT').ok()
env.expect('FT.ADD idx doc1 1 FIELDS t foo')
if env.isCluster():
# on cluster we have WITHSCORES set unconditionally for FT.SEARCH
res = [1L, 'doc1', ['t', 'foo']]
else:
res = [1L, 'doc1', None, ['t', 'foo']]
env.expect('FT.SEARCH idx foo WITHSORTKEYS LIMIT 0 1').equal(res)
def testSuggestMax(env):
for i in range(10):
env.expect('ft.sugadd', 'sug', 'test%d' % i, i + 1).equal(i + 1)
# for j in range(i + 1):
#env.expect('ft.sugadd', 'sug', 'test10', '1', 'INCR').equal(i + 1)
expected_res = ['test9', '7.0710678100585938', 'test8', '6.3639612197875977', 'test7', '5.6568541526794434',
'test6', '4.9497475624084473', 'test5', '4.242640495300293', 'test4', '3.5355339050292969',
'test3', '2.8284270763397217', 'test2', '2.1213202476501465', 'test1', '1.4142135381698608',
'test0', '0.70710676908493042']
for i in range(1,11):
env.expect('FT.SUGGET', 'sug', 'test', 'MAX', i, 'WITHSCORES').equal(expected_res[0:i*2])
env.expect('FT.SUGGET', 'sug', 'test', 'MAX', 10, 'WITHSCORES').equal(expected_res)
def testSuggestMax2(env):
for i in range(10):
env.expect('ft.sugadd', 'sug', 'test %d' % i, 1).equal(i + 1)
expected_res = ['test 0', 'test 1', 'test 2', 'test 3', 'test 4', 'test 5']
for i in range(1,7):
res = env.cmd('FT.SUGGET', 'sug', 'test ', 'MAX', i)
for item in res:
env.assertIn(item, expected_res[0:i])
|
test_aea.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests for aea/aea.py."""
import os
import tempfile
import time
from pathlib import Path
from threading import Thread
import pytest
import yaml
from aea import AEA_DIR
from aea.aea import AEA
from aea.configurations.base import ProtocolConfig, PublicId
from aea.connections.stub.connection import StubConnection
from aea.crypto.fetchai import FETCHAI
from aea.crypto.ledger_apis import LedgerApis
from aea.crypto.wallet import Wallet
from aea.identity.base import Identity
from aea.mail.base import Envelope
from aea.protocols.base import Protocol
from aea.protocols.default.message import DefaultMessage
from aea.protocols.default.serialization import DefaultSerializer
from aea.registries.base import Resources
from aea.skills.base import Skill
from packages.fetchai.connections.local.connection import LocalNode, OEFLocalConnection
from packages.fetchai.protocols.fipa.message import FIPAMessage
from packages.fetchai.protocols.fipa.serialization import FIPASerializer
from .conftest import (
CUR_PATH,
DUMMY_SKILL_PUBLIC_ID,
LOCAL_CONNECTION_PUBLIC_ID,
UNKNOWN_PROTOCOL_PUBLIC_ID,
)
from .data.dummy_aea.skills.dummy.tasks import DummyTask # type: ignore
from .data.dummy_skill.behaviours import DummyBehaviour # type: ignore
def test_initialise_aea():
"""Tests the initialisation of the AEA."""
node = LocalNode()
private_key_path = os.path.join(CUR_PATH, "data", "fet_private_key.txt")
wallet = Wallet({FETCHAI: private_key_path})
identity = Identity("my_name", address=wallet.addresses[FETCHAI])
connections1 = [
OEFLocalConnection(
identity.address, node, connection_id=OEFLocalConnection.connection_id
)
]
ledger_apis = LedgerApis({}, FETCHAI)
my_AEA = AEA(
identity,
connections1,
wallet,
ledger_apis,
resources=Resources(str(Path(CUR_PATH, "aea"))),
)
assert my_AEA.context == my_AEA._context, "Cannot access the Agent's Context"
assert (
not my_AEA.context.connection_status.is_connected
), "AEA should not be connected."
my_AEA.setup()
assert my_AEA.resources is not None, "Resources must not be None after setup"
my_AEA.resources = Resources(str(Path(CUR_PATH, "aea")))
assert my_AEA.resources is not None, "Resources must not be None after set"
assert (
my_AEA.context.shared_state is not None
), "Shared state must not be None after set"
assert my_AEA.context.identity is not None, "Identity must not be None after set."
my_AEA.stop()
def test_act():
"""Tests the act function of the AEA."""
with LocalNode() as node:
agent_name = "MyAgent"
private_key_path = os.path.join(CUR_PATH, "data", "fet_private_key.txt")
wallet = Wallet({FETCHAI: private_key_path})
identity = Identity(agent_name, address=wallet.addresses[FETCHAI])
ledger_apis = LedgerApis({}, FETCHAI)
connections = [
OEFLocalConnection(
identity.address, node, connection_id=LOCAL_CONNECTION_PUBLIC_ID
)
]
resources = Resources(str(Path(CUR_PATH, "data", "dummy_aea")))
agent = AEA(
identity, connections, wallet, ledger_apis, resources, is_programmatic=False
)
t = Thread(target=agent.start)
try:
t.start()
time.sleep(1.0)
behaviour = agent.resources.behaviour_registry.fetch(
(DUMMY_SKILL_PUBLIC_ID, "dummy")
)
assert behaviour.nb_act_called > 0, "Act() wasn't called"
finally:
agent.stop()
t.join()
def test_react():
"""Tests income messages."""
with LocalNode() as node:
agent_name = "MyAgent"
private_key_path = os.path.join(CUR_PATH, "data", "fet_private_key.txt")
wallet = Wallet({FETCHAI: private_key_path})
identity = Identity(agent_name, address=wallet.addresses[FETCHAI])
ledger_apis = LedgerApis({}, FETCHAI)
connection = OEFLocalConnection(
identity.address, node, connection_id=LOCAL_CONNECTION_PUBLIC_ID
)
connections = [connection]
resources = Resources(str(Path(CUR_PATH, "data", "dummy_aea")))
msg = DefaultMessage(type=DefaultMessage.Type.BYTES, content=b"hello")
msg.counterparty = identity.address
message_bytes = DefaultSerializer().encode(msg)
envelope = Envelope(
to=identity.address,
sender=identity.address,
protocol_id=DefaultMessage.protocol_id,
message=message_bytes,
)
agent = AEA(
identity, connections, wallet, ledger_apis, resources, is_programmatic=False
)
t = Thread(target=agent.start)
try:
t.start()
time.sleep(1.0)
agent.outbox.put(envelope)
time.sleep(2.0)
default_protocol_public_id = DefaultMessage.protocol_id
dummy_skill_public_id = DUMMY_SKILL_PUBLIC_ID
handler = agent.resources.handler_registry.fetch_by_protocol_and_skill(
default_protocol_public_id, dummy_skill_public_id
)
assert handler is not None, "Handler is not set."
assert (
msg in handler.handled_messages
), "The message is not inside the handled_messages."
except Exception:
raise
finally:
agent.stop()
t.join()
@pytest.mark.asyncio
async def test_handle():
"""Tests handle method of an agent."""
with LocalNode() as node:
agent_name = "MyAgent"
private_key_path = os.path.join(CUR_PATH, "data", "fet_private_key.txt")
wallet = Wallet({FETCHAI: private_key_path})
ledger_apis = LedgerApis({}, FETCHAI)
identity = Identity(agent_name, address=wallet.addresses[FETCHAI])
connection = OEFLocalConnection(
identity.address, node, connection_id=DUMMY_SKILL_PUBLIC_ID
)
connections = [connection]
resources = Resources(str(Path(CUR_PATH, "data", "dummy_aea")))
msg = DefaultMessage(type=DefaultMessage.Type.BYTES, content=b"hello")
msg.counterparty = agent_name
message_bytes = DefaultSerializer().encode(msg)
envelope = Envelope(
to=identity.address,
sender=identity.address,
protocol_id=UNKNOWN_PROTOCOL_PUBLIC_ID,
message=message_bytes,
)
agent = AEA(
identity, connections, wallet, ledger_apis, resources, is_programmatic=False
)
t = Thread(target=agent.start)
try:
t.start()
time.sleep(2.0)
dummy_skill = agent.resources.get_skill(DUMMY_SKILL_PUBLIC_ID)
dummy_handler = dummy_skill.handlers["dummy"]
expected_envelope = envelope
agent.outbox.put(expected_envelope)
time.sleep(2.0)
assert len(dummy_handler.handled_messages) == 1
# DECODING ERROR
msg = "hello".encode("utf-8")
envelope = Envelope(
to=identity.address,
sender=identity.address,
protocol_id=DefaultMessage.protocol_id,
message=msg,
)
expected_envelope = envelope
agent.outbox.put(expected_envelope)
time.sleep(2.0)
assert len(dummy_handler.handled_messages) == 2
# UNSUPPORTED SKILL
msg = FIPASerializer().encode(
FIPAMessage(
performative=FIPAMessage.Performative.ACCEPT,
message_id=0,
dialogue_reference=(str(0), ""),
target=1,
)
)
envelope = Envelope(
to=identity.address,
sender=identity.address,
protocol_id=FIPAMessage.protocol_id,
message=msg,
)
expected_envelope = envelope
agent.outbox.put(expected_envelope)
time.sleep(2.0)
assert len(dummy_handler.handled_messages) == 3
finally:
agent.stop()
t.join()
class TestInitializeAEAProgrammaticallyFromResourcesDir:
"""Test that we can initialize the agent by providing the resource object loaded from dir."""
@classmethod
def setup_class(cls):
"""Set the test up."""
cls.node = LocalNode()
cls.node.start()
cls.agent_name = "MyAgent"
cls.private_key_path = os.path.join(CUR_PATH, "data", "fet_private_key.txt")
cls.wallet = Wallet({FETCHAI: cls.private_key_path})
cls.ledger_apis = LedgerApis({}, FETCHAI)
cls.identity = Identity(cls.agent_name, address=cls.wallet.addresses[FETCHAI])
cls.connection = OEFLocalConnection(
cls.agent_name, cls.node, connection_id=LOCAL_CONNECTION_PUBLIC_ID,
)
cls.connections = [cls.connection]
cls.resources = Resources(os.path.join(CUR_PATH, "data", "dummy_aea"))
cls.aea = AEA(
cls.identity,
cls.connections,
cls.wallet,
cls.ledger_apis,
cls.resources,
is_programmatic=False,
)
cls.expected_message = DefaultMessage(
type=DefaultMessage.Type.BYTES, content=b"hello"
)
cls.expected_message.counterparty = cls.agent_name
envelope = Envelope(
to=cls.agent_name,
sender=cls.agent_name,
protocol_id=DefaultMessage.protocol_id,
message=DefaultSerializer().encode(cls.expected_message),
)
cls.t = Thread(target=cls.aea.start)
cls.t.start()
time.sleep(0.5)
cls.aea.outbox.put(envelope)
time.sleep(0.5)
def test_initialize_aea_programmatically(self):
"""Test that we can initialize an AEA programmatically."""
dummy_skill_id = DUMMY_SKILL_PUBLIC_ID
dummy_behaviour_name = "dummy"
dummy_behaviour = self.aea.resources.behaviour_registry.fetch(
(dummy_skill_id, dummy_behaviour_name)
)
assert dummy_behaviour is not None
assert dummy_behaviour.nb_act_called > 0
# TODO the previous code caused an error:
# _pickle.PicklingError: Can't pickle <class 'tasks.DummyTask'>: import of module 'tasks' failed
dummy_task = DummyTask()
task_id = self.aea.task_manager.enqueue_task(dummy_task)
async_result = self.aea.task_manager.get_task_result(task_id)
expected_dummy_task = async_result.get(2.0)
assert expected_dummy_task.nb_execute_called > 0
dummy_handler = self.aea.resources.handler_registry.fetch_by_protocol_and_skill(
DefaultMessage.protocol_id, dummy_skill_id
)
dummy_handler_alt = self.aea.resources.handler_registry.fetch(
(dummy_skill_id, "dummy")
)
assert dummy_handler == dummy_handler_alt
assert dummy_handler is not None
assert len(dummy_handler.handled_messages) == 1
assert dummy_handler.handled_messages[0] == self.expected_message
@classmethod
def teardown_class(cls):
"""Tear the test down."""
cls.aea.stop()
cls.t.join()
cls.node.stop()
class TestInitializeAEAProgrammaticallyBuildResources:
"""Test that we can initialize the agent by building the resource object."""
@classmethod
def setup_class(cls):
"""Set the test up."""
cls.node = LocalNode()
cls.node.start()
cls.agent_name = "MyAgent"
cls.private_key_path = os.path.join(CUR_PATH, "data", "fet_private_key.txt")
cls.wallet = Wallet({FETCHAI: cls.private_key_path})
cls.ledger_apis = LedgerApis({}, FETCHAI)
cls.identity = Identity(cls.agent_name, address=cls.wallet.addresses[FETCHAI])
cls.connection = OEFLocalConnection(
cls.agent_name, cls.node, connection_id=LOCAL_CONNECTION_PUBLIC_ID
)
cls.connections = [cls.connection]
cls.temp = tempfile.mkdtemp(prefix="test_aea_resources")
cls.resources = Resources(cls.temp)
cls.aea = AEA(
cls.identity,
cls.connections,
cls.wallet,
cls.ledger_apis,
resources=cls.resources,
)
default_protocol_id = DefaultMessage.protocol_id
cls.default_protocol_configuration = ProtocolConfig.from_json(
yaml.safe_load(open(Path(AEA_DIR, "protocols", "default", "protocol.yaml")))
)
cls.default_protocol = Protocol(
default_protocol_id, DefaultSerializer(), cls.default_protocol_configuration
)
cls.resources.protocol_registry.register(
default_protocol_id, cls.default_protocol
)
cls.error_skill = Skill.from_dir(
Path(AEA_DIR, "skills", "error"), cls.aea.context
)
cls.dummy_skill = Skill.from_dir(
Path(CUR_PATH, "data", "dummy_skill"), cls.aea.context
)
cls.resources.add_skill(cls.dummy_skill)
cls.resources.add_skill(cls.error_skill)
cls.expected_message = DefaultMessage(
type=DefaultMessage.Type.BYTES, content=b"hello"
)
cls.expected_message.counterparty = cls.agent_name
cls.t = Thread(target=cls.aea.start)
cls.t.start()
time.sleep(0.5)
cls.aea.outbox.put(
Envelope(
to=cls.agent_name,
sender=cls.agent_name,
protocol_id=default_protocol_id,
message=DefaultSerializer().encode(cls.expected_message),
)
)
def test_initialize_aea_programmatically(self):
"""Test that we can initialize an AEA programmatically."""
time.sleep(0.5)
dummy_skill_id = DUMMY_SKILL_PUBLIC_ID
dummy_behaviour_name = "dummy"
dummy_behaviour = self.aea.resources.behaviour_registry.fetch(
(dummy_skill_id, dummy_behaviour_name)
)
assert dummy_behaviour is not None
assert dummy_behaviour.nb_act_called > 0
dummy_task = DummyTask()
task_id = self.aea.task_manager.enqueue_task(dummy_task)
async_result = self.aea.task_manager.get_task_result(task_id)
expected_dummy_task = async_result.get(2.0)
assert expected_dummy_task.nb_execute_called > 0
dummy_handler_name = "dummy"
dummy_handler = self.aea.resources.handler_registry.fetch(
(dummy_skill_id, dummy_handler_name)
)
dummy_handler_alt = self.aea.resources.handler_registry.fetch_by_protocol_and_skill(
DefaultMessage.protocol_id, dummy_skill_id
)
assert dummy_handler == dummy_handler_alt
assert dummy_handler is not None
assert len(dummy_handler.handled_messages) == 1
assert dummy_handler.handled_messages[0] == self.expected_message
@classmethod
def teardown_class(cls):
"""Tear the test down."""
cls.aea.stop()
cls.t.join()
cls.node.stop()
Path(cls.temp).rmdir()
class TestAddBehaviourDynamically:
"""Test that we can add a behaviour dynamically."""
@classmethod
def setup_class(cls):
"""Set the test up."""
agent_name = "MyAgent"
private_key_path = os.path.join(CUR_PATH, "data", "fet_private_key.txt")
wallet = Wallet({FETCHAI: private_key_path})
ledger_apis = LedgerApis({}, FETCHAI)
resources = Resources(str(Path(CUR_PATH, "data", "dummy_aea")))
identity = Identity(agent_name, address=wallet.addresses[FETCHAI])
input_file = tempfile.mktemp()
output_file = tempfile.mktemp()
cls.agent = AEA(
identity,
[StubConnection(input_file, output_file)],
wallet,
ledger_apis,
resources,
is_programmatic=False,
)
cls.t = Thread(target=cls.agent.start)
cls.t.start()
time.sleep(1.0)
def test_add_behaviour_dynamically(self):
"""Test the dynamic registration of a behaviour."""
dummy_skill_id = PublicId("dummy_author", "dummy", "0.1.0")
dummy_skill = self.agent.resources.get_skill(dummy_skill_id)
assert dummy_skill is not None
new_behaviour = DummyBehaviour(
name="dummy2", skill_context=dummy_skill.skill_context
)
dummy_skill.skill_context.new_behaviours.put(new_behaviour)
time.sleep(1.0)
assert new_behaviour.nb_act_called > 0
assert (
len(self.agent.resources.behaviour_registry.fetch_by_skill(dummy_skill_id))
== 2
)
@classmethod
def teardown_class(cls):
"""Tear the class down."""
cls.agent.stop()
cls.t.join()
|
director.py
|
# packages
import os
import sys
import json
import time
import requests
import datetime
import argparse
import sseclient
import numpy as np
import multiprocessing as mpr
# plotting
import matplotlib
# set matplotlib backend before pyplot is imported
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import Normalize
# project
import heatmap.helpers as hlp
import heatmap.miniclasses as mcl
class Director():
"""
Handles all API interfacing, including fetching the sensor list and updating it.
Imports the room layout and calculates euclidean distance maps during initialisation.
When new event data arrives in the stream, it is delegated to the correct sensor for an update.
"""
def __init__(self, username='', password='', project_id='', api_url_base='', t_range=[0, 40], resolution=5, cache_dir='/tmp/', pickle_id='hmap_'):
"""
Initialise Director class.
Parameters
----------
username : str
DT Studio service account key.
password : str
DT Studio service account secret.
project_id : str
DT Studio project identifier.
api_url_base : str
Endpoint for API.
t_range : [float, float]
Temperature range [min, max] used in visualization.
resolution : int
Number of points per meter in heatmap grid.
cache_dir : str
Absolute path to directory used for caching distance maps.
pickle_id : str
Identifier used for files cached in cache_dir.
"""
# give to self
self.username = username
self.password = password
self.project_id = project_id
self.api_url_base = api_url_base
self.t_range = t_range
self.resolution = resolution
self.cache_dir = cache_dir
self.pickle_id = pickle_id
# variables
self.last_update = -1
self.sample = False
self.cc = 0
# set stream endpoint
self.stream_endpoint = "{}/projects/{}/devices:stream".format(self.api_url_base, self.project_id)
# parse system arguments
self.__parse_sysargs()
# set history- and streaming filters
self.__set_filters()
# import room layout from json
self.__decode_json_layout()
# get limits for x- and y- axes
self.__generate_bounding_box()
# generate distance map for each sensor
if self.args['debug']:
self.__euclidean_map_debug()
else:
self.__euclidean_map_threaded()
# spawn heatmap
self.heatmap = np.zeros(shape=self.X.shape)
# check if sample is set
if self.sample:
print('\nUsing sample layout. No historic or streaming data will be used.')
print('To use historic or streaming data, provide a layout using the --layout argument.')
self.update_heatmap()
self.plot_heatmap(update_time='Sample Layout', show=True)
sys.exit()
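# Illustrative sketch (assumed values, not taken from this project): a Director is
# typically created with DT Studio credentials and the REST API base URL, e.g.
#   d = Director(username='<SERVICE_ACCOUNT_KEY>',
#                password='<SERVICE_ACCOUNT_SECRET>',
#                project_id='<PROJECT_ID>',
#                api_url_base='https://api.disruptive-technologies.com/v2')
# The api_url_base shown here is an assumption; use the endpoint configured for your project.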
def __parse_sysargs(self):
"""
Parse command line arguments.
"""
# create parser object
parser = argparse.ArgumentParser(description='Heatmap generation on Stream and Event History.')
# get UTC time now
now = (datetime.datetime.utcnow().replace(microsecond=0)).isoformat() + 'Z'
# general arguments
parser.add_argument('--layout', metavar='', help='Json file with room layout.', required=False)
parser.add_argument('--starttime', metavar='', help='Event history UTC starttime [YYYY-MM-DDTHH:MM:SSZ].', required=False, default=now)
parser.add_argument('--endtime', metavar='', help='Event history UTC endtime [YYYY-MM-DDTHH:MM:SSZ].', required=False, default=now)
parser.add_argument('--timestep', metavar='', help='Heatmap update period.', required=False, default=3600, type=int)
# boolean flags
parser.add_argument('--no-plot', action='store_true', help='Suppress plots in stream.')
parser.add_argument('--debug', action='store_true', help='Disables multiprocessing for debug visualization.')
parser.add_argument('--read', action='store_true', help='Import cached distance maps.')
# convert to dictionary
self.args = vars(parser.parse_args())
# set history flag
if now == self.args['starttime']:
self.fetch_history = False
else:
self.fetch_history = True
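# Illustrative sketch (hypothetical entry-point name): the flags parsed above are
# supplied on the command line, e.g.
#   python main.py --layout config/sample_layout.json \
#                  --starttime 2020-01-01T00:00:00Z --endtime 2020-01-02T00:00:00Z \
#                  --timestep 600 --read
# Omitting --starttime/--endtime keeps both at "now", which disables history fetching.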
def __new_event_data(self, event_data, cout=True):
"""
Receive new event_data json and pass it along to the correct sensor or door instance.
Parameters
----------
event_data : dictionary
Data json containing new event data.
cout : bool
Will print event information to console if True.
"""
# get id of source sensor
source_id = os.path.basename(event_data['targetName'])
# verify temperature event
if 'temperature' in event_data['data'].keys():
# find the sensor which produced the event
for sensor in self.sensors + self.oofs:
if source_id == sensor.sensor_id:
# give data to room
sensor.new_event_data(event_data)
if cout: print('-- New temperature {} for {} at [{}, {}].'.format(event_data['data']['temperature']['value'], source_id, sensor.x, sensor.y))
return True
elif 'objectPresent' in event_data['data']:
# find correct door
for door in self.doors:
if source_id == door.sensor_id:
# give state to door
door.new_event_data(event_data)
if cout: print('-- New door state {} for {} at [{}, {}].'.format(event_data['data']['objectPresent']['state'], source_id, door.x, door.y))
return True
return False
def __check_timestep(self, unixtime):
"""
Check if more time than --timestep has passed since last heatmap update.
Parameters
----------
unixtime : int
Seconds since 01-Jan 1970.
Returns
-------
return : bool
True if time to update heatmap.
False if we're still waiting.
"""
# check time since last update
if self.last_update < 0:
# update time to this event time
self.last_update = unixtime
return False
elif unixtime - self.last_update > self.args['timestep']:
# update timer to this event time
self.last_update = unixtime
return True
def __decode_json_layout(self):
"""
Parse json layout file and spawn related class objects.
"""
# import json to dictionary
if self.args['layout'] != None:
path = self.args['layout']
else:
path = os.path.join(os.path.join(os.path.dirname(__file__), '..'), 'config', 'sample_layout.json')
self.sample = True
jdict = hlp.import_json(path)
# count rooms and doors
n_rooms = len(jdict['rooms'])
n_doors = len(jdict['doors'])
# initialise object lists
self.rooms = [mcl.Room() for i in range(n_rooms)]
self.doors = [mcl.Door() for i in range(n_doors)]
# get rooms in dict
for ri in range(n_rooms):
# isolate room
jdict_room = jdict['rooms'][ri]
# count corners and sensors
n_corners = len(jdict_room['corners'])
n_sensors = len(jdict_room['sensors'])
# adopt name
self.rooms[ri].name = jdict_room['name']
# give room list of corner and sensor objects
self.rooms[ri].corners = [mcl.Corner(x=None, y=None) for i in range(n_corners)]
self.rooms[ri].sensors = [mcl.Sensor(x=None, y=None) for i in range(n_sensors)]
# update corners
for ci in range(n_corners):
# isolate json corner and give to room corner
jdict_corner = jdict_room['corners'][ci]
self.rooms[ri].corners[ci].set_coordinates(x=jdict_corner['x'], y=jdict_corner['y'])
# update sensors
for si in range(n_sensors):
# isolate json sensor and give to room sensor
jdict_sensor = jdict_room['sensors'][si]
self.rooms[ri].sensors[si].post_initialise(jdict_sensor['x'], jdict_sensor['y'], jdict_sensor['sensor_id'], room_number=ri)
# give t0 if exists
if 't0' in jdict_sensor:
self.rooms[ri].sensors[si].t = jdict_sensor['t0']
# get doors in dict
for di in range(n_doors):
# isolate doors
jdict_door = jdict['doors'][di]
# find rooms which door connects
r1 = None
r2 = None
for room in self.rooms:
if room.name == jdict_door['room1']:
r1 = room
if room.name == jdict_door['room2']:
r2 = room
# exit if rooms not found. Error in layout.
if r1 == None or r2 == None:
hlp.print_error('Error in layout. Door [{}] not connected to [{}] and [{}].'.format(jdict_door['name'], jdict_door['room1'], jdict_door['room2']), terminate=True)
# reformat for easier updating
p1 = [jdict_door['p1']['x'], jdict_door['p1']['y']]
p2 = [jdict_door['p2']['x'], jdict_door['p2']['y']]
# give variables to door object
self.doors[di].post_initialise(p1, p2, r1, r2, jdict_door['sensor_id'], di)
# give state if it exists
if 'closed' in jdict_door:
self.doors[di].closed = jdict_door['closed']
# adopt all sensors to self
self.sensors = []
for room in self.rooms:
for sensor in room.sensors:
self.sensors.append(sensor)
self.n_sensors = len(self.sensors)
# get objects of interest in dict
n_oofs = len(jdict['oofs'])
self.oofs = [mcl.Sensor(x=None, y=None) for i in range(n_oofs)]
for i, oof in enumerate(jdict['oofs']):
self.oofs[i].post_initialise(x=oof['x'], y=oof['y'], sensor_id=oof['sensor_id'], room_number=None)
# give t0 if exists
if 't0' in oof:
self.oofs[i].t = oof['t0']
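# Illustrative sketch of the layout json structure expected by the parser above.
# Field names are taken from the parsing code; the values are made up.
# {
#   "rooms": [{"name": "office",
#              "corners": [{"x": 0, "y": 0}, {"x": 5, "y": 0}, {"x": 5, "y": 4}, {"x": 0, "y": 4}],
#              "sensors": [{"x": 2.5, "y": 2.0, "sensor_id": "abc123", "t0": 21.0}]}],
#   "doors":  [{"name": "door1", "room1": "office", "room2": "hallway",
#               "p1": {"x": 5, "y": 1}, "p2": {"x": 5, "y": 2}, "sensor_id": "def456", "closed": false}],
#   "oofs":   [{"x": 1.0, "y": 1.0, "sensor_id": "ghi789", "t0": 20.0}]
# }
# 't0' and 'closed' are optional; every door must reference two existing room names.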
def __generate_bounding_box(self):
"""
Set grid dimension limits based on layout corners.
"""
# find limits for x- and y-axis
self.xlim = [0, 0]
self.ylim = [0, 0]
# iterate rooms
for room in self.rooms:
# iterate corners in room:
for c in room.corners:
if c.x < self.xlim[0]:
self.xlim[0] = c.x
if c.x > self.xlim[1]:
self.xlim[1] = c.x
if c.y < self.ylim[0]:
self.ylim[0] = c.y
if c.y > self.ylim[1]:
self.ylim[1] = c.y
# rounding
self.xlim = [int(np.floor(self.xlim[0])), int(np.ceil(self.xlim[1]))]
self.ylim = [int(np.floor(self.ylim[0])), int(np.ceil(self.ylim[1]))]
# set maximum dimension for any axis
self.maxdim = max(self.xlim[1]-self.xlim[0], self.ylim[1]-self.ylim[0])
# generate interpolation axes
self.x_interp = np.linspace(self.xlim[0], self.xlim[1], int(self.resolution*(self.xlim[1]-self.xlim[0])+0.5))
self.y_interp = np.linspace(self.ylim[0], self.ylim[1], int(self.resolution*(self.ylim[1]-self.ylim[0])+0.5))
# convert to compatible grid
self.X, self.Y = np.meshgrid(self.x_interp, self.y_interp)
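# Worked example (illustrative numbers): with corners spanning x in [0, 10] and
# y in [0, 4] and resolution = 5 points per meter, x_interp gets
# int(5 * 10 + 0.5) = 50 samples and y_interp gets int(5 * 4 + 0.5) = 20 samples,
# so X, Y and the heatmap are 20 x 50 grids.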
def __populate_grid(self, D, N, M, corner, room):
"""
Scan matrix and populate with euclidean distance for cells in line of sight of corner.
Parameters
----------
D : 2d ndarray
Distance matrix to be populated.
N : 2d ndarray
Matrix of the number of doors passed on the shortest path to each cell.
M : list of lists
Nested list of the door numbers passed on the shortest path to each cell.
corner : object
Corner Point object for which we check line of sight.
room : object
Room object the corner belongs to.
Returns
-------
D : 2d ndarray
Populated distance matrix.
N : 2d ndarray
Populated door-count matrix.
M : list of lists
Populated door-number matrix.
"""
# iterate x- and y-axis
for x, gx in enumerate(self.x_interp):
for y, gy in enumerate(self.y_interp):
# set active node
node = mcl.Point(self.x_interp[x], self.y_interp[y])
# get distance from corner to node if in line of sight
if not self.__has_direct_los(mcl.Point(corner.x+corner.dx, corner.y+corner.dy), node, room):
continue
d = hlp.euclidean_distance(corner.x, corner.y, node.x, node.y)
# update map if d is a valid value
if d != None:
# add distance from sensor to corner
d += corner.dmin
# update map if less than existing value
if D[y, x] == 0 or d < D[y, x]:
D[y, x] = d
N[y, x] = len(corner.visited_doors)
M[y][x] = [door.number for door in corner.visited_doors]
return D, N, M
def __reset_pathfinding_variables(self):
"""
Reset room, corner and door variables to their initial state.
"""
for room in self.rooms:
for corner in room.corners:
corner.dmin = None
corner.shortest_path = []
corner.visited_doors = []
corner.unused = True
for door in self.doors:
door.unused = True
for of in [door.o1, door.o2]:
of.dmin = None
of.shortest_path = []
of.visited_doors = []
def __euclidean_map_debug(self):
"""
Debug version of the euclidean distance mapping routine.
Does the same as __euclidean_map_threaded(), but without multiprocessing, so intermediate results can be plotted.
"""
# iterate sensors
for i, sensor in enumerate(self.sensors):
# initialise sensor distance map
sensor.emap = np.zeros(shape=self.X.shape)
# reset room corner distances
self.__reset_pathfinding_variables()
# recursively find shortest distance to all valid corners
path = []
doors = []
_, _ = self.__find_shortest_paths(sensor, self.rooms[sensor.room_number], path, doors, dr=0)
# initialise grids
sensor.D = np.zeros(shape=self.X.shape)
sensor.N = np.zeros(shape=self.X.shape)
sensor.M = [[[] for y in range(self.X.shape[1])] for x in range(self.X.shape[0])]
# populate map from sensor point of view
sensor.D, sensor.N, sensor.M = self.__populate_grid(sensor.D, sensor.N, sensor.M, sensor, self.rooms[sensor.room_number])
if 1:
self.plot_debug(start=sensor, grid=[sensor.D])
# populate grid with distances from each corner
for ri, room in enumerate(self.rooms):
# fill from doors
for di, door in enumerate(self.doors):
print('Sensor {}, Room {}, Door {}'.format(i, ri, di))
if door.outbound_room == room:
offset_node = door.outbound_offset
if len(offset_node.shortest_path) > 0:
sensor.D, sensor.N, sensor.M = self.__populate_grid(sensor.D, sensor.N, sensor.M, offset_node, room)
# plot population process
if 1:
self.plot_debug(start=sensor, grid=[sensor.D], paths=offset_node.shortest_path)
# fill from corners
for ci, corner in enumerate(room.corners):
print('Sensor {}, Room {}, Corner {}'.format(i, ri, ci))
if len(corner.shortest_path) > 0:
sensor.D, sensor.N, sensor.M = self.__populate_grid(sensor.D, sensor.N, sensor.M, corner, room)
# plot population process
if 1:
self.plot_debug(start=sensor, grid=[sensor.D], paths=corner.shortest_path)
# plot population result
if 1:
self.plot_debug(start=sensor, grid=[sensor.D])
def __euclidean_map_threaded(self):
"""
Generate euclidean distance map for each sensor.
Applies multiprocessing for a significant reduction in execution time.
"""
def map_process(sensor, i):
"""
Per-sensor distance map generation, isolated in a function so it can run in its own process.
Writes populated distance maps to cache_dir so that we only have to do this once. It's slow.
Parameters
----------
sensor : object
Sensor object with coordinates and temperature information.
i : int
Sensor number in list.
"""
self.__reset_pathfinding_variables()
# recursively find shortest path from sensor to all corners
path = []
doors = []
_, _ = self.__find_shortest_paths(sensor, self.rooms[sensor.room_number], path, doors, dr=0)
# initialise grids
sensor.D = np.zeros(shape=self.X.shape)
sensor.N = np.zeros(shape=self.X.shape)
sensor.M = [[[] for y in range(self.X.shape[1])] for x in range(self.X.shape[0])]
# populate map from sensor point of view
sensor.D, sensor.N, sensor.M = self.__populate_grid(sensor.D, sensor.N, sensor.M, sensor, self.rooms[sensor.room_number])
# populate grid with distances from each corner
for ri, room in enumerate(self.rooms):
# fill from doors
for di, door in enumerate(self.doors):
print('Populating distance map: sensor {:>3}, room {:>3}, door {:>3}'.format(i, ri, di))
if door.outbound_room == room:
offset_node = door.outbound_offset
if len(offset_node.shortest_path) > 0:
sensor.D, sensor.N, sensor.M = self.__populate_grid(sensor.D, sensor.N, sensor.M, offset_node, room)
# fill from corners
for ci, corner in enumerate(room.corners):
print('Populating distance map: sensor {:>3}, room {:>3}, corner {:>3}'.format(i, ri, ci))
if len(corner.shortest_path) > 0:
sensor.D, sensor.N, sensor.M = self.__populate_grid(sensor.D, sensor.N, sensor.M, corner, room)
# write sensor object to pickle
hlp.write_pickle(sensor, os.path.join(self.cache_dir, self.pickle_id + '{}.pkl'.format(i)), cout=True)
# just skip everything and read from cache if so desired
if self.args['read']:
self.__get_cached_sensors()
return
# initialise variables needed for process
procs = []
nth_proc = 0
# iterate sensors
for i, sensor in enumerate(self.sensors):
# spawn a process per sensor
proc = mpr.Process(target=map_process, args=(sensor, i))
procs.append(proc)
proc.start()
print('-- Process #{} spawned.'.format(nth_proc))
nth_proc = nth_proc + 1
# wait for each individual process to finish
nth_proc = 0
for proc in procs:
proc.join()
print('-- Process #{} completed.'.format(nth_proc))
nth_proc = nth_proc + 1
# fetch sensors from cache
self.__get_cached_sensors()
def __get_cached_sensors(self):
"""
Exchange self.sensors with sensors cached in cache_dir.
Usually called to recover previously calculated distance maps.
"""
# get files in cache
cache_files = os.listdir(self.cache_dir)
# iterate sensors
for i in range(self.n_sensors):
# keep track of if we found the pickle
found = False
# iterate files in cache
for f in cache_files:
# look for correct pickle
if self.pickle_id + '{}.pkl'.format(i) in f and not found:
# read pickle
pickle_path = os.path.join(self.cache_dir, self.pickle_id + '{}.pkl'.format(i))
pickle_sensor = hlp.read_pickle(pickle_path, cout=True)
# exchange
self.sensors[i].D = pickle_sensor.D
self.sensors[i].N = pickle_sensor.N
self.sensors[i].M = pickle_sensor.M
# found it
found = True
# shouldn't happen, but just in case
if not found:
hlp.print_error('Pickle at [{}] does not exist. Try running without --read.'.format(pickle_path), terminate=True)
def __find_shortest_paths(self, start, room, path, doors, dr):
"""
Recursively find the shortest path from sensor to every corner in layout.
Parameters
----------
start : object
Point object where we currently have the point of view.
room : object
Room object of which room we are currently in.
path : list
List of previously visited points in the current recursive branch.
doors : list
List of doors which have been passed through in the current recursive branch.
dr : float
Total distance traveled from initial sensor start location.
Returns
-------
path : list
List of visited points in the current recursive branch, including current.
doors : list
List of doors which have been passed through in the current recursive branch.
"""
# append path with active node
path.append(start)
# stop if we've been here before on a shorter path
if start.dmin != None and dr > start.dmin:
return path, doors
# as this is currently the shortest path from sensor to active, copy it to active
start.dmin = dr
start.shortest_path = [p for p in path]
start.visited_doors = [d for d in doors]
# find candidate corners for path expansion
corner_candidates = self.__get_corner_candidates(start, room)
door_candidates = self.__get_door_candidates(start, room)
# plot candidates
if 0:
self.plot_debug(start=start, goals=corner_candidates + door_candidates, show=False)
# recursively iterate candidates
for c in corner_candidates:
# calculate distance to candidate
ddr = hlp.euclidean_distance(start.x, start.y, c.x, c.y)
# recursive
path, doors = self.__find_shortest_paths(c, room, path, doors, dr+ddr)
path.pop()
for c in corner_candidates:
c.unused = True
for d in door_candidates:
# calculate distance to candidate
ddr = hlp.euclidean_distance(start.x, start.y, d.inbound_offset.x, d.inbound_offset.y)
# fix offset
d.outbound_offset.dx = 0
d.outbound_offset.dy = 0
# append to doors list
doors.append(d)
# recursive
path, doors = self.__find_shortest_paths(d.outbound_offset, d.outbound_room, path, doors, dr+ddr)
# pop lists as we're back to current depth
path.pop()
doors.pop()
for d in door_candidates:
d.unused = True
return path, doors
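# Illustrative walk-through (hypothetical two-room layout): starting at the sensor,
# the recursion first visits every corner of the sensor's room that is in direct
# line of sight, then steps through any visible door into the neighbouring room via
# the door's offset points, accumulating the travelled distance dr along the way.
# A corner or door offset keeps only the shortest dr seen so far (dmin), so longer
# branches are pruned early.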
def __get_corner_candidates(self, start, room):
"""
Return a list of corners which can be used as next step in recursive __find_shortest_paths().
Parameters
----------
start : object
Point object where we currently have the point of view.
room : object
Room object of which room we are currently in.
Returns
-------
candidates : list
List of corners in room which can be used for next recursive step.
"""
# initialise list
candidates = []
# iterate corners in room
for i, corner in enumerate(room.corners):
# skip visited
if not corner.unused:
continue
# get offset
dx, dy = self.__corner_offset(room.corners, i)
# check if corner is candidate material
if self.__has_direct_los(mcl.Point(start.x+start.dx, start.y+start.dy), mcl.Point(corner.x+dx, corner.y+dy), room):
corner.dx = dx
corner.dy = dy
candidates.append(corner)
corner.unused = False
return candidates
def __get_door_candidates(self, start, room):
"""
Return a list of doors which can be passed through as next step in recursive __find_shortest_paths().
Parameters
----------
start : object
Point object where we currently have the point of view.
room : object
Room object of which room we are currently in.
Returns
-------
candidates : list
List of doors in room which can be passed through.
"""
# initialise list
candidates = []
        # iterate doors
for door in self.doors:
            # skip visited
if not door.unused:
continue
# check if we have LOS to either offset
offset_start = mcl.Point(start.x+start.dx, start.y+start.dy)
if self.__has_direct_los(offset_start, door.o1, room):
if room == door.room1:
door.outbound_room = door.room2
else:
door.outbound_room = door.room1
door.inbound_offset = door.o1
door.outbound_offset = door.o2
candidates.append(door)
door.unused = False
elif self.__has_direct_los(offset_start, door.o2, room):
if room == door.room1:
door.outbound_room = door.room2
else:
door.outbound_room = door.room1
door.inbound_offset = door.o2
door.outbound_offset = door.o1
candidates.append(door)
door.unused = False
return candidates
def __has_direct_los(self, start, goal, room):
"""
Check if start has line of sight (LOS) to goal.
Parameters
----------
start : object
Point object used as point of view.
        goal : object
            Point object we check if we have LOS to.
        room : object
            Room object whose walls are checked for obstructions.
Returns
-------
        return : bool
            True if start has an unobstructed line of sight to goal.
            False if a wall in room intersects the segment.
"""
# check if los
for i in range(len(room.corners)):
# two corners define a wall which can be intersected
ir = i + 1
if ir > len(room.corners)-1:
ir = 0
if self.__line_intersects(start, goal, room.corners[i], room.corners[ir]):
return False
return True
def __line_intersects(self, p1, q1, p2, q2):
"""
        Determine if two line segments intersect in 2-D space.
        Parameters
        ----------
        p1 : object
            Point object for the first endpoint of the first segment.
        q1 : object
            Point object for the second endpoint of the first segment.
        p2 : object
            Point object for the first endpoint of the second segment.
        q2 : object
            Point object for the second endpoint of the second segment.
        Returns
        -------
        return : bool
            True if the segments intersect.
            False if they do not.
"""
# find the 4 orientations required for the general and special cases
o1 = self.__orientation(p1, q1, p2)
o2 = self.__orientation(p1, q1, q2)
o3 = self.__orientation(p2, q2, p1)
o4 = self.__orientation(p2, q2, q1)
# General case
if ((o1 != o2) and (o3 != o4)):
return True
        # special cases
        # p1, q1 and p2 are collinear and p2 lies on segment p1q1
if ((o1 == 0) and self.__on_segment(p1, p2, q1)):
return True
        # p1, q1 and q2 are collinear and q2 lies on segment p1q1
if ((o2 == 0) and self.__on_segment(p1, q2, q1)):
return True
        # p2, q2 and p1 are collinear and p1 lies on segment p2q2
if ((o3 == 0) and self.__on_segment(p2, p1, q2)):
return True
        # p2, q2 and q1 are collinear and q1 lies on segment p2q2
if ((o4 == 0) and self.__on_segment(p2, q1, q2)):
return True
# if none of the cases
return False
def __orientation(self, p, q, r):
"""
        Find the orientation of the ordered triplet of points (p, q, r).
        See https://www.geeksforgeeks.org/orientation-3-ordered-points/amp/ for details.
        Parameters
        ----------
        p : object
            First point of the triplet.
        q : object
            Second point of the triplet.
        r : object
            Third point of the triplet.
        Returns
        -------
        return : int
            0 if the points are collinear.
            1 if they are ordered clockwise.
            2 if they are ordered counterclockwise.
"""
val = (float(q.y - p.y) * (r.x - q.x)) - (float(q.x - p.x) * (r.y - q.y))
if (val > 0):
# Clockwise orientation
return 1
elif (val < 0):
# Counterclockwise orientation
return 2
else:
            # Collinear orientation
return 0
def __on_segment(self, p, q, r):
"""
        Determine whether point q lies on the segment p-r, assuming p, q and r are collinear.
        Parameters
        ----------
        p : object
            First endpoint of the segment.
        q : object
            Point object checked for lying on the segment.
        r : object
            Second endpoint of the segment.
Returns
-------
return : bool
True if on segment.
False if not on segment.
"""
if ( (q.x <= max(p.x, r.x)) and (q.x >= min(p.x, r.x)) and
(q.y <= max(p.y, r.y)) and (q.y >= min(p.y, r.y))):
return True
return False
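    # Worked example of the general case in __line_intersects (illustrative points):
    # for the crossing segments (0,0)-(4,4) and (0,4)-(4,0),
    #   orientation((0,0),(4,4),(0,4)) -> val = 4*(-4) - 4*0    = -16 -> 2
    #   orientation((0,0),(4,4),(4,0)) -> val = 4*0    - 4*(-4) =  16 -> 1
    #   orientation((0,4),(4,0),(0,0)) -> val = (-4)*(-4) - 4*0 =  16 -> 1
    #   orientation((0,4),(4,0),(4,4)) -> val = (-4)*0 - 4*4    = -16 -> 2
    # so o1 != o2 and o3 != o4, and the segments are reported as intersecting.
    # __on_segment is only consulted in the collinear special cases (val == 0).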
def __corner_offset(self, corners, i, eps=1/1e3):
"""
Generate a tiny offset in corner convex direction.
Parameters
----------
corners : list
List of corner objects in a room.
i : int
Index of current corner of interest in corner list.
eps : float
Distance of offset. Should be small.
Returns
-------
x_offset : float
Offset in the x-direction.
y_offset : float
Offset in the y-direction.
"""
# circular buffer behavior for list edges
il = i - 1
if il < 0:
il = -1
ir = i + 1
if ir > len(corners) - 1:
ir = 0
# isolate corner triplet around corner of interest
pl = corners[il]
pc = corners[i]
pr = corners[ir]
        # get convex direction of corner triplet
mx = np.sign(((pc.x - pl.x) + (pc.x - pr.x)) / 2)
my = np.sign(((pc.y - pl.y) + (pc.y - pr.y)) / 2)
# plot for debugging purposes
if 0:
plt.cla()
for room in self.rooms:
xx, yy = room.get_outline()
plt.plot(xx, yy, '-k', linewidth=3)
plt.plot(pl.x, pl.y, 'or')
plt.plot(pr.x, pr.y, 'og')
plt.plot(pc.x, pc.y, 'ok')
plt.plot([pc.x, pl.x], [pc.y, pl.y], 'o-r', linewidth=3)
plt.plot([pc.x, pr.x], [pc.y, pr.y], 'o-g', linewidth=3)
plt.plot([pc.x, pc.x+mx], [pc.y, pc.y+my], 'o--k')
plt.waitforbuttonpress()
# multiply by epsilon
x_offset = mx * eps
y_offset = my * eps
return x_offset, y_offset
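    # Worked example for __corner_offset (illustrative coordinates): with pl = (0, 0),
    # pc = (2, 0), pr = (2, 2) and the default eps,
    #   mx = sign(((2-0) + (2-2)) / 2) = +1
    #   my = sign(((0-0) + (0-2)) / 2) = -1
    # so the returned offset is (+0.001, -0.001): the point of interest is nudged a
    # tiny step off the corner, away from both neighbouring corners, before the
    # line-of-sight checks are run.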
def update_heatmap(self):
"""
Using calculated distance- and door maps, update heatmap with temperature data.
"""
        # iterate the x- and y-axes
for x, gx in enumerate(self.x_interp):
for y, gy in enumerate(self.y_interp):
# reset lists
temperatures = []
distances = []
weights = []
# iterate sensors
for room in self.rooms:
for sensor in room.sensors:
los = True
# check if doors in path are closed
if len(sensor.M[y][x]) > 0:
for door in self.doors:
if door.closed and door.number in sensor.M[y][x]:
los = False
# check if distance grid is valid here
                        if los and sensor.D[y, x] > 0 and sensor.t is not None:
temperatures.append(sensor.t)
distances.append(sensor.D[y, x])
# do nothing if no valid distances
if len(distances) == 0:
self.heatmap[y, x] = None
elif len(distances) == 1:
self.heatmap[y, x] = temperatures[0]
else:
# calculate weighted average
weights = (1/(np.array(distances)))**2
temperatures = np.array(temperatures)
# update mesh
self.heatmap[y, x] = sum(weights*temperatures) / sum(weights)
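    # Numerical example of the weighting above (illustrative values): with temperatures
    # [20.0, 24.0] and distances [1.0, 2.0], the weights are (1/d)**2 = [1.0, 0.25] and
    # the cell value becomes (20.0*1.0 + 24.0*0.25) / (1.0 + 0.25) = 20.8, i.e. the
    # closer sensor dominates with an inverse-square falloff.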
def __set_filters(self):
"""
Set filters for data fetched through API.
"""
# historic events
self.history_params = {
'page_size': 1000,
'start_time': self.args['starttime'],
'end_time': self.args['endtime'],
'event_types': ['temperature', 'objectPresent']
}
# stream events
self.stream_params = {
'event_types': ['temperature', 'objectPresent']
}
def __fetch_event_history(self):
"""
For each sensor in project, request all events since --starttime from API.
"""
# initialise empty event list
self.event_history = []
# combine temperature- and door sensors
project_sensors = [s for s in self.sensors] + [d for d in self.doors] + [o for o in self.oofs]
# iterate devices
for sensor in project_sensors:
# isolate id
sensor_id = sensor.sensor_id
# skip sensors without id
if sensor_id is None:
continue
# some printing
print('-- Getting event history for {}'.format(sensor_id))
# initialise next page token
self.history_params['page_token'] = None
# set endpoints for event history
event_list_url = "{}/projects/{}/devices/{}/events".format(self.api_url_base, self.project_id, sensor_id)
# perform paging
while self.history_params['page_token'] != '':
event_listing = requests.get(event_list_url, auth=(self.username, self.password), params=self.history_params)
event_json = event_listing.json()
if event_listing.status_code < 300:
self.history_params['page_token'] = event_json['nextPageToken']
self.event_history += event_json['events']
else:
print(event_json)
hlp.print_error('Status Code: {}'.format(event_listing.status_code), terminate=True)
if self.history_params['page_token'] != '':
print('\t-- paging')
# sort event history in time
self.event_history.sort(key=hlp.json_sort_key, reverse=False)
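    # Note on the paging loop above: it relies on the API returning an empty string for
    # 'nextPageToken' on the final page, so the first request is sent with
    # page_token = None and the loop stops once an empty token comes back.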
def __initialise_stream_temperatures(self):
"""
When initialising a stream, set initial sensor temperature values as last value reported.
"""
# get list of sensors in project
device_list_url = "{}/projects/{}/devices".format(self.api_url_base, self.project_id)
# request
device_listing = requests.get(device_list_url, auth=(self.username, self.password)).json()
for device in device_listing['devices']:
name = os.path.basename(device['name'])
if 'temperature' in device['reported']:
for sensor in self.sensors + self.oofs:
if name == sensor.sensor_id:
sensor.t = device['reported']['temperature']['value']
elif 'objectPresent' in device['reported']:
for door in self.doors:
if name == door.sensor_id:
state = device['reported']['objectPresent']['state']
if state == 'PRESENT':
door.closed = True
else:
door.closed = False
def run_history(self):
"""
Iterate through and calculate occupancy for event history.
"""
# do nothing if starttime not given
if not self.fetch_history:
return
        # get list of historic events
self.__fetch_event_history()
# estimate occupancy for history
cc = 0
for i, event_data in enumerate(self.event_history):
cc = hlp.loop_progress(cc, i, len(self.event_history), 25, name='event history')
# serve event to director
_ = self.__new_event_data(event_data, cout=False)
# get event time in unixtime
if 'temperature' in event_data['data']:
update_time = event_data['data']['temperature']['updateTime']
else:
update_time = event_data['data']['objectPresent']['updateTime']
_, unixtime = hlp.convert_event_data_timestamp(update_time)
# plot if timestep has passed
if self.__check_timestep(unixtime):
# update heatmap
self.update_heatmap()
# plot
self.plot_heatmap(update_time=update_time, blocking=True)
def run_stream(self, n_reconnects=5):
"""
Update heatmap for realtime stream data from sensors.
Parameters
----------
n_reconnects : int
Number of reconnection attempts at disconnect.
"""
# do nothing if sample
if self.sample:
return
# if no history were used, get last events from sensors
if not self.fetch_history:
self.__initialise_stream_temperatures()
# cout
print("Listening for events... (press CTRL-C to abort)")
# initial plot
if not self.args['no_plot']:
# initialise heatmap
self.update_heatmap()
# plot
self.plot_heatmap(update_time='t0', blocking=False)
        # loop indefinitely
nth_reconnect = 0
while nth_reconnect < n_reconnects:
try:
# reset reconnect counter
nth_reconnect = 0
# get response
response = requests.get(self.stream_endpoint, auth=(self.username, self.password), headers={'accept':'text/event-stream'}, stream=True, params=self.stream_params)
client = sseclient.SSEClient(response)
# listen for events
print('Connected.')
for event in client.events():
event_data = json.loads(event.data)
# check for event data error
                    if 'error' in event_data:
                        hlp.print_error('Event with error msg [{}]. Skipping event.'.format(event_data['error']['message']), terminate=False)
                        continue
# new data received
event_data = event_data['result']['event']
# serve event to director
served = self.__new_event_data(event_data, cout=True)
# plot progress
if served and not self.args['no_plot']:
# get event time in unixtime
if 'temperature' in event_data['data']:
update_time = event_data['data']['temperature']['updateTime']
else:
update_time = event_data['data']['objectPresent']['updateTime']
_, unixtime = hlp.convert_event_data_timestamp(update_time)
# update heatmap
self.update_heatmap()
# Plot
self.plot_heatmap(update_time=update_time, blocking=False)
# catch errors
# Note: Some VPNs seem to cause quite a lot of packet corruption (?)
except requests.exceptions.ConnectionError:
nth_reconnect += 1
print('Connection lost, reconnection attempt {}/{}'.format(nth_reconnect, n_reconnects))
except requests.exceptions.ChunkedEncodingError:
nth_reconnect += 1
                print('An error occurred, reconnection attempt {}/{}'.format(nth_reconnect, n_reconnects))
except KeyError:
print('Skipping event due to KeyError.')
print(event_data)
print()
# wait 1s before attempting to reconnect
time.sleep(1)
def initialise_debug_plot(self):
self.fig, self.ax = plt.subplots()
def plot_debug(self, start=None, goals=None, grid=None, paths=None, show=False):
        # initialise if not open
if not hasattr(self, 'ax') or not plt.fignum_exists(self.fig.number):
self.initialise_debug_plot()
# clear
self.ax.clear()
# draw walls
for room in self.rooms:
xx, yy = room.get_outline()
self.ax.plot(xx, yy, '-k', linewidth=3)
# draw doors
for door in self.doors:
self.ax.plot(door.xx, door.yy, '-k', linewidth=14)
if door.closed:
self.ax.plot(door.xx, door.yy, '-', color='orangered', linewidth=8)
else:
self.ax.plot(door.xx, door.yy, '-', color='limegreen', linewidth=8)
# draw goal node
        if goals is not None and start is not None:
for g in goals:
self.ax.plot([start.x, g.x], [start.y, g.y], '.-r', markersize=10)
# draw start node
        if start is not None:
self.ax.plot(start.x, start.y, 'ok', markersize=10)
# draw paths
        if paths is not None:
for i in range(len(paths)-1):
p1 = paths[i]
p2 = paths[i+1]
self.ax.plot([p1.x, p2.x], [p1.y, p2.y], '.-r')
# plot grid
        if grid is not None:
for g in grid:
pc = self.ax.contourf(self.X.T, self.Y.T, g.T, max(1, int(g.max()-g.min())))
pc.set_clim(0, max(self.xlim[1]-self.xlim[0], self.ylim[1]-self.ylim[0]))
plt.gca().set_aspect('equal', adjustable='box')
if show:
plt.show()
else:
plt.waitforbuttonpress()
def initialise_heatmap_plot(self):
self.hfig, self.hax = plt.subplots()
self.hfig.set_figheight(self.ylim[1]-self.ylim[0])
self.hfig.set_figwidth(self.xlim[1]-self.xlim[0])
self.hfig.colorbar(cm.ScalarMappable(norm=Normalize(vmin=self.t_range[0], vmax=self.t_range[1]), cmap=cm.jet))
def plot_heatmap(self, update_time='', blocking=True, show=False):
# initialise if not open
if not hasattr(self, 'hax') or not plt.fignum_exists(self.hfig.number):
self.initialise_heatmap_plot()
# clear
self.hax.clear()
# set title
self.hax.set_title(update_time)
# draw walls
for room in self.rooms:
xx, yy = room.get_outline()
self.hax.plot(xx, yy, '-k', linewidth=3)
# draw doors
for door in self.doors:
self.hax.plot(door.xx, door.yy, '-k', linewidth=14)
if door.closed:
self.hax.plot(door.xx, door.yy, '-', color='orangered', linewidth=8)
else:
self.hax.plot(door.xx, door.yy, '-', color='limegreen', linewidth=8)
# draw sensors
for sensor in self.sensors:
self.hax.plot(sensor.x, sensor.y, 'xk', markersize=10, markeredgewidth=2.5)
# draw heatmap
pc = self.hax.contourf(self.X.T, self.Y.T, self.heatmap.T, (self.t_range[1]-self.t_range[0])*5, cmap=cm.jet)
# pc = self.hax.contourf(self.X.T, self.Y.T, self.heatmap.T, 100, cmap=cm.jet)
pc.set_clim(self.t_range[0], self.t_range[1])
# draw oofs
for oof in self.oofs:
if oof.t is not None:
t = (oof.t-self.t_range[0])/(self.t_range[1]-self.t_range[0])
self.hax.plot(oof.x, oof.y, 'o', color=pc.cmap(t), markeredgecolor='k', markersize=20)
# lock aspect
plt.gca().set_aspect('equal', adjustable='box')
plt.axis('off')
plt.tight_layout()
# plt.xlabel('Distance [m]')
# plt.ylabel('Distance [m]')
if blocking:
if show:
plt.show()
else:
plt.waitforbuttonpress()
else:
plt.pause(0.01)
|
main.py
|
#coding:utf-8
import time
import threading
from html_downLoader import HtmlDownLoader
import ParseAlexa
SLEEP_TIME=1
def threaded_crawler(alexaCallback,max_threads=10):
threads=[]
result={}
crawl_queue=alexaCallback("http://s3.amazonaws.com/alexa-static/top-1m.csv.zip")
dlownloader=HtmlDownLoader()
def process_queue():
while True:
try:
url=crawl_queue.pop()
            except IndexError as e:
                print(e)
                break
            else:
                html = dlownloader.downLoad(url)
                result[url] = html
                print("Crawling %s" % url)
while threads or crawl_queue:
while len(threads)<max_threads and crawl_queue:
thread=threading.Thread(target=process_queue)
thread.setDaemon(True)
thread.start()
threads.append(thread)
time.sleep(SLEEP_TIME)
for thread in threads:
if not thread.is_alive():
threads.remove(thread)
# print result
if __name__ == '__main__':
alexaCallback=ParseAlexa.AlexaCallback()
threaded_crawler(alexaCallback)
|
cancel_query.py
|
from django.test import TestCase
from unittest2 import skipIf
from django.db import connection
from time import sleep
from multiprocessing import Process
import json
import re
import os
from sqlshare_rest.util.db import get_backend, is_mssql, is_mysql, is_sqlite3, is_pg
from sqlshare_rest.dao.query import create_query
from sqlshare_rest.test import missing_url
from django.test.utils import override_settings
from django.test.client import Client
from django.core.urlresolvers import reverse
from sqlshare_rest.test.api.base import BaseAPITest
from sqlshare_rest.dao.dataset import create_dataset_from_query
from sqlshare_rest.util.query_queue import process_queue
from sqlshare_rest.models import Query
from testfixtures import LogCapture
@skipIf(missing_url("sqlshare_view_dataset_list") or is_sqlite3(), "SQLShare REST URLs not configured")
@override_settings(MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.RemoteUserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
),
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
)
class CancelQueryAPITest(BaseAPITest):
def setUp(self):
super(CancelQueryAPITest, self).setUp()
# Try to cleanup from any previous test runs...
self.remove_users = []
self.client = Client()
def test_cancel(self):
owner = "cancel_user1"
not_owner = "cancel_user2"
self.remove_users.append(owner)
self.remove_users.append(not_owner)
backend = get_backend()
user = backend.get_user(owner)
Query.objects.all().delete()
query_text = None
if is_mssql():
query_text = "select (22) waitfor delay '00:10:30'"
if is_mysql():
query_text = "select sleep(432)"
if is_pg():
query_text = "select pg_sleep(8)"
def queue_runner():
from django import db
db.close_old_connections()
process_queue(verbose=False, thread_count=2, run_once=False)
from django import db
db.close_old_connections()
p = Process(target=queue_runner)
p.start()
# We need to have the server up and running before creating the query...
sleep(2)
query = create_query(owner, query_text)
query_id = query.pk
# This just needs to wait for the process to start. 1 wasn't reliable,
# 2 seemed to be. If this isn't, maybe turn this into a loop waiting
# for the query to show up?
sleep(3)
try:
queries = backend.get_running_queries()
has_query = False
for q in queries:
if q["sql"] == query_text:
has_query = True
self.assertTrue(has_query)
auth_headers = self.get_auth_header_for_username(owner)
bad_auth_headers = self.get_auth_header_for_username(not_owner)
url = reverse("sqlshare_view_query", kwargs={ "id": query.pk })
response = self.client.delete(url, **bad_auth_headers)
has_query = False
queries = backend.get_running_queries()
for q in queries:
if q["sql"] == query_text:
has_query = True
self.assertTrue(has_query)
with LogCapture() as l:
url = reverse("sqlshare_view_query", kwargs={ "id": query.pk })
response = self.client.delete(url, **auth_headers)
self.assertTrue(self._has_log(l, owner, None, 'sqlshare_rest.views.query', 'INFO', 'Cancelled query; ID: %s' % (query.pk)))
# This is another lame timing thing. 1 second wasn't reliably
# long enough on travis.
# 3 seconds also wasn't long enough :( Making it configurable
# from the environment
wait_time = float(os.environ.get("SQLSHARE_KILL_QUERY_WAIT", 1))
sleep(wait_time)
has_query = False
queries = backend.get_running_queries()
for q in queries:
if q["sql"] == query_text:
has_query = True
self.assertFalse(has_query)
q2 = Query.objects.get(pk = query_id)
self.assertTrue(q2.is_finished)
self.assertTrue(q2.has_error)
self.assertTrue(q2.terminated)
self.assertEquals(q2.error, "Query cancelled")
except Exception as ex:
raise
finally:
p.terminate()
p.join()
Query.objects.all().delete()
q2 = create_query(owner, query_text)
url = reverse("sqlshare_view_query", kwargs={ "id": q2.pk })
response = self.client.delete(url, **auth_headers)
q2 = Query.objects.get(pk = q2.pk)
self.assertFalse(q2.is_finished)
self.assertFalse(q2.has_error)
self.assertTrue(q2.terminated)
process_queue(run_once=True, verbose=True)
q2 = Query.objects.get(pk = q2.pk)
self.assertTrue(q2.is_finished)
self.assertTrue(q2.has_error)
self.assertTrue(q2.terminated)
@classmethod
def setUpClass(cls):
super(CancelQueryAPITest, cls).setUpClass()
def _run_query(sql):
cursor = connection.cursor()
try:
cursor.execute(sql)
except Exception as ex:
# Hopefully all of these will fail, so ignore the failures
pass
# This is just an embarrassing list of things to cleanup if something fails.
# It gets added to when something like this blocks one of my test runs...
_run_query("drop login cancel_user1")
|
kernel.py
|
"""Hooks for Jupyter Xonsh Kernel."""
import datetime
import errno
import hashlib
import hmac
import json
import sys
import threading
import uuid
from argparse import ArgumentParser
from collections.abc import Set
from pprint import pformat
import zmq
from xonsh import __version__ as version
from xonsh.built_ins import XSH
from xonsh.commands_cache import predict_true
from xonsh.completer import Completer
from xonsh.main import setup
from zmq.error import ZMQError
from zmq.eventloop import ioloop, zmqstream
MAX_SIZE = 8388608 # 8 Mb
DELIM = b"<IDS|MSG>"
def dump_bytes(*args, **kwargs):
"""Converts an object to JSON and returns the bytes."""
return json.dumps(*args, **kwargs).encode("ascii")
def load_bytes(b):
"""Converts bytes of JSON to an object."""
return json.loads(b.decode("ascii"))
def bind(socket, connection, port):
"""Binds a socket to a port, or a random port if needed. Returns the port."""
if port <= 0:
return socket.bind_to_random_port(connection)
else:
socket.bind(f"{connection}:{port}")
return port
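# Usage sketch for bind() (illustrative only): a configured port of 0 asks ZMQ for a
# random free port, any positive value is bound as-is, and the effective port is
# returned either way, e.g.
#
#     sock = zmq.Context().socket(zmq.REP)
#     port = bind(sock, "tcp://127.0.0.1", 0)  # returns the OS-assigned port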
class XonshKernel:
"""Xonsh xernal for Jupyter"""
implementation = "Xonsh " + version
implementation_version = version
language = "xonsh"
language_version = version.split(".")[:3]
banner = "Xonsh - Python-powered, cross-platform shell"
language_info = {
"name": "xonsh",
"version": version,
"pygments_lexer": "xonsh",
"codemirror_mode": "shell",
"mimetype": "text/x-sh",
"file_extension": ".xsh",
}
signature_schemes = {"hmac-sha256": hashlib.sha256}
def __init__(self, debug_level=0, session_id=None, config=None, **kwargs):
"""
Parameters
----------
debug_level : int, optional
Integer from 0 (no debugging) to 3 (all debugging), default: 0.
session_id : str or None, optional
Unique string id representing the kernel session. If None, this will
be replaced with a random UUID.
config : dict or None, optional
            Configuration dictionary to start the server with. By default the
            command line is searched for options (if given); otherwise the
            default configuration is used.
"""
self.debug_level = debug_level
self.session_id = str(uuid.uuid4()) if session_id is None else session_id
self._parser = None
self.config = self.make_default_config() if config is None else config
self.exiting = False
self.execution_count = 1
self.completer = Completer()
@property
def parser(self):
if self._parser is None:
p = ArgumentParser("jupyter_kerenel")
p.add_argument("-f", dest="config_file", default=None)
self._parser = p
return self._parser
def make_default_config(self):
"""Provides default configuration"""
ns, unknown = self.parser.parse_known_args(sys.argv)
if ns.config_file is None:
self.dprint(1, "Starting xonsh kernel with default args...")
config = {
"control_port": 0,
"hb_port": 0,
"iopub_port": 0,
"ip": "127.0.0.1",
"key": str(uuid.uuid4()),
"shell_port": 0,
"signature_scheme": "hmac-sha256",
"stdin_port": 0,
"transport": "tcp",
}
else:
self.dprint(1, "Loading simple_kernel with args:", sys.argv)
self.dprint(1, f"Reading config file {ns.config_file!r}...")
with open(ns.config_file) as f:
config = json.load(f)
return config
def iopub_handler(self, message):
"""Handles iopub requests."""
self.dprint(2, "iopub received:", message)
def control_handler(self, wire_message):
"""Handles control requests"""
self.dprint(1, "control received:", wire_message)
identities, msg = self.deserialize_wire_message(wire_message)
if msg["header"]["msg_type"] == "shutdown_request":
self.shutdown()
def stdin_handler(self, message):
self.dprint(2, "stdin received:", message)
def start(self):
"""Starts the server"""
ioloop.install()
connection = self.config["transport"] + "://" + self.config["ip"]
secure_key = self.config["key"].encode()
digestmod = self.signature_schemes[self.config["signature_scheme"]]
self.auth = hmac.HMAC(secure_key, digestmod=digestmod)
# Heartbeat
ctx = zmq.Context()
self.heartbeat_socket = ctx.socket(zmq.REP)
self.config["hb_port"] = bind(
self.heartbeat_socket, connection, self.config["hb_port"]
)
        # IOPub/Sub, also called SubSocketChannel in IPython sources
self.iopub_socket = ctx.socket(zmq.PUB)
self.config["iopub_port"] = bind(
self.iopub_socket, connection, self.config["iopub_port"]
)
self.iopub_stream = zmqstream.ZMQStream(self.iopub_socket)
self.iopub_stream.on_recv(self.iopub_handler)
# Control
self.control_socket = ctx.socket(zmq.ROUTER)
self.config["control_port"] = bind(
self.control_socket, connection, self.config["control_port"]
)
self.control_stream = zmqstream.ZMQStream(self.control_socket)
self.control_stream.on_recv(self.control_handler)
# Stdin:
self.stdin_socket = ctx.socket(zmq.ROUTER)
self.config["stdin_port"] = bind(
self.stdin_socket, connection, self.config["stdin_port"]
)
self.stdin_stream = zmqstream.ZMQStream(self.stdin_socket)
self.stdin_stream.on_recv(self.stdin_handler)
# Shell
self.shell_socket = ctx.socket(zmq.ROUTER)
self.config["shell_port"] = bind(
self.shell_socket, connection, self.config["shell_port"]
)
self.shell_stream = zmqstream.ZMQStream(self.shell_socket)
self.shell_stream.on_recv(self.shell_handler)
        # start-up configuration
self.dprint(2, "Config:", json.dumps(self.config))
self.dprint(1, "Starting loops...")
self.hb_thread = threading.Thread(target=self.heartbeat_loop)
self.hb_thread.daemon = True
self.hb_thread.start()
self.dprint(1, "Ready! Listening...")
ioloop.IOLoop.instance().start()
def shutdown(self):
"""Shutsdown the kernel"""
self.exiting = True
ioloop.IOLoop.instance().stop()
def dprint(self, level, *args, **kwargs):
"""Print but with debug information."""
if level <= self.debug_level:
print("DEBUG" + str(level) + ":", file=sys.__stdout__, *args, **kwargs)
sys.__stdout__.flush()
def sign(self, messages):
"""Sign a message list with a secure signature."""
h = self.auth.copy()
for m in messages:
h.update(m)
return h.hexdigest().encode("ascii")
def new_header(self, message_type):
"""Make a new header"""
return {
"date": datetime.datetime.now().isoformat(),
"msg_id": str(uuid.uuid4()),
"username": "kernel",
"session": self.session_id,
"msg_type": message_type,
"version": "5.0",
}
def send(
self,
stream,
message_type,
content=None,
parent_header=None,
metadata=None,
identities=None,
):
"""Send data to the client via a stream"""
header = self.new_header(message_type)
if content is None:
content = {}
if parent_header is None:
parent_header = {}
if metadata is None:
metadata = {}
messages = list(map(dump_bytes, [header, parent_header, metadata, content]))
signature = self.sign(messages)
parts = [DELIM, signature] + messages
if identities:
parts = identities + parts
self.dprint(3, "send parts:", parts)
stream.send_multipart(parts)
if isinstance(stream, zmqstream.ZMQStream):
stream.flush()
def deserialize_wire_message(self, wire_message):
"""Split the routing prefix and message frames from a message on the wire"""
delim_idx = wire_message.index(DELIM)
identities = wire_message[:delim_idx]
m_signature = wire_message[delim_idx + 1]
msg_frames = wire_message[delim_idx + 2 :]
keys = ("header", "parent_header", "metadata", "content")
m = {k: load_bytes(v) for k, v in zip(keys, msg_frames)}
check_sig = self.sign(msg_frames)
if check_sig != m_signature:
raise ValueError("Signatures do not match")
return identities, m
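    # Wire layout handled by deserialize_wire_message() above (Jupyter messaging
    # protocol): [ <routing ids> ..., DELIM, <hmac signature>, <header>,
    # <parent_header>, <metadata>, <content> ]. Everything before DELIM is kept as
    # routing identities, the frame right after it is compared against the HMAC of the
    # remaining frames, and the four JSON frames are decoded into a dict keyed by
    # their protocol names.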
def run_thread(self, loop, name):
"""Run main thread"""
self.dprint(2, f"Starting loop for {name!r}...")
while not self.exiting:
self.dprint(2, f"{name} Loop!")
try:
loop.start()
except ZMQError as e:
self.dprint(1, f"{name} ZMQError!\n {e}")
if e.errno == errno.EINTR:
continue
else:
raise
except Exception:
self.dprint(2, f"{name} Exception!")
if self.exiting:
break
else:
raise
else:
self.dprint(2, f"{name} Break!")
break
def heartbeat_loop(self):
"""Run heartbeat"""
self.dprint(2, "Starting heartbeat loop...")
while not self.exiting:
self.dprint(3, ".", end="")
try:
zmq.device(zmq.FORWARDER, self.heartbeat_socket, self.heartbeat_socket)
except zmq.ZMQError as e:
if e.errno == errno.EINTR:
continue
else:
raise
else:
break
def shell_handler(self, message):
"""Dispatch shell messages to their handlers"""
self.dprint(1, "received:", message)
identities, msg = self.deserialize_wire_message(message)
handler = getattr(self, "handle_" + msg["header"]["msg_type"], None)
if handler is None:
self.dprint(0, "unknown message type:", msg["header"]["msg_type"])
return
handler(msg, identities)
def handle_execute_request(self, message, identities):
"""Handle execute request messages."""
self.dprint(2, "Xonsh Kernel Executing:", pformat(message["content"]["code"]))
# Start by sending busy signal
content = {"execution_state": "busy"}
self.send(self.iopub_stream, "status", content, parent_header=message["header"])
# confirm the input that we are executing
content = {
"execution_count": self.execution_count,
"code": message["content"]["code"],
}
self.send(
self.iopub_stream, "execute_input", content, parent_header=message["header"]
)
# execute the code
metadata = {
"dependencies_met": True,
"engine": self.session_id,
"status": "ok",
"started": datetime.datetime.now().isoformat(),
}
content = self.do_execute(parent_header=message["header"], **message["content"])
self.send(
self.shell_stream,
"execute_reply",
content,
metadata=metadata,
parent_header=message["header"],
identities=identities,
)
self.execution_count += 1
# once we are done, send a signal that we are idle
content = {"execution_state": "idle"}
self.send(self.iopub_stream, "status", content, parent_header=message["header"])
def do_execute(
self,
code="",
silent=False,
store_history=True,
user_expressions=None,
allow_stdin=False,
parent_header=None,
**kwargs,
):
"""Execute user code."""
if len(code.strip()) == 0:
return {
"status": "ok",
"execution_count": self.execution_count,
"payload": [],
"user_expressions": {},
}
shell = XSH.shell
hist = XSH.history
try:
shell.default(code, self, parent_header)
interrupted = False
except KeyboardInterrupt:
interrupted = True
if interrupted:
return {"status": "abort", "execution_count": self.execution_count}
rtn = 0 if (hist is None or len(hist) == 0) else hist.rtns[-1]
if 0 < rtn:
message = {
"status": "error",
"execution_count": self.execution_count,
"ename": "",
"evalue": str(rtn),
"traceback": [],
}
else:
message = {
"status": "ok",
"execution_count": self.execution_count,
"payload": [],
"user_expressions": {},
}
return message
def _respond_in_chunks(self, name, s, chunksize=1024, parent_header=None):
if s is None:
return
n = len(s)
if n == 0:
return
lower = range(0, n, chunksize)
upper = range(chunksize, n + chunksize, chunksize)
for lwr, upr in zip(lower, upper):
response = {"name": name, "text": s[lwr:upr]}
self.send(
self.iopub_socket, "stream", response, parent_header=parent_header
)
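    # Chunking example for _respond_in_chunks() (illustrative sizes): for a
    # 2500-character string and the default chunksize of 1024,
    # zip(range(0, 2500, 1024), range(1024, 3524, 1024)) yields the slice bounds
    # (0, 1024), (1024, 2048) and (2048, 3072); the last slice simply runs off the end
    # of the string, so three 'stream' messages of 1024, 1024 and 452 characters are
    # sent.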
def handle_complete_request(self, message, identities):
"""Handles kernel info requests."""
content = self.do_complete(
message["content"]["code"], message["content"]["cursor_pos"]
)
self.send(
self.shell_stream,
"complete_reply",
content,
parent_header=message["header"],
identities=identities,
)
def do_complete(self, code: str, pos: int):
"""Get completions."""
shell = XSH.shell # type: ignore
line_start = code.rfind("\n", 0, pos) + 1
line_stop = code.find("\n", pos)
if line_stop == -1:
line_stop = len(code)
else:
line_stop += 1
line = code[line_start:line_stop]
endidx = pos - line_start
line_ex: str = XSH.aliases.expand_alias(line, endidx) # type: ignore
begidx = line[:endidx].rfind(" ") + 1 if line[:endidx].rfind(" ") >= 0 else 0
prefix = line[begidx:endidx]
expand_offset = len(line_ex) - len(line)
multiline_text = code
cursor_index = pos
if line != line_ex:
multiline_text = (
multiline_text[:line_start] + line_ex + multiline_text[line_stop:]
)
cursor_index += expand_offset
rtn, _ = self.completer.complete(
prefix,
line_ex,
begidx + expand_offset,
endidx + expand_offset,
shell.ctx,
multiline_text=multiline_text,
cursor_index=cursor_index,
)
if isinstance(rtn, Set):
rtn = list(rtn)
message = {
"matches": rtn,
"cursor_start": begidx,
"cursor_end": endidx,
"metadata": {},
"status": "ok",
}
return message
def handle_kernel_info_request(self, message, identities):
"""Handles kernel info requests."""
content = {
"protocol_version": "5.0",
"ipython_version": [1, 1, 0, ""],
"language": self.language,
"language_version": self.language_version,
"implementation": self.implementation,
"implementation_version": self.implementation_version,
"language_info": self.language_info,
"banner": self.banner,
}
self.send(
self.shell_stream,
"kernel_info_reply",
content,
parent_header=message["header"],
identities=identities,
)
def main():
setup(
shell_type="jupyter",
env={"PAGER": "cat"},
aliases={"less": "cat"},
xontribs=["coreutils"],
threadable_predictors={"git": predict_true, "man": predict_true},
)
if XSH.commands_cache.is_only_functional_alias("cat"): # type:ignore
# this is needed if the underlying system doesn't have cat
# we supply our own, because we can
XSH.aliases["cat"] = "xonsh-cat" # type:ignore
XSH.env["PAGER"] = "xonsh-cat" # type:ignore
shell = XSH.shell # type:ignore
kernel = shell.kernel = XonshKernel()
kernel.start()
if __name__ == "__main__":
main()
|
test_diskfile.py
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.obj.diskfile"""
import six.moves.cPickle as pickle
import os
import errno
import itertools
from unittest.util import safe_repr
import mock
import unittest
import email
import tempfile
import threading
import uuid
import xattr
import re
import six
from collections import defaultdict
from random import shuffle, randint
from shutil import rmtree
from time import time
from tempfile import mkdtemp
from contextlib import closing, contextmanager
from gzip import GzipFile
import pyeclib.ec_iface
from eventlet import hubs, timeout, tpool
from swift.obj.diskfile import MD5_OF_EMPTY_STRING, update_auditor_status
from test.unit import (mock as unit_mock, temptree, mock_check_drive,
patch_policies, debug_logger, EMPTY_ETAG,
make_timestamp_iter, DEFAULT_TEST_EC_TYPE,
requires_o_tmpfile_support_in_tmp,
encode_frag_archive_bodies, skip_if_no_xattrs)
from swift.obj import diskfile
from swift.common import utils
from swift.common.utils import hash_path, mkdirs, Timestamp, \
encode_timestamps, O_TMPFILE, md5 as _md5
from swift.common import ring
from swift.common.splice import splice
from swift.common.exceptions import DiskFileNotExist, DiskFileQuarantined, \
DiskFileDeviceUnavailable, DiskFileDeleted, DiskFileNotOpen, \
DiskFileError, ReplicationLockTimeout, DiskFileCollision, \
DiskFileExpired, SwiftException, DiskFileNoSpace, \
DiskFileXattrNotSupported, PartitionLockTimeout
from swift.common.storage_policy import (
POLICIES, get_policy_string, StoragePolicy, ECStoragePolicy, REPL_POLICY,
EC_POLICY, PolicyError)
from test.unit.obj.common import write_diskfile
test_policies = [
StoragePolicy(0, name='zero', is_default=True),
ECStoragePolicy(1, name='one', is_default=False,
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4),
]
class md5(object):
def __init__(self, s=b''):
if not isinstance(s, bytes):
s = s.encode('ascii')
self.md = _md5(s, usedforsecurity=False)
def update(self, s=b''):
if not isinstance(s, bytes):
s = s.encode('ascii')
return self.md.update(s)
@property
def hexdigest(self):
return self.md.hexdigest
@property
def digest(self):
return self.md.digest
def find_paths_with_matching_suffixes(needed_matches=2, needed_suffixes=3):
paths = defaultdict(list)
while True:
path = ('a', 'c', uuid.uuid4().hex)
hash_ = hash_path(*path)
suffix = hash_[-3:]
paths[suffix].append(path)
if len(paths) < needed_suffixes:
            # in the extremely unlikely situation where you land the matches
# you need before you get the total suffixes you need - it's
# simpler to just ignore this suffix for now
continue
if len(paths[suffix]) >= needed_matches:
break
return paths, suffix
def _create_test_ring(path, policy):
ring_name = get_policy_string('object', policy)
testgz = os.path.join(path, ring_name + '.ring.gz')
intended_replica2part2dev_id = [
[0, 1, 2, 3, 4, 5, 6],
[1, 2, 3, 0, 5, 6, 4],
[2, 3, 0, 1, 6, 4, 5]]
intended_devs = [
{'id': 0, 'device': 'sda1', 'zone': 0, 'ip': '127.0.0.0',
'port': 6200},
{'id': 1, 'device': 'sda1', 'zone': 1, 'ip': '127.0.0.1',
'port': 6200},
{'id': 2, 'device': 'sda1', 'zone': 2, 'ip': '127.0.0.2',
'port': 6200},
{'id': 3, 'device': 'sda1', 'zone': 4, 'ip': '127.0.0.3',
'port': 6200},
{'id': 4, 'device': 'sda1', 'zone': 5, 'ip': '127.0.0.4',
'port': 6200},
{'id': 5, 'device': 'sda1', 'zone': 6,
'ip': 'fe80::202:b3ff:fe1e:8329', 'port': 6200},
{'id': 6, 'device': 'sda1', 'zone': 7,
'ip': '2001:0db8:85a3:0000:0000:8a2e:0370:7334',
'port': 6200}]
intended_part_shift = 30
intended_reload_time = 15
with closing(GzipFile(testgz, 'wb')) as f:
pickle.dump(
ring.RingData(intended_replica2part2dev_id, intended_devs,
intended_part_shift),
f)
return ring.Ring(path, ring_name=ring_name,
reload_time=intended_reload_time)
def _make_datafilename(timestamp, policy, frag_index=None, durable=False):
if frag_index is None:
frag_index = randint(0, 9)
filename = timestamp.internal
if policy.policy_type == EC_POLICY:
filename += '#%d' % int(frag_index)
if durable:
filename += '#d'
filename += '.data'
return filename
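# Examples of the filenames produced above (values are illustrative): a replication
# policy yields '<timestamp>.data', e.g. '1401379842.14643.data', while an EC policy
# appends the fragment index and, if durable, a '#d' marker, e.g.
# '1401379842.14643#3#d.data'.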
def _make_metafilename(meta_timestamp, ctype_timestamp=None):
filename = meta_timestamp.internal
if ctype_timestamp is not None:
delta = meta_timestamp.raw - ctype_timestamp.raw
filename = '%s-%x' % (filename, delta)
filename += '.meta'
return filename
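# Example .meta names (values are illustrative): with no content-type timestamp the
# name is just '<meta_timestamp>.meta'; otherwise the raw offset between the meta and
# content-type timestamps is appended in hex, giving names of the form
# '<meta_timestamp>-<hex delta>.meta'.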
@patch_policies
class TestDiskFileModuleMethods(unittest.TestCase):
def setUp(self):
skip_if_no_xattrs()
utils.HASH_PATH_SUFFIX = b'endcap'
utils.HASH_PATH_PREFIX = b''
# Setup a test ring per policy (stolen from common/test_ring.py)
self.testdir = tempfile.mkdtemp()
self.devices = os.path.join(self.testdir, 'node')
rmtree(self.testdir, ignore_errors=1)
os.mkdir(self.testdir)
os.mkdir(self.devices)
self.existing_device = 'sda1'
os.mkdir(os.path.join(self.devices, self.existing_device))
self.objects = os.path.join(self.devices, self.existing_device,
'objects')
os.mkdir(self.objects)
self.parts = {}
for part in ['0', '1', '2', '3']:
self.parts[part] = os.path.join(self.objects, part)
os.mkdir(os.path.join(self.objects, part))
self.ring = _create_test_ring(self.testdir, POLICIES.legacy)
self.conf = dict(
swift_dir=self.testdir, devices=self.devices, mount_check='false',
timeout='300', stats_interval='1')
self.logger = debug_logger()
self.df_mgr = diskfile.DiskFileManager(self.conf, logger=self.logger)
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
def _create_diskfile(self, policy):
return self.df_mgr.get_diskfile(self.existing_device,
'0', 'a', 'c', 'o',
policy=policy)
def test_relink_paths(self):
target_dir = os.path.join(self.testdir, 'd1')
os.mkdir(target_dir)
target_path = os.path.join(target_dir, 'f1')
with open(target_path, 'w') as fd:
fd.write('junk')
new_target_path = os.path.join(self.testdir, 'd2', 'f1')
created = diskfile.relink_paths(target_path, new_target_path)
self.assertTrue(created)
self.assertTrue(os.path.isfile(new_target_path))
with open(new_target_path, 'r') as fd:
self.assertEqual('junk', fd.read())
def test_relink_paths_makedirs_error(self):
target_dir = os.path.join(self.testdir, 'd1')
os.mkdir(target_dir)
target_path = os.path.join(target_dir, 'f1')
with open(target_path, 'w') as fd:
fd.write('junk')
new_target_path = os.path.join(self.testdir, 'd2', 'f1')
with mock.patch('swift.obj.diskfile.os.makedirs',
side_effect=Exception('oops')):
with self.assertRaises(Exception) as cm:
diskfile.relink_paths(target_path, new_target_path)
self.assertEqual('oops', str(cm.exception))
with self.assertRaises(Exception) as cm:
diskfile.relink_paths(target_path, new_target_path,
ignore_missing=False)
self.assertEqual('oops', str(cm.exception))
def test_relink_paths_makedirs_race(self):
# test two concurrent relinks of the same object hash dir with race
# around makedirs
target_dir = os.path.join(self.testdir, 'd1')
# target dir exists
os.mkdir(target_dir)
target_path_1 = os.path.join(target_dir, 't1.data')
target_path_2 = os.path.join(target_dir, 't2.data')
# new target dir and files do not exist
new_target_dir = os.path.join(self.testdir, 'd2')
new_target_path_1 = os.path.join(new_target_dir, 't1.data')
new_target_path_2 = os.path.join(new_target_dir, 't2.data')
created = []
def write_and_relink(target_path, new_target_path):
with open(target_path, 'w') as fd:
fd.write(target_path)
created.append(diskfile.relink_paths(target_path, new_target_path))
calls = []
orig_makedirs = os.makedirs
def mock_makedirs(path, *args):
calls.append(path)
if len(calls) == 1:
# pretend another process jumps in here and relinks same dirs
write_and_relink(target_path_2, new_target_path_2)
return orig_makedirs(path, *args)
with mock.patch('swift.obj.diskfile.os.makedirs', mock_makedirs):
write_and_relink(target_path_1, new_target_path_1)
self.assertEqual([new_target_dir, new_target_dir], calls)
self.assertTrue(os.path.isfile(new_target_path_1))
with open(new_target_path_1, 'r') as fd:
self.assertEqual(target_path_1, fd.read())
self.assertTrue(os.path.isfile(new_target_path_2))
with open(new_target_path_2, 'r') as fd:
self.assertEqual(target_path_2, fd.read())
self.assertEqual([True, True], created)
def test_relink_paths_object_dir_exists_but_not_dir(self):
target_dir = os.path.join(self.testdir, 'd1')
os.mkdir(target_dir)
target_path = os.path.join(target_dir, 't1.data')
with open(target_path, 'w') as fd:
fd.write(target_path)
# make a file where the new object dir should be
new_target_dir = os.path.join(self.testdir, 'd2')
with open(new_target_dir, 'w') as fd:
fd.write(new_target_dir)
new_target_path = os.path.join(new_target_dir, 't1.data')
with self.assertRaises(OSError) as cm:
diskfile.relink_paths(target_path, new_target_path)
self.assertEqual(errno.ENOTDIR, cm.exception.errno)
# make a symlink to target where the new object dir should be
os.unlink(new_target_dir)
os.symlink(target_path, new_target_dir)
with self.assertRaises(OSError) as cm:
diskfile.relink_paths(target_path, new_target_path)
self.assertEqual(errno.ENOTDIR, cm.exception.errno)
def test_relink_paths_os_link_error(self):
# check relink_paths raises exception from os.link
target_dir = os.path.join(self.testdir, 'd1')
os.mkdir(target_dir)
target_path = os.path.join(target_dir, 'f1')
with open(target_path, 'w') as fd:
fd.write('junk')
new_target_path = os.path.join(self.testdir, 'd2', 'f1')
with mock.patch('swift.obj.diskfile.os.link',
side_effect=OSError(errno.EPERM, 'nope')):
with self.assertRaises(Exception) as cm:
diskfile.relink_paths(target_path, new_target_path)
self.assertEqual(errno.EPERM, cm.exception.errno)
def test_relink_paths_target_path_does_not_exist(self):
# check relink_paths does not raise exception
target_dir = os.path.join(self.testdir, 'd1')
os.mkdir(target_dir)
target_path = os.path.join(target_dir, 'f1')
new_target_path = os.path.join(self.testdir, 'd2', 'f1')
created = diskfile.relink_paths(target_path, new_target_path)
self.assertFalse(os.path.exists(target_path))
self.assertFalse(os.path.exists(new_target_path))
self.assertFalse(created)
with self.assertRaises(OSError) as cm:
diskfile.relink_paths(target_path, new_target_path,
ignore_missing=False)
self.assertEqual(errno.ENOENT, cm.exception.errno)
self.assertFalse(os.path.exists(target_path))
self.assertFalse(os.path.exists(new_target_path))
def test_relink_paths_os_link_race(self):
# test two concurrent relinks of the same object hash dir with race
# around os.link
target_dir = os.path.join(self.testdir, 'd1')
# target dir exists
os.mkdir(target_dir)
target_path = os.path.join(target_dir, 't1.data')
# new target dir and file do not exist
new_target_dir = os.path.join(self.testdir, 'd2')
new_target_path = os.path.join(new_target_dir, 't1.data')
created = []
def write_and_relink(target_path, new_target_path):
with open(target_path, 'w') as fd:
fd.write(target_path)
created.append(diskfile.relink_paths(target_path, new_target_path))
calls = []
orig_link = os.link
def mock_link(path, new_path):
calls.append((path, new_path))
if len(calls) == 1:
# pretend another process jumps in here and links same files
write_and_relink(target_path, new_target_path)
return orig_link(path, new_path)
with mock.patch('swift.obj.diskfile.os.link', mock_link):
write_and_relink(target_path, new_target_path)
self.assertEqual([(target_path, new_target_path)] * 2, calls)
self.assertTrue(os.path.isfile(new_target_path))
with open(new_target_path, 'r') as fd:
self.assertEqual(target_path, fd.read())
with open(target_path, 'r') as fd:
self.assertEqual(target_path, fd.read())
self.assertEqual([True, False], created)
def test_relink_paths_different_file_exists(self):
# check for an exception if a hard link cannot be made because a
# different file already exists at new_target_path
target_dir = os.path.join(self.testdir, 'd1')
# target dir and file exists
os.mkdir(target_dir)
target_path = os.path.join(target_dir, 't1.data')
with open(target_path, 'w') as fd:
fd.write(target_path)
# new target dir and different file exist
new_target_dir = os.path.join(self.testdir, 'd2')
os.mkdir(new_target_dir)
new_target_path = os.path.join(new_target_dir, 't1.data')
with open(new_target_path, 'w') as fd:
fd.write(new_target_path)
with self.assertRaises(OSError) as cm:
diskfile.relink_paths(target_path, new_target_path)
self.assertEqual(errno.EEXIST, cm.exception.errno)
# check nothing got deleted...
self.assertTrue(os.path.isfile(target_path))
with open(target_path, 'r') as fd:
self.assertEqual(target_path, fd.read())
self.assertTrue(os.path.isfile(new_target_path))
with open(new_target_path, 'r') as fd:
self.assertEqual(new_target_path, fd.read())
def test_relink_paths_same_file_exists(self):
# check for no exception if a hard link cannot be made because a link
# to the same file already exists at the path
target_dir = os.path.join(self.testdir, 'd1')
# target dir and file exists
os.mkdir(target_dir)
target_path = os.path.join(target_dir, 't1.data')
with open(target_path, 'w') as fd:
fd.write(target_path)
# new target dir and link to same file exist
new_target_dir = os.path.join(self.testdir, 'd2')
os.mkdir(new_target_dir)
new_target_path = os.path.join(new_target_dir, 't1.data')
os.link(target_path, new_target_path)
with open(new_target_path, 'r') as fd:
self.assertEqual(target_path, fd.read()) # sanity check
# existing link checks ok
created = diskfile.relink_paths(target_path, new_target_path)
with open(new_target_path, 'r') as fd:
self.assertEqual(target_path, fd.read()) # sanity check
self.assertFalse(created)
# now pretend there is an error when checking that the link already
# exists - expect the EEXIST exception to be raised
orig_stat = os.stat
def mocked_stat(path):
if path == new_target_path:
raise OSError(errno.EPERM, 'cannot be sure link exists :(')
return orig_stat(path)
with mock.patch('swift.obj.diskfile.os.stat', mocked_stat):
with self.assertRaises(OSError) as cm:
diskfile.relink_paths(target_path, new_target_path)
self.assertEqual(errno.EEXIST, cm.exception.errno, str(cm.exception))
with open(new_target_path, 'r') as fd:
self.assertEqual(target_path, fd.read()) # sanity check
# ...unless while checking for an existing link the target file is
        # found to no longer exist, which is ok
def mocked_stat(path):
if path == target_path:
                raise OSError(errno.ENOENT, 'target no longer here :)')
return orig_stat(path)
with mock.patch('swift.obj.diskfile.os.stat', mocked_stat):
created = diskfile.relink_paths(target_path, new_target_path)
with open(new_target_path, 'r') as fd:
self.assertEqual(target_path, fd.read()) # sanity check
self.assertFalse(created)
def test_extract_policy(self):
# good path names
pn = 'objects/0/606/1984527ed7ef6247c78606/1401379842.14643.data'
self.assertEqual(diskfile.extract_policy(pn), POLICIES[0])
pn = 'objects-1/0/606/198452b6ef6247c78606/1401379842.14643.data'
self.assertEqual(diskfile.extract_policy(pn), POLICIES[1])
# leading slash
pn = '/objects/0/606/1984527ed7ef6247c78606/1401379842.14643.data'
self.assertEqual(diskfile.extract_policy(pn), POLICIES[0])
pn = '/objects-1/0/606/198452b6ef6247c78606/1401379842.14643.data'
self.assertEqual(diskfile.extract_policy(pn), POLICIES[1])
# full paths
good_path = '/srv/node/sda1/objects-1/1/abc/def/1234.data'
self.assertEqual(diskfile.extract_policy(good_path), POLICIES[1])
good_path = '/srv/node/sda1/objects/1/abc/def/1234.data'
self.assertEqual(diskfile.extract_policy(good_path), POLICIES[0])
# short paths
path = '/srv/node/sda1/objects/1/1234.data'
self.assertEqual(diskfile.extract_policy(path), POLICIES[0])
path = '/srv/node/sda1/objects-1/1/1234.data'
self.assertEqual(diskfile.extract_policy(path), POLICIES[1])
        # well formatted, but unknown policy index
pn = 'objects-2/0/606/198427efcff042c78606/1401379842.14643.data'
self.assertIsNone(diskfile.extract_policy(pn))
# malformed path
self.assertIsNone(diskfile.extract_policy(''))
bad_path = '/srv/node/sda1/objects-t/1/abc/def/1234.data'
self.assertIsNone(diskfile.extract_policy(bad_path))
pn = 'XXXX/0/606/1984527ed42b6ef6247c78606/1401379842.14643.data'
self.assertIsNone(diskfile.extract_policy(pn))
bad_path = '/srv/node/sda1/foo-1/1/abc/def/1234.data'
self.assertIsNone(diskfile.extract_policy(bad_path))
bad_path = '/srv/node/sda1/obj1/1/abc/def/1234.data'
self.assertIsNone(diskfile.extract_policy(bad_path))
def test_quarantine_renamer(self):
for policy in POLICIES:
# we use this for convenience, not really about a diskfile layout
df = self._create_diskfile(policy=policy)
mkdirs(df._datadir)
exp_dir = os.path.join(self.devices, 'quarantined',
diskfile.get_data_dir(policy),
os.path.basename(df._datadir))
qbit = os.path.join(df._datadir, 'qbit')
with open(qbit, 'w') as f:
f.write('abc')
to_dir = diskfile.quarantine_renamer(self.devices, qbit)
self.assertEqual(to_dir, exp_dir)
self.assertRaises(OSError, diskfile.quarantine_renamer,
self.devices, qbit)
def test_get_data_dir(self):
self.assertEqual(diskfile.get_data_dir(POLICIES[0]),
diskfile.DATADIR_BASE)
self.assertEqual(diskfile.get_data_dir(POLICIES[1]),
diskfile.DATADIR_BASE + "-1")
self.assertRaises(ValueError, diskfile.get_data_dir, 'junk')
self.assertRaises(ValueError, diskfile.get_data_dir, 99)
def test_get_async_dir(self):
self.assertEqual(diskfile.get_async_dir(POLICIES[0]),
diskfile.ASYNCDIR_BASE)
self.assertEqual(diskfile.get_async_dir(POLICIES[1]),
diskfile.ASYNCDIR_BASE + "-1")
self.assertRaises(ValueError, diskfile.get_async_dir, 'junk')
self.assertRaises(ValueError, diskfile.get_async_dir, 99)
def test_get_tmp_dir(self):
self.assertEqual(diskfile.get_tmp_dir(POLICIES[0]),
diskfile.TMP_BASE)
self.assertEqual(diskfile.get_tmp_dir(POLICIES[1]),
diskfile.TMP_BASE + "-1")
self.assertRaises(ValueError, diskfile.get_tmp_dir, 'junk')
self.assertRaises(ValueError, diskfile.get_tmp_dir, 99)
def test_pickle_async_update_tmp_dir(self):
for policy in POLICIES:
if int(policy) == 0:
tmp_part = 'tmp'
else:
tmp_part = 'tmp-%d' % policy
tmp_path = os.path.join(
self.devices, self.existing_device, tmp_part)
self.assertFalse(os.path.isdir(tmp_path))
pickle_args = (self.existing_device, 'a', 'c', 'o',
'data', 0.0, policy)
os.makedirs(tmp_path)
            # now create an async update
self.df_mgr.pickle_async_update(*pickle_args)
# check tempdir
self.assertTrue(os.path.isdir(tmp_path))
def test_get_part_path(self):
# partition passed as 'str'
part_dir = diskfile.get_part_path('/srv/node/sda1', POLICIES[0], '123')
exp_dir = '/srv/node/sda1/objects/123'
self.assertEqual(part_dir, exp_dir)
# partition passed as 'int'
part_dir = diskfile.get_part_path('/srv/node/sdb5', POLICIES[1], 123)
exp_dir = '/srv/node/sdb5/objects-1/123'
self.assertEqual(part_dir, exp_dir)
def test_write_read_metadata(self):
path = os.path.join(self.testdir, str(uuid.uuid4()))
metadata = {'name': '/a/c/o',
'Content-Length': 99,
u'X-Object-Sysmeta-Ec-Frag-Index': 4,
u'X-Object-Meta-Strange': u'should be bytes',
b'X-Object-Meta-x\xff': b'not utf8 \xff',
u'X-Object-Meta-y\xe8': u'not ascii \xe8'}
as_bytes = {b'name': b'/a/c/o',
b'Content-Length': 99,
b'X-Object-Sysmeta-Ec-Frag-Index': 4,
b'X-Object-Meta-Strange': b'should be bytes',
b'X-Object-Meta-x\xff': b'not utf8 \xff',
b'X-Object-Meta-y\xc3\xa8': b'not ascii \xc3\xa8'}
if six.PY2:
as_native = as_bytes
else:
as_native = dict((k.decode('utf-8', 'surrogateescape'),
v if isinstance(v, int) else
v.decode('utf-8', 'surrogateescape'))
for k, v in as_bytes.items())
def check_metadata(expected, typ):
with open(path, 'rb') as fd:
actual = diskfile.read_metadata(fd)
self.assertEqual(expected, actual)
for k, v in actual.items():
self.assertIsInstance(k, typ)
self.assertIsInstance(v, (typ, int))
# Check can write raw bytes
with open(path, 'wb') as fd:
diskfile.write_metadata(fd, as_bytes)
check_metadata(as_native, str)
# Check can write native (with surrogates on py3)
with open(path, 'wb') as fd:
diskfile.write_metadata(fd, as_native)
check_metadata(as_native, str)
# Check can write some crazy mix
with open(path, 'wb') as fd:
diskfile.write_metadata(fd, metadata)
check_metadata(as_native, str)
        # mock the read path to check that the write path persisted encoded metadata
with mock.patch.object(diskfile, '_decode_metadata', lambda x: x):
check_metadata(as_bytes, bytes)
# simulate a legacy diskfile that might have persisted
# (some) unicode metadata
with mock.patch.object(diskfile, '_encode_metadata', lambda x: x):
with open(path, 'wb') as fd:
diskfile.write_metadata(fd, metadata)
# sanity check: mock read path again to see that we did persist unicode
with mock.patch.object(diskfile, '_decode_metadata', lambda x: x):
with open(path, 'rb') as fd:
actual = diskfile.read_metadata(fd)
for k, v in actual.items():
if isinstance(k, six.text_type) and \
k == u'X-Object-Meta-Strange':
self.assertIsInstance(v, six.text_type)
break
else:
self.fail('Did not find X-Object-Meta-Strange')
# check that read_metadata converts binary_type
check_metadata(as_native, str)
@patch_policies
class TestObjectAuditLocationGenerator(unittest.TestCase):
def _make_file(self, path):
try:
os.makedirs(os.path.dirname(path))
except OSError as err:
if err.errno != errno.EEXIST:
raise
with open(path, 'w'):
pass
def test_audit_location_class(self):
al = diskfile.AuditLocation('abc', '123', '_-_',
policy=POLICIES.legacy)
self.assertEqual(str(al), 'abc')
def test_finding_of_hashdirs(self):
with temptree([]) as tmpdir:
# the good
os.makedirs(os.path.join(tmpdir, "sdp", "objects", "1519", "aca",
"5c1fdc1ffb12e5eaf84edc30d8b67aca"))
os.makedirs(os.path.join(tmpdir, "sdp", "objects", "1519", "aca",
"fdfd184d39080020bc8b487f8a7beaca"))
os.makedirs(os.path.join(tmpdir, "sdp", "objects", "1519", "df2",
"b0fe7af831cc7b1af5bf486b1c841df2"))
os.makedirs(os.path.join(tmpdir, "sdp", "objects", "9720", "ca5",
"4a943bc72c2e647c4675923d58cf4ca5"))
os.makedirs(os.path.join(tmpdir, "sdq", "objects", "3071", "8eb",
"fcd938702024c25fef6c32fef05298eb"))
os.makedirs(os.path.join(tmpdir, "sdp", "objects-1", "9970", "ca5",
"4a943bc72c2e647c4675923d58cf4ca5"))
self._make_file(os.path.join(tmpdir, "sdp", "objects", "1519",
"fed"))
self._make_file(os.path.join(tmpdir, "sdq", "objects", "9876"))
# the empty
os.makedirs(os.path.join(tmpdir, "sdr"))
os.makedirs(os.path.join(tmpdir, "sds", "objects"))
os.makedirs(os.path.join(tmpdir, "sdt", "objects", "9601"))
os.makedirs(os.path.join(tmpdir, "sdu", "objects", "6499", "f80"))
# the irrelevant
os.makedirs(os.path.join(tmpdir, "sdv", "accounts", "77", "421",
"4b8c86149a6d532f4af018578fd9f421"))
os.makedirs(os.path.join(tmpdir, "sdw", "containers", "28", "51e",
"4f9eee668b66c6f0250bfa3c7ab9e51e"))
logger = debug_logger()
loc_generators = []
datadirs = ["objects", "objects-1"]
for datadir in datadirs:
loc_generators.append(
diskfile.object_audit_location_generator(
devices=tmpdir, datadir=datadir, mount_check=False,
logger=logger))
all_locs = itertools.chain(*loc_generators)
locations = [(loc.path, loc.device, loc.partition, loc.policy) for
loc in all_locs]
locations.sort()
expected = \
[(os.path.join(tmpdir, "sdp", "objects-1", "9970", "ca5",
"4a943bc72c2e647c4675923d58cf4ca5"),
"sdp", "9970", POLICIES[1]),
(os.path.join(tmpdir, "sdp", "objects", "1519", "aca",
"5c1fdc1ffb12e5eaf84edc30d8b67aca"),
"sdp", "1519", POLICIES[0]),
(os.path.join(tmpdir, "sdp", "objects", "1519", "aca",
"fdfd184d39080020bc8b487f8a7beaca"),
"sdp", "1519", POLICIES[0]),
(os.path.join(tmpdir, "sdp", "objects", "1519", "df2",
"b0fe7af831cc7b1af5bf486b1c841df2"),
"sdp", "1519", POLICIES[0]),
(os.path.join(tmpdir, "sdp", "objects", "9720", "ca5",
"4a943bc72c2e647c4675923d58cf4ca5"),
"sdp", "9720", POLICIES[0]),
(os.path.join(tmpdir, "sdq", "objects", "3071", "8eb",
"fcd938702024c25fef6c32fef05298eb"),
"sdq", "3071", POLICIES[0]),
]
self.assertEqual(locations, expected)
# Reset status file for next run
for datadir in datadirs:
diskfile.clear_auditor_status(tmpdir, datadir)
            # run the generators again and expect the same locations
for datadir in datadirs:
loc_generators.append(
diskfile.object_audit_location_generator(
devices=tmpdir, datadir=datadir, mount_check=False,
logger=logger))
all_locs = itertools.chain(*loc_generators)
locations = [(loc.path, loc.device, loc.partition, loc.policy) for
loc in all_locs]
locations.sort()
self.assertEqual(locations, expected)
def test_skipping_unmounted_devices(self):
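        # With mount_check=True only the mounted device (sdp) is visited;
        # the unmounted one is skipped (and logged when a logger is given).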
with temptree([]) as tmpdir, mock_check_drive() as mocks:
mocks['ismount'].side_effect = lambda path: path.endswith('sdp')
os.makedirs(os.path.join(tmpdir, "sdp", "objects",
"2607", "df3",
"ec2871fe724411f91787462f97d30df3"))
os.makedirs(os.path.join(tmpdir, "sdq", "objects",
"9785", "a10",
"4993d582f41be9771505a8d4cb237a10"))
locations = [
(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=tmpdir, datadir="objects", mount_check=True)]
locations.sort()
self.assertEqual(
locations,
[(os.path.join(tmpdir, "sdp", "objects",
"2607", "df3",
"ec2871fe724411f91787462f97d30df3"),
"sdp", "2607", POLICIES[0])])
# Do it again, this time with a logger.
logger = debug_logger()
locations = [
(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=tmpdir, datadir="objects", mount_check=True,
logger=logger)]
debug_lines = logger.get_lines_for_level('debug')
self.assertEqual([
'Skipping: %s/sdq is not mounted' % tmpdir,
], debug_lines)
def test_skipping_files(self):
with temptree([]) as tmpdir:
os.makedirs(os.path.join(tmpdir, "sdp", "objects",
"2607", "df3",
"ec2871fe724411f91787462f97d30df3"))
with open(os.path.join(tmpdir, "garbage"), "wb"):
pass
locations = [
(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=tmpdir, datadir="objects", mount_check=False)]
self.assertEqual(
locations,
[(os.path.join(tmpdir, "sdp", "objects",
"2607", "df3",
"ec2871fe724411f91787462f97d30df3"),
"sdp", "2607", POLICIES[0])])
# Do it again, this time with a logger.
logger = debug_logger('test')
locations = [
(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=tmpdir, datadir="objects", mount_check=False,
logger=logger)]
debug_lines = logger.get_lines_for_level('debug')
self.assertEqual([
'Skipping: %s/garbage is not a directory' % tmpdir,
], debug_lines)
logger.clear()
with mock_check_drive() as mocks:
mocks['ismount'].side_effect = lambda path: (
False if path.endswith('garbage') else True)
locations = [
(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=tmpdir, datadir="objects", mount_check=True,
logger=logger)]
debug_lines = logger.get_lines_for_level('debug')
self.assertEqual([
'Skipping: %s/garbage is not mounted' % tmpdir,
], debug_lines)
def test_only_catch_expected_errors(self):
# Crazy exceptions should still escape object_audit_location_generator
# so that errors get logged and a human can see what's going wrong;
# only normal FS corruption should be skipped over silently.
def list_locations(dirname, datadir):
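            # exhaust the generator and return every audit location found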
return [(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=dirname, datadir=datadir, mount_check=False)]
real_listdir = os.listdir
def splode_if_endswith(suffix):
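            # build a fake os.listdir that raises EACCES for any path
            # ending with the given suffix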
def sploder(path):
if path.endswith(suffix):
raise OSError(errno.EACCES, "don't try to ad-lib")
else:
return real_listdir(path)
return sploder
with temptree([]) as tmpdir:
os.makedirs(os.path.join(tmpdir, "sdf", "objects",
"2607", "b54",
"fe450ec990a88cc4b252b181bab04b54"))
with mock.patch('os.listdir', splode_if_endswith("sdf/objects")):
self.assertRaises(OSError, list_locations, tmpdir, "objects")
with mock.patch('os.listdir', splode_if_endswith("2607")):
self.assertRaises(OSError, list_locations, tmpdir, "objects")
with mock.patch('os.listdir', splode_if_endswith("b54")):
self.assertRaises(OSError, list_locations, tmpdir, "objects")
def test_auditor_status(self):
with temptree([]) as tmpdir:
os.makedirs(os.path.join(tmpdir, "sdf", "objects", "1", "a", "b"))
os.makedirs(os.path.join(tmpdir, "sdf", "objects", "2", "a", "b"))
datadir = "objects"
# Pretend that some time passed between each partition
with mock.patch('os.stat') as mock_stat, \
mock_check_drive(isdir=True):
mock_stat.return_value.st_mtime = time() - 60
# Auditor starts, there are two partitions to check
gen = diskfile.object_audit_location_generator(tmpdir,
datadir,
False)
next(gen)
next(gen)
            # Auditor stopped for some reason without raising StopIteration
            # in the generator and restarts. There is now only one remaining
            # partition to check
gen = diskfile.object_audit_location_generator(tmpdir, datadir,
False)
with mock_check_drive(isdir=True):
next(gen)
# There are no more remaining partitions
self.assertRaises(StopIteration, next, gen)
# There are no partitions to check if the auditor restarts another
# time and the status files have not been cleared
gen = diskfile.object_audit_location_generator(tmpdir, datadir,
False)
with mock_check_drive(isdir=True):
self.assertRaises(StopIteration, next, gen)
# Reset status file
diskfile.clear_auditor_status(tmpdir, datadir)
# If the auditor restarts another time, we expect to
# check two partitions again, because the remaining
# partitions were empty and a new listdir was executed
gen = diskfile.object_audit_location_generator(tmpdir, datadir,
False)
with mock_check_drive(isdir=True):
next(gen)
next(gen)
def test_update_auditor_status_throttle(self):
        # If there are a lot of nearly empty partitions,
        # update_auditor_status would write the status file many times a
        # second, creating unexpectedly high write load. This test ensures
        # that the status file is only written once a minute.
with temptree([]) as tmpdir:
os.makedirs(os.path.join(tmpdir, "sdf", "objects", "1", "a", "b"))
with mock.patch('swift.obj.diskfile.open') as mock_open:
# File does not exist yet - write expected
update_auditor_status(tmpdir, None, ['42'], "ALL")
self.assertEqual(1, mock_open.call_count)
mock_open.reset_mock()
# File exists, updated just now - no write expected
with mock.patch('os.stat') as mock_stat:
mock_stat.return_value.st_mtime = time()
update_auditor_status(tmpdir, None, ['42'], "ALL")
self.assertEqual(0, mock_open.call_count)
mock_open.reset_mock()
# File exists, updated just now, but empty partition list. This
# is a finalizing call, write expected
with mock.patch('os.stat') as mock_stat:
mock_stat.return_value.st_mtime = time()
update_auditor_status(tmpdir, None, [], "ALL")
self.assertEqual(1, mock_open.call_count)
mock_open.reset_mock()
# File updated more than 60 seconds ago - write expected
with mock.patch('os.stat') as mock_stat:
mock_stat.return_value.st_mtime = time() - 61
update_auditor_status(tmpdir, None, ['42'], "ALL")
self.assertEqual(1, mock_open.call_count)
class TestDiskFileRouter(unittest.TestCase):
@patch_policies(test_policies)
def test_policy(self):
conf = {}
logger = debug_logger('test-' + self.__class__.__name__)
df_router = diskfile.DiskFileRouter(conf, logger)
manager_0 = df_router[POLICIES[0]]
self.assertTrue(isinstance(manager_0, diskfile.DiskFileManager))
manager_1 = df_router[POLICIES[1]]
self.assertTrue(isinstance(manager_1, diskfile.ECDiskFileManager))
# The DiskFileRouter should not have to load the policy again
with mock.patch('swift.common.storage_policy.BaseStoragePolicy.' +
'get_diskfile_manager') as mock_load:
manager_3 = df_router[POLICIES[0]]
mock_load.assert_not_called()
self.assertIs(manager_3, manager_0)
self.assertTrue(isinstance(manager_3, diskfile.DiskFileManager))
def test_invalid_policy_config(self):
        # verify that invalid policy diskfile configs are detected when the
        # DiskFileRouter is created
bad_policy = StoragePolicy(0, name='zero', is_default=True,
diskfile_module='erasure_coding.fs')
with patch_policies([bad_policy]):
with self.assertRaises(PolicyError) as cm:
diskfile.DiskFileRouter({}, debug_logger())
self.assertIn('Invalid diskfile_module erasure_coding.fs',
str(cm.exception))
bad_policy = ECStoragePolicy(0, name='one', is_default=True,
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4,
diskfile_module='replication.fs')
with patch_policies([bad_policy]):
with self.assertRaises(PolicyError) as cm:
diskfile.DiskFileRouter({}, debug_logger())
self.assertIn('Invalid diskfile_module replication.fs',
str(cm.exception))
bad_policy = StoragePolicy(0, name='zero', is_default=True,
diskfile_module='thin_air.fs')
with patch_policies([bad_policy]):
with self.assertRaises(PolicyError) as cm:
diskfile.DiskFileRouter({}, debug_logger())
self.assertIn('Unable to load diskfile_module thin_air.fs',
str(cm.exception))
class BaseDiskFileTestMixin(object):
"""
Bag of helpers that are useful in the per-policy DiskFile test classes,
plus common setUp and tearDown methods.
"""
# set mgr_cls on subclasses
mgr_cls = None
def setUp(self):
skip_if_no_xattrs()
self.tmpdir = mkdtemp()
self.testdir = os.path.join(
self.tmpdir, 'tmp_test_obj_server_DiskFile')
self.existing_device = 'sda1'
self.existing_device2 = 'sda2'
for policy in POLICIES:
mkdirs(os.path.join(self.testdir, self.existing_device,
diskfile.get_tmp_dir(policy)))
mkdirs(os.path.join(self.testdir, self.existing_device2,
diskfile.get_tmp_dir(policy)))
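        # run tpool.execute calls inline so tests stay single-threaded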
self._orig_tpool_exc = tpool.execute
tpool.execute = lambda f, *args, **kwargs: f(*args, **kwargs)
self.conf = dict(devices=self.testdir, mount_check='false',
keep_cache_size=2 * 1024, mb_per_sync=1)
self.logger = debug_logger('test-' + self.__class__.__name__)
self.df_mgr = self.mgr_cls(self.conf, self.logger)
self.df_router = diskfile.DiskFileRouter(self.conf, self.logger)
self._ts_iter = (Timestamp(t) for t in
itertools.count(int(time())))
def tearDown(self):
rmtree(self.tmpdir, ignore_errors=True)
tpool.execute = self._orig_tpool_exc
def _manager_mock(self, manager_attribute_name, df=None):
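        # dotted path of an attribute on the manager class, for mock.patch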
mgr_cls = df._manager.__class__ if df else self.mgr_cls
return '.'.join([
mgr_cls.__module__, mgr_cls.__name__, manager_attribute_name])
def _assertDictContainsSubset(self, subset, dictionary, msg=None):
"""Checks whether dictionary is a superset of subset."""
        # This is almost identical to the method in the python3.4 version of
        # unittest.case.TestCase.assertDictContainsSubset, reproduced here to
        # avoid the deprecation warning in the original when using python3.
missing = []
mismatched = []
for key, value in subset.items():
if key not in dictionary:
missing.append(key)
elif value != dictionary[key]:
mismatched.append('%s, expected: %s, actual: %s' %
(safe_repr(key), safe_repr(value),
safe_repr(dictionary[key])))
if not (missing or mismatched):
return
standardMsg = ''
if missing:
standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
missing)
if mismatched:
if standardMsg:
standardMsg += '; '
standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
self.fail(self._formatMessage(msg, standardMsg))
class DiskFileManagerMixin(BaseDiskFileTestMixin):
"""
Abstract test method mixin for concrete test cases - this class
won't get picked up by test runners because it doesn't subclass
unittest.TestCase and doesn't have [Tt]est in the name.
"""
def _get_diskfile(self, policy, frag_index=None, **kwargs):
df_mgr = self.df_router[policy]
return df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy, frag_index=frag_index,
**kwargs)
def test_cleanup_uses_configured_reclaim_age(self):
# verify that the reclaim_age used when cleaning up tombstones is
# either the default or the configured value
def do_test(ts, expect_reclaim):
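            # write a tombstone at ts, run cleanup_ondisk_files and check
            # whether the tombstone was reclaimed as expected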
for policy in POLICIES:
self.df_router = diskfile.DiskFileRouter(
self.conf, self.logger)
df = self._get_diskfile(policy)
df.delete(ts.internal)
tombstone_file = os.path.join(df._datadir, ts.internal + '.ts')
# cleanup_ondisk_files always uses the configured value
df._manager.cleanup_ondisk_files(
os.path.dirname(tombstone_file))
self.assertNotEqual(
expect_reclaim, os.path.exists(tombstone_file))
# reclaim_age not configured so default should be used
do_test(Timestamp(time() - diskfile.DEFAULT_RECLAIM_AGE - 1), True)
do_test(Timestamp(time() - diskfile.DEFAULT_RECLAIM_AGE + 100), False)
# reclaim_age configured value should be used
self.conf['reclaim_age'] = 1000
do_test(Timestamp(time() - diskfile.DEFAULT_RECLAIM_AGE + 100), True)
do_test(Timestamp(time() - 1001), True)
do_test(Timestamp(time() + 100), False)
def _test_get_ondisk_files(self, scenarios, policy,
frag_index=None, **kwargs):
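        # run each scenario through _get_ondisk_files in the given order and
        # twice shuffled, checking that the expected files are selected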
class_under_test = self._get_diskfile(
policy, frag_index=frag_index, **kwargs)
for test in scenarios:
# test => [('filename.ext', '.ext'|False, ...), ...]
expected = {
ext[1:] + '_file': os.path.join(
class_under_test._datadir, filename)
for (filename, ext) in [v[:2] for v in test]
if ext in ('.data', '.meta', '.ts')}
# list(zip(...)) for py3 compatibility (zip is lazy there)
files = list(list(zip(*test))[0])
for _order in ('ordered', 'shuffled', 'shuffled'):
class_under_test = self._get_diskfile(
policy, frag_index=frag_index, **kwargs)
try:
actual = class_under_test._get_ondisk_files(files)
self._assertDictContainsSubset(
expected, actual,
'Expected %s from %s but got %s'
% (expected, files, actual))
except AssertionError as e:
self.fail('%s with files %s' % (str(e), files))
shuffle(files)
def _test_cleanup_ondisk_files(self, scenarios, policy,
reclaim_age=None):
# check that expected files are left in hashdir after cleanup
for test in scenarios:
class_under_test = self.df_router[policy]
# list(zip(...)) for py3 compatibility (zip is lazy there)
files = list(list(zip(*test))[0])
hashdir = os.path.join(self.testdir, str(uuid.uuid4()))
os.mkdir(hashdir)
for fname in files:
open(os.path.join(hashdir, fname), 'w')
expected_after_cleanup = set([f[0] for f in test
if (f[2] if len(f) > 2 else f[1])])
if reclaim_age:
class_under_test.reclaim_age = reclaim_age
class_under_test.cleanup_ondisk_files(hashdir)
else:
with mock.patch('swift.obj.diskfile.time') as mock_time:
# don't reclaim anything
mock_time.time.return_value = 0.0
class_under_test.cleanup_ondisk_files(hashdir)
if expected_after_cleanup:
after_cleanup = set(os.listdir(hashdir))
errmsg = "expected %r, got %r for test %r" % (
sorted(expected_after_cleanup), sorted(after_cleanup), test
)
self.assertEqual(expected_after_cleanup, after_cleanup, errmsg)
else:
self.assertFalse(os.path.exists(hashdir))
def _test_yield_hashes_cleanup(self, scenarios, policy):
        # opportunistic test to check that yield_hashes cleans up dir using
        # same scenarios as passed to _test_cleanup_ondisk_files
for test in scenarios:
class_under_test = self.df_router[policy]
# list(zip(...)) for py3 compatibility (zip is lazy there)
files = list(list(zip(*test))[0])
dev_path = os.path.join(self.testdir, str(uuid.uuid4()))
hashdir = os.path.join(
dev_path, diskfile.get_data_dir(policy),
'0', 'abc', '9373a92d072897b136b3fc06595b4abc')
os.makedirs(hashdir)
for fname in files:
open(os.path.join(hashdir, fname), 'w')
expected_after_cleanup = set([f[0] for f in test
if f[1] or len(f) > 2 and f[2]])
with mock.patch('swift.obj.diskfile.time') as mock_time:
# don't reclaim anything
mock_time.time.return_value = 0.0
mocked = 'swift.obj.diskfile.BaseDiskFileManager.get_dev_path'
with mock.patch(mocked) as mock_path:
mock_path.return_value = dev_path
for _ in class_under_test.yield_hashes(
'ignored', '0', policy, suffixes=['abc']):
# return values are tested in test_yield_hashes_*
pass
if expected_after_cleanup:
after_cleanup = set(os.listdir(hashdir))
errmsg = "expected %r, got %r for test %r" % (
sorted(expected_after_cleanup), sorted(after_cleanup), test
)
self.assertEqual(expected_after_cleanup, after_cleanup, errmsg)
else:
self.assertFalse(os.path.exists(hashdir))
def test_get_ondisk_files_with_empty_dir(self):
files = []
expected = dict(
data_file=None, meta_file=None, ctype_file=None, ts_file=None)
for policy in POLICIES:
for frag_index in (0, None, '13'):
# check manager
df_mgr = self.df_router[policy]
datadir = os.path.join('/srv/node/sdb1/',
diskfile.get_data_dir(policy))
actual = df_mgr.get_ondisk_files(files, datadir)
self._assertDictContainsSubset(expected, actual)
# check diskfile under the hood
df = self._get_diskfile(policy, frag_index=frag_index)
actual = df._get_ondisk_files(files)
self._assertDictContainsSubset(expected, actual)
# check diskfile open
self.assertRaises(DiskFileNotExist, df.open)
def test_get_ondisk_files_with_unexpected_file(self):
unexpected_files = ['junk', 'junk.data', '.junk']
timestamp = next(make_timestamp_iter())
tomb_file = timestamp.internal + '.ts'
for policy in POLICIES:
for unexpected in unexpected_files:
self.logger.clear()
files = [unexpected, tomb_file]
df_mgr = self.df_router[policy]
datadir = os.path.join('/srv/node/sdb1/',
diskfile.get_data_dir(policy))
results = df_mgr.get_ondisk_files(files, datadir)
expected = {'ts_file': os.path.join(datadir, tomb_file)}
self._assertDictContainsSubset(expected, results)
log_lines = df_mgr.logger.get_lines_for_level('warning')
self.assertTrue(
log_lines[0].startswith(
'Unexpected file %s'
% os.path.join(datadir, unexpected)))
def test_get_ondisk_files_no_rsync_temp_file_warning(self):
# get_ondisk_files logs no warnings for rsync temp files
class_under_test = self._get_diskfile(POLICIES[0])
files = [
'.1472017820.44503.data.QBYCYU', # rsync tempfile for a .data
'.total-bs.abcdef', # example of false positive
]
paths = [os.path.join(class_under_test._datadir, f) for f in files]
expected = {'unexpected': paths}
results = class_under_test._get_ondisk_files(files)
for k, v in expected.items():
self.assertEqual(results[k], v)
# no warnings
self.assertFalse(self.logger.get_lines_for_level('warning'))
# but we do get a debug!
lines = self.logger.get_lines_for_level('debug')
for path in paths:
expected_msg = 'Rsync tempfile: %s' % path
self.assertIn(expected_msg, lines)
def test_cleanup_ondisk_files_reclaim_non_data_files(self):
# Each scenario specifies a list of (filename, extension, [survives])
# tuples. If extension is set or 'survives' is True, the filename
# should still be in the dir after cleanup.
much_older = Timestamp(time() - 2000).internal
older = Timestamp(time() - 1001).internal
newer = Timestamp(time() - 900).internal
scenarios = [
[('%s.ts' % older, False, False)],
# fresh tombstone is preserved
[('%s.ts' % newer, '.ts', True)],
# tombstone reclaimed despite junk file
[('junk', False, True),
('%s.ts' % much_older, '.ts', False)],
# fresh .meta not reclaimed even if isolated
[('%s.meta' % newer, '.meta')],
# fresh .meta not reclaimed when tombstone is reclaimed
[('%s.meta' % newer, '.meta'),
('%s.ts' % older, False, False)],
# stale isolated .meta is reclaimed
[('%s.meta' % older, False, False)],
# stale .meta is reclaimed along with tombstone
[('%s.meta' % older, False, False),
('%s.ts' % older, False, False)]]
self._test_cleanup_ondisk_files(scenarios, POLICIES.default,
reclaim_age=1000)
def test_construct_dev_path(self):
res_path = self.df_mgr.construct_dev_path('abc')
self.assertEqual(os.path.join(self.df_mgr.devices, 'abc'), res_path)
def test_pickle_async_update(self):
self.df_mgr.logger.increment = mock.MagicMock()
ts = Timestamp(10000.0).internal
with mock.patch('swift.obj.diskfile.write_pickle') as wp:
self.df_mgr.pickle_async_update(self.existing_device,
'a', 'c', 'o',
dict(a=1, b=2), ts, POLICIES[0])
dp = self.df_mgr.construct_dev_path(self.existing_device)
ohash = diskfile.hash_path('a', 'c', 'o')
wp.assert_called_with({'a': 1, 'b': 2},
os.path.join(
dp, diskfile.get_async_dir(POLICIES[0]),
ohash[-3:], ohash + '-' + ts),
os.path.join(dp, 'tmp'))
self.df_mgr.logger.increment.assert_called_with('async_pendings')
def test_object_audit_location_generator(self):
locations = list(
self.df_mgr.object_audit_location_generator(POLICIES[0]))
self.assertEqual(locations, [])
def test_replication_one_per_device_deprecation(self):
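        # the newer replication_concurrency_per_device option supersedes the
        # deprecated replication_one_per_device boolean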
conf = dict(**self.conf)
mgr = diskfile.DiskFileManager(conf, self.logger)
self.assertEqual(mgr.replication_concurrency_per_device, 1)
conf = dict(replication_concurrency_per_device='0', **self.conf)
mgr = diskfile.DiskFileManager(conf, self.logger)
self.assertEqual(mgr.replication_concurrency_per_device, 0)
conf = dict(replication_concurrency_per_device='2', **self.conf)
mgr = diskfile.DiskFileManager(conf, self.logger)
self.assertEqual(mgr.replication_concurrency_per_device, 2)
conf = dict(replication_concurrency_per_device=2, **self.conf)
mgr = diskfile.DiskFileManager(conf, self.logger)
self.assertEqual(mgr.replication_concurrency_per_device, 2)
# Check backward compatibility
conf = dict(replication_one_per_device='true', **self.conf)
mgr = diskfile.DiskFileManager(conf, self.logger)
self.assertEqual(mgr.replication_concurrency_per_device, 1)
log_lines = mgr.logger.get_lines_for_level('warning')
self.assertIn('replication_one_per_device is deprecated',
log_lines[-1])
conf = dict(replication_one_per_device='false', **self.conf)
mgr = diskfile.DiskFileManager(conf, self.logger)
self.assertEqual(mgr.replication_concurrency_per_device, 0)
log_lines = mgr.logger.get_lines_for_level('warning')
self.assertIn('replication_one_per_device is deprecated',
log_lines[-1])
# If defined, new parameter has precedence
conf = dict(replication_concurrency_per_device='2',
replication_one_per_device='true', **self.conf)
mgr = diskfile.DiskFileManager(conf, self.logger)
self.assertEqual(mgr.replication_concurrency_per_device, 2)
log_lines = mgr.logger.get_lines_for_level('warning')
self.assertIn('replication_one_per_device ignored',
log_lines[-1])
conf = dict(replication_concurrency_per_device='2',
replication_one_per_device='false', **self.conf)
mgr = diskfile.DiskFileManager(conf, self.logger)
self.assertEqual(mgr.replication_concurrency_per_device, 2)
log_lines = mgr.logger.get_lines_for_level('warning')
self.assertIn('replication_one_per_device ignored',
log_lines[-1])
conf = dict(replication_concurrency_per_device='0',
replication_one_per_device='true', **self.conf)
mgr = diskfile.DiskFileManager(conf, self.logger)
self.assertEqual(mgr.replication_concurrency_per_device, 0)
log_lines = mgr.logger.get_lines_for_level('warning')
self.assertIn('replication_one_per_device ignored',
log_lines[-1])
def test_replication_lock_on(self):
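        # with one lock allowed per device, a second lock on the same device
        # must time out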
# Double check settings
self.df_mgr.replication_concurrency_per_device = 1
self.df_mgr.replication_lock_timeout = 0.1
success = False
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '1'):
with self.assertRaises(ReplicationLockTimeout):
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '2'):
success = True
self.assertFalse(success)
def test_replication_lock_off(self):
# Double check settings
self.df_mgr.replication_concurrency_per_device = 0
self.df_mgr.replication_lock_timeout = 0.1
# 2 locks must succeed
success = False
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '1'):
try:
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '2'):
success = True
except ReplicationLockTimeout as err:
self.fail('Unexpected exception: %s' % err)
self.assertTrue(success)
# 3 locks must succeed
success = False
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '1'):
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '2'):
try:
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '3'):
success = True
except ReplicationLockTimeout as err:
self.fail('Unexpected exception: %s' % err)
self.assertTrue(success)
def test_replication_lock_2(self):
# Double check settings
self.df_mgr.replication_concurrency_per_device = 2
self.df_mgr.replication_lock_timeout = 0.1
# 2 locks with replication_concurrency_per_device=2 must succeed
success = False
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '1'):
try:
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '2'):
success = True
except ReplicationLockTimeout as err:
self.fail('Unexpected exception: %s' % err)
self.assertTrue(success)
# 3 locks with replication_concurrency_per_device=2 must fail
success = False
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '1'):
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '2'):
with self.assertRaises(ReplicationLockTimeout):
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '3'):
success = True
self.assertFalse(success)
def test_replication_lock_another_device_fine(self):
# Double check settings
self.df_mgr.replication_concurrency_per_device = 1
self.df_mgr.replication_lock_timeout = 0.1
success = False
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '1'):
try:
with self.df_mgr.replication_lock(self.existing_device2,
POLICIES.legacy, '2'):
success = True
except ReplicationLockTimeout as err:
self.fail('Unexpected exception: %s' % err)
self.assertTrue(success)
def test_replication_lock_same_partition(self):
# Double check settings
self.df_mgr.replication_concurrency_per_device = 2
self.df_mgr.replication_lock_timeout = 0.1
success = False
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '1'):
with self.assertRaises(PartitionLockTimeout):
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '1'):
success = True
self.assertFalse(success)
def test_partition_lock_same_partition(self):
# Double check settings
self.df_mgr.replication_lock_timeout = 0.1
success = False
with self.df_mgr.partition_lock(self.existing_device,
POLICIES.legacy, '1', name='foo'):
with self.assertRaises(PartitionLockTimeout):
with self.df_mgr.partition_lock(self.existing_device,
POLICIES.legacy, '1',
name='foo'):
success = True
self.assertFalse(success)
def test_partition_lock_same_partition_different_name(self):
# Double check settings
self.df_mgr.replication_lock_timeout = 0.1
success = False
with self.df_mgr.partition_lock(self.existing_device,
POLICIES.legacy, '1', name='foo'):
with self.df_mgr.partition_lock(self.existing_device,
POLICIES.legacy, '1',
name='bar'):
success = True
self.assertTrue(success)
def test_partition_lock_and_replication_lock_same_partition(self):
# Double check settings
self.df_mgr.replication_lock_timeout = 0.1
success = False
with self.df_mgr.partition_lock(self.existing_device,
POLICIES.legacy, '1',
name='replication'):
with self.assertRaises(PartitionLockTimeout):
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '1'):
success = True
self.assertFalse(success)
success = False
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '1'):
with self.assertRaises(PartitionLockTimeout):
with self.df_mgr.partition_lock(self.existing_device,
POLICIES.legacy, '1',
name='replication'):
success = True
self.assertFalse(success)
def test_missing_splice_warning(self):
with mock.patch('swift.common.splice.splice._c_splice', None):
self.conf['splice'] = 'yes'
mgr = diskfile.DiskFileManager(self.conf, logger=self.logger)
warnings = self.logger.get_lines_for_level('warning')
self.assertGreater(len(warnings), 0)
self.assertTrue('splice()' in warnings[-1])
self.assertFalse(mgr.use_splice)
def test_get_diskfile_from_hash_dev_path_fail(self):
self.df_mgr.get_dev_path = mock.MagicMock(return_value=None)
with mock.patch(self._manager_mock('diskfile_cls')), \
mock.patch(self._manager_mock(
'cleanup_ondisk_files')) as cleanup, \
mock.patch('swift.obj.diskfile.read_metadata') as readmeta:
cleanup.return_value = {'files': ['1381679759.90941.data']}
readmeta.return_value = {'name': '/a/c/o'}
self.assertRaises(
DiskFileDeviceUnavailable,
self.df_mgr.get_diskfile_from_hash,
'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
def test_get_diskfile_from_hash_not_dir(self):
self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
with mock.patch(self._manager_mock('diskfile_cls')), \
mock.patch(self._manager_mock(
'cleanup_ondisk_files')) as cleanup, \
mock.patch('swift.obj.diskfile.read_metadata') as readmeta, \
mock.patch(self._manager_mock(
'quarantine_renamer')) as quarantine_renamer:
osexc = OSError()
osexc.errno = errno.ENOTDIR
cleanup.side_effect = osexc
readmeta.return_value = {'name': '/a/c/o'}
self.assertRaises(
DiskFileNotExist,
self.df_mgr.get_diskfile_from_hash,
'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
quarantine_renamer.assert_called_once_with(
'/srv/dev/',
('/srv/dev/objects/9/900/9a7175077c01a23ade5956b8a2bba900/' +
'made-up-filename'))
def test_get_diskfile_from_hash_no_dir(self):
self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
with mock.patch(self._manager_mock('diskfile_cls')), \
mock.patch(self._manager_mock(
'cleanup_ondisk_files')) as cleanup, \
mock.patch('swift.obj.diskfile.read_metadata') as readmeta:
osexc = OSError()
osexc.errno = errno.ENOENT
cleanup.side_effect = osexc
readmeta.return_value = {'name': '/a/c/o'}
self.assertRaises(
DiskFileNotExist,
self.df_mgr.get_diskfile_from_hash,
'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
def test_get_diskfile_from_hash_other_oserror(self):
self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
with mock.patch(self._manager_mock('diskfile_cls')), \
mock.patch(self._manager_mock(
'cleanup_ondisk_files')) as cleanup, \
mock.patch('swift.obj.diskfile.read_metadata') as readmeta:
osexc = OSError()
cleanup.side_effect = osexc
readmeta.return_value = {'name': '/a/c/o'}
self.assertRaises(
OSError,
self.df_mgr.get_diskfile_from_hash,
'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
def test_get_diskfile_from_hash_no_actual_files(self):
self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
with mock.patch(self._manager_mock('diskfile_cls')), \
mock.patch(self._manager_mock(
'cleanup_ondisk_files')) as cleanup, \
mock.patch('swift.obj.diskfile.read_metadata') as readmeta:
cleanup.return_value = {'files': []}
readmeta.return_value = {'name': '/a/c/o'}
self.assertRaises(
DiskFileNotExist,
self.df_mgr.get_diskfile_from_hash,
'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
def test_get_diskfile_from_hash_read_metadata_problem(self):
self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
with mock.patch(self._manager_mock('diskfile_cls')), \
mock.patch(self._manager_mock(
'cleanup_ondisk_files')) as cleanup, \
mock.patch('swift.obj.diskfile.read_metadata') as readmeta:
cleanup.return_value = {'files': ['1381679759.90941.data']}
readmeta.side_effect = EOFError()
self.assertRaises(
DiskFileNotExist,
self.df_mgr.get_diskfile_from_hash,
'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
def test_get_diskfile_from_hash_no_meta_name(self):
self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
with mock.patch(self._manager_mock('diskfile_cls')), \
mock.patch(self._manager_mock(
'cleanup_ondisk_files')) as cleanup, \
mock.patch('swift.obj.diskfile.read_metadata') as readmeta:
cleanup.return_value = {'files': ['1381679759.90941.data']}
readmeta.return_value = {}
try:
self.df_mgr.get_diskfile_from_hash(
'dev', '9', '9a7175077c01a23ade5956b8a2bba900',
POLICIES[0])
except DiskFileNotExist as err:
exc = err
self.assertEqual(str(exc), '')
def test_get_diskfile_from_hash_bad_meta_name(self):
self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
with mock.patch(self._manager_mock('diskfile_cls')), \
mock.patch(self._manager_mock(
'cleanup_ondisk_files')) as cleanup, \
mock.patch('swift.obj.diskfile.read_metadata') as readmeta:
cleanup.return_value = {'files': ['1381679759.90941.data']}
readmeta.return_value = {'name': 'bad'}
try:
self.df_mgr.get_diskfile_from_hash(
'dev', '9', '9a7175077c01a23ade5956b8a2bba900',
POLICIES[0])
except DiskFileNotExist as err:
exc = err
self.assertEqual(str(exc), '')
def test_get_diskfile_from_hash(self):
self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
with mock.patch(self._manager_mock('diskfile_cls')) as dfclass, \
mock.patch(self._manager_mock(
'cleanup_ondisk_files')) as cleanup, \
mock.patch('swift.obj.diskfile.read_metadata') as readmeta:
cleanup.return_value = {'files': ['1381679759.90941.data']}
readmeta.return_value = {'name': '/a/c/o'}
self.df_mgr.get_diskfile_from_hash(
'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
dfclass.assert_called_once_with(
self.df_mgr, '/srv/dev/', '9',
'a', 'c', 'o', policy=POLICIES[0])
cleanup.assert_called_once_with(
'/srv/dev/objects/9/900/9a7175077c01a23ade5956b8a2bba900')
readmeta.assert_called_once_with(
'/srv/dev/objects/9/900/9a7175077c01a23ade5956b8a2bba900/'
'1381679759.90941.data')
def test_listdir_enoent(self):
oserror = OSError()
oserror.errno = errno.ENOENT
self.df_mgr.logger.error = mock.MagicMock()
with mock.patch('os.listdir', side_effect=oserror):
self.assertEqual(self.df_mgr._listdir('path'), [])
self.assertEqual(self.df_mgr.logger.error.mock_calls, [])
def test_listdir_other_oserror(self):
oserror = OSError()
self.df_mgr.logger.error = mock.MagicMock()
with mock.patch('os.listdir', side_effect=oserror):
self.assertEqual(self.df_mgr._listdir('path'), [])
self.df_mgr.logger.error.assert_called_once_with(
'ERROR: Skipping %r due to error with listdir attempt: %s',
'path', oserror)
def test_listdir(self):
self.df_mgr.logger.error = mock.MagicMock()
with mock.patch('os.listdir', return_value=['abc', 'def']):
self.assertEqual(self.df_mgr._listdir('path'), ['abc', 'def'])
self.assertEqual(self.df_mgr.logger.error.mock_calls, [])
def test_yield_suffixes_dev_path_fail(self):
self.df_mgr.get_dev_path = mock.MagicMock(return_value=None)
exc = None
try:
list(self.df_mgr.yield_suffixes(self.existing_device, '9', 0))
except DiskFileDeviceUnavailable as err:
exc = err
self.assertEqual(str(exc), '')
def test_yield_suffixes(self):
self.df_mgr._listdir = mock.MagicMock(return_value=[
'abc', 'def', 'ghi', 'abcd', '012'])
dev = self.existing_device
self.assertEqual(
list(self.df_mgr.yield_suffixes(dev, '9', POLICIES[0])),
[(self.testdir + '/' + dev + '/objects/9/abc', 'abc'),
(self.testdir + '/' + dev + '/objects/9/def', 'def'),
(self.testdir + '/' + dev + '/objects/9/012', '012')])
def test_yield_hashes_dev_path_fail(self):
self.df_mgr.get_dev_path = mock.MagicMock(return_value=None)
exc = None
try:
list(self.df_mgr.yield_hashes(self.existing_device, '9',
POLICIES[0]))
except DiskFileDeviceUnavailable as err:
exc = err
self.assertEqual(str(exc), '')
def test_yield_hashes_empty(self):
def _listdir(path):
return []
with mock.patch('os.listdir', _listdir):
self.assertEqual(list(self.df_mgr.yield_hashes(
self.existing_device, '9', POLICIES[0])), [])
def test_yield_hashes_cleans_up_everything(self):
the_time = [1525354555.657585]
def mock_time():
return the_time[0]
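        # controllable clock: bumping the_time[0] later in the test ages the
        # tombstones past reclaim_age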
with mock.patch('time.time', mock_time):
# Make a couple of (soon-to-be-)expired tombstones
df1 = self.df_mgr.get_diskfile(
self.existing_device, 0, 'a', 'c', 'o1', POLICIES[0])
df1.delete(Timestamp(the_time[0]))
df1_hash = utils.hash_path('a', 'c', 'o1')
df1_suffix = df1_hash[-3:]
df2 = self.df_mgr.get_diskfile(
self.existing_device, 0, 'a', 'c', 'o2', POLICIES[0])
df2.delete(Timestamp(the_time[0] + 1))
df2_hash = utils.hash_path('a', 'c', 'o2')
df2_suffix = df2_hash[-3:]
# sanity checks
self.assertTrue(os.path.exists(os.path.join(
self.testdir, self.existing_device, 'objects', '0',
df1_suffix, df1_hash,
"1525354555.65758.ts")))
self.assertTrue(os.path.exists(os.path.join(
self.testdir, self.existing_device, 'objects', '0',
df2_suffix, df2_hash,
"1525354556.65758.ts")))
# Cache the hashes and expire the tombstones
self.df_mgr.get_hashes(self.existing_device, '0', [], POLICIES[0])
the_time[0] += 2 * self.df_mgr.reclaim_age
hashes = list(self.df_mgr.yield_hashes(
self.existing_device, '0', POLICIES[0]))
self.assertEqual(hashes, [])
# The tombstones are gone
self.assertFalse(os.path.exists(os.path.join(
self.testdir, self.existing_device, 'objects', '0',
df1_suffix, df1_hash,
"1525354555.65758.ts")))
self.assertFalse(os.path.exists(os.path.join(
self.testdir, self.existing_device, 'objects', '0',
df2_suffix, df2_hash,
"1525354556.65758.ts")))
# The empty hash dirs are gone
self.assertFalse(os.path.exists(os.path.join(
self.testdir, self.existing_device, 'objects', '0',
df1_suffix, df1_hash)))
self.assertFalse(os.path.exists(os.path.join(
self.testdir, self.existing_device, 'objects', '0',
df2_suffix, df2_hash)))
# The empty suffix dirs, and partition are still there
self.assertTrue(os.path.isdir(os.path.join(
self.testdir, self.existing_device, 'objects', '0',
df1_suffix)))
self.assertTrue(os.path.isdir(os.path.join(
self.testdir, self.existing_device, 'objects', '0',
df2_suffix)))
            # but the suffixes are invalidated
part_dir = os.path.join(
self.testdir, self.existing_device, 'objects', '0')
invalidations_file = os.path.join(
part_dir, diskfile.HASH_INVALIDATIONS_FILE)
with open(invalidations_file) as f:
invalids = f.read().splitlines()
self.assertEqual(sorted((df1_suffix, df2_suffix)),
sorted(invalids)) # sanity
        # the next time get_hashes runs...
with mock.patch('time.time', mock_time):
hashes = self.df_mgr.get_hashes(
self.existing_device, '0', [], POLICIES[0])
self.assertEqual(hashes, {})
        # ... the suffixes will get cleaned up
self.assertFalse(os.path.exists(os.path.join(
self.testdir, self.existing_device, 'objects', '0',
df1_suffix)))
self.assertFalse(os.path.exists(os.path.join(
self.testdir, self.existing_device, 'objects', '0',
df2_suffix)))
        # but really it's not diskfile's job to decide if a partition belongs
        # on a node or not
self.assertTrue(os.path.isdir(os.path.join(
self.testdir, self.existing_device, 'objects', '0')))
def test_focused_yield_hashes_does_not_clean_up(self):
the_time = [1525354555.657585]
def mock_time():
return the_time[0]
with mock.patch('time.time', mock_time):
df = self.df_mgr.get_diskfile(
self.existing_device, 0, 'a', 'c', 'o', POLICIES[0])
df.delete(Timestamp(the_time[0]))
df_hash = utils.hash_path('a', 'c', 'o')
df_suffix = df_hash[-3:]
# sanity check
self.assertTrue(os.path.exists(os.path.join(
self.testdir, self.existing_device, 'objects', '0',
df_suffix, df_hash,
"1525354555.65758.ts")))
# Expire the tombstone
the_time[0] += 2 * self.df_mgr.reclaim_age
hashes = list(self.df_mgr.yield_hashes(
self.existing_device, '0', POLICIES[0],
suffixes=[df_suffix]))
self.assertEqual(hashes, [])
# The partition dir is still there. Since we didn't visit all the
# suffix dirs, we didn't learn whether or not the partition dir was
# empty.
self.assertTrue(os.path.exists(os.path.join(
self.testdir, self.existing_device, 'objects', '0')))
def test_yield_hashes_empty_suffixes(self):
def _listdir(path):
return []
with mock.patch('os.listdir', _listdir):
self.assertEqual(
list(self.df_mgr.yield_hashes(self.existing_device, '9',
POLICIES[0],
suffixes=['456'])), [])
def _check_yield_hashes(self, policy, suffix_map, expected, **kwargs):
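        # fake the on-disk layout described by suffix_map via a mocked
        # listdir, then assert yield_hashes yields exactly the expected
        # (hash, timestamps) items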
device = self.existing_device
part = '9'
part_path = os.path.join(
self.testdir, device, diskfile.get_data_dir(policy), part)
def _listdir(path):
if path == part_path:
return suffix_map.keys()
for suff, hash_map in suffix_map.items():
if path == os.path.join(part_path, suff):
return hash_map.keys()
for hash_, files in hash_map.items():
if path == os.path.join(part_path, suff, hash_):
return files
self.fail('Unexpected listdir of %r' % path)
expected_items = [
(hash_, timestamps)
for hash_, timestamps in expected.items()]
with mock.patch('os.listdir', _listdir), \
mock.patch('os.unlink'), \
mock.patch('os.rmdir'):
df_mgr = self.df_router[policy]
hash_items = list(df_mgr.yield_hashes(
device, part, policy, **kwargs))
expected = sorted(expected_items)
actual = sorted(hash_items)
            # the default list diff is easiest to debug
self.assertEqual(actual, expected)
def test_yield_hashes_tombstones(self):
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
ts2 = next(ts_iter)
ts3 = next(ts_iter)
suffix_map = {
'27e': {
'1111111111111111111111111111127e': [
ts1.internal + '.ts'],
'2222222222222222222222222222227e': [
ts2.internal + '.ts'],
},
'd41': {
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaad41': []
},
'd98': {},
'00b': {
'3333333333333333333333333333300b': [
ts1.internal + '.ts',
ts2.internal + '.ts',
ts3.internal + '.ts',
]
},
'204': {
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbb204': [
ts3.internal + '.ts',
]
}
}
expected = {
'1111111111111111111111111111127e': {'ts_data': ts1.internal},
'2222222222222222222222222222227e': {'ts_data': ts2.internal},
'3333333333333333333333333333300b': {'ts_data': ts3.internal},
}
for policy in POLICIES:
self._check_yield_hashes(policy, suffix_map, expected,
suffixes=['27e', '00b'])
@patch_policies
class TestDiskFileManager(DiskFileManagerMixin, unittest.TestCase):
mgr_cls = diskfile.DiskFileManager
def test_get_ondisk_files_with_repl_policy(self):
# Each scenario specifies a list of (filename, extension) tuples. If
# extension is set then that filename should be returned by the method
# under test for that extension type.
scenarios = [[('0000000007.00000.data', '.data')],
[('0000000007.00000.ts', '.ts')],
# older tombstone is ignored
[('0000000007.00000.ts', '.ts'),
('0000000006.00000.ts', False)],
# older data is ignored
[('0000000007.00000.data', '.data'),
('0000000006.00000.data', False),
('0000000004.00000.ts', False)],
# newest meta trumps older meta
[('0000000009.00000.meta', '.meta'),
('0000000008.00000.meta', False),
('0000000007.00000.data', '.data'),
('0000000004.00000.ts', False)],
# meta older than data is ignored
[('0000000007.00000.data', '.data'),
('0000000006.00000.meta', False),
('0000000004.00000.ts', False)],
# meta without data is ignored
[('0000000007.00000.meta', False, True),
('0000000006.00000.ts', '.ts'),
('0000000004.00000.data', False)],
# tombstone trumps meta and data at same timestamp
[('0000000006.00000.meta', False),
('0000000006.00000.ts', '.ts'),
('0000000006.00000.data', False)],
]
self._test_get_ondisk_files(scenarios, POLICIES[0], None)
self._test_cleanup_ondisk_files(scenarios, POLICIES[0])
self._test_yield_hashes_cleanup(scenarios, POLICIES[0])
def test_get_ondisk_files_with_stray_meta(self):
# get_ondisk_files ignores a stray .meta file
class_under_test = self._get_diskfile(POLICIES[0])
files = ['0000000007.00000.meta']
with mock.patch('swift.obj.diskfile.os.listdir', lambda *args: files):
self.assertRaises(DiskFileNotExist, class_under_test.open)
def test_verify_ondisk_files(self):
# ._verify_ondisk_files should only return False if get_ondisk_files
# has produced a bad set of files due to a bug, so to test it we need
# to probe it directly.
mgr = self.df_router[POLICIES.default]
ok_scenarios = (
{'ts_file': None, 'data_file': None, 'meta_file': None},
{'ts_file': None, 'data_file': 'a_file', 'meta_file': None},
{'ts_file': None, 'data_file': 'a_file', 'meta_file': 'a_file'},
{'ts_file': 'a_file', 'data_file': None, 'meta_file': None},
)
for scenario in ok_scenarios:
self.assertTrue(mgr._verify_ondisk_files(scenario),
'Unexpected result for scenario %s' % scenario)
# construct every possible invalid combination of results
vals = (None, 'a_file')
for ts_file, data_file, meta_file in [
(a, b, c) for a in vals for b in vals for c in vals]:
scenario = {
'ts_file': ts_file,
'data_file': data_file,
'meta_file': meta_file}
if scenario in ok_scenarios:
continue
self.assertFalse(mgr._verify_ondisk_files(scenario),
'Unexpected result for scenario %s' % scenario)
def test_parse_on_disk_filename(self):
mgr = self.df_router[POLICIES.default]
for ts in (Timestamp('1234567890.00001'),
Timestamp('1234567890.00001', offset=17)):
for ext in ('.meta', '.data', '.ts'):
fname = '%s%s' % (ts.internal, ext)
info = mgr.parse_on_disk_filename(fname, POLICIES.default)
self.assertEqual(ts, info['timestamp'])
self.assertEqual(ext, info['ext'])
def test_parse_on_disk_filename_errors(self):
mgr = self.df_router[POLICIES.default]
with self.assertRaises(DiskFileError) as cm:
mgr.parse_on_disk_filename('junk', POLICIES.default)
self.assertEqual("Invalid Timestamp value in filename 'junk'",
str(cm.exception))
def test_cleanup_ondisk_files_reclaim_with_data_files(self):
# Each scenario specifies a list of (filename, extension, [survives])
# tuples. If extension is set or 'survives' is True, the filename
# should still be in the dir after cleanup.
much_older = Timestamp(time() - 2000).internal
older = Timestamp(time() - 1001).internal
newer = Timestamp(time() - 900).internal
scenarios = [
# .data files are not reclaimed, ever
[('%s.data' % older, '.data', True)],
[('%s.data' % newer, '.data', True)],
# ... and we could have a mixture of fresh and stale .data
[('%s.data' % newer, '.data', True),
('%s.data' % older, False, False)],
# tombstone reclaimed despite newer data
[('%s.data' % newer, '.data', True),
('%s.data' % older, False, False),
('%s.ts' % much_older, '.ts', False)],
# .meta not reclaimed if there is a .data file
[('%s.meta' % older, '.meta'),
('%s.data' % much_older, '.data')]]
self._test_cleanup_ondisk_files(scenarios, POLICIES.default,
reclaim_age=1000)
def test_yield_hashes(self):
old_ts = '1383180000.12345'
fresh_ts = Timestamp(time() - 10).internal
fresher_ts = Timestamp(time() - 1).internal
suffix_map = {
'abc': {
'9373a92d072897b136b3fc06595b4abc': [
fresh_ts + '.ts'],
},
'456': {
'9373a92d072897b136b3fc06595b0456': [
old_ts + '.data'],
'9373a92d072897b136b3fc06595b7456': [
fresh_ts + '.ts',
fresher_ts + '.data'],
},
'def': {},
}
expected = {
'9373a92d072897b136b3fc06595b4abc': {'ts_data': fresh_ts},
'9373a92d072897b136b3fc06595b0456': {'ts_data': old_ts},
'9373a92d072897b136b3fc06595b7456': {'ts_data': fresher_ts},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected)
def test_yield_hashes_yields_meta_timestamp(self):
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
ts2 = next(ts_iter)
ts3 = next(ts_iter)
suffix_map = {
'abc': {
                # only the tombstone is yield/sync-able
'9333a92d072897b136b3fc06595b4abc': [
ts1.internal + '.ts',
ts2.internal + '.meta'],
},
'456': {
# only latest metadata timestamp
'9444a92d072897b136b3fc06595b0456': [
ts1.internal + '.data',
ts2.internal + '.meta',
ts3.internal + '.meta'],
# exemplary datadir with .meta
'9555a92d072897b136b3fc06595b7456': [
ts1.internal + '.data',
ts2.internal + '.meta'],
},
}
expected = {
'9333a92d072897b136b3fc06595b4abc':
{'ts_data': ts1},
'9444a92d072897b136b3fc06595b0456':
{'ts_data': ts1, 'ts_meta': ts3},
'9555a92d072897b136b3fc06595b7456':
{'ts_data': ts1, 'ts_meta': ts2},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected)
def test_yield_hashes_yields_content_type_timestamp(self):
hash_ = '9373a92d072897b136b3fc06595b4abc'
ts_iter = make_timestamp_iter()
ts0, ts1, ts2, ts3, ts4 = (next(ts_iter) for _ in range(5))
data_file = ts1.internal + '.data'
# no content-type delta
meta_file = ts2.internal + '.meta'
suffix_map = {'abc': {hash_: [data_file, meta_file]}}
expected = {hash_: {'ts_data': ts1,
'ts_meta': ts2}}
self._check_yield_hashes(POLICIES.default, suffix_map, expected)
# non-zero content-type delta
delta = ts3.raw - ts2.raw
meta_file = '%s-%x.meta' % (ts3.internal, delta)
suffix_map = {'abc': {hash_: [data_file, meta_file]}}
expected = {hash_: {'ts_data': ts1,
'ts_meta': ts3,
'ts_ctype': ts2}}
self._check_yield_hashes(POLICIES.default, suffix_map, expected)
# zero content-type delta
meta_file = '%s+0.meta' % ts3.internal
suffix_map = {'abc': {hash_: [data_file, meta_file]}}
expected = {hash_: {'ts_data': ts1,
'ts_meta': ts3,
'ts_ctype': ts3}}
self._check_yield_hashes(POLICIES.default, suffix_map, expected)
# content-type in second meta file
delta = ts3.raw - ts2.raw
meta_file1 = '%s-%x.meta' % (ts3.internal, delta)
meta_file2 = '%s.meta' % ts4.internal
suffix_map = {'abc': {hash_: [data_file, meta_file1, meta_file2]}}
expected = {hash_: {'ts_data': ts1,
'ts_meta': ts4,
'ts_ctype': ts2}}
self._check_yield_hashes(POLICIES.default, suffix_map, expected)
# obsolete content-type in second meta file, older than data file
delta = ts3.raw - ts0.raw
meta_file1 = '%s-%x.meta' % (ts3.internal, delta)
meta_file2 = '%s.meta' % ts4.internal
suffix_map = {'abc': {hash_: [data_file, meta_file1, meta_file2]}}
expected = {hash_: {'ts_data': ts1,
'ts_meta': ts4}}
self._check_yield_hashes(POLICIES.default, suffix_map, expected)
# obsolete content-type in second meta file, same time as data file
delta = ts3.raw - ts1.raw
meta_file1 = '%s-%x.meta' % (ts3.internal, delta)
meta_file2 = '%s.meta' % ts4.internal
suffix_map = {'abc': {hash_: [data_file, meta_file1, meta_file2]}}
expected = {hash_: {'ts_data': ts1,
'ts_meta': ts4}}
self._check_yield_hashes(POLICIES.default, suffix_map, expected)
def test_yield_hashes_suffix_filter(self):
# test again with limited suffixes
old_ts = '1383180000.12345'
fresh_ts = Timestamp(time() - 10).internal
fresher_ts = Timestamp(time() - 1).internal
suffix_map = {
'abc': {
'9373a92d072897b136b3fc06595b4abc': [
fresh_ts + '.ts'],
},
'456': {
'9373a92d072897b136b3fc06595b0456': [
old_ts + '.data'],
'9373a92d072897b136b3fc06595b7456': [
fresh_ts + '.ts',
fresher_ts + '.data'],
},
'def': {},
}
expected = {
'9373a92d072897b136b3fc06595b0456': {'ts_data': old_ts},
'9373a92d072897b136b3fc06595b7456': {'ts_data': fresher_ts},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
suffixes=['456'])
def test_yield_hashes_fails_with_bad_ondisk_filesets(self):
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
suffix_map = {
'456': {
'9373a92d072897b136b3fc06595b0456': [
ts1.internal + '.data'],
'9373a92d072897b136b3fc06595ba456': [
ts1.internal + '.meta'],
},
}
expected = {
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts1},
}
try:
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
self.fail('Expected AssertionError')
except AssertionError:
pass
def test_check_policy(self):
mock_policy = mock.MagicMock()
mock_policy.policy_type = REPL_POLICY
# sanity, DiskFileManager is ok with REPL_POLICY
diskfile.DiskFileManager.check_policy(mock_policy)
# DiskFileManager raises ValueError with EC_POLICY
mock_policy.policy_type = EC_POLICY
with self.assertRaises(ValueError) as cm:
diskfile.DiskFileManager.check_policy(mock_policy)
self.assertEqual('Invalid policy_type: %s' % EC_POLICY,
str(cm.exception))
@patch_policies(with_ec_default=True)
class TestECDiskFileManager(DiskFileManagerMixin, unittest.TestCase):
mgr_cls = diskfile.ECDiskFileManager
def test_get_ondisk_files_with_ec_policy_and_legacy_durable(self):
# Each scenario specifies a list of (filename, extension, [survives])
# tuples. If extension is set then that filename should be returned by
# the method under test for that extension type. If the optional
# 'survives' is True, the filename should still be in the dir after
# cleanup.
scenarios = [
# highest frag index is chosen by default
[('0000000007.00000.durable', '.durable'),
('0000000007.00000#1.data', '.data'),
('0000000007.00000#0.data', False, True)],
# data older than durable is ignored
[('0000000007.00000.durable', '.durable'),
('0000000007.00000#1.data', '.data'),
('0000000006.00000#1.data', False),
('0000000004.00000.ts', False)],
            # data older than durable is ignored, even if it's the only data
[('0000000007.00000.durable', False, False),
('0000000006.00000#1.data', False),
('0000000004.00000.ts', False)],
# newer meta trumps older meta
[('0000000009.00000.meta', '.meta'),
('0000000008.00000.meta', False),
('0000000007.00000.durable', '.durable'),
('0000000007.00000#14.data', '.data'),
('0000000004.00000.ts', False)],
# older meta is ignored
[('0000000007.00000.durable', '.durable'),
('0000000007.00000#14.data', '.data'),
('0000000006.00000.meta', False),
('0000000004.00000.ts', False)],
# tombstone trumps meta, data, durable at older timestamp
[('0000000006.00000.ts', '.ts'),
('0000000005.00000.meta', False),
('0000000004.00000.durable', False),
('0000000004.00000#0.data', False)],
# tombstone trumps meta, data, durable at same timestamp
[('0000000006.00000.meta', False),
('0000000006.00000.ts', '.ts'),
('0000000006.00000.durable', False),
('0000000006.00000#0.data', False)]
]
# these scenarios have same outcome regardless of whether any
# fragment preferences are specified
self._test_get_ondisk_files(scenarios, POLICIES.default,
frag_index=None)
self._test_get_ondisk_files(scenarios, POLICIES.default,
frag_index=None, frag_prefs=[])
self._test_cleanup_ondisk_files(scenarios, POLICIES.default)
self._test_yield_hashes_cleanup(scenarios, POLICIES.default)
# next scenarios have different outcomes dependent on whether a
# frag_prefs parameter is passed to diskfile constructor or not
scenarios = [
# data with no durable is ignored
[('0000000007.00000#0.data', False, True)],
# data newer than tombstone with no durable is ignored
[('0000000007.00000#0.data', False, True),
('0000000006.00000.ts', '.ts', True)],
# data newer than durable is ignored
[('0000000009.00000#2.data', False, True),
('0000000009.00000#1.data', False, True),
('0000000008.00000#3.data', False, True),
('0000000007.00000.durable', '.durable'),
('0000000007.00000#1.data', '.data'),
('0000000007.00000#0.data', False, True)],
            # data newer than durable is ignored, even if it's the only data
[('0000000008.00000#1.data', False, True),
('0000000007.00000.durable', False, False)],
# missing durable invalidates data, older meta deleted
[('0000000007.00000.meta', False, True),
('0000000006.00000#0.data', False, True),
('0000000005.00000.meta', False, False),
('0000000004.00000#1.data', False, True)]]
self._test_get_ondisk_files(scenarios, POLICIES.default,
frag_index=None)
self._test_cleanup_ondisk_files(scenarios, POLICIES.default)
scenarios = [
# data with no durable is chosen
[('0000000007.00000#0.data', '.data', True)],
# data newer than tombstone with no durable is chosen
[('0000000007.00000#0.data', '.data', True),
('0000000006.00000.ts', False, True)],
# data newer than durable is chosen, older data preserved
[('0000000009.00000#2.data', '.data', True),
('0000000009.00000#1.data', False, True),
('0000000008.00000#3.data', False, True),
('0000000007.00000.durable', False, True),
('0000000007.00000#1.data', False, True),
('0000000007.00000#0.data', False, True)],
            # data newer than durable is chosen when it's the only data
[('0000000008.00000#1.data', '.data', True),
('0000000007.00000.durable', False, False)],
# data plus meta chosen without durable, older meta deleted
[('0000000007.00000.meta', '.meta', True),
('0000000006.00000#0.data', '.data', True),
('0000000005.00000.meta', False, False),
('0000000004.00000#1.data', False, True)]]
self._test_get_ondisk_files(scenarios, POLICIES.default,
frag_index=None, frag_prefs=[])
self._test_cleanup_ondisk_files(scenarios, POLICIES.default)
def test_get_ondisk_files_with_ec_policy(self):
# Each scenario specifies a list of (filename, extension, [survives])
# tuples. If extension is set then that filename should be returned by
# the method under test for that extension type. If the optional
# 'survives' is True, the filename should still be in the dir after
# cleanup.
scenarios = [[('0000000007.00000.ts', '.ts')],
[('0000000007.00000.ts', '.ts'),
('0000000006.00000.ts', False)],
# highest frag index is chosen by default
[('0000000007.00000#1#d.data', '.data'),
('0000000007.00000#0.data', False, True)],
# data older than durable is ignored
[('0000000007.00000#1#d.data', '.data'),
('0000000006.00000#1.data', False),
('0000000004.00000.ts', False)],
# newer meta trumps older meta
[('0000000009.00000.meta', '.meta'),
('0000000008.00000.meta', False),
('0000000007.00000#14#d.data', '.data'),
('0000000004.00000.ts', False)],
# older meta is ignored
[('0000000007.00000#14#d.data', '.data'),
('0000000006.00000.meta', False),
('0000000004.00000.ts', False)],
# tombstone trumps meta and data at older timestamp
[('0000000006.00000.ts', '.ts'),
('0000000005.00000.meta', False),
('0000000004.00000#0#d.data', False)],
# tombstone trumps meta and data at same timestamp
[('0000000006.00000.meta', False),
('0000000006.00000.ts', '.ts'),
('0000000006.00000#0#d.data', False)],
]
# these scenarios have same outcome regardless of whether any
# fragment preferences are specified
self._test_get_ondisk_files(scenarios, POLICIES.default,
frag_index=None)
self._test_get_ondisk_files(scenarios, POLICIES.default,
frag_index=None, frag_prefs=[])
self._test_cleanup_ondisk_files(scenarios, POLICIES.default)
self._test_yield_hashes_cleanup(scenarios, POLICIES.default)
# next scenarios have different outcomes dependent on whether a
# frag_prefs parameter is passed to diskfile constructor or not
scenarios = [
# non-durable is ignored
[('0000000007.00000#0.data', False, True)],
# non-durable data newer than tombstone is ignored
[('0000000007.00000#0.data', False, True),
('0000000006.00000.ts', '.ts', True)],
# data newer than durable data is ignored
[('0000000009.00000#2.data', False, True),
('0000000009.00000#1.data', False, True),
('0000000008.00000#3.data', False, True),
('0000000007.00000#1#d.data', '.data'),
('0000000007.00000#0#d.data', False, True)],
# non-durable data ignored, older meta deleted
[('0000000007.00000.meta', False, True),
('0000000006.00000#0.data', False, True),
('0000000005.00000.meta', False, False),
('0000000004.00000#1.data', False, True)]]
self._test_get_ondisk_files(scenarios, POLICIES.default,
frag_index=None)
self._test_cleanup_ondisk_files(scenarios, POLICIES.default)
scenarios = [
# non-durable data is chosen
[('0000000007.00000#0.data', '.data', True)],
# non-durable data newer than tombstone is chosen
[('0000000007.00000#0.data', '.data', True),
('0000000006.00000.ts', False, True)],
# non-durable data newer than durable data is chosen, older data
# preserved
[('0000000009.00000#2.data', '.data', True),
('0000000009.00000#1.data', False, True),
('0000000008.00000#3.data', False, True),
('0000000007.00000#1#d.data', False, True),
('0000000007.00000#0#d.data', False, True)],
# non-durable data plus meta chosen, older meta deleted
[('0000000007.00000.meta', '.meta', True),
('0000000006.00000#0.data', '.data', True),
('0000000005.00000.meta', False, False),
('0000000004.00000#1.data', False, True)]]
self._test_get_ondisk_files(scenarios, POLICIES.default,
frag_index=None, frag_prefs=[])
self._test_cleanup_ondisk_files(scenarios, POLICIES.default)
def test_get_ondisk_files_with_ec_policy_and_frag_index_legacy(self):
# Each scenario specifies a list of (filename, extension, [survives])
# tuples. If extension is set then that filename should be returned by
# the method under test for that extension type.
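        # In the legacy EC layout durability is signalled by a separate
        # '<timestamp>.durable' marker file rather than by a '#d' filename
        # suffix, so e.g. '0000000007.00000.durable' makes all
        # '0000000007.00000#N.data' fragments at that timestamp durable.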
scenarios = [[('0000000007.00000#2.data', False, True),
('0000000007.00000#1.data', '.data'),
('0000000007.00000#0.data', False, True),
('0000000007.00000.durable', '.durable')],
# specific frag newer than durable is ignored
[('0000000007.00000#2.data', False, True),
('0000000007.00000#1.data', False, True),
('0000000007.00000#0.data', False, True),
('0000000006.00000.durable', False)],
# specific frag older than durable is ignored
[('0000000007.00000#2.data', False),
('0000000007.00000#1.data', False),
('0000000007.00000#0.data', False),
('0000000008.00000.durable', False)],
# specific frag older than newest durable is ignored
                     # even if it also has a durable
[('0000000007.00000#2.data', False),
('0000000007.00000#1.data', False),
('0000000007.00000.durable', False),
('0000000008.00000#0.data', False, True),
('0000000008.00000.durable', '.durable')],
# meta included when frag index is specified
[('0000000009.00000.meta', '.meta'),
('0000000007.00000#2.data', False, True),
('0000000007.00000#1.data', '.data'),
('0000000007.00000#0.data', False, True),
('0000000007.00000.durable', '.durable')],
# specific frag older than tombstone is ignored
[('0000000009.00000.ts', '.ts'),
('0000000007.00000#2.data', False),
('0000000007.00000#1.data', False),
('0000000007.00000#0.data', False),
('0000000007.00000.durable', False)],
# no data file returned if specific frag index missing
[('0000000007.00000#2.data', False, True),
('0000000007.00000#14.data', False, True),
('0000000007.00000#0.data', False, True),
('0000000007.00000.durable', '.durable')],
# meta ignored if specific frag index missing
[('0000000008.00000.meta', False, True),
('0000000007.00000#14.data', False, True),
('0000000007.00000#0.data', False, True),
('0000000007.00000.durable', '.durable')],
# meta ignored if no data files
                     # Note: this is anomalous; even though we are specifying
                     # a frag_index, get_ondisk_files will tolerate .meta
                     # with no .data
[('0000000088.00000.meta', False, True),
('0000000077.00000.durable', False, False)]
]
self._test_get_ondisk_files(scenarios, POLICIES.default, frag_index=1)
self._test_cleanup_ondisk_files(scenarios, POLICIES.default)
# scenarios for empty frag_prefs, meaning durable not required
scenarios = [
# specific frag newer than durable is chosen
[('0000000007.00000#2.data', False, True),
('0000000007.00000#1.data', '.data', True),
('0000000007.00000#0.data', False, True),
('0000000006.00000.durable', False, False)],
]
self._test_get_ondisk_files(scenarios, POLICIES.default, frag_index=1,
frag_prefs=[])
self._test_cleanup_ondisk_files(scenarios, POLICIES.default)
def test_get_ondisk_files_with_ec_policy_and_frag_index(self):
# Each scenario specifies a list of (filename, extension, [survives])
# tuples. If extension is set then that filename should be returned by
# the method under test for that extension type.
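        # These scenarios use the current durable marker embedded in the
        # filename ('#<frag>#d.data') rather than a separate legacy .durable
        # file.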
scenarios = [[('0000000007.00000#2#d.data', False, True),
('0000000007.00000#1#d.data', '.data'),
('0000000007.00000#0#d.data', False, True)],
                     # specific frag index 1 is returned as long as at least
                     # one frag at that timestamp is durable
[('0000000007.00000#2.data', False, True),
('0000000007.00000#1.data', '.data', True),
('0000000007.00000#0#d.data', False, True)],
# specific frag newer than durable data is ignored
[('0000000007.00000#2.data', False, True),
('0000000007.00000#1.data', False, True),
('0000000007.00000#0.data', False, True),
('0000000006.00000#0#d.data', False, True)],
# specific frag older than durable data is ignored
[('0000000007.00000#2.data', False),
('0000000007.00000#1.data', False),
('0000000007.00000#0.data', False),
('0000000008.00000#0#d.data', False, True)],
# specific frag older than newest durable data is ignored
                     # even if it is durable
[('0000000007.00000#2#d.data', False),
('0000000007.00000#1#d.data', False),
('0000000008.00000#0#d.data', False, True)],
# meta included when frag index is specified
[('0000000009.00000.meta', '.meta'),
('0000000007.00000#2#d.data', False, True),
('0000000007.00000#1#d.data', '.data'),
('0000000007.00000#0#d.data', False, True)],
# specific frag older than tombstone is ignored
[('0000000009.00000.ts', '.ts'),
('0000000007.00000#2#d.data', False),
('0000000007.00000#1#d.data', False),
('0000000007.00000#0#d.data', False)],
# no data file returned if specific frag index missing
[('0000000007.00000#2#d.data', False, True),
('0000000007.00000#14#d.data', False, True),
('0000000007.00000#0#d.data', False, True)],
# meta ignored if specific frag index missing
[('0000000008.00000.meta', False, True),
('0000000007.00000#14#d.data', False, True),
('0000000007.00000#0#d.data', False, True)],
# meta ignored if no data files
                     # Note: this is anomalous; even though we are specifying
                     # a frag_index, get_ondisk_files will tolerate .meta
                     # with no .data
[('0000000088.00000.meta', False, True)]
]
self._test_get_ondisk_files(scenarios, POLICIES.default, frag_index=1)
self._test_cleanup_ondisk_files(scenarios, POLICIES.default)
# scenarios for empty frag_prefs, meaning durable not required
scenarios = [
# specific frag newer than durable is chosen
[('0000000007.00000#2.data', False, True),
('0000000007.00000#1.data', '.data', True),
('0000000007.00000#0.data', False, True)],
]
self._test_get_ondisk_files(scenarios, POLICIES.default, frag_index=1,
frag_prefs=[])
self._test_cleanup_ondisk_files(scenarios, POLICIES.default)
def test_get_ondisk_files_with_ec_policy_some_legacy(self):
# Test mixture of legacy durable files and durable data files that
# might somehow end up in the same object dir.
# Each scenario specifies a list of (filename, extension, [survives])
# tuples. If extension is set then that filename should be returned by
# the method under test for that extension type. If the optional
# 'survives' is True, the filename should still be in the dir after
# cleanup.
scenarios = [
# .durable at same timestamp is ok
[('0000000007.00000#1#d.data', '.data', True),
('0000000007.00000#0#d.data', False, True),
('0000000007.00000.durable', False, True)],
            # .durable at same timestamp is ok with a non-durable wanted frag
[('0000000007.00000#1.data', '.data', True),
('0000000007.00000#0#d.data', False, True),
('0000000007.00000.durable', False, True)],
# older .durable file is cleaned up
[('0000000007.00000#1#d.data', '.data', True),
('0000000007.00000#0#d.data', False, True),
('0000000006.00000.durable', False, False)],
            # older .durable does not interfere with a non-durable wanted frag
[('0000000007.00000#1.data', '.data', True),
('0000000007.00000#0#d.data', False, True),
('0000000006.00000.durable', False, False)],
# ...even if it has accompanying .data file
[('0000000007.00000#1.data', '.data', True),
('0000000007.00000#0#d.data', False, True),
('0000000006.00000#0.data', False, False),
('0000000006.00000.durable', False, False)],
# newer .durable file trumps older durable-data
[('0000000007.00000#1#d.data', False, False),
('0000000007.00000#0#d.data', False, False),
('0000000008.00000#1.data', '.data', True),
('0000000008.00000.durable', False, True)],
# newer .durable file with no .data trumps older durable-data
[('0000000007.00000#1#d.data', False, False),
('0000000007.00000#0#d.data', False, False),
('0000000008.00000.durable', False, False)],
]
self._test_get_ondisk_files(scenarios, POLICIES.default, frag_index=1)
self._test_cleanup_ondisk_files(scenarios, POLICIES.default)
self._test_yield_hashes_cleanup(scenarios, POLICIES.default)
def test_cleanup_ondisk_files_reclaim_with_data_files_legacy_durable(self):
# Each scenario specifies a list of (filename, extension, [survives])
# tuples. If extension is set or 'survives' is True, the filename
# should still be in the dir after cleanup.
much_older = Timestamp(time() - 2000).internal
older = Timestamp(time() - 1001).internal
newer = Timestamp(time() - 900).internal
scenarios = [
# isolated legacy .durable is cleaned up immediately
[('%s.durable' % newer, False, False)],
# ...even when other older files are in dir
[('%s.durable' % older, False, False),
('%s.ts' % much_older, False, False)],
# isolated .data files are cleaned up when stale
# ...even when there is an older legacy durable
[('%s#2.data' % older, False, False),
('%s#4.data' % older, False, False),
('%s#2.data' % much_older, '.data', True),
('%s#4.data' % much_older, False, True),
('%s.durable' % much_older, '.durable', True)],
# tombstone reclaimed despite much older legacy durable
[('%s.ts' % older, '.ts', False),
('%s.durable' % much_older, False, False)],
# .meta not reclaimed if there is legacy durable data
[('%s.meta' % older, '.meta', True),
('%s#4.data' % much_older, False, True),
('%s.durable' % much_older, '.durable', True)],
# stale .meta reclaimed along with stale legacy .durable
[('%s.meta' % older, False, False),
('%s.durable' % much_older, False, False)]]
self._test_cleanup_ondisk_files(scenarios, POLICIES.default,
reclaim_age=1000)
def test_cleanup_ondisk_files_reclaim_with_data_files(self):
# Each scenario specifies a list of (filename, extension, [survives])
# tuples. If extension is set or 'survives' is True, the filename
# should still be in the dir after cleanup.
much_older = Timestamp(time() - 2000).internal
older = Timestamp(time() - 1001).internal
newer = Timestamp(time() - 900).internal
scenarios = [
# isolated .data files are cleaned up when stale
[('%s#2.data' % older, False, False),
('%s#4.data' % older, False, False)],
# ...even when there is an older durable fileset
[('%s#2.data' % older, False, False),
('%s#4.data' % older, False, False),
('%s#2#d.data' % much_older, '.data', True),
('%s#4#d.data' % much_older, False, True)],
# ... but preserved if still fresh
[('%s#2.data' % newer, False, True),
('%s#4.data' % newer, False, True)],
# ... and we could have a mixture of fresh and stale .data
[('%s#2.data' % newer, False, True),
('%s#4.data' % older, False, False)],
# tombstone reclaimed despite newer non-durable data
[('%s#2.data' % newer, False, True),
('%s#4.data' % older, False, False),
('%s.ts' % much_older, '.ts', False)],
# tombstone reclaimed despite much older durable
[('%s.ts' % older, '.ts', False),
('%s#4#d.data' % much_older, False, False)],
# .meta not reclaimed if there is durable data
[('%s.meta' % older, '.meta', True),
('%s#4#d.data' % much_older, False, True)],
# stale .meta reclaimed along with stale non-durable .data
[('%s.meta' % older, False, False),
('%s#4.data' % much_older, False, False)]]
self._test_cleanup_ondisk_files(scenarios, POLICIES.default,
reclaim_age=1000)
def test_get_ondisk_files_with_stray_meta(self):
# get_ondisk_files ignores a stray .meta file
class_under_test = self._get_diskfile(POLICIES.default)
@contextmanager
def create_files(df, files):
os.makedirs(df._datadir)
for fname in files:
fpath = os.path.join(df._datadir, fname)
with open(fpath, 'w') as f:
diskfile.write_metadata(f, {'name': df._name,
'Content-Length': 0})
yield
rmtree(df._datadir, ignore_errors=True)
# sanity
good_files = [
'0000000006.00000.meta',
'0000000006.00000#1#d.data'
]
with create_files(class_under_test, good_files):
class_under_test.open()
scenarios = [['0000000007.00000.meta'],
['0000000007.00000.meta',
'0000000006.00000.durable'], # legacy durable file
['0000000007.00000.meta',
'0000000006.00000#1.data'],
['0000000007.00000.meta',
'0000000006.00000.durable', # legacy durable file
'0000000005.00000#1.data']
]
for files in scenarios:
with create_files(class_under_test, files):
try:
class_under_test.open()
except DiskFileNotExist:
continue
self.fail('expected DiskFileNotExist opening %s with %r' % (
class_under_test.__class__.__name__, files))
# Simulate another process deleting the data after we list contents
# but before we actually open them
orig_listdir = os.listdir
def deleting_listdir(d):
result = orig_listdir(d)
for f in result:
os.unlink(os.path.join(d, f))
return result
with create_files(class_under_test, good_files), \
mock.patch('swift.obj.diskfile.os.listdir',
side_effect=deleting_listdir), \
self.assertRaises(DiskFileNotExist):
class_under_test.open()
def test_verify_ondisk_files(self):
# _verify_ondisk_files should only return False if get_ondisk_files
# has produced a bad set of files due to a bug, so to test it we need
# to probe it directly.
mgr = self.df_router[POLICIES.default]
ok_scenarios = (
{'ts_file': None, 'data_file': None, 'meta_file': None,
'durable_frag_set': None},
{'ts_file': None, 'data_file': 'a_file', 'meta_file': None,
'durable_frag_set': ['a_file']},
{'ts_file': None, 'data_file': 'a_file', 'meta_file': 'a_file',
'durable_frag_set': ['a_file']},
{'ts_file': 'a_file', 'data_file': None, 'meta_file': None,
'durable_frag_set': None},
)
for scenario in ok_scenarios:
self.assertTrue(mgr._verify_ondisk_files(scenario),
'Unexpected result for scenario %s' % scenario)
# construct every possible invalid combination of results
vals = (None, 'a_file')
for ts_file, data_file, meta_file, durable_frag in [
(a, b, c, d)
for a in vals for b in vals for c in vals for d in vals]:
scenario = {
'ts_file': ts_file,
'data_file': data_file,
'meta_file': meta_file,
'durable_frag_set': [durable_frag] if durable_frag else None}
if scenario in ok_scenarios:
continue
self.assertFalse(mgr._verify_ondisk_files(scenario),
'Unexpected result for scenario %s' % scenario)
def test_parse_on_disk_filename(self):
mgr = self.df_router[POLICIES.default]
for ts in (Timestamp('1234567890.00001'),
Timestamp('1234567890.00001', offset=17)):
# non-durable data file
for frag in (0, 2, 13):
fname = '%s#%s.data' % (ts.internal, frag)
info = mgr.parse_on_disk_filename(fname, POLICIES.default)
self.assertEqual(ts, info['timestamp'])
self.assertEqual('.data', info['ext'])
self.assertEqual(frag, info['frag_index'])
self.assertIs(False, info['durable'])
self.assertEqual(mgr.make_on_disk_filename(**info), fname)
# durable data file
for frag in (0, 2, 13):
fname = '%s#%s#d.data' % (ts.internal, frag)
info = mgr.parse_on_disk_filename(fname, POLICIES.default)
self.assertEqual(ts, info['timestamp'])
self.assertEqual('.data', info['ext'])
self.assertEqual(frag, info['frag_index'])
self.assertIs(True, info['durable'])
self.assertEqual(mgr.make_on_disk_filename(**info), fname)
            # data file with an unexpected suffix marker is not an error, in
            # case alternative marker suffixes are added in the future
for frag in (0, 2, 13):
fname = '%s#%s#junk.data' % (ts.internal, frag)
info = mgr.parse_on_disk_filename(fname, POLICIES.default)
self.assertEqual(ts, info['timestamp'])
self.assertEqual('.data', info['ext'])
self.assertEqual(frag, info['frag_index'])
self.assertIs(False, info['durable'])
expected = '%s#%s.data' % (ts.internal, frag)
self.assertEqual(mgr.make_on_disk_filename(**info), expected)
for ext in ('.meta', '.durable', '.ts'):
fname = '%s%s' % (ts.internal, ext)
info = mgr.parse_on_disk_filename(fname, POLICIES.default)
self.assertEqual(ts, info['timestamp'])
self.assertEqual(ext, info['ext'])
self.assertIsNone(info['frag_index'])
self.assertEqual(mgr.make_on_disk_filename(**info), fname)
def test_parse_on_disk_filename_errors(self):
mgr = self.df_router[POLICIES.default]
for ts in (Timestamp('1234567890.00001'),
Timestamp('1234567890.00001', offset=17)):
fname = '%s.data' % ts.internal
with self.assertRaises(DiskFileError) as cm:
mgr.parse_on_disk_filename(fname, POLICIES.default)
self.assertTrue(str(cm.exception).startswith("Bad fragment index"))
expected = {
'': 'bad',
'foo': 'bad',
'1.314': 'bad',
1.314: 'bad',
-2: 'negative',
'-2': 'negative',
None: 'bad',
'None': 'bad',
}
# non-durable data file
for frag, msg in expected.items():
fname = '%s#%s.data' % (ts.internal, frag)
with self.assertRaises(DiskFileError) as cm:
mgr.parse_on_disk_filename(fname, POLICIES.default)
self.assertIn(msg, str(cm.exception).lower())
# durable data file
for frag, msg in expected.items():
fname = '%s#%s#d.data' % (ts.internal, frag)
with self.assertRaises(DiskFileError) as cm:
mgr.parse_on_disk_filename(fname, POLICIES.default)
self.assertIn(msg, str(cm.exception).lower())
with self.assertRaises(DiskFileError) as cm:
mgr.parse_on_disk_filename('junk', POLICIES.default)
self.assertEqual("Invalid Timestamp value in filename 'junk'",
str(cm.exception))
def test_make_on_disk_filename(self):
mgr = self.df_router[POLICIES.default]
for ts in (Timestamp('1234567890.00001'),
Timestamp('1234567890.00001', offset=17)):
for frag in (0, '0', 2, '2', 13, '13'):
for durable in (True, False):
expected = _make_datafilename(
ts, POLICIES.default, frag_index=frag, durable=durable)
actual = mgr.make_on_disk_filename(
ts, '.data', frag_index=frag, durable=durable)
self.assertEqual(expected, actual)
parsed = mgr.parse_on_disk_filename(
actual, POLICIES.default)
self.assertEqual(parsed, {
'timestamp': ts,
'frag_index': int(frag),
'ext': '.data',
'ctype_timestamp': None,
'durable': durable
})
                    # these functions are inverses of each other
self.assertEqual(
mgr.make_on_disk_filename(**parsed),
expected)
for ext in ('.meta', '.durable', '.ts'):
expected = '%s%s' % (ts.internal, ext)
# frag index should not be required
actual = mgr.make_on_disk_filename(ts, ext)
self.assertEqual(expected, actual)
# frag index should be ignored
actual = mgr.make_on_disk_filename(
ts, ext, frag_index=frag)
self.assertEqual(expected, actual)
parsed = mgr.parse_on_disk_filename(
actual, POLICIES.default)
self.assertEqual(parsed, {
'timestamp': ts,
'frag_index': None,
'ext': ext,
'ctype_timestamp': None
})
                # these functions are inverses of each other
self.assertEqual(
mgr.make_on_disk_filename(**parsed),
expected)
actual = mgr.make_on_disk_filename(ts)
self.assertEqual(ts, actual)
def test_make_on_disk_filename_with_bad_frag_index(self):
mgr = self.df_router[POLICIES.default]
ts = Timestamp('1234567890.00001')
with self.assertRaises(DiskFileError):
# .data requires a frag_index kwarg
mgr.make_on_disk_filename(ts, '.data')
for frag in (None, 'foo', '1.314', 1.314, -2, '-2'):
with self.assertRaises(DiskFileError):
mgr.make_on_disk_filename(ts, '.data', frag_index=frag)
for ext in ('.meta', '.durable', '.ts'):
expected = '%s%s' % (ts.internal, ext)
# bad frag index should be ignored
actual = mgr.make_on_disk_filename(ts, ext, frag_index=frag)
self.assertEqual(expected, actual)
def test_make_on_disk_filename_for_meta_with_content_type(self):
# verify .meta filename encodes content-type timestamp
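        # (the content-type timestamp is appended as a signed hex offset from
        # the meta timestamp, e.g. a content-type timestamp 1s older than the
        # meta timestamp encodes as '<meta short>-186a0.meta', since
        # 1s == 100000 ticks of 10**-5s == 0x186a0)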
mgr = self.df_router[POLICIES.default]
time_ = 1234567890.00001
for delta in (0, 1, 111111):
t_meta = Timestamp(time_)
t_type = Timestamp(time_ - delta / 100000.)
sign = '-' if delta else '+'
expected = '%s%s%x.meta' % (t_meta.short, sign, delta)
actual = mgr.make_on_disk_filename(
t_meta, '.meta', ctype_timestamp=t_type)
self.assertEqual(expected, actual)
parsed = mgr.parse_on_disk_filename(actual, POLICIES.default)
self.assertEqual(parsed, {
'timestamp': t_meta,
'frag_index': None,
'ext': '.meta',
'ctype_timestamp': t_type
})
            # these functions are inverses of each other
self.assertEqual(
mgr.make_on_disk_filename(**parsed),
expected)
def test_yield_hashes_legacy_durable(self):
old_ts = Timestamp('1383180000.12345')
fresh_ts = Timestamp(time() - 10)
fresher_ts = Timestamp(time() - 1)
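        # suffix_map maps each suffix dir to {object hash: [filenames to
        # create]}; 'expected' maps each object hash to the timestamp info
        # that yield_hashes is expected to report for it.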
suffix_map = {
'abc': {
'9373a92d072897b136b3fc06595b4abc': [
fresh_ts.internal + '.ts'],
},
'456': {
'9373a92d072897b136b3fc06595b0456': [
old_ts.internal + '#2.data',
old_ts.internal + '.durable'],
'9373a92d072897b136b3fc06595b7456': [
fresh_ts.internal + '.ts',
fresher_ts.internal + '#2.data',
fresher_ts.internal + '.durable'],
},
'def': {},
}
expected = {
'9373a92d072897b136b3fc06595b4abc': {'ts_data': fresh_ts},
'9373a92d072897b136b3fc06595b0456': {'ts_data': old_ts,
'durable': True},
'9373a92d072897b136b3fc06595b7456': {'ts_data': fresher_ts,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
def test_yield_hashes(self):
old_ts = Timestamp('1383180000.12345')
fresh_ts = Timestamp(time() - 10)
fresher_ts = Timestamp(time() - 1)
suffix_map = {
'abc': {
'9373a92d072897b136b3fc06595b4abc': [
fresh_ts.internal + '.ts'],
},
'456': {
'9373a92d072897b136b3fc06595b0456': [
old_ts.internal + '#2#d.data'],
'9373a92d072897b136b3fc06595b7456': [
fresh_ts.internal + '.ts',
fresher_ts.internal + '#2#d.data'],
},
'def': {},
}
expected = {
'9373a92d072897b136b3fc06595b4abc': {'ts_data': fresh_ts},
'9373a92d072897b136b3fc06595b0456': {'ts_data': old_ts,
'durable': True},
'9373a92d072897b136b3fc06595b7456': {'ts_data': fresher_ts,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
def test_yield_hashes_yields_meta_timestamp_legacy_durable(self):
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
ts2 = next(ts_iter)
ts3 = next(ts_iter)
suffix_map = {
'abc': {
'9373a92d072897b136b3fc06595b4abc': [
ts1.internal + '.ts',
ts2.internal + '.meta'],
},
'456': {
'9373a92d072897b136b3fc06595b0456': [
ts1.internal + '#2.data',
ts1.internal + '.durable',
ts2.internal + '.meta',
ts3.internal + '.meta'],
'9373a92d072897b136b3fc06595b7456': [
ts1.internal + '#2.data',
ts1.internal + '.durable',
ts2.internal + '.meta'],
},
}
expected = {
'9373a92d072897b136b3fc06595b4abc': {'ts_data': ts1},
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts1,
'ts_meta': ts3,
'durable': True},
'9373a92d072897b136b3fc06595b7456': {'ts_data': ts1,
'ts_meta': ts2,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected)
# but meta timestamp is *not* returned if specified frag index
# is not found
expected = {
'9373a92d072897b136b3fc06595b4abc': {'ts_data': ts1},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=3)
def test_yield_hashes_yields_meta_timestamp(self):
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
ts2 = next(ts_iter)
ts3 = next(ts_iter)
suffix_map = {
'abc': {
'9373a92d072897b136b3fc06595b4abc': [
ts1.internal + '.ts',
ts2.internal + '.meta'],
},
'456': {
'9373a92d072897b136b3fc06595b0456': [
ts1.internal + '#2#d.data',
ts2.internal + '.meta',
ts3.internal + '.meta'],
'9373a92d072897b136b3fc06595b7456': [
ts1.internal + '#2#d.data',
ts2.internal + '.meta'],
},
}
expected = {
'9373a92d072897b136b3fc06595b4abc': {'ts_data': ts1},
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts1,
'ts_meta': ts3,
'durable': True},
'9373a92d072897b136b3fc06595b7456': {'ts_data': ts1,
'ts_meta': ts2,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected)
# but meta timestamp is *not* returned if specified frag index
# is not found
expected = {
'9373a92d072897b136b3fc06595b4abc': {'ts_data': ts1},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=3)
def test_yield_hashes_suffix_filter_legacy_durable(self):
# test again with limited suffixes
old_ts = '1383180000.12345'
fresh_ts = Timestamp(time() - 10).internal
fresher_ts = Timestamp(time() - 1).internal
suffix_map = {
'abc': {
'9373a92d072897b136b3fc06595b4abc': [
fresh_ts + '.ts'],
},
'456': {
'9373a92d072897b136b3fc06595b0456': [
old_ts + '#2.data',
old_ts + '.durable'],
'9373a92d072897b136b3fc06595b7456': [
fresh_ts + '.ts',
fresher_ts + '#2.data',
fresher_ts + '.durable'],
},
'def': {},
}
expected = {
'9373a92d072897b136b3fc06595b0456': {'ts_data': old_ts,
'durable': True},
'9373a92d072897b136b3fc06595b7456': {'ts_data': fresher_ts,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
suffixes=['456'], frag_index=2)
def test_yield_hashes_suffix_filter(self):
# test again with limited suffixes
old_ts = '1383180000.12345'
fresh_ts = Timestamp(time() - 10).internal
fresher_ts = Timestamp(time() - 1).internal
suffix_map = {
'abc': {
'9373a92d072897b136b3fc06595b4abc': [
fresh_ts + '.ts'],
},
'456': {
'9373a92d072897b136b3fc06595b0456': [
old_ts + '#2#d.data'],
'9373a92d072897b136b3fc06595b7456': [
fresh_ts + '.ts',
fresher_ts + '#2#d.data'],
},
'def': {},
}
expected = {
'9373a92d072897b136b3fc06595b0456': {'ts_data': old_ts,
'durable': True},
'9373a92d072897b136b3fc06595b7456': {'ts_data': fresher_ts,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
suffixes=['456'], frag_index=2)
def test_yield_hashes_skips_non_durable_data(self):
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
suffix_map = {
'456': {
'9373a92d072897b136b3fc06595b0456': [
ts1.internal + '#2#d.data'],
'9373a92d072897b136b3fc06595b7456': [
ts1.internal + '#2.data'],
},
}
expected = {
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts1,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
        # if we make the data durable it shows up
suffix_map['456']['9373a92d072897b136b3fc06595b7456'] = [
ts1.internal + '#2#d.data']
expected = {
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts1,
'durable': True},
'9373a92d072897b136b3fc06595b7456': {'ts_data': ts1,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
def test_yield_hashes_optionally_yields_non_durable_data(self):
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
ts2 = next(ts_iter)
suffix_map = {
'abc': {
'9373a92d072897b136b3fc06595b4abc': [
ts1.internal + '#2#d.data',
ts2.internal + '#2.data'], # newer non-durable
'9373a92d072897b136b3fc06595b0abc': [
ts1.internal + '#2.data', # older non-durable
ts2.internal + '#2#d.data'],
},
'456': {
'9373a92d072897b136b3fc06595b0456': [
ts1.internal + '#2#d.data'],
'9373a92d072897b136b3fc06595b7456': [
ts2.internal + '#2.data'],
},
}
# sanity check non-durables not yielded
expected = {
'9373a92d072897b136b3fc06595b4abc': {'ts_data': ts1,
'durable': True},
'9373a92d072897b136b3fc06595b0abc': {'ts_data': ts2,
'durable': True},
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts1,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2, frag_prefs=None)
# an empty frag_prefs list is sufficient to get non-durables yielded
# (in preference over *older* durable)
expected = {
'9373a92d072897b136b3fc06595b4abc': {'ts_data': ts2,
'durable': False},
'9373a92d072897b136b3fc06595b0abc': {'ts_data': ts2,
'durable': True},
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts1,
'durable': True},
'9373a92d072897b136b3fc06595b7456': {'ts_data': ts2,
'durable': False},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2, frag_prefs=[])
def test_yield_hashes_skips_missing_legacy_durable(self):
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
suffix_map = {
'456': {
'9373a92d072897b136b3fc06595b0456': [
ts1.internal + '#2.data',
ts1.internal + '.durable'],
'9373a92d072897b136b3fc06595b7456': [
ts1.internal + '#2.data'],
},
}
expected = {
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts1,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
# if we add a durable it shows up
suffix_map['456']['9373a92d072897b136b3fc06595b7456'].append(
ts1.internal + '.durable')
expected = {
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts1,
'durable': True},
'9373a92d072897b136b3fc06595b7456': {'ts_data': ts1,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
def test_yield_hashes_skips_newer_data_without_legacy_durable(self):
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
ts2 = next(ts_iter)
ts3 = next(ts_iter)
suffix_map = {
'456': {
'9373a92d072897b136b3fc06595b0456': [
ts1.internal + '#2.data',
ts1.internal + '.durable',
ts2.internal + '#2.data',
ts3.internal + '#2.data'],
},
}
expected = {
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts1,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=None)
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
# if we add a durable then newer data shows up
suffix_map['456']['9373a92d072897b136b3fc06595b0456'].append(
ts2.internal + '.durable')
expected = {
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts2,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=None)
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
def test_yield_hashes_skips_newer_non_durable_data(self):
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
ts2 = next(ts_iter)
ts3 = next(ts_iter)
suffix_map = {
'456': {
'9373a92d072897b136b3fc06595b0456': [
ts1.internal + '#2#d.data',
ts2.internal + '#2.data',
ts3.internal + '#2.data'],
},
}
expected = {
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts1,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=None)
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
# if we make it durable then newer data shows up
suffix_map = {
'456': {
'9373a92d072897b136b3fc06595b0456': [
ts1.internal + '#2#d.data',
ts2.internal + '#2#d.data',
ts3.internal + '#2.data'],
},
}
expected = {
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts2,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=None)
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
def test_yield_hashes_ignores_bad_ondisk_filesets(self):
# this differs from DiskFileManager.yield_hashes which will fail
# when encountering a bad on-disk file set
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
ts2 = next(ts_iter)
suffix_map = {
'456': {
# this one is fine
'9333a92d072897b136b3fc06595b0456': [
ts1.internal + '#2#d.data'],
# this one is fine, legacy durable
'9333a92d072897b136b3fc06595b1456': [
ts1.internal + '#2.data',
ts1.internal + '.durable'],
# missing frag index
'9444a92d072897b136b3fc06595b7456': [
ts1.internal + '.data'],
# junk
'9555a92d072897b136b3fc06595b8456': [
'junk_file'],
# not durable
'9666a92d072897b136b3fc06595b9456': [
ts1.internal + '#2.data',
ts2.internal + '.meta'],
# .meta files w/o .data files can't be opened, and are ignored
'9777a92d072897b136b3fc06595ba456': [
ts1.internal + '.meta'],
# multiple meta files with no data
'9888a92d072897b136b3fc06595bb456': [
ts1.internal + '.meta',
ts2.internal + '.meta'],
# this is good with meta
'9999a92d072897b136b3fc06595bb456': [
ts1.internal + '#2#d.data',
ts2.internal + '.meta'],
# this is good with meta, legacy durable
'9999a92d072897b136b3fc06595bc456': [
ts1.internal + '#2.data',
ts1.internal + '.durable',
ts2.internal + '.meta'],
                # this one has the wrong frag index
'9aaaa92d072897b136b3fc06595b0456': [
ts1.internal + '#7#d.data'],
                # this one has the wrong frag index, legacy durable
'9aaaa92d072897b136b3fc06595b1456': [
ts1.internal + '#7.data',
ts1.internal + '.durable'],
},
}
expected = {
'9333a92d072897b136b3fc06595b0456': {'ts_data': ts1,
'durable': True},
'9999a92d072897b136b3fc06595bb456': {'ts_data': ts1,
'ts_meta': ts2,
'durable': True},
'9333a92d072897b136b3fc06595b1456': {'ts_data': ts1,
'durable': True},
'9999a92d072897b136b3fc06595bc456': {'ts_data': ts1,
'ts_meta': ts2,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
def test_yield_hashes_filters_frag_index(self):
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
ts2 = next(ts_iter)
ts3 = next(ts_iter)
suffix_map = {
'27e': {
'1111111111111111111111111111127e': [
ts1.internal + '#2#d.data',
ts1.internal + '#3#d.data',
],
'2222222222222222222222222222227e': [
ts1.internal + '#2#d.data',
ts2.internal + '#2#d.data',
],
},
'd41': {
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaad41': [
ts1.internal + '#3#d.data',
],
},
'00b': {
'3333333333333333333333333333300b': [
ts1.internal + '#2.data',
ts2.internal + '#2.data',
ts3.internal + '#2#d.data',
],
},
}
expected = {
'1111111111111111111111111111127e': {'ts_data': ts1,
'durable': True},
'2222222222222222222222222222227e': {'ts_data': ts2,
'durable': True},
'3333333333333333333333333333300b': {'ts_data': ts3,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
def test_yield_hashes_filters_frag_index_legacy_durable(self):
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
ts2 = next(ts_iter)
ts3 = next(ts_iter)
suffix_map = {
'27e': {
'1111111111111111111111111111127e': [
ts1.internal + '#2.data',
ts1.internal + '#3.data',
ts1.internal + '.durable',
],
'2222222222222222222222222222227e': [
ts1.internal + '#2.data',
ts1.internal + '.durable',
ts2.internal + '#2.data',
ts2.internal + '.durable',
],
},
'd41': {
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaad41': [
ts1.internal + '#3.data',
ts1.internal + '.durable',
],
},
'00b': {
'3333333333333333333333333333300b': [
ts1.internal + '#2.data',
ts2.internal + '#2.data',
ts3.internal + '#2.data',
ts3.internal + '.durable',
],
},
}
expected = {
'1111111111111111111111111111127e': {'ts_data': ts1,
'durable': True},
'2222222222222222222222222222227e': {'ts_data': ts2,
'durable': True},
'3333333333333333333333333333300b': {'ts_data': ts3,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
def _test_get_diskfile_from_hash_frag_index_filter(self, legacy_durable):
df = self._get_diskfile(POLICIES.default)
hash_ = os.path.basename(df._datadir)
self.assertRaises(DiskFileNotExist,
self.df_mgr.get_diskfile_from_hash,
self.existing_device, '0', hash_,
POLICIES.default) # sanity
timestamp = Timestamp.now()
for frag_index in (4, 7):
write_diskfile(df, timestamp, frag_index=frag_index,
legacy_durable=legacy_durable)
df4 = self.df_mgr.get_diskfile_from_hash(
self.existing_device, '0', hash_, POLICIES.default, frag_index=4)
self.assertEqual(df4._frag_index, 4)
self.assertEqual(
df4.read_metadata()['X-Object-Sysmeta-Ec-Frag-Index'], '4')
df7 = self.df_mgr.get_diskfile_from_hash(
self.existing_device, '0', hash_, POLICIES.default, frag_index=7)
self.assertEqual(df7._frag_index, 7)
self.assertEqual(
df7.read_metadata()['X-Object-Sysmeta-Ec-Frag-Index'], '7')
def test_get_diskfile_from_hash_frag_index_filter(self):
self._test_get_diskfile_from_hash_frag_index_filter(False)
def test_get_diskfile_from_hash_frag_index_filter_legacy_durable(self):
self._test_get_diskfile_from_hash_frag_index_filter(True)
def test_check_policy(self):
mock_policy = mock.MagicMock()
mock_policy.policy_type = EC_POLICY
# sanity, ECDiskFileManager is ok with EC_POLICY
diskfile.ECDiskFileManager.check_policy(mock_policy)
# ECDiskFileManager raises ValueError with REPL_POLICY
mock_policy.policy_type = REPL_POLICY
with self.assertRaises(ValueError) as cm:
diskfile.ECDiskFileManager.check_policy(mock_policy)
self.assertEqual('Invalid policy_type: %s' % REPL_POLICY,
str(cm.exception))
class DiskFileMixin(BaseDiskFileTestMixin):
def ts(self):
"""
Timestamps - forever.
"""
return next(self._ts_iter)
def _create_ondisk_file(self, df, data, timestamp, metadata=None,
ctype_timestamp=None,
ext='.data', legacy_durable=False, commit=True):
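        # (Writes a file straight into the diskfile's datadir, bypassing the
        # DiskFileWriter, so tests can construct arbitrary on-disk states.
        # For EC data files, commit=True marks the fragment durable: via the
        # '#d' filename suffix, or via a separate .durable file when
        # legacy_durable=True.)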
mkdirs(df._datadir)
if timestamp is None:
timestamp = time()
timestamp = Timestamp(timestamp)
if not metadata:
metadata = {}
if 'X-Timestamp' not in metadata:
metadata['X-Timestamp'] = timestamp.internal
if 'ETag' not in metadata:
etag = md5()
etag.update(data)
metadata['ETag'] = etag.hexdigest()
if 'name' not in metadata:
metadata['name'] = '/a/c/o'
if 'Content-Length' not in metadata:
metadata['Content-Length'] = str(len(data))
filename = timestamp.internal
if ext == '.data' and df.policy.policy_type == EC_POLICY:
if legacy_durable:
filename = '%s#%s' % (timestamp.internal, df._frag_index)
if commit:
durable_file = os.path.join(
df._datadir, '%s.durable' % timestamp.internal)
with open(durable_file, 'wb') as f:
pass
elif commit:
filename = '%s#%s#d' % (timestamp.internal, df._frag_index)
else:
filename = '%s#%s' % (timestamp.internal, df._frag_index)
if ctype_timestamp:
metadata.update(
{'Content-Type-Timestamp':
Timestamp(ctype_timestamp).internal})
filename = encode_timestamps(timestamp,
Timestamp(ctype_timestamp),
explicit=True)
data_file = os.path.join(df._datadir, filename + ext)
with open(data_file, 'wb') as f:
f.write(data)
xattr.setxattr(f.fileno(), diskfile.METADATA_KEY,
pickle.dumps(metadata, diskfile.PICKLE_PROTOCOL))
def _simple_get_diskfile(self, partition='0', account='a', container='c',
obj='o', policy=None, frag_index=None, **kwargs):
policy = policy or POLICIES.default
df_mgr = self.df_router[policy]
if policy.policy_type == EC_POLICY and frag_index is None:
frag_index = 2
return df_mgr.get_diskfile(self.existing_device, partition,
account, container, obj,
policy=policy, frag_index=frag_index,
**kwargs)
def _create_test_file(self, data, timestamp=None, metadata=None,
account='a', container='c', obj='o', **kwargs):
if not isinstance(data, bytes):
raise ValueError('data must be bytes')
if metadata is None:
metadata = {}
metadata.setdefault('name', '/%s/%s/%s' % (account, container, obj))
df = self._simple_get_diskfile(account=account, container=container,
obj=obj, **kwargs)
if timestamp is None:
timestamp = time()
timestamp = Timestamp(timestamp)
# avoid getting O_TMPFILE warning in logs
if not utils.o_tmpfile_in_tmpdir_supported():
df.manager.use_linkat = False
if df.policy.policy_type == EC_POLICY:
data = encode_frag_archive_bodies(df.policy, data)[df._frag_index]
with df.create() as writer:
new_metadata = {
'ETag': md5(data).hexdigest(),
'X-Timestamp': timestamp.internal,
'Content-Length': len(data),
}
new_metadata.update(metadata)
writer.write(data)
writer.put(new_metadata)
writer.commit(timestamp)
df.open()
return df, data
def test_get_dev_path(self):
self.df_mgr.devices = '/srv'
device = 'sda1'
dev_path = os.path.join(self.df_mgr.devices, device)
mount_check = None
self.df_mgr.mount_check = True
with mock_check_drive(ismount=False):
self.assertEqual(self.df_mgr.get_dev_path(device, mount_check),
None)
with mock_check_drive(ismount=True):
self.assertEqual(self.df_mgr.get_dev_path(device, mount_check),
dev_path)
self.df_mgr.mount_check = False
with mock_check_drive(isdir=False):
self.assertEqual(self.df_mgr.get_dev_path(device, mount_check),
None)
with mock_check_drive(isdir=True):
self.assertEqual(self.df_mgr.get_dev_path(device, mount_check),
dev_path)
mount_check = True
with mock_check_drive(ismount=False):
self.assertEqual(self.df_mgr.get_dev_path(device, mount_check),
None)
with mock_check_drive(ismount=True):
self.assertEqual(self.df_mgr.get_dev_path(device, mount_check),
dev_path)
mount_check = False
self.assertEqual(self.df_mgr.get_dev_path(device, mount_check),
dev_path)
def test_open_not_exist(self):
df = self._simple_get_diskfile()
self.assertRaises(DiskFileNotExist, df.open)
def test_open_expired(self):
self.assertRaises(DiskFileExpired,
self._create_test_file,
b'1234567890', metadata={'X-Delete-At': '0'})
try:
self._create_test_file(b'1234567890', open_expired=True,
metadata={'X-Delete-At': '0',
'X-Object-Meta-Foo': 'bar'})
df = self._simple_get_diskfile(open_expired=True)
md = df.read_metadata()
self.assertEqual(md['X-Object-Meta-Foo'], 'bar')
except SwiftException as err:
self.fail("Unexpected swift exception raised: %r" % err)
def test_open_not_expired(self):
try:
self._create_test_file(
b'1234567890', metadata={'X-Delete-At': str(2 * int(time()))})
except SwiftException as err:
self.fail("Unexpected swift exception raised: %r" % err)
def test_get_metadata(self):
timestamp = self.ts().internal
df, df_data = self._create_test_file(b'1234567890',
timestamp=timestamp)
md = df.get_metadata()
self.assertEqual(md['X-Timestamp'], timestamp)
def test_read_metadata(self):
timestamp = self.ts().internal
self._create_test_file(b'1234567890', timestamp=timestamp)
df = self._simple_get_diskfile()
md = df.read_metadata()
self.assertEqual(md['X-Timestamp'], timestamp)
def test_read_metadata_no_xattr(self):
def mock_getxattr(*args, **kargs):
error_num = errno.ENOTSUP if hasattr(errno, 'ENOTSUP') else \
errno.EOPNOTSUPP
raise IOError(error_num, "Operation not supported")
with mock.patch('xattr.getxattr', mock_getxattr):
self.assertRaises(
DiskFileXattrNotSupported,
diskfile.read_metadata, 'n/a')
def test_get_metadata_not_opened(self):
df = self._simple_get_diskfile()
with self.assertRaises(DiskFileNotOpen):
df.get_metadata()
def test_get_datafile_metadata(self):
ts_iter = make_timestamp_iter()
body = b'1234567890'
ts_data = next(ts_iter)
metadata = {'X-Object-Meta-Test': 'test1',
'X-Object-Sysmeta-Test': 'test1'}
df, df_data = self._create_test_file(body, timestamp=ts_data.internal,
metadata=metadata)
expected = df.get_metadata()
ts_meta = next(ts_iter)
df.write_metadata({'X-Timestamp': ts_meta.internal,
'X-Object-Meta-Test': 'changed',
'X-Object-Sysmeta-Test': 'ignored'})
df.open()
self.assertEqual(expected, df.get_datafile_metadata())
expected.update({'X-Timestamp': ts_meta.internal,
'X-Object-Meta-Test': 'changed'})
self.assertEqual(expected, df.get_metadata())
def test_get_datafile_metadata_not_opened(self):
df = self._simple_get_diskfile()
with self.assertRaises(DiskFileNotOpen):
df.get_datafile_metadata()
def test_get_metafile_metadata(self):
ts_iter = make_timestamp_iter()
body = b'1234567890'
ts_data = next(ts_iter)
metadata = {'X-Object-Meta-Test': 'test1',
'X-Object-Sysmeta-Test': 'test1'}
df, df_data = self._create_test_file(body, timestamp=ts_data.internal,
metadata=metadata)
self.assertIsNone(df.get_metafile_metadata())
# now create a meta file
ts_meta = next(ts_iter)
df.write_metadata({'X-Timestamp': ts_meta.internal,
'X-Object-Meta-Test': 'changed'})
df.open()
expected = {'X-Timestamp': ts_meta.internal,
'X-Object-Meta-Test': 'changed'}
self.assertEqual(expected, df.get_metafile_metadata())
def test_get_metafile_metadata_not_opened(self):
df = self._simple_get_diskfile()
with self.assertRaises(DiskFileNotOpen):
df.get_metafile_metadata()
def test_not_opened(self):
df = self._simple_get_diskfile()
with self.assertRaises(DiskFileNotOpen):
with df:
pass
def test_disk_file_default_disallowed_metadata(self):
        # build an object with some meta (at t0)
orig_metadata = {'X-Object-Meta-Key1': 'Value1',
'X-Object-Transient-Sysmeta-KeyA': 'ValueA',
'Content-Type': 'text/garbage'}
df = self._get_open_disk_file(ts=self.ts().internal,
extra_metadata=orig_metadata)
with df.open():
if df.policy.policy_type == EC_POLICY:
expected = df.policy.pyeclib_driver.get_segment_info(
1024, df.policy.ec_segment_size)['fragment_size']
else:
expected = 1024
self.assertEqual(str(expected), df._metadata['Content-Length'])
        # write some new metadata (fast POST, don't send orig meta, at t0+1s)
df = self._simple_get_diskfile()
df.write_metadata({'X-Timestamp': self.ts().internal,
'X-Object-Transient-Sysmeta-KeyB': 'ValueB',
'X-Object-Meta-Key2': 'Value2'})
df = self._simple_get_diskfile()
with df.open():
# non-fast-post updateable keys are preserved
self.assertEqual('text/garbage', df._metadata['Content-Type'])
# original fast-post updateable keys are removed
self.assertNotIn('X-Object-Meta-Key1', df._metadata)
self.assertNotIn('X-Object-Transient-Sysmeta-KeyA', df._metadata)
# new fast-post updateable keys are added
self.assertEqual('Value2', df._metadata['X-Object-Meta-Key2'])
self.assertEqual('ValueB',
df._metadata['X-Object-Transient-Sysmeta-KeyB'])
def test_disk_file_preserves_sysmeta(self):
# build an object with some meta (at t0)
orig_metadata = {'X-Object-Sysmeta-Key1': 'Value1',
'Content-Type': 'text/garbage'}
df = self._get_open_disk_file(ts=self.ts().internal,
extra_metadata=orig_metadata)
with df.open():
if df.policy.policy_type == EC_POLICY:
expected = df.policy.pyeclib_driver.get_segment_info(
1024, df.policy.ec_segment_size)['fragment_size']
else:
expected = 1024
self.assertEqual(str(expected), df._metadata['Content-Length'])
# write some new metadata (fast POST, don't send orig meta, at t0+1s)
df = self._simple_get_diskfile()
df.write_metadata({'X-Timestamp': self.ts().internal,
'X-Object-Sysmeta-Key1': 'Value2',
'X-Object-Meta-Key3': 'Value3'})
df = self._simple_get_diskfile()
with df.open():
# non-fast-post updateable keys are preserved
self.assertEqual('text/garbage', df._metadata['Content-Type'])
# original sysmeta keys are preserved
self.assertEqual('Value1', df._metadata['X-Object-Sysmeta-Key1'])
def test_disk_file_preserves_slo(self):
# build an object with some meta (at t0)
orig_metadata = {'X-Static-Large-Object': 'True',
'Content-Type': 'text/garbage'}
df = self._get_open_disk_file(ts=self.ts().internal,
extra_metadata=orig_metadata)
# sanity test
with df.open():
self.assertEqual('True', df._metadata['X-Static-Large-Object'])
if df.policy.policy_type == EC_POLICY:
expected = df.policy.pyeclib_driver.get_segment_info(
1024, df.policy.ec_segment_size)['fragment_size']
else:
expected = 1024
self.assertEqual(str(expected), df._metadata['Content-Length'])
# write some new metadata (fast POST, don't send orig meta, at t0+1s)
df = self._simple_get_diskfile()
df.write_metadata({'X-Timestamp': self.ts().internal})
df = self._simple_get_diskfile()
with df.open():
# non-fast-post updateable keys are preserved
self.assertEqual('text/garbage', df._metadata['Content-Type'])
self.assertEqual('True', df._metadata['X-Static-Large-Object'])
def test_disk_file_reader_iter(self):
df, df_data = self._create_test_file(b'1234567890')
quarantine_msgs = []
reader = df.reader(_quarantine_hook=quarantine_msgs.append)
self.assertEqual(b''.join(reader), df_data)
self.assertEqual(quarantine_msgs, [])
def test_disk_file_reader_iter_w_quarantine(self):
df, df_data = self._create_test_file(b'1234567890')
def raise_dfq(m):
raise DiskFileQuarantined(m)
reader = df.reader(_quarantine_hook=raise_dfq)
reader._obj_size += 1
self.assertRaises(DiskFileQuarantined, b''.join, reader)
def test_disk_file_app_iter_corners(self):
df, df_data = self._create_test_file(b'1234567890')
quarantine_msgs = []
reader = df.reader(_quarantine_hook=quarantine_msgs.append)
self.assertEqual(b''.join(reader.app_iter_range(0, None)),
df_data)
self.assertEqual(quarantine_msgs, [])
df = self._simple_get_diskfile()
with df.open():
reader = df.reader()
self.assertEqual(b''.join(reader.app_iter_range(5, None)),
df_data[5:])
def test_disk_file_app_iter_range_w_none(self):
df, df_data = self._create_test_file(b'1234567890')
quarantine_msgs = []
reader = df.reader(_quarantine_hook=quarantine_msgs.append)
self.assertEqual(b''.join(reader.app_iter_range(None, None)),
df_data)
self.assertEqual(quarantine_msgs, [])
def test_disk_file_app_iter_partial_closes(self):
df, df_data = self._create_test_file(b'1234567890')
quarantine_msgs = []
reader = df.reader(_quarantine_hook=quarantine_msgs.append)
it = reader.app_iter_range(0, 5)
self.assertEqual(b''.join(it), df_data[:5])
self.assertEqual(quarantine_msgs, [])
self.assertTrue(reader._fp is None)
def test_disk_file_app_iter_ranges(self):
df, df_data = self._create_test_file(b'012345678911234567892123456789')
quarantine_msgs = []
reader = df.reader(_quarantine_hook=quarantine_msgs.append)
it = reader.app_iter_ranges([(0, 10), (10, 20), (20, 30)],
'plain/text',
'\r\n--someheader\r\n', len(df_data))
value = b''.join(it)
self.assertIn(df_data[:10], value)
self.assertIn(df_data[10:20], value)
self.assertIn(df_data[20:30], value)
self.assertEqual(quarantine_msgs, [])
def test_disk_file_app_iter_ranges_w_quarantine(self):
df, df_data = self._create_test_file(b'012345678911234567892123456789')
quarantine_msgs = []
reader = df.reader(_quarantine_hook=quarantine_msgs.append)
self.assertEqual(len(df_data), reader._obj_size) # sanity check
reader._obj_size += 1
it = reader.app_iter_ranges([(0, len(df_data))],
'plain/text',
'\r\n--someheader\r\n', len(df_data))
value = b''.join(it)
self.assertIn(df_data, value)
self.assertEqual(quarantine_msgs,
["Bytes read: %s, does not match metadata: %s" %
(len(df_data), len(df_data) + 1)])
def test_disk_file_app_iter_ranges_w_no_etag_quarantine(self):
df, df_data = self._create_test_file(b'012345678911234567892123456789')
quarantine_msgs = []
reader = df.reader(_quarantine_hook=quarantine_msgs.append)
it = reader.app_iter_ranges([(0, 10)],
'plain/text',
'\r\n--someheader\r\n', len(df_data))
value = b''.join(it)
self.assertIn(df_data[:10], value)
self.assertEqual(quarantine_msgs, [])
def test_disk_file_app_iter_ranges_edges(self):
df, df_data = self._create_test_file(b'012345678911234567892123456789')
quarantine_msgs = []
reader = df.reader(_quarantine_hook=quarantine_msgs.append)
it = reader.app_iter_ranges([(3, 10), (0, 2)], 'application/whatever',
'\r\n--someheader\r\n', len(df_data))
value = b''.join(it)
self.assertIn(df_data[3:10], value)
self.assertIn(df_data[:2], value)
self.assertEqual(quarantine_msgs, [])
def test_disk_file_large_app_iter_ranges(self):
        # This test case makes sure that the disk file app_iter_ranges
        # method covers all the code paths being tested.
long_str = b'01234567890' * 65536
df, df_data = self._create_test_file(long_str)
target_strs = [df_data[3:10], df_data[0:65590]]
quarantine_msgs = []
reader = df.reader(_quarantine_hook=quarantine_msgs.append)
it = reader.app_iter_ranges([(3, 10), (0, 65590)], 'plain/text',
'5e816ff8b8b8e9a5d355497e5d9e0301',
len(df_data))
        # The produced string is actually missing the MIME headers;
        # we need to add them to make it a real MIME message.
        # The body of the message is produced by the app_iter_ranges
        # method of the DiskFile reader.
header = b''.join([b'Content-Type: multipart/byteranges;',
b'boundary=',
b'5e816ff8b8b8e9a5d355497e5d9e0301\r\n'])
value = header + b''.join(it)
self.assertEqual(quarantine_msgs, [])
if six.PY2:
message = email.message_from_string(value)
else:
message = email.message_from_bytes(value)
parts = [p.get_payload(decode=True) for p in message.walk()][1:3]
self.assertEqual(parts, target_strs)
def test_disk_file_app_iter_ranges_empty(self):
        # This test case covers passing an empty value into app_iter_ranges:
        # when the ranges passed in are either an empty list or None,
        # the method yields an empty string.
df, df_data = self._create_test_file(b'012345678911234567892123456789')
quarantine_msgs = []
reader = df.reader(_quarantine_hook=quarantine_msgs.append)
it = reader.app_iter_ranges([], 'application/whatever',
'\r\n--someheader\r\n', len(df_data))
self.assertEqual(b''.join(it), b'')
df = self._simple_get_diskfile()
with df.open():
reader = df.reader()
it = reader.app_iter_ranges(None, 'app/something',
'\r\n--someheader\r\n', 150)
self.assertEqual(b''.join(it), b'')
self.assertEqual(quarantine_msgs, [])
def test_disk_file_mkstemp_creates_dir(self):
for policy in POLICIES:
tmpdir = os.path.join(self.testdir, self.existing_device,
diskfile.get_tmp_dir(policy))
os.rmdir(tmpdir)
df = self._simple_get_diskfile(policy=policy)
df.manager.use_linkat = False
with df.create():
self.assertTrue(os.path.exists(tmpdir))
def test_disk_file_writer(self):
df = self._simple_get_diskfile()
with df.create() as writer:
self.assertIsInstance(writer, diskfile.BaseDiskFileWriter)
# create automatically opens for us
self.assertIsNotNone(writer._fd)
# can't re-open since we're already open
with self.assertRaises(ValueError):
writer.open()
writer.write(b'asdf')
writer.close()
# can't write any more
with self.assertRaises(ValueError):
writer.write(b'asdf')
# can close again
writer.close()
def test_disk_file_concurrent_writes(self):
def threadA(df, events, errors):
try:
ts = self.ts()
with df.create() as writer:
writer.write(b'dataA')
writer.put({
'X-Timestamp': ts.internal,
'Content-Length': 5,
})
events[0].set()
events[1].wait()
writer.commit(ts)
except Exception as e:
errors.append(e)
raise
def threadB(df, events, errors):
try:
events[0].wait()
ts = self.ts()
with df.create() as writer:
writer.write(b'dataB')
writer.put({
'X-Timestamp': ts.internal,
'Content-Length': 5,
})
writer.commit(ts)
events[1].set()
except Exception as e:
errors.append(e)
raise
df = self._simple_get_diskfile()
events = [threading.Event(), threading.Event()]
errors = []
threads = [threading.Thread(target=tgt, args=(df, events, errors))
for tgt in (threadA, threadB)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertFalse(errors)
with df.open(), open(df._data_file, 'rb') as fp:
self.assertEqual(b'dataB', fp.read())
def test_disk_file_concurrent_delete(self):
def threadA(df, events, errors):
try:
ts = self.ts()
with df.create() as writer:
writer.write(b'dataA')
writer.put({'X-Timestamp': ts.internal})
events[0].set()
events[1].wait()
writer.commit(ts)
except Exception as e:
errors.append(e)
raise
def threadB(df, events, errors):
try:
events[0].wait()
df.delete(self.ts())
events[1].set()
except Exception as e:
errors.append(e)
raise
df = self._simple_get_diskfile()
events = [threading.Event(), threading.Event()]
errors = []
threads = [threading.Thread(target=tgt, args=(df, events, errors))
for tgt in (threadA, threadB)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertFalse(errors)
self.assertRaises(DiskFileDeleted, df.open)
def _get_open_disk_file(self, invalid_type=None, obj_name='o', fsize=1024,
csize=8, mark_deleted=False, prealloc=False,
ts=None, mount_check=False, extra_metadata=None,
policy=None, frag_index=None, data=None,
commit=True):
'''returns a DiskFile'''
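        # (invalid_type deliberately corrupts the object after the initial
        # write, e.g. by rewriting the ETag or Content-Length metadata, by
        # mangling the metadata xattrs, or by truncating the data file, so
        # callers can exercise the quarantine paths.)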
policy = policy or POLICIES.legacy
df = self._simple_get_diskfile(obj=obj_name, policy=policy,
frag_index=frag_index)
data = data or b'0' * fsize
if not isinstance(data, bytes):
raise ValueError('data must be bytes')
if policy.policy_type == EC_POLICY:
archives = encode_frag_archive_bodies(policy, data)
try:
data = archives[df._frag_index]
except IndexError:
data = archives[0]
if ts:
timestamp = Timestamp(ts)
else:
timestamp = Timestamp.now()
if prealloc:
prealloc_size = fsize
else:
prealloc_size = None
with df.create(size=prealloc_size) as writer:
writer.write(data)
upload_size, etag = writer.chunks_finished()
metadata = {
'ETag': etag,
'X-Timestamp': timestamp.internal,
'Content-Length': str(upload_size),
}
metadata.update(extra_metadata or {})
writer.put(metadata)
if invalid_type == 'ETag':
etag = md5()
                etag.update(b'1' + b'0' * (fsize - 1))
etag = etag.hexdigest()
metadata['ETag'] = etag
diskfile.write_metadata(writer._fd, metadata)
elif invalid_type == 'Content-Length':
metadata['Content-Length'] = fsize - 1
diskfile.write_metadata(writer._fd, metadata)
elif invalid_type == 'Bad-Content-Length':
metadata['Content-Length'] = 'zero'
diskfile.write_metadata(writer._fd, metadata)
elif invalid_type == 'Missing-Content-Length':
del metadata['Content-Length']
diskfile.write_metadata(writer._fd, metadata)
elif invalid_type == 'Bad-X-Delete-At':
metadata['X-Delete-At'] = 'bad integer'
diskfile.write_metadata(writer._fd, metadata)
if commit:
writer.commit(timestamp)
if mark_deleted:
df.delete(timestamp)
data_files = [os.path.join(df._datadir, fname)
for fname in sorted(os.listdir(df._datadir),
reverse=True)
if fname.endswith('.data')]
if invalid_type == 'Corrupt-Xattrs':
# We have to go below read_metadata/write_metadata to get proper
# corruption.
meta_xattr = xattr.getxattr(data_files[0], "user.swift.metadata")
wrong_byte = b'X' if meta_xattr[:1] != b'X' else b'Y'
xattr.setxattr(data_files[0], "user.swift.metadata",
wrong_byte + meta_xattr[1:])
elif invalid_type == 'Subtly-Corrupt-Xattrs':
# We have to go below read_metadata/write_metadata to get proper
# corruption.
meta_xattr = xattr.getxattr(data_files[0], "user.swift.metadata")
wrong_checksum = md5(meta_xattr + b"some extra stuff").hexdigest()
xattr.setxattr(data_files[0], "user.swift.metadata_checksum",
wrong_checksum.encode())
elif invalid_type == 'Truncated-Xattrs':
meta_xattr = xattr.getxattr(data_files[0], "user.swift.metadata")
xattr.setxattr(data_files[0], "user.swift.metadata",
meta_xattr[:-1])
elif invalid_type == 'Missing-Name':
md = diskfile.read_metadata(data_files[0])
del md['name']
diskfile.write_metadata(data_files[0], md)
elif invalid_type == 'Bad-Name':
md = diskfile.read_metadata(data_files[0])
md['name'] = md['name'] + 'garbage'
diskfile.write_metadata(data_files[0], md)
self.conf['disk_chunk_size'] = csize
self.conf['mount_check'] = mount_check
self.df_mgr = self.mgr_cls(self.conf, self.logger)
self.df_router = diskfile.DiskFileRouter(self.conf, self.logger)
# actual on disk frag_index may have been set by metadata
frag_index = metadata.get('X-Object-Sysmeta-Ec-Frag-Index',
frag_index)
df = self._simple_get_diskfile(obj=obj_name, policy=policy,
frag_index=frag_index)
df.open()
if invalid_type == 'Zero-Byte':
fp = open(df._data_file, 'w')
fp.close()
df.unit_test_len = fsize
return df
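    # keep_cache=True should suppress drop_buffer_cache for objects below the
    # configured keep_cache_size; the larger 50 KiB read is still expected to
    # drop the cache.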
def test_keep_cache(self):
df = self._get_open_disk_file(fsize=65)
with mock.patch("swift.obj.diskfile.drop_buffer_cache") as foo:
for _ in df.reader():
pass
self.assertTrue(foo.called)
df = self._get_open_disk_file(fsize=65)
with mock.patch("swift.obj.diskfile.drop_buffer_cache") as bar:
for _ in df.reader(keep_cache=False):
pass
self.assertTrue(bar.called)
df = self._get_open_disk_file(fsize=65)
with mock.patch("swift.obj.diskfile.drop_buffer_cache") as boo:
for _ in df.reader(keep_cache=True):
pass
self.assertFalse(boo.called)
df = self._get_open_disk_file(fsize=50 * 1024, csize=256)
with mock.patch("swift.obj.diskfile.drop_buffer_cache") as goo:
for _ in df.reader(keep_cache=True):
pass
self.assertTrue(goo.called)
def test_quarantine_valids(self):
def verify(*args, **kwargs):
try:
df = self._get_open_disk_file(**kwargs)
reader = df.reader()
for chunk in reader:
pass
except DiskFileQuarantined:
self.fail(
"Unexpected quarantining occurred: args=%r, kwargs=%r" % (
args, kwargs))
else:
pass
verify(obj_name='1')
verify(obj_name='2', csize=1)
verify(obj_name='3', csize=100000)
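    # run_quarantine_invalids() exercises each corruption type, checking that
    # quarantine happens either at open() time or while the reader is
    # consumed, depending on the kind of corruption.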
def run_quarantine_invalids(self, invalid_type):
open_exc = invalid_type in ('Content-Length', 'Bad-Content-Length',
'Subtly-Corrupt-Xattrs',
'Corrupt-Xattrs', 'Truncated-Xattrs',
'Missing-Name', 'Bad-X-Delete-At')
open_collision = invalid_type == 'Bad-Name'
def verify(*args, **kwargs):
quarantine_msgs = []
try:
df = self._get_open_disk_file(**kwargs)
reader = df.reader(_quarantine_hook=quarantine_msgs.append)
except DiskFileQuarantined as err:
if not open_exc:
self.fail(
"Unexpected DiskFileQuarantine raised: %r" % err)
return
except DiskFileCollision as err:
if not open_collision:
self.fail(
"Unexpected DiskFileCollision raised: %r" % err)
return
else:
if open_exc:
self.fail("Expected DiskFileQuarantine exception")
try:
for chunk in reader:
pass
except DiskFileQuarantined as err:
self.fail("Unexpected DiskFileQuarantine raised: :%r" % err)
else:
if not open_exc:
self.assertEqual(1, len(quarantine_msgs))
verify(invalid_type=invalid_type, obj_name='1')
verify(invalid_type=invalid_type, obj_name='2', csize=1)
verify(invalid_type=invalid_type, obj_name='3', csize=100000)
verify(invalid_type=invalid_type, obj_name='4')
def verify_air(params, start=0, adjustment=0):
"""verify (a)pp (i)ter (r)ange"""
try:
df = self._get_open_disk_file(**params)
reader = df.reader()
except DiskFileQuarantined as err:
if not open_exc:
self.fail(
"Unexpected DiskFileQuarantine raised: %r" % err)
return
except DiskFileCollision as err:
if not open_collision:
self.fail(
"Unexpected DiskFileCollision raised: %r" % err)
return
else:
if open_exc:
self.fail("Expected DiskFileQuarantine exception")
try:
for chunk in reader.app_iter_range(
start,
df.unit_test_len + adjustment):
pass
except DiskFileQuarantined as err:
self.fail("Unexpected DiskFileQuarantine raised: :%r" % err)
verify_air(dict(invalid_type=invalid_type, obj_name='5'))
verify_air(dict(invalid_type=invalid_type, obj_name='6'), 0, 100)
verify_air(dict(invalid_type=invalid_type, obj_name='7'), 1)
verify_air(dict(invalid_type=invalid_type, obj_name='8'), 0, -1)
verify_air(dict(invalid_type=invalid_type, obj_name='8'), 1, 1)
def test_quarantine_corrupt_xattrs(self):
self.run_quarantine_invalids('Corrupt-Xattrs')
def test_quarantine_subtly_corrupt_xattrs(self):
# xattrs that unpickle without error, but whose checksum does not
# match
self.run_quarantine_invalids('Subtly-Corrupt-Xattrs')
def test_quarantine_truncated_xattrs(self):
self.run_quarantine_invalids('Truncated-Xattrs')
def test_quarantine_invalid_etag(self):
self.run_quarantine_invalids('ETag')
def test_quarantine_invalid_missing_name(self):
self.run_quarantine_invalids('Missing-Name')
def test_quarantine_invalid_bad_name(self):
self.run_quarantine_invalids('Bad-Name')
def test_quarantine_invalid_bad_x_delete_at(self):
self.run_quarantine_invalids('Bad-X-Delete-At')
def test_quarantine_invalid_content_length(self):
self.run_quarantine_invalids('Content-Length')
def test_quarantine_invalid_content_length_bad(self):
self.run_quarantine_invalids('Bad-Content-Length')
def test_quarantine_invalid_zero_byte(self):
self.run_quarantine_invalids('Zero-Byte')
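    # An object with invalid metadata that has since been deleted should raise
    # DiskFileNotExist on open(), not DiskFileQuarantined.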
def test_quarantine_deleted_files(self):
try:
self._get_open_disk_file(invalid_type='Content-Length')
except DiskFileQuarantined:
pass
else:
self.fail("Expected DiskFileQuarantined exception")
try:
self._get_open_disk_file(invalid_type='Content-Length',
mark_deleted=True)
except DiskFileQuarantined as err:
self.fail("Unexpected DiskFileQuarantined exception"
" encountered: %r" % err)
except DiskFileNotExist:
pass
else:
self.fail("Expected DiskFileNotExist exception")
try:
self._get_open_disk_file(invalid_type='Content-Length',
mark_deleted=True)
except DiskFileNotExist:
pass
else:
self.fail("Expected DiskFileNotExist exception")
def test_quarantine_missing_content_length(self):
self.assertRaises(
DiskFileQuarantined,
self._get_open_disk_file,
invalid_type='Missing-Content-Length')
def test_quarantine_bad_content_length(self):
self.assertRaises(
DiskFileQuarantined,
self._get_open_disk_file,
invalid_type='Bad-Content-Length')
def test_quarantine_fstat_oserror(self):
with mock.patch('os.fstat', side_effect=OSError()):
self.assertRaises(
DiskFileQuarantined,
self._get_open_disk_file)
def test_quarantine_hashdir_not_a_directory(self):
df, df_data = self._create_test_file(b'1234567890', account="abc",
container='123', obj='xyz')
hashdir = df._datadir
rmtree(hashdir)
with open(hashdir, 'w'):
pass
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
'xyz', policy=POLICIES.legacy)
self.assertRaises(DiskFileQuarantined, df.open)
# make sure the right thing got quarantined; the suffix dir should not
# have moved, as that could have many objects in it
self.assertFalse(os.path.exists(hashdir))
self.assertTrue(os.path.exists(os.path.dirname(hashdir)))
def test_create_prealloc(self):
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
'xyz', policy=POLICIES.legacy)
with mock.patch("swift.obj.diskfile.fallocate") as fa:
with df.create(size=200) as writer:
used_fd = writer._fd
fa.assert_called_with(used_fd, 200)
def test_create_prealloc_oserror(self):
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
'xyz', policy=POLICIES.legacy)
for e in (errno.ENOSPC, errno.EDQUOT):
with mock.patch("swift.obj.diskfile.fallocate",
mock.MagicMock(side_effect=OSError(
e, os.strerror(e)))):
try:
with df.create(size=200):
pass
except DiskFileNoSpace:
pass
else:
self.fail("Expected exception DiskFileNoSpace")
# Other OSErrors must not be raised as DiskFileNoSpace
with mock.patch("swift.obj.diskfile.fallocate",
mock.MagicMock(side_effect=OSError(
errno.EACCES, os.strerror(errno.EACCES)))):
try:
with df.create(size=200):
pass
except OSError:
pass
else:
self.fail("Expected exception OSError")
def test_create_mkstemp_no_space(self):
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
'xyz', policy=POLICIES.legacy)
df.manager.use_linkat = False
for e in (errno.ENOSPC, errno.EDQUOT):
with mock.patch("swift.obj.diskfile.mkstemp",
mock.MagicMock(side_effect=OSError(
e, os.strerror(e)))):
with self.assertRaises(DiskFileNoSpace):
with df.create(size=200):
pass
# Other OSErrors must not be raised as DiskFileNoSpace
with mock.patch("swift.obj.diskfile.mkstemp",
mock.MagicMock(side_effect=OSError(
errno.EACCES, os.strerror(errno.EACCES)))):
with self.assertRaises(OSError) as raised:
with df.create(size=200):
pass
self.assertEqual(raised.exception.errno, errno.EACCES)
def test_create_close_oserror(self):
# This is a horrible hack so you can run this test in isolation.
# Some of the ctypes machinery calls os.close(), and that runs afoul
# of our mock.
with mock.patch.object(utils, '_sys_fallocate', None):
utils.disable_fallocate()
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc',
'123', 'xyz', policy=POLICIES.legacy)
with mock.patch("swift.obj.diskfile.os.close",
mock.MagicMock(side_effect=OSError(
errno.EACCES, os.strerror(errno.EACCES)))):
with df.create(size=200):
pass
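    # write_metadata() should land exactly one new .meta file named after the
    # metadata X-Timestamp; when an older Content-Type-Timestamp is supplied,
    # its offset is encoded into the file name as well.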
def test_write_metadata(self):
df, df_data = self._create_test_file(b'1234567890')
file_count = len(os.listdir(df._datadir))
timestamp = Timestamp.now().internal
metadata = {'X-Timestamp': timestamp, 'X-Object-Meta-test': 'data'}
df.write_metadata(metadata)
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), file_count + 1)
exp_name = '%s.meta' % timestamp
self.assertIn(exp_name, set(dl))
def test_write_metadata_with_content_type(self):
# if metadata has content-type then its time should be in file name
df, df_data = self._create_test_file(b'1234567890')
file_count = len(os.listdir(df._datadir))
timestamp = Timestamp.now()
metadata = {'X-Timestamp': timestamp.internal,
'X-Object-Meta-test': 'data',
'Content-Type': 'foo',
'Content-Type-Timestamp': timestamp.internal}
df.write_metadata(metadata)
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), file_count + 1)
exp_name = '%s+0.meta' % timestamp.internal
self.assertTrue(exp_name in set(dl),
'Expected file %s not found in %s' % (exp_name, dl))
def test_write_metadata_with_older_content_type(self):
# if metadata has content-type then its time should be in file name
ts_iter = make_timestamp_iter()
df, df_data = self._create_test_file(b'1234567890',
timestamp=next(ts_iter))
file_count = len(os.listdir(df._datadir))
timestamp = next(ts_iter)
timestamp2 = next(ts_iter)
metadata = {'X-Timestamp': timestamp2.internal,
'X-Object-Meta-test': 'data',
'Content-Type': 'foo',
'Content-Type-Timestamp': timestamp.internal}
df.write_metadata(metadata)
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), file_count + 1, dl)
exp_name = '%s-%x.meta' % (timestamp2.internal,
timestamp2.raw - timestamp.raw)
self.assertTrue(exp_name in set(dl),
'Expected file %s not found in %s' % (exp_name, dl))
def test_write_metadata_with_content_type_removes_same_time_meta(self):
# a meta file without content-type should be cleaned up in favour of
# a meta file at same time with content-type
ts_iter = make_timestamp_iter()
df, df_data = self._create_test_file(b'1234567890',
timestamp=next(ts_iter))
file_count = len(os.listdir(df._datadir))
timestamp = next(ts_iter)
timestamp2 = next(ts_iter)
metadata = {'X-Timestamp': timestamp2.internal,
'X-Object-Meta-test': 'data'}
df.write_metadata(metadata)
metadata = {'X-Timestamp': timestamp2.internal,
'X-Object-Meta-test': 'data',
'Content-Type': 'foo',
'Content-Type-Timestamp': timestamp.internal}
df.write_metadata(metadata)
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), file_count + 1, dl)
exp_name = '%s-%x.meta' % (timestamp2.internal,
timestamp2.raw - timestamp.raw)
self.assertTrue(exp_name in set(dl),
'Expected file %s not found in %s' % (exp_name, dl))
def test_write_metadata_with_content_type_removes_multiple_metas(self):
# a combination of a meta file without content-type and an older meta
# file with content-type should be cleaned up in favour of a meta file
# at newer time with content-type
ts_iter = make_timestamp_iter()
df, df_data = self._create_test_file(b'1234567890',
timestamp=next(ts_iter))
file_count = len(os.listdir(df._datadir))
timestamp = next(ts_iter)
timestamp2 = next(ts_iter)
metadata = {'X-Timestamp': timestamp2.internal,
'X-Object-Meta-test': 'data'}
df.write_metadata(metadata)
metadata = {'X-Timestamp': timestamp.internal,
'X-Object-Meta-test': 'data',
'Content-Type': 'foo',
'Content-Type-Timestamp': timestamp.internal}
df.write_metadata(metadata)
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), file_count + 2, dl)
metadata = {'X-Timestamp': timestamp2.internal,
'X-Object-Meta-test': 'data',
'Content-Type': 'foo',
'Content-Type-Timestamp': timestamp.internal}
df.write_metadata(metadata)
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), file_count + 1, dl)
exp_name = '%s-%x.meta' % (timestamp2.internal,
timestamp2.raw - timestamp.raw)
self.assertTrue(exp_name in set(dl),
'Expected file %s not found in %s' % (exp_name, dl))
def test_write_metadata_no_xattr(self):
timestamp = Timestamp.now().internal
metadata = {'X-Timestamp': timestamp, 'X-Object-Meta-test': 'data'}
def mock_setxattr(*args, **kargs):
error_num = errno.ENOTSUP if hasattr(errno, 'ENOTSUP') else \
errno.EOPNOTSUPP
raise IOError(error_num, "Operation not supported")
with mock.patch('xattr.setxattr', mock_setxattr):
self.assertRaises(
DiskFileXattrNotSupported,
diskfile.write_metadata, 'n/a', metadata)
def test_write_metadata_disk_full(self):
timestamp = Timestamp.now().internal
metadata = {'X-Timestamp': timestamp, 'X-Object-Meta-test': 'data'}
def mock_setxattr_ENOSPC(*args, **kargs):
raise IOError(errno.ENOSPC, "No space left on device")
def mock_setxattr_EDQUOT(*args, **kargs):
raise IOError(errno.EDQUOT, "Exceeded quota")
with mock.patch('xattr.setxattr', mock_setxattr_ENOSPC):
self.assertRaises(
DiskFileNoSpace,
diskfile.write_metadata, 'n/a', metadata)
with mock.patch('xattr.setxattr', mock_setxattr_EDQUOT):
self.assertRaises(
DiskFileNoSpace,
diskfile.write_metadata, 'n/a', metadata)
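    # Helper: write (and by default commit) a diskfile for the given
    # timestamp, policy and partition, returning its datadir;
    # expect_error=True asserts that the write itself fails.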
def _create_diskfile_dir(self, timestamp, policy, legacy_durable=False,
partition=0, next_part_power=None,
expect_error=False):
timestamp = Timestamp(timestamp)
df = self._simple_get_diskfile(account='a', container='c',
obj='o_%s' % policy,
policy=policy,
partition=partition,
next_part_power=next_part_power)
frag_index = None
if policy.policy_type == EC_POLICY:
frag_index = df._frag_index or 7
if expect_error:
with self.assertRaises(Exception):
write_diskfile(df, timestamp, frag_index=frag_index,
legacy_durable=legacy_durable)
else:
write_diskfile(df, timestamp, frag_index=frag_index,
legacy_durable=legacy_durable)
return df._datadir
def test_commit(self):
for policy in POLICIES:
timestamp = Timestamp.now()
df = self._simple_get_diskfile(account='a', container='c',
obj='o_%s' % policy,
policy=policy)
write_diskfile(df, timestamp, frag_index=2)
dl = os.listdir(df._datadir)
expected = [_make_datafilename(
timestamp, policy, frag_index=2, durable=True)]
self.assertEqual(len(dl), len(expected),
'Unexpected dir listing %s' % dl)
self.assertEqual(expected, dl)
if policy.policy_type == EC_POLICY:
self.assertEqual(2, df._frag_index)
def _do_test_write_cleanup(self, policy, legacy_durable=False):
# create first fileset as starting state
timestamp_1 = Timestamp.now()
datadir_1 = self._create_diskfile_dir(
timestamp_1, policy, legacy_durable)
# second write should clean up first fileset
timestamp_2 = Timestamp(time() + 1)
datadir_2 = self._create_diskfile_dir(timestamp_2, policy)
# sanity check
self.assertEqual(datadir_1, datadir_2)
dl = os.listdir(datadir_2)
expected = [_make_datafilename(
timestamp_2, policy, frag_index=2, durable=True)]
self.assertEqual(len(dl), len(expected),
'Unexpected dir listing %s' % dl)
self.assertEqual(expected, dl)
def test_write_cleanup(self):
for policy in POLICIES:
self._do_test_write_cleanup(policy)
def test_write_cleanup_legacy_durable(self):
for policy in POLICIES:
self._do_test_write_cleanup(policy, legacy_durable=True)
@mock.patch("swift.obj.diskfile.BaseDiskFileManager.cleanup_ondisk_files")
def test_write_cleanup_part_power_increase(self, mock_cleanup):
# Without next_part_power set we expect only one cleanup per DiskFile
# and no linking
for policy in POLICIES:
timestamp = Timestamp(time()).internal
df_dir = self._create_diskfile_dir(timestamp, policy)
self.assertEqual(1, mock_cleanup.call_count)
mock_cleanup.assert_called_once_with(df_dir)
mock_cleanup.reset_mock()
# With next_part_power set to part_power + 1 we expect two cleanups per
# DiskFile: first cleanup the current directory, but also cleanup the
# future directory where hardlinks are created
for policy in POLICIES:
timestamp = Timestamp(time()).internal
df_dir = self._create_diskfile_dir(
timestamp, policy, next_part_power=11)
self.assertEqual(2, mock_cleanup.call_count)
mock_cleanup.assert_any_call(df_dir)
# Make sure the translated path is also cleaned up
expected_dir = utils.replace_partition_in_path(
self.conf['devices'], df_dir, 11)
mock_cleanup.assert_any_call(expected_dir)
mock_cleanup.reset_mock()
# With next_part_power set to part_power we expect two cleanups per
# DiskFile: first cleanup the current directory, but also cleanup the
# previous old directory
for policy in POLICIES:
hash_path = utils.hash_path('a', 'c', 'o_%s' % policy)
partition = utils.get_partition_for_hash(hash_path, 10)
timestamp = Timestamp(time()).internal
df_dir = self._create_diskfile_dir(
timestamp, policy, partition=partition, next_part_power=10)
self.assertEqual(2, mock_cleanup.call_count)
mock_cleanup.assert_any_call(df_dir)
# Make sure the path using the old part power is also cleaned up
expected_dir = utils.replace_partition_in_path(
self.conf['devices'], df_dir, 9)
mock_cleanup.assert_any_call(expected_dir)
mock_cleanup.reset_mock()
@mock.patch.object(diskfile.BaseDiskFileManager, 'cleanup_ondisk_files',
side_effect=Exception)
def test_killed_before_cleanup(self, mock_cleanup):
for policy in POLICIES:
timestamp = Timestamp(time()).internal
hash_path = utils.hash_path('a', 'c', 'o_%s' % policy)
partition = utils.get_partition_for_hash(hash_path, 10)
df_dir = self._create_diskfile_dir(timestamp, policy,
partition=partition,
next_part_power=11,
expect_error=True)
expected_dir = utils.replace_partition_in_path(
self.conf['devices'], df_dir, 11)
self.assertEqual(os.listdir(df_dir), os.listdir(expected_dir))
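    # put() should fsync the temp data file exactly once; commit() must not
    # trigger a second fsync of the file.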
def test_commit_fsync(self):
for policy in POLICIES:
df = self._simple_get_diskfile(account='a', container='c',
obj='o', policy=policy)
timestamp = Timestamp.now()
with df.create() as writer:
metadata = {
'ETag': 'bogus_etag',
'X-Timestamp': timestamp.internal,
'Content-Length': '0',
}
with mock.patch('swift.obj.diskfile.fsync') as mock_fsync:
writer.put(metadata)
self.assertEqual(1, mock_fsync.call_count)
writer.commit(timestamp)
self.assertEqual(1, mock_fsync.call_count)
if policy.policy_type == EC_POLICY:
self.assertIsInstance(mock_fsync.call_args[0][0], int)
def test_commit_ignores_cleanup_ondisk_files_error(self):
for policy in POLICIES:
# Check OSError from cleanup_ondisk_files is caught and ignored
mock_cleanup = mock.MagicMock(side_effect=OSError)
df = self._simple_get_diskfile(account='a', container='c',
obj='o_error', policy=policy)
timestamp = Timestamp.now()
with df.create() as writer:
metadata = {
'ETag': 'bogus_etag',
'X-Timestamp': timestamp.internal,
'Content-Length': '0',
}
writer.put(metadata)
with mock.patch(self._manager_mock(
'cleanup_ondisk_files', df), mock_cleanup):
writer.commit(timestamp)
expected = {
EC_POLICY: 1,
REPL_POLICY: 0,
}[policy.policy_type]
self.assertEqual(expected, mock_cleanup.call_count)
if expected:
self.assertIn(
'Problem cleaning up',
df.manager.logger.get_lines_for_level('error')[0])
expected = [_make_datafilename(
timestamp, policy, frag_index=2, durable=True)]
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), len(expected),
'Unexpected dir listing %s' % dl)
self.assertEqual(expected, dl)
def test_number_calls_to_cleanup_ondisk_files_during_create(self):
# Check how many calls are made to cleanup_ondisk_files, and when,
# during put(), commit() sequence
for policy in POLICIES:
expected = {
EC_POLICY: (0, 1),
REPL_POLICY: (1, 0),
}[policy.policy_type]
df = self._simple_get_diskfile(account='a', container='c',
obj='o_error', policy=policy)
timestamp = Timestamp.now()
with df.create() as writer:
metadata = {
'ETag': 'bogus_etag',
'X-Timestamp': timestamp.internal,
'Content-Length': '0',
}
with mock.patch(self._manager_mock(
'cleanup_ondisk_files', df)) as mock_cleanup:
writer.put(metadata)
self.assertEqual(expected[0], mock_cleanup.call_count)
with mock.patch(self._manager_mock(
'cleanup_ondisk_files', df)) as mock_cleanup:
writer.commit(timestamp)
self.assertEqual(expected[1], mock_cleanup.call_count)
def test_number_calls_to_cleanup_ondisk_files_during_delete(self):
# Check how many calls are made to cleanup_ondisk_files, and when,
# for delete() and necessary prerequisite steps
for policy in POLICIES:
expected = {
EC_POLICY: (0, 1, 1),
REPL_POLICY: (1, 0, 1),
}[policy.policy_type]
df = self._simple_get_diskfile(account='a', container='c',
obj='o_error', policy=policy)
timestamp = Timestamp.now()
with df.create() as writer:
metadata = {
'ETag': 'bogus_etag',
'X-Timestamp': timestamp.internal,
'Content-Length': '0',
}
with mock.patch(self._manager_mock(
'cleanup_ondisk_files', df)) as mock_cleanup:
writer.put(metadata)
self.assertEqual(expected[0], mock_cleanup.call_count)
with mock.patch(self._manager_mock(
'cleanup_ondisk_files', df)) as mock_cleanup:
writer.commit(timestamp)
self.assertEqual(expected[1], mock_cleanup.call_count)
with mock.patch(self._manager_mock(
'cleanup_ondisk_files', df)) as mock_cleanup:
timestamp = Timestamp.now()
df.delete(timestamp)
self.assertEqual(expected[2], mock_cleanup.call_count)
def test_delete(self):
for policy in POLICIES:
if policy.policy_type == EC_POLICY:
metadata = {'X-Object-Sysmeta-Ec-Frag-Index': '1'}
fi = 1
else:
metadata = {}
fi = None
df = self._get_open_disk_file(policy=policy, frag_index=fi,
extra_metadata=metadata)
ts = Timestamp.now()
df.delete(ts)
exp_name = '%s.ts' % ts.internal
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), 1)
self.assertIn(exp_name, set(dl))
# cleanup before next policy
os.unlink(os.path.join(df._datadir, exp_name))
def test_open_deleted(self):
df = self._get_open_disk_file()
ts = time()
df.delete(ts)
exp_name = '%s.ts' % str(Timestamp(ts).internal)
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), 1)
self.assertIn(exp_name, set(dl))
df = self._simple_get_diskfile()
self.assertRaises(DiskFileDeleted, df.open)
def test_open_deleted_with_corrupt_tombstone(self):
df = self._get_open_disk_file()
ts = time()
df.delete(ts)
exp_name = '%s.ts' % str(Timestamp(ts).internal)
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), 1)
self.assertIn(exp_name, set(dl))
# it's pickle-format, so removing the last byte is sufficient to
# corrupt it
ts_fullpath = os.path.join(df._datadir, exp_name)
self.assertTrue(os.path.exists(ts_fullpath)) # sanity check
meta_xattr = xattr.getxattr(ts_fullpath, "user.swift.metadata")
xattr.setxattr(ts_fullpath, "user.swift.metadata", meta_xattr[:-1])
df = self._simple_get_diskfile()
self.assertRaises(DiskFileNotExist, df.open)
self.assertFalse(os.path.exists(ts_fullpath))
def test_from_audit_location(self):
df, df_data = self._create_test_file(
b'blah blah',
account='three', container='blind', obj='mice')
hashdir = df._datadir
df = self.df_mgr.get_diskfile_from_audit_location(
diskfile.AuditLocation(hashdir, self.existing_device, '0',
policy=POLICIES.default))
df.open()
self.assertEqual(df._name, '/three/blind/mice')
def test_from_audit_location_with_mismatched_hash(self):
df, df_data = self._create_test_file(
b'blah blah',
account='this', container='is', obj='right')
hashdir = df._datadir
datafilename = [f for f in os.listdir(hashdir)
if f.endswith('.data')][0]
datafile = os.path.join(hashdir, datafilename)
meta = diskfile.read_metadata(datafile)
meta['name'] = '/this/is/wrong'
diskfile.write_metadata(datafile, meta)
df = self.df_mgr.get_diskfile_from_audit_location(
diskfile.AuditLocation(hashdir, self.existing_device, '0',
policy=POLICIES.default))
self.assertRaises(DiskFileQuarantined, df.open)
def test_close_error(self):
def mock_handle_close_quarantine():
raise Exception("Bad")
df = self._get_open_disk_file(fsize=1024 * 1024 * 2, csize=1024)
reader = df.reader()
reader._handle_close_quarantine = mock_handle_close_quarantine
for chunk in reader:
pass
# close is called at the end of the iterator
self.assertIsNone(reader._fp)
error_lines = df._logger.get_lines_for_level('error')
self.assertEqual(len(error_lines), 1)
self.assertIn('close failure', error_lines[0])
self.assertIn('Bad', error_lines[0])
def test_mount_checking(self):
def _mock_cm(*args, **kwargs):
return False
with mock.patch("swift.common.constraints.check_mount", _mock_cm):
self.assertRaises(
DiskFileDeviceUnavailable,
self._get_open_disk_file,
mount_check=True)
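    # The ondisk search loop tests lay down mixed .data/.meta/.ts files and
    # verify which file's timestamp (or tombstone) wins when the diskfile is
    # opened.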
def test_ondisk_search_loop_ts_meta_data(self):
df = self._simple_get_diskfile()
self._create_ondisk_file(df, b'', ext='.ts', timestamp=10)
self._create_ondisk_file(df, b'', ext='.ts', timestamp=9)
self._create_ondisk_file(df, b'', ext='.meta', timestamp=8)
self._create_ondisk_file(df, b'', ext='.meta', timestamp=7)
self._create_ondisk_file(df, b'B', ext='.data', timestamp=6)
self._create_ondisk_file(df, b'A', ext='.data', timestamp=5)
df = self._simple_get_diskfile()
with self.assertRaises(DiskFileDeleted) as raised:
df.open()
self.assertEqual(raised.exception.timestamp, Timestamp(10).internal)
def test_ondisk_search_loop_meta_ts_data(self):
df = self._simple_get_diskfile()
self._create_ondisk_file(df, b'', ext='.meta', timestamp=10)
self._create_ondisk_file(df, b'', ext='.meta', timestamp=9)
self._create_ondisk_file(df, b'', ext='.ts', timestamp=8)
self._create_ondisk_file(df, b'', ext='.ts', timestamp=7)
self._create_ondisk_file(df, b'B', ext='.data', timestamp=6)
self._create_ondisk_file(df, b'A', ext='.data', timestamp=5)
df = self._simple_get_diskfile()
with self.assertRaises(DiskFileDeleted) as raised:
df.open()
self.assertEqual(raised.exception.timestamp, Timestamp(8).internal)
def _test_ondisk_search_loop_meta_data_ts(self, legacy_durable=False):
df = self._simple_get_diskfile()
self._create_ondisk_file(df, b'', ext='.meta', timestamp=10)
self._create_ondisk_file(df, b'', ext='.meta', timestamp=9)
self._create_ondisk_file(
df, b'B', ext='.data', legacy_durable=legacy_durable, timestamp=8)
self._create_ondisk_file(
df, b'A', ext='.data', legacy_durable=legacy_durable, timestamp=7)
self._create_ondisk_file(df, b'', ext='.ts', timestamp=6)
self._create_ondisk_file(df, b'', ext='.ts', timestamp=5)
df = self._simple_get_diskfile()
with df.open():
self.assertIn('X-Timestamp', df._metadata)
self.assertEqual(df._metadata['X-Timestamp'],
Timestamp(10).internal)
self.assertNotIn('deleted', df._metadata)
def test_ondisk_search_loop_meta_data_ts(self):
self._test_ondisk_search_loop_meta_data_ts()
def test_ondisk_search_loop_meta_data_ts_legacy_durable(self):
self._test_ondisk_search_loop_meta_data_ts(legacy_durable=True)
def _test_ondisk_search_loop_multiple_meta_data(self,
legacy_durable=False):
df = self._simple_get_diskfile()
self._create_ondisk_file(df, b'', ext='.meta', timestamp=10,
metadata={'X-Object-Meta-User': 'user-meta'})
self._create_ondisk_file(df, b'', ext='.meta', timestamp=9,
ctype_timestamp=9,
metadata={'Content-Type': 'newest',
'X-Object-Meta-User': 'blah'})
self._create_ondisk_file(
df, b'B', ext='.data', legacy_durable=legacy_durable, timestamp=8,
metadata={'Content-Type': 'newer'})
self._create_ondisk_file(
df, b'A', ext='.data', legacy_durable=legacy_durable, timestamp=7,
metadata={'Content-Type': 'oldest'})
df = self._simple_get_diskfile()
with df.open():
self.assertTrue('X-Timestamp' in df._metadata)
self.assertEqual(df._metadata['X-Timestamp'],
Timestamp(10).internal)
self.assertTrue('Content-Type' in df._metadata)
self.assertEqual(df._metadata['Content-Type'], 'newest')
self.assertTrue('X-Object-Meta-User' in df._metadata)
self.assertEqual(df._metadata['X-Object-Meta-User'], 'user-meta')
def test_ondisk_search_loop_multiple_meta_data(self):
self._test_ondisk_search_loop_multiple_meta_data()
def test_ondisk_search_loop_multiple_meta_data_legacy_durable(self):
self._test_ondisk_search_loop_multiple_meta_data(legacy_durable=True)
def _test_ondisk_search_loop_stale_meta_data(self, legacy_durable=False):
df = self._simple_get_diskfile()
self._create_ondisk_file(df, b'', ext='.meta', timestamp=10,
metadata={'X-Object-Meta-User': 'user-meta'})
self._create_ondisk_file(df, b'', ext='.meta', timestamp=9,
ctype_timestamp=7,
metadata={'Content-Type': 'older',
'X-Object-Meta-User': 'blah'})
self._create_ondisk_file(
df, b'B', ext='.data', legacy_durable=legacy_durable, timestamp=8,
metadata={'Content-Type': 'newer'})
df = self._simple_get_diskfile()
with df.open():
self.assertTrue('X-Timestamp' in df._metadata)
self.assertEqual(df._metadata['X-Timestamp'],
Timestamp(10).internal)
self.assertTrue('Content-Type' in df._metadata)
self.assertEqual(df._metadata['Content-Type'], 'newer')
self.assertTrue('X-Object-Meta-User' in df._metadata)
self.assertEqual(df._metadata['X-Object-Meta-User'], 'user-meta')
def test_ondisk_search_loop_stale_meta_data(self):
self._test_ondisk_search_loop_stale_meta_data()
def test_ondisk_search_loop_stale_meta_data_legacy_durable(self):
self._test_ondisk_search_loop_stale_meta_data(legacy_durable=True)
def _test_ondisk_search_loop_data_ts_meta(self, legacy_durable=False):
df = self._simple_get_diskfile()
self._create_ondisk_file(
df, b'B', ext='.data', legacy_durable=legacy_durable, timestamp=10)
self._create_ondisk_file(
df, b'A', ext='.data', legacy_durable=legacy_durable, timestamp=9)
self._create_ondisk_file(df, b'', ext='.ts', timestamp=8)
self._create_ondisk_file(df, b'', ext='.ts', timestamp=7)
self._create_ondisk_file(df, b'', ext='.meta', timestamp=6)
self._create_ondisk_file(df, b'', ext='.meta', timestamp=5)
df = self._simple_get_diskfile()
with df.open():
self.assertIn('X-Timestamp', df._metadata)
self.assertEqual(df._metadata['X-Timestamp'],
Timestamp(10).internal)
self.assertNotIn('deleted', df._metadata)
def test_ondisk_search_loop_data_ts_meta(self):
self._test_ondisk_search_loop_data_ts_meta()
def test_ondisk_search_loop_data_ts_meta_legacy_durable(self):
self._test_ondisk_search_loop_data_ts_meta(legacy_durable=True)
def _test_ondisk_search_loop_wayward_files_ignored(self,
legacy_durable=False):
df = self._simple_get_diskfile()
self._create_ondisk_file(df, b'X', ext='.bar', timestamp=11)
self._create_ondisk_file(
df, b'B', ext='.data', legacy_durable=legacy_durable, timestamp=10)
self._create_ondisk_file(
df, b'A', ext='.data', legacy_durable=legacy_durable, timestamp=9)
self._create_ondisk_file(df, b'', ext='.ts', timestamp=8)
self._create_ondisk_file(df, b'', ext='.ts', timestamp=7)
self._create_ondisk_file(df, b'', ext='.meta', timestamp=6)
self._create_ondisk_file(df, b'', ext='.meta', timestamp=5)
df = self._simple_get_diskfile()
with df.open():
self.assertIn('X-Timestamp', df._metadata)
self.assertEqual(df._metadata['X-Timestamp'],
Timestamp(10).internal)
self.assertNotIn('deleted', df._metadata)
def test_ondisk_search_loop_wayward_files_ignored(self):
self._test_ondisk_search_loop_wayward_files_ignored()
def test_ondisk_search_loop_wayward_files_ignored_legacy_durable(self):
self._test_ondisk_search_loop_wayward_files_ignored(
legacy_durable=True)
def _test_ondisk_search_loop_listdir_error(self, legacy_durable=False):
df = self._simple_get_diskfile()
def mock_listdir_exp(*args, **kwargs):
raise OSError(errno.EACCES, os.strerror(errno.EACCES))
with mock.patch("os.listdir", mock_listdir_exp):
self._create_ondisk_file(df, b'X', ext='.bar', timestamp=11)
self._create_ondisk_file(df, b'B', ext='.data', timestamp=10,
legacy_durable=legacy_durable)
self._create_ondisk_file(df, b'A', ext='.data', timestamp=9,
legacy_durable=legacy_durable)
self._create_ondisk_file(df, b'', ext='.ts', timestamp=8)
self._create_ondisk_file(df, b'', ext='.ts', timestamp=7)
self._create_ondisk_file(df, b'', ext='.meta', timestamp=6)
self._create_ondisk_file(df, b'', ext='.meta', timestamp=5)
df = self._simple_get_diskfile()
self.assertRaises(DiskFileError, df.open)
def test_ondisk_search_loop_listdir_error(self):
self._test_ondisk_search_loop_listdir_error()
def test_ondisk_search_loop_listdir_error_legacy_durable(self):
self._test_ondisk_search_loop_listdir_error(legacy_durable=True)
def test_exception_in_handle_close_quarantine(self):
df = self._get_open_disk_file()
def blow_up():
raise Exception('a very special error')
reader = df.reader()
reader._handle_close_quarantine = blow_up
for _ in reader:
pass
reader.close()
log_lines = df._logger.get_lines_for_level('error')
self.assertIn('a very special error', log_lines[-1])
def test_diskfile_names(self):
df = self._simple_get_diskfile()
self.assertEqual(df.account, 'a')
self.assertEqual(df.container, 'c')
self.assertEqual(df.obj, 'o')
def test_diskfile_content_length_not_open(self):
df = self._simple_get_diskfile()
exc = None
try:
df.content_length
except DiskFileNotOpen as err:
exc = err
self.assertEqual(str(exc), '')
def test_diskfile_content_length_deleted(self):
df = self._get_open_disk_file()
ts = time()
df.delete(ts)
exp_name = '%s.ts' % str(Timestamp(ts).internal)
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), 1)
self.assertIn(exp_name, set(dl))
df = self._simple_get_diskfile()
exc = None
try:
with df.open():
df.content_length
except DiskFileDeleted as err:
exc = err
self.assertEqual(str(exc), '')
def test_diskfile_content_length(self):
self._get_open_disk_file()
df = self._simple_get_diskfile()
with df.open():
if df.policy.policy_type == EC_POLICY:
expected = df.policy.pyeclib_driver.get_segment_info(
1024, df.policy.ec_segment_size)['fragment_size']
else:
expected = 1024
self.assertEqual(df.content_length, expected)
def test_diskfile_timestamp_not_open(self):
df = self._simple_get_diskfile()
exc = None
try:
df.timestamp
except DiskFileNotOpen as err:
exc = err
self.assertEqual(str(exc), '')
def test_diskfile_timestamp_deleted(self):
df = self._get_open_disk_file()
ts = time()
df.delete(ts)
exp_name = '%s.ts' % str(Timestamp(ts).internal)
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), 1)
self.assertIn(exp_name, set(dl))
df = self._simple_get_diskfile()
exc = None
try:
with df.open():
df.timestamp
except DiskFileDeleted as err:
exc = err
self.assertEqual(str(exc), '')
def test_diskfile_timestamp(self):
ts_1 = self.ts()
self._get_open_disk_file(ts=ts_1.internal)
df = self._simple_get_diskfile()
with df.open():
self.assertEqual(df.timestamp, ts_1.internal)
ts_2 = self.ts()
df.write_metadata({'X-Timestamp': ts_2.internal})
with df.open():
self.assertEqual(df.timestamp, ts_2.internal)
def test_data_timestamp(self):
ts_1 = self.ts()
self._get_open_disk_file(ts=ts_1.internal)
df = self._simple_get_diskfile()
with df.open():
self.assertEqual(df.data_timestamp, ts_1.internal)
ts_2 = self.ts()
df.write_metadata({'X-Timestamp': ts_2.internal})
with df.open():
self.assertEqual(df.data_timestamp, ts_1.internal)
def test_data_timestamp_not_open(self):
df = self._simple_get_diskfile()
with self.assertRaises(DiskFileNotOpen):
df.data_timestamp
def test_content_type_and_timestamp(self):
ts_1 = self.ts()
self._get_open_disk_file(ts=ts_1.internal,
extra_metadata={'Content-Type': 'image/jpeg'})
df = self._simple_get_diskfile()
with df.open():
self.assertEqual(ts_1.internal, df.data_timestamp)
self.assertEqual(ts_1.internal, df.timestamp)
self.assertEqual(ts_1.internal, df.content_type_timestamp)
self.assertEqual('image/jpeg', df.content_type)
ts_2 = self.ts()
ts_3 = self.ts()
df.write_metadata({'X-Timestamp': ts_3.internal,
'Content-Type': 'image/gif',
'Content-Type-Timestamp': ts_2.internal})
with df.open():
self.assertEqual(ts_1.internal, df.data_timestamp)
self.assertEqual(ts_3.internal, df.timestamp)
self.assertEqual(ts_2.internal, df.content_type_timestamp)
self.assertEqual('image/gif', df.content_type)
def test_content_type_timestamp_not_open(self):
df = self._simple_get_diskfile()
with self.assertRaises(DiskFileNotOpen):
df.content_type_timestamp
def test_content_type_not_open(self):
df = self._simple_get_diskfile()
with self.assertRaises(DiskFileNotOpen):
df.content_type
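    # durable_timestamp should track the timestamp of the durable data file
    # and must not change when only a .meta file is written afterwards.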
def _do_test_durable_timestamp(self, legacy_durable):
ts_1 = self.ts()
df = self._simple_get_diskfile(frag_index=2)
write_diskfile(df, ts_1, legacy_durable=legacy_durable)
# get a new instance of the diskfile to ensure timestamp variable is
# set by the open() and not just the write operations
df = self._simple_get_diskfile(frag_index=2)
with df.open():
self.assertEqual(df.durable_timestamp, ts_1.internal)
# verify durable timestamp does not change when metadata is written
ts_2 = self.ts()
df.write_metadata({'X-Timestamp': ts_2.internal})
with df.open():
self.assertEqual(df.durable_timestamp, ts_1.internal)
def test_durable_timestamp(self):
self._do_test_durable_timestamp(False)
def test_durable_timestamp_not_open(self):
df = self._simple_get_diskfile()
with self.assertRaises(DiskFileNotOpen):
df.durable_timestamp
def test_durable_timestamp_no_data_file(self):
        df = self._get_open_disk_file(ts=self.ts().internal)
for f in os.listdir(df._datadir):
if f.endswith('.data'):
os.unlink(os.path.join(df._datadir, f))
df = self._simple_get_diskfile()
with self.assertRaises(DiskFileNotExist):
df.open()
# open() was attempted, but no data file so expect None
self.assertIsNone(df.durable_timestamp)
def test_error_in_cleanup_ondisk_files(self):
def mock_cleanup(*args, **kwargs):
raise OSError()
df = self._get_open_disk_file()
file_count = len(os.listdir(df._datadir))
ts = time()
with mock.patch(
self._manager_mock('cleanup_ondisk_files'), mock_cleanup):
# Expect to swallow the OSError
df.delete(ts)
exp_name = '%s.ts' % str(Timestamp(ts).internal)
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), file_count + 1)
self.assertIn(exp_name, set(dl))
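    # The zero-copy (splice) tests below require both the splice() syscall and
    # an MD5 socket; they are skipped when either is unavailable.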
def _system_can_zero_copy(self):
if not splice.available:
return False
try:
utils.get_md5_socket()
except IOError:
return False
return True
def test_zero_copy_cache_dropping(self):
if not self._system_can_zero_copy():
raise unittest.SkipTest("zero-copy support is missing")
self.conf['splice'] = 'on'
self.conf['keep_cache_size'] = 16384
self.conf['disk_chunk_size'] = 4096
df = self._get_open_disk_file(fsize=163840)
reader = df.reader()
self.assertTrue(reader.can_zero_copy_send())
with mock.patch("swift.obj.diskfile.drop_buffer_cache") as dbc:
with mock.patch("swift.obj.diskfile.DROP_CACHE_WINDOW", 4095):
with open('/dev/null', 'w') as devnull:
reader.zero_copy_send(devnull.fileno())
if df.policy.policy_type == EC_POLICY:
expected = 4 + 1
else:
expected = (4 * 10) + 1
self.assertEqual(len(dbc.mock_calls), expected)
def test_zero_copy_turns_off_when_md5_sockets_not_supported(self):
if not self._system_can_zero_copy():
raise unittest.SkipTest("zero-copy support is missing")
df_mgr = self.df_router[POLICIES.default]
self.conf['splice'] = 'on'
with mock.patch('swift.obj.diskfile.get_md5_socket') as mock_md5sock:
mock_md5sock.side_effect = IOError(
errno.EAFNOSUPPORT, "MD5 socket busted")
df = self._get_open_disk_file(fsize=128)
reader = df.reader()
self.assertFalse(reader.can_zero_copy_send())
log_lines = df_mgr.logger.get_lines_for_level('warning')
self.assertIn('MD5 sockets', log_lines[-1])
def test_tee_to_md5_pipe_length_mismatch(self):
if not self._system_can_zero_copy():
raise unittest.SkipTest("zero-copy support is missing")
self.conf['splice'] = 'on'
df = self._get_open_disk_file(fsize=16385)
reader = df.reader()
self.assertTrue(reader.can_zero_copy_send())
with mock.patch('swift.obj.diskfile.tee') as mock_tee:
mock_tee.side_effect = lambda _1, _2, _3, cnt: cnt - 1
with open('/dev/null', 'w') as devnull:
exc_re = (r'tee\(\) failed: tried to move \d+ bytes, but only '
r'moved -?\d+')
try:
reader.zero_copy_send(devnull.fileno())
except Exception as e:
self.assertTrue(re.match(exc_re, str(e)))
else:
self.fail('Expected Exception was not raised')
def test_splice_to_wsockfd_blocks(self):
if not self._system_can_zero_copy():
raise unittest.SkipTest("zero-copy support is missing")
self.conf['splice'] = 'on'
df = self._get_open_disk_file(fsize=16385)
reader = df.reader()
self.assertTrue(reader.can_zero_copy_send())
def _run_test():
# Set up mock of `splice`
splice_called = [False] # State hack
def fake_splice(fd_in, off_in, fd_out, off_out, len_, flags):
if fd_out == devnull.fileno() and not splice_called[0]:
splice_called[0] = True
err = errno.EWOULDBLOCK
raise IOError(err, os.strerror(err))
return splice(fd_in, off_in, fd_out, off_out,
len_, flags)
mock_splice.side_effect = fake_splice
# Set up mock of `trampoline`
# There are 2 reasons to mock this:
#
# - We want to ensure it's called with the expected arguments at
# least once
# - When called with our write FD (which points to `/dev/null`), we
# can't actually call `trampoline`, because adding such FD to an
# `epoll` handle results in `EPERM`
def fake_trampoline(fd, read=None, write=None, timeout=None,
timeout_exc=timeout.Timeout,
mark_as_closed=None):
if write and fd == devnull.fileno():
return
else:
hubs.trampoline(fd, read=read, write=write,
timeout=timeout, timeout_exc=timeout_exc,
mark_as_closed=mark_as_closed)
mock_trampoline.side_effect = fake_trampoline
reader.zero_copy_send(devnull.fileno())
# Assert the end of `zero_copy_send` was reached
self.assertTrue(mock_close.called)
# Assert there was at least one call to `trampoline` waiting for
# `write` access to the output FD
mock_trampoline.assert_any_call(devnull.fileno(), write=True)
# Assert at least one call to `splice` with the output FD we expect
for call in mock_splice.call_args_list:
args = call[0]
if args[2] == devnull.fileno():
break
else:
self.fail('`splice` not called with expected arguments')
with mock.patch('swift.obj.diskfile.splice') as mock_splice:
with mock.patch.object(
reader, 'close', side_effect=reader.close) as mock_close:
with open('/dev/null', 'w') as devnull:
with mock.patch('swift.obj.diskfile.trampoline') as \
mock_trampoline:
_run_test()
def test_create_unlink_cleanup_DiskFileNoSpace(self):
# Test cleanup when DiskFileNoSpace() is raised.
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
'xyz', policy=POLICIES.legacy)
df.manager.use_linkat = False
_m_fallocate = mock.MagicMock(side_effect=OSError(errno.ENOSPC,
os.strerror(errno.ENOSPC)))
_m_unlink = mock.Mock()
with mock.patch("swift.obj.diskfile.fallocate", _m_fallocate):
with mock.patch("os.unlink", _m_unlink):
try:
with df.create(size=100):
pass
except DiskFileNoSpace:
pass
else:
self.fail("Expected exception DiskFileNoSpace")
self.assertTrue(_m_fallocate.called)
self.assertTrue(_m_unlink.called)
self.assertNotIn('error', self.logger.all_log_lines())
def test_create_unlink_cleanup_renamer_fails(self):
# Test cleanup when renamer fails
_m_renamer = mock.MagicMock(side_effect=OSError(errno.ENOENT,
os.strerror(errno.ENOENT)))
_m_unlink = mock.Mock()
df = self._simple_get_diskfile()
df.manager.use_linkat = False
data = b'0' * 100
metadata = {
'ETag': md5(data).hexdigest(),
'X-Timestamp': Timestamp.now().internal,
'Content-Length': str(100),
}
with mock.patch("swift.obj.diskfile.renamer", _m_renamer):
with mock.patch("os.unlink", _m_unlink):
try:
with df.create(size=100) as writer:
writer.write(data)
writer.put(metadata)
except OSError:
pass
else:
self.fail("Expected OSError exception")
self.assertFalse(writer._put_succeeded)
self.assertTrue(_m_renamer.called)
self.assertTrue(_m_unlink.called)
self.assertNotIn('error', self.logger.all_log_lines())
def test_create_unlink_cleanup_logging(self):
# Test logging of os.unlink() failures.
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
'xyz', policy=POLICIES.legacy)
df.manager.use_linkat = False
_m_fallocate = mock.MagicMock(side_effect=OSError(errno.ENOSPC,
os.strerror(errno.ENOSPC)))
_m_unlink = mock.MagicMock(side_effect=OSError(errno.ENOENT,
os.strerror(errno.ENOENT)))
with mock.patch("swift.obj.diskfile.fallocate", _m_fallocate):
with mock.patch("os.unlink", _m_unlink):
try:
with df.create(size=100):
pass
except DiskFileNoSpace:
pass
else:
self.fail("Expected exception DiskFileNoSpace")
self.assertTrue(_m_fallocate.called)
self.assertTrue(_m_unlink.called)
error_lines = self.logger.get_lines_for_level('error')
for line in error_lines:
self.assertTrue(line.startswith("Error removing tempfile:"))
@requires_o_tmpfile_support_in_tmp
def test_get_tempfile_use_linkat_os_open_called(self):
df = self._simple_get_diskfile()
self.assertTrue(df.manager.use_linkat)
_m_mkstemp = mock.MagicMock()
_m_os_open = mock.Mock(return_value=12345)
_m_mkc = mock.Mock()
with mock.patch("swift.obj.diskfile.mkstemp", _m_mkstemp):
with mock.patch("swift.obj.diskfile.os.open", _m_os_open):
with mock.patch("swift.obj.diskfile.makedirs_count", _m_mkc):
writer = df.writer()
fd, tmppath = writer._get_tempfile()
self.assertTrue(_m_mkc.called)
flags = O_TMPFILE | os.O_WRONLY
_m_os_open.assert_called_once_with(df._datadir, flags)
self.assertIsNone(tmppath)
self.assertEqual(fd, 12345)
self.assertFalse(_m_mkstemp.called)
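    # If opening with O_TMPFILE fails with an anticipated errno, the writer
    # should fall back to mkstemp() and cache the failure so that linkat is
    # not attempted again.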
@requires_o_tmpfile_support_in_tmp
def test_get_tempfile_fallback_to_mkstemp(self):
df = self._simple_get_diskfile()
df._logger = debug_logger()
self.assertTrue(df.manager.use_linkat)
for err in (errno.EOPNOTSUPP, errno.EISDIR, errno.EINVAL):
df.manager.use_linkat = True
_m_open = mock.Mock(side_effect=OSError(err, os.strerror(err)))
_m_mkstemp = mock.MagicMock(return_value=(0, "blah"))
_m_mkc = mock.Mock()
with mock.patch("swift.obj.diskfile.os.open", _m_open):
with mock.patch("swift.obj.diskfile.mkstemp", _m_mkstemp):
with mock.patch("swift.obj.diskfile.makedirs_count",
_m_mkc):
writer = df.writer()
fd, tmppath = writer._get_tempfile()
self.assertTrue(_m_mkc.called)
# Fallback should succeed and mkstemp() should be called.
self.assertTrue(_m_mkstemp.called)
self.assertEqual(tmppath, "blah")
# Once opening file with O_TMPFILE has failed,
# failure is cached to not try again
self.assertFalse(df.manager.use_linkat)
# Now that we try to use O_TMPFILE all the time, log at debug
# instead of warning
log = df.manager.logger.get_lines_for_level('warning')
self.assertFalse(log)
log = df.manager.logger.get_lines_for_level('debug')
self.assertGreater(len(log), 0)
self.assertTrue('O_TMPFILE' in log[-1])
@requires_o_tmpfile_support_in_tmp
def test_get_tmpfile_os_open_other_exceptions_are_raised(self):
df = self._simple_get_diskfile()
_m_open = mock.Mock(side_effect=OSError(errno.ENOSPC,
os.strerror(errno.ENOSPC)))
_m_mkstemp = mock.MagicMock()
_m_mkc = mock.Mock()
with mock.patch("swift.obj.diskfile.os.open", _m_open):
with mock.patch("swift.obj.diskfile.mkstemp", _m_mkstemp):
with mock.patch("swift.obj.diskfile.makedirs_count", _m_mkc):
try:
writer = df.writer()
fd, tmppath = writer._get_tempfile()
except OSError as err:
self.assertEqual(err.errno, errno.ENOSPC)
else:
self.fail("Expecting ENOSPC")
self.assertTrue(_m_mkc.called)
# mkstemp() should not be invoked.
self.assertFalse(_m_mkstemp.called)
@requires_o_tmpfile_support_in_tmp
def test_create_use_linkat_renamer_not_called(self):
df = self._simple_get_diskfile()
data = b'0' * 100
metadata = {
'ETag': md5(data).hexdigest(),
'X-Timestamp': Timestamp.now().internal,
'Content-Length': str(100),
}
_m_renamer = mock.Mock()
with mock.patch("swift.obj.diskfile.renamer", _m_renamer):
with df.create(size=100) as writer:
writer.write(data)
writer.put(metadata)
self.assertTrue(writer._put_succeeded)
self.assertFalse(_m_renamer.called)
@patch_policies(test_policies)
class TestDiskFile(DiskFileMixin, unittest.TestCase):
mgr_cls = diskfile.DiskFileManager
@patch_policies(with_ec_default=True)
class TestECDiskFile(DiskFileMixin, unittest.TestCase):
mgr_cls = diskfile.ECDiskFileManager
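    # For EC, commit() makes the data file durable by renaming it; rename and
    # fsync_dir failures should surface as DiskFileError (DiskFileNoSpace for
    # ENOSPC/EDQUOT).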
def _test_commit_raises_DiskFileError_for_rename_error(self, fake_err):
df = self._simple_get_diskfile(account='a', container='c',
obj='o_rename_err',
policy=POLICIES.default)
timestamp = Timestamp.now()
with df.create() as writer:
metadata = {
'ETag': 'bogus_etag',
'X-Timestamp': timestamp.internal,
'Content-Length': '0',
}
writer.put(metadata)
with mock.patch('swift.obj.diskfile.os.rename',
side_effect=fake_err):
with self.assertRaises(DiskFileError) as cm:
writer.commit(timestamp)
dl = os.listdir(df._datadir)
datafile = _make_datafilename(
timestamp, POLICIES.default, frag_index=2, durable=False)
self.assertEqual([datafile], dl)
return df, cm.exception
def test_commit_raises_DiskFileError_for_rename_ENOSPC_IOError(self):
df, exc = self._test_commit_raises_DiskFileError_for_rename_error(
IOError(errno.ENOSPC, 'ENOSPC'))
self.assertIsInstance(exc, DiskFileNoSpace)
self.assertIn('No space left on device', str(exc))
self.assertIn('No space left on device',
df.manager.logger.get_lines_for_level('error')[0])
self.assertFalse(df.manager.logger.get_lines_for_level('error')[1:])
def test_commit_raises_DiskFileError_for_rename_EDQUOT_IOError(self):
df, exc = self._test_commit_raises_DiskFileError_for_rename_error(
IOError(errno.EDQUOT, 'EDQUOT'))
self.assertIsInstance(exc, DiskFileNoSpace)
self.assertIn('No space left on device', str(exc))
self.assertIn('No space left on device',
df.manager.logger.get_lines_for_level('error')[0])
self.assertFalse(df.manager.logger.get_lines_for_level('error')[1:])
def test_commit_raises_DiskFileError_for_rename_other_IOError(self):
df, exc = self._test_commit_raises_DiskFileError_for_rename_error(
IOError(21, 'Some other IO Error'))
self.assertIn('Problem making data file durable', str(exc))
self.assertIn('Problem making data file durable',
df.manager.logger.get_lines_for_level('error')[0])
self.assertFalse(df.manager.logger.get_lines_for_level('error')[1:])
def test_commit_raises_DiskFileError_for_rename_OSError(self):
df, exc = self._test_commit_raises_DiskFileError_for_rename_error(
OSError(100, 'Some Error'))
self.assertIn('Problem making data file durable', str(exc))
self.assertIn('Problem making data file durable',
df.manager.logger.get_lines_for_level('error')[0])
self.assertFalse(df.manager.logger.get_lines_for_level('error')[1:])
def _test_commit_raises_DiskFileError_for_fsync_dir_errors(self, fake_err):
df = self._simple_get_diskfile(account='a', container='c',
obj='o_fsync_dir_err',
policy=POLICIES.default)
timestamp = Timestamp.now()
with df.create() as writer:
metadata = {
'ETag': 'bogus_etag',
'X-Timestamp': timestamp.internal,
'Content-Length': '0',
}
writer.put(metadata)
with mock.patch('swift.obj.diskfile.fsync_dir',
side_effect=fake_err):
with self.assertRaises(DiskFileError) as cm:
writer.commit(timestamp)
dl = os.listdir(df._datadir)
datafile = _make_datafilename(
timestamp, POLICIES.default, frag_index=2, durable=True)
self.assertEqual([datafile], dl)
self.assertIn('Problem making data file durable', str(cm.exception))
self.assertIn('Problem making data file durable',
df.manager.logger.get_lines_for_level('error')[0])
self.assertFalse(df.manager.logger.get_lines_for_level('error')[1:])
def test_commit_raises_DiskFileError_for_fsync_dir_IOError(self):
self._test_commit_raises_DiskFileError_for_fsync_dir_errors(
IOError(21, 'Some IO Error'))
def test_commit_raises_DiskFileError_for_fsync_dir_OSError(self):
self._test_commit_raises_DiskFileError_for_fsync_dir_errors(
OSError(100, 'Some Error'))
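    # EC .data file names embed a fragment index taken either from the
    # constructor argument or from X-Object-Sysmeta-Ec-Frag-Index metadata;
    # the metadata value takes precedence.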
def test_data_file_has_frag_index(self):
policy = POLICIES.default
for good_value in (0, '0', 2, '2', 13, '13'):
# frag_index set by constructor arg
ts = self.ts()
expected = [_make_datafilename(
ts, policy, good_value, durable=True)]
df = self._get_open_disk_file(ts=ts, policy=policy,
frag_index=good_value)
self.assertEqual(expected, sorted(os.listdir(df._datadir)))
# frag index should be added to object sysmeta
actual = df.get_metadata().get('X-Object-Sysmeta-Ec-Frag-Index')
self.assertEqual(int(good_value), int(actual))
# metadata value overrides the constructor arg
ts = self.ts()
expected = [_make_datafilename(
ts, policy, good_value, durable=True)]
meta = {'X-Object-Sysmeta-Ec-Frag-Index': good_value}
df = self._get_open_disk_file(ts=ts, policy=policy,
frag_index='3',
extra_metadata=meta)
self.assertEqual(expected, sorted(os.listdir(df._datadir)))
actual = df.get_metadata().get('X-Object-Sysmeta-Ec-Frag-Index')
self.assertEqual(int(good_value), int(actual))
# metadata value alone is sufficient
ts = self.ts()
expected = [_make_datafilename(
ts, policy, good_value, durable=True)]
meta = {'X-Object-Sysmeta-Ec-Frag-Index': good_value}
df = self._get_open_disk_file(ts=ts, policy=policy,
frag_index=None,
extra_metadata=meta)
self.assertEqual(expected, sorted(os.listdir(df._datadir)))
actual = df.get_metadata().get('X-Object-Sysmeta-Ec-Frag-Index')
self.assertEqual(int(good_value), int(actual))
def test_sysmeta_frag_index_is_immutable(self):
# the X-Object-Sysmeta-Ec-Frag-Index should *only* be set when
# the .data file is written.
policy = POLICIES.default
orig_frag_index = 13
# frag_index set by constructor arg
ts = self.ts()
expected = [_make_datafilename(
ts, policy, frag_index=orig_frag_index, durable=True)]
df = self._get_open_disk_file(ts=ts, policy=policy, obj_name='my_obj',
frag_index=orig_frag_index)
self.assertEqual(expected, sorted(os.listdir(df._datadir)))
# frag index should be added to object sysmeta
actual = df.get_metadata().get('X-Object-Sysmeta-Ec-Frag-Index')
self.assertEqual(int(orig_frag_index), int(actual))
# open the same diskfile with no frag_index passed to constructor
df = self.df_router[policy].get_diskfile(
self.existing_device, 0, 'a', 'c', 'my_obj', policy=policy,
frag_index=None)
df.open()
actual = df.get_metadata().get('X-Object-Sysmeta-Ec-Frag-Index')
self.assertEqual(int(orig_frag_index), int(actual))
# write metadata to a meta file
ts = self.ts()
metadata = {'X-Timestamp': ts.internal,
'X-Object-Meta-Fruit': 'kiwi'}
df.write_metadata(metadata)
# sanity check we did write a meta file
expected.append('%s.meta' % ts.internal)
actual_files = sorted(os.listdir(df._datadir))
self.assertEqual(expected, actual_files)
# open the same diskfile, check frag index is unchanged
df = self.df_router[policy].get_diskfile(
self.existing_device, 0, 'a', 'c', 'my_obj', policy=policy,
frag_index=None)
df.open()
# sanity check we have read the meta file
self.assertEqual(ts, df.get_metadata().get('X-Timestamp'))
self.assertEqual('kiwi', df.get_metadata().get('X-Object-Meta-Fruit'))
# check frag index sysmeta is unchanged
actual = df.get_metadata().get('X-Object-Sysmeta-Ec-Frag-Index')
self.assertEqual(int(orig_frag_index), int(actual))
# attempt to overwrite frag index sysmeta
ts = self.ts()
metadata = {'X-Timestamp': ts.internal,
'X-Object-Sysmeta-Ec-Frag-Index': 99,
'X-Object-Meta-Fruit': 'apple'}
df.write_metadata(metadata)
# open the same diskfile, check frag index is unchanged
df = self.df_router[policy].get_diskfile(
self.existing_device, 0, 'a', 'c', 'my_obj', policy=policy,
frag_index=None)
df.open()
# sanity check we have read the meta file
self.assertEqual(ts, df.get_metadata().get('X-Timestamp'))
self.assertEqual('apple', df.get_metadata().get('X-Object-Meta-Fruit'))
actual = df.get_metadata().get('X-Object-Sysmeta-Ec-Frag-Index')
self.assertEqual(int(orig_frag_index), int(actual))
def test_data_file_errors_bad_frag_index(self):
policy = POLICIES.default
df_mgr = self.df_router[policy]
for bad_value in ('foo', '-2', -2, '3.14', 3.14, '14', 14, '999'):
# check that bad frag_index set by constructor arg raises error
# as soon as diskfile is constructed, before data is written
self.assertRaises(DiskFileError, self._simple_get_diskfile,
policy=policy, frag_index=bad_value)
# bad frag_index set by metadata value
# (drive-by check that it is ok for constructor arg to be None)
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy, frag_index=None)
ts = self.ts()
meta = {'X-Object-Sysmeta-Ec-Frag-Index': bad_value,
'X-Timestamp': ts.internal,
'Content-Length': 0,
'Etag': EMPTY_ETAG,
'Content-Type': 'plain/text'}
with df.create() as writer:
try:
writer.put(meta)
self.fail('Expected DiskFileError for frag_index %s'
% bad_value)
except DiskFileError:
pass
# bad frag_index set by metadata value overrides ok constructor arg
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy, frag_index=2)
ts = self.ts()
meta = {'X-Object-Sysmeta-Ec-Frag-Index': bad_value,
'X-Timestamp': ts.internal,
'Content-Length': 0,
'Etag': EMPTY_ETAG,
'Content-Type': 'plain/text'}
with df.create() as writer:
try:
writer.put(meta)
self.fail('Expected DiskFileError for frag_index %s'
% bad_value)
except DiskFileError:
pass
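# Note on the bad values above: non-integer strings, floats and negative
# numbers are rejected outright, while values such as 14 and '999' are
# presumably rejected because they exceed the fragment count of the default
# test EC policy (an assumption based on the chosen values, not something
# the test asserts explicitly).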
def test_purge_one_fragment_index(self):
ts = self.ts()
for frag_index in (1, 2):
df = self._simple_get_diskfile(frag_index=frag_index)
write_diskfile(df, ts)
# sanity
self.assertEqual(sorted(os.listdir(df._datadir)), [
ts.internal + '#1#d.data',
ts.internal + '#2#d.data',
])
df.purge(ts, 2)
self.assertEqual(os.listdir(df._datadir), [
ts.internal + '#1#d.data',
])
def test_purge_last_fragment_index(self):
ts = self.ts()
frag_index = 0
df = self._simple_get_diskfile(frag_index=frag_index)
write_diskfile(df, ts)
# sanity
self.assertEqual(os.listdir(df._datadir), [
ts.internal + '#0#d.data',
])
df.purge(ts, frag_index)
self.assertFalse(os.path.exists(df._datadir))
def test_purge_last_fragment_index_legacy_durable(self):
# a legacy durable file doesn't get purged in case another fragment is
# relying on it for durability
ts = self.ts()
frag_index = 0
df = self._simple_get_diskfile(frag_index=frag_index)
write_diskfile(df, ts, legacy_durable=True)
# sanity
self.assertEqual(sorted(os.listdir(df._datadir)), [
ts.internal + '#0.data',
ts.internal + '.durable',
])
df.purge(ts, frag_index)
self.assertEqual(sorted(os.listdir(df._datadir)), [
ts.internal + '.durable',
])
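# Legacy durable layout, as exercised above: durability is recorded by a
# separate <timestamp>.durable marker alongside <timestamp>#<frag_index>.data
# files rather than by the '#d' tag embedded in modern data filenames, which
# is why purge() removes the fragment's .data but leaves the shared .durable
# in place - another fragment at the same timestamp may still rely on it.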
def test_purge_non_existent_fragment_index(self):
ts = self.ts()
frag_index = 7
df = self._simple_get_diskfile(frag_index=frag_index)
write_diskfile(df, ts)
# sanity
self.assertEqual(os.listdir(df._datadir), [
ts.internal + '#7#d.data',
])
df.purge(ts, 3)
# no effect
self.assertEqual(os.listdir(df._datadir), [
ts.internal + '#7#d.data',
])
def test_purge_old_timestamp_frag_index(self):
old_ts = self.ts()
ts = self.ts()
frag_index = 1
df = self._simple_get_diskfile(frag_index=frag_index)
write_diskfile(df, ts)
# sanity
self.assertEqual(os.listdir(df._datadir), [
ts.internal + '#1#d.data',
])
df.purge(old_ts, 1)
# no effect
self.assertEqual(os.listdir(df._datadir), [
ts.internal + '#1#d.data',
])
def test_purge_tombstone(self):
ts = self.ts()
df = self._simple_get_diskfile(frag_index=3)
df.delete(ts)
# sanity
self.assertEqual(sorted(os.listdir(df._datadir)), [
ts.internal + '.ts',
])
df.purge(ts, 3)
self.assertFalse(os.path.exists(df._datadir))
def test_purge_without_frag(self):
ts = self.ts()
df = self._simple_get_diskfile()
df.delete(ts)
# sanity
self.assertEqual(sorted(os.listdir(df._datadir)), [
ts.internal + '.ts',
])
df.purge(ts, None)
self.assertEqual(sorted(os.listdir(df._datadir)), [])
def test_purge_old_tombstone(self):
old_ts = self.ts()
ts = self.ts()
df = self._simple_get_diskfile(frag_index=5)
df.delete(ts)
# sanity
self.assertEqual(sorted(os.listdir(df._datadir)), [
ts.internal + '.ts',
])
df.purge(old_ts, 5)
# no effect
self.assertEqual(sorted(os.listdir(df._datadir)), [
ts.internal + '.ts',
])
def test_purge_already_removed(self):
df = self._simple_get_diskfile(frag_index=6)
df.purge(self.ts(), 6) # no errors
# sanity
os.makedirs(df._datadir)
self.assertEqual(sorted(os.listdir(df._datadir)), [])
df.purge(self.ts(), 6)
# the directory was empty and has been removed
self.assertFalse(os.path.exists(df._datadir))
def _do_test_open_most_recent_durable(self, legacy_durable):
policy = POLICIES.default
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
ts = self.ts()
write_diskfile(df, ts, frag_index=3,
legacy_durable=legacy_durable)
metadata = {
'ETag': md5('test data').hexdigest(),
'X-Timestamp': ts.internal,
'Content-Length': str(len('test data')),
'X-Object-Sysmeta-Ec-Frag-Index': '3',
}
# add some .meta stuff
extra_meta = {
'X-Object-Meta-Foo': 'Bar',
'X-Timestamp': self.ts().internal,
}
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
df.write_metadata(extra_meta)
# sanity
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
metadata.update(extra_meta)
self.assertEqual(metadata, df.read_metadata())
# add a newer datafile
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
ts = self.ts()
write_diskfile(df, ts, frag_index=3, commit=False,
legacy_durable=legacy_durable)
# N.B. don't make it durable
# and we still get the old metadata (same as if no .data!)
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
self.assertEqual(metadata, df.read_metadata())
def test_open_most_recent_durable(self):
self._do_test_open_most_recent_durable(False)
def test_open_most_recent_durable_legacy(self):
self._do_test_open_most_recent_durable(True)
def test_open_most_recent_missing_durable(self):
policy = POLICIES.default
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
self.assertRaises(DiskFileNotExist, df.read_metadata)
# now create a datafile missing durable
ts = self.ts()
write_diskfile(df, ts, frag_index=3, commit=False)
# add some .meta stuff
extra_meta = {
'X-Object-Meta-Foo': 'Bar',
'X-Timestamp': self.ts().internal,
}
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
df.write_metadata(extra_meta)
# we still get the DiskFileNotExist (same as if no .data!)
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy,
frag_index=3)
self.assertRaises(DiskFileNotExist, df.read_metadata)
# sanity, without the frag_index kwarg
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
self.assertRaises(DiskFileNotExist, df.read_metadata)
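# The fragments property exercised below maps each data file timestamp to
# the list of fragment indexes found on disk at that timestamp, e.g.
# (sketch taken from the assertions in the following tests):
#
#   df.fragments == {ts_1: [0, 2], ts_2: [3]}
#
# It is built from the directory listing, so it is available even when
# open() fails because no fragment is durable.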
def test_fragments(self):
ts_1 = self.ts()
self._get_open_disk_file(ts=ts_1.internal, frag_index=0)
df = self._get_open_disk_file(ts=ts_1.internal, frag_index=2)
self.assertEqual(df.fragments, {ts_1: [0, 2]})
# now add a newer datafile for frag index 3 but don't write a
# durable with it (so ignore the error when we try to open)
ts_2 = self.ts()
try:
df = self._get_open_disk_file(ts=ts_2.internal, frag_index=3,
commit=False)
except DiskFileNotExist:
pass
# sanity check: should have 3* .data
files = os.listdir(df._datadir)
self.assertEqual(3, len(files))
with df.open():
self.assertEqual(df.fragments, {ts_1: [0, 2], ts_2: [3]})
def test_fragments_available_when_not_durable(self):
# verify frags available even if open fails e.g. if none are durable
ts_1 = self.ts()
ts_2 = self.ts()
for ts, fi in ((ts_1, 0), (ts_1, 2), (ts_2, 3)):
try:
df = self._get_open_disk_file(
ts=ts, frag_index=fi, commit=False)
except DiskFileNotExist:
pass
df = self._simple_get_diskfile()
# sanity check: should have 3* .data
files = os.listdir(df._datadir)
self.assertEqual(3, len(files))
self.assertRaises(DiskFileNotExist, df.open)
self.assertEqual(df.fragments, {ts_1: [0, 2], ts_2: [3]})
def test_fragments_not_open(self):
df = self._simple_get_diskfile()
self.assertIsNone(df.fragments)
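# durable_timestamp, checked in the next few tests, is the timestamp of the
# newest data file that has been made durable; it stays None when open()
# fails because nothing durable (or no matching frag index) exists, and a
# newer non-durable data file does not advance it past the durable one.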
def test_durable_timestamp_when_not_durable(self):
try:
self._get_open_disk_file(self.ts().internal, commit=False)
except DiskFileNotExist:
pass
df = self._simple_get_diskfile()
with self.assertRaises(DiskFileNotExist):
df.open()
# open() was attempted, but no durable file so expect None
self.assertIsNone(df.durable_timestamp)
def test_durable_timestamp_missing_frag_index(self):
ts1 = self.ts()
self._get_open_disk_file(ts=ts1.internal, frag_index=1)
df = self._simple_get_diskfile(frag_index=2)
with self.assertRaises(DiskFileNotExist):
df.open()
# open() was attempted, but no data file for frag index so expect None
self.assertIsNone(df.durable_timestamp)
def test_durable_timestamp_newer_non_durable_data_file(self):
ts1 = self.ts()
self._get_open_disk_file(ts=ts1.internal)
ts2 = self.ts()
try:
self._get_open_disk_file(ts=ts2.internal, commit=False)
except DiskFileNotExist:
pass
df = self._simple_get_diskfile()
# sanity check - two .data files
self.assertEqual(2, len(os.listdir(df._datadir)))
df.open()
self.assertEqual(ts1, df.durable_timestamp)
def test_durable_timestamp_legacy_durable(self):
self._do_test_durable_timestamp(True)
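# frag_prefs, used heavily below, is an ordered list of preferences: one
# dict per acceptable timestamp naming the fragment indexes to avoid at
# that timestamp, e.g. (shape taken from the prefs built in the next test):
#
#   frag_prefs = [{'timestamp': ts_1.internal, 'exclude': [3]},
#                 {'timestamp': ts_2.internal, 'exclude': []}]
#
# An empty list ([]) means "any fragment, durable or not", which is why it
# lets a newer non-durable data file be opened.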
def _test_open_with_fragment_preferences(self, legacy_durable=False):
policy = POLICIES.default
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
ts_1, ts_2, ts_3, ts_4 = (self.ts() for _ in range(4))
# create two durable frags, first with index 0
frag_0_metadata = write_diskfile(df, ts_1, frag_index=0,
legacy_durable=legacy_durable)
# second with index 3
frag_3_metadata = write_diskfile(df, ts_1, frag_index=3,
legacy_durable=legacy_durable)
# sanity check: should have 2 * .data plus possibly a .durable
self.assertEqual(3 if legacy_durable else 2,
len(os.listdir(df._datadir)))
# add some .meta stuff
meta_1_metadata = {
'X-Object-Meta-Foo': 'Bar',
'X-Timestamp': ts_2.internal,
}
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
df.write_metadata(meta_1_metadata)
# sanity check: should have 2 * .data, possibly .durable, .meta
self.assertEqual(4 if legacy_durable else 3,
len(os.listdir(df._datadir)))
# sanity: should get frag index 3
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
expected = dict(frag_3_metadata)
expected.update(meta_1_metadata)
self.assertEqual(expected, df.read_metadata())
# add a newer datafile for frag index 2
# N.B. don't make it durable - skip call to commit()
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
frag_2_metadata = write_diskfile(df, ts_3, frag_index=2, commit=False,
data=b'new test data',
legacy_durable=legacy_durable)
# sanity check: should have 2* .data, possibly .durable, .meta, .data
self.assertEqual(5 if legacy_durable else 4,
len(os.listdir(df._datadir)))
# sanity check: with no frag preferences we get old metadata
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy)
self.assertEqual(expected, df.read_metadata())
self.assertEqual(ts_2.internal, df.timestamp)
self.assertEqual(ts_1.internal, df.data_timestamp)
self.assertEqual(ts_1.internal, df.durable_timestamp)
self.assertEqual({ts_1: [0, 3], ts_3: [2]}, df.fragments)
# with empty frag preferences we get metadata from newer non-durable
# data file
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy, frag_prefs=[])
self.assertEqual(frag_2_metadata, df.read_metadata())
self.assertEqual(ts_3.internal, df.timestamp)
self.assertEqual(ts_3.internal, df.data_timestamp)
self.assertEqual(ts_1.internal, df.durable_timestamp)
self.assertEqual({ts_1: [0, 3], ts_3: [2]}, df.fragments)
# check we didn't destroy any potentially valid data by opening the
# non-durable data file
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy)
self.assertEqual(expected, df.read_metadata())
# now add some newer .meta stuff which should replace older .meta
meta_2_metadata = {
'X-Object-Meta-Foo': 'BarBarBarAnne',
'X-Timestamp': ts_4.internal,
}
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
df.write_metadata(meta_2_metadata)
# sanity check: should have 2 * .data, possibly .durable, .data, .meta
self.assertEqual(5 if legacy_durable else 4,
len(os.listdir(df._datadir)))
# sanity check: with no frag preferences we get newer metadata applied
# to durable data file
expected = dict(frag_3_metadata)
expected.update(meta_2_metadata)
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy)
self.assertEqual(expected, df.read_metadata())
self.assertEqual(ts_4.internal, df.timestamp)
self.assertEqual(ts_1.internal, df.data_timestamp)
self.assertEqual(ts_1.internal, df.durable_timestamp)
self.assertEqual({ts_1: [0, 3], ts_3: [2]}, df.fragments)
# with empty frag preferences we still get metadata from newer .meta
# but applied to non-durable data file
expected = dict(frag_2_metadata)
expected.update(meta_2_metadata)
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy, frag_prefs=[])
self.assertEqual(expected, df.read_metadata())
self.assertEqual(ts_4.internal, df.timestamp)
self.assertEqual(ts_3.internal, df.data_timestamp)
self.assertEqual(ts_1.internal, df.durable_timestamp)
self.assertEqual({ts_1: [0, 3], ts_3: [2]}, df.fragments)
# check we didn't destroy any potentially valid data by opening the
# non-durable data file
expected = dict(frag_3_metadata)
expected.update(meta_2_metadata)
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy)
self.assertEqual(expected, df.read_metadata())
self.assertEqual(ts_4.internal, df.timestamp)
self.assertEqual(ts_1.internal, df.data_timestamp)
self.assertEqual(ts_1.internal, df.durable_timestamp)
self.assertEqual({ts_1: [0, 3], ts_3: [2]}, df.fragments)
# prefer frags at ts_1, exclude no indexes, expect highest frag index
prefs = [{'timestamp': ts_1.internal, 'exclude': []},
{'timestamp': ts_2.internal, 'exclude': []},
{'timestamp': ts_3.internal, 'exclude': []}]
expected = dict(frag_3_metadata)
expected.update(meta_2_metadata)
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy, frag_prefs=prefs)
self.assertEqual(expected, df.read_metadata())
self.assertEqual(ts_4.internal, df.timestamp)
self.assertEqual(ts_1.internal, df.data_timestamp)
self.assertEqual(ts_1.internal, df.durable_timestamp)
self.assertEqual({ts_1: [0, 3], ts_3: [2]}, df.fragments)
# prefer frags at ts_1, exclude frag index 3 so expect frag index 0
prefs = [{'timestamp': ts_1.internal, 'exclude': [3]},
{'timestamp': ts_2.internal, 'exclude': []},
{'timestamp': ts_3.internal, 'exclude': []}]
expected = dict(frag_0_metadata)
expected.update(meta_2_metadata)
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy, frag_prefs=prefs)
self.assertEqual(expected, df.read_metadata())
self.assertEqual(ts_4.internal, df.timestamp)
self.assertEqual(ts_1.internal, df.data_timestamp)
self.assertEqual(ts_1.internal, df.durable_timestamp)
self.assertEqual({ts_1: [0, 3], ts_3: [2]}, df.fragments)
# now make ts_3 the preferred timestamp, excluded indexes don't exist
prefs = [{'timestamp': ts_3.internal, 'exclude': [4, 5, 6]},
{'timestamp': ts_2.internal, 'exclude': []},
{'timestamp': ts_1.internal, 'exclude': []}]
expected = dict(frag_2_metadata)
expected.update(meta_2_metadata)
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy, frag_prefs=prefs)
self.assertEqual(expected, df.read_metadata())
self.assertEqual(ts_4.internal, df.timestamp)
self.assertEqual(ts_3.internal, df.data_timestamp)
self.assertEqual(ts_1.internal, df.durable_timestamp)
self.assertEqual({ts_1: [0, 3], ts_3: [2]}, df.fragments)
# now make ts_2 the preferred timestamp - there are no frags at ts_2,
# next preference is ts_3 but index 2 is excluded, then at ts_1 index 3
# is excluded so we get frag 0 at ts_1
prefs = [{'timestamp': ts_2.internal, 'exclude': [1]},
{'timestamp': ts_3.internal, 'exclude': [2]},
{'timestamp': ts_1.internal, 'exclude': [3]}]
expected = dict(frag_0_metadata)
expected.update(meta_2_metadata)
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy, frag_prefs=prefs)
self.assertEqual(expected, df.read_metadata())
self.assertEqual(ts_4.internal, df.timestamp)
self.assertEqual(ts_1.internal, df.data_timestamp)
self.assertEqual(ts_1.internal, df.durable_timestamp)
self.assertEqual({ts_1: [0, 3], ts_3: [2]}, df.fragments)
def test_open_with_fragment_preferences_legacy_durable(self):
self._test_open_with_fragment_preferences(legacy_durable=True)
def test_open_with_fragment_preferences(self):
self._test_open_with_fragment_preferences(legacy_durable=False)
def test_open_with_bad_fragment_preferences(self):
policy = POLICIES.default
df_mgr = self.df_router[policy]
for bad in (
'ouch',
2,
[{'timestamp': '1234.5678', 'excludes': [1]}, {}],
[{'timestamp': 'not a timestamp', 'excludes': [1, 2]}],
[{'timestamp': '1234.5678', 'excludes': [1, -1]}],
[{'timestamp': '1234.5678', 'excludes': 1}],
[{'timestamp': '1234.5678'}],
[{'excludes': [1, 2]}]
):
try:
df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy, frag_prefs=bad)
self.fail('Expected DiskFileError for bad frag_prefs: %r'
% bad)
except DiskFileError as e:
self.assertIn('frag_prefs', str(e))
def test_disk_file_app_iter_ranges_checks_only_aligned_frag_data(self):
policy = POLICIES.default
frag_size = policy.fragment_size
# make sure there are two fragment size worth of data on disk
data = b'ab' * policy.ec_segment_size
df, df_data = self._create_test_file(data)
quarantine_msgs = []
reader = df.reader(_quarantine_hook=quarantine_msgs.append)
# each range uses a fresh reader app_iter_range which triggers a disk
# read at the range offset - make sure each of those disk reads will
# fetch an amount of data from disk that is greater than but not equal
# to a fragment size
reader._disk_chunk_size = int(frag_size * 1.5)
with mock.patch.object(
reader._diskfile.policy.pyeclib_driver, 'get_metadata')\
as mock_get_metadata:
it = reader.app_iter_ranges(
[(0, 10), (10, 20),
(frag_size + 20, frag_size + 30)],
'plain/text', '\r\n--someheader\r\n', len(df_data))
value = b''.join(it)
# check that only first range which starts at 0 triggers a frag check
self.assertEqual(1, mock_get_metadata.call_count)
self.assertIn(df_data[:10], value)
self.assertIn(df_data[10:20], value)
self.assertIn(df_data[frag_size + 20:frag_size + 30], value)
self.assertEqual(quarantine_msgs, [])
def test_reader_quarantines_corrupted_ec_archive(self):
# This has the same purpose as
# TestAuditor.test_object_audit_checks_EC_fragments, just making
# sure that the checks happen in the DiskFileReader layer.
policy = POLICIES.default
df, df_data = self._create_test_file(b'x' * policy.ec_segment_size,
timestamp=self.ts())
def do_test(corrupted_frag_body, expected_offset, expected_read):
# expected_offset is offset at which corruption should be reported
# expected_read is number of bytes that should be read before the
# exception is raised
ts = self.ts()
write_diskfile(df, ts, corrupted_frag_body)
# no error is raised when the diskfile is opened; reading the first
# corrupt frag is sufficient to detect the corruption
df.open()
with self.assertRaises(DiskFileQuarantined) as cm:
reader = df.reader()
reader._disk_chunk_size = int(policy.fragment_size)
bytes_read = 0
for chunk in reader:
bytes_read += len(chunk)
with self.assertRaises(DiskFileNotExist):
df.open()
self.assertEqual(expected_read, bytes_read)
self.assertEqual('Invalid EC metadata at offset 0x%x' %
expected_offset, cm.exception.args[0])
# TODO with liberasurecode < 1.2.0 the EC metadata verification checks
# only the magic number at offset 59 bytes into the frag so we'll
# corrupt up to and including that. Once liberasurecode >= 1.2.0 is
# required we should be able to reduce the corruption length.
corruption_length = 64
# corrupted first frag can be detected
corrupted_frag_body = (b' ' * corruption_length +
df_data[corruption_length:])
do_test(corrupted_frag_body, 0, 0)
# a corrupted second frag can also be detected
corrupted_frag_body = (df_data + b' ' * corruption_length +
df_data[corruption_length:])
do_test(corrupted_frag_body, len(df_data), len(df_data))
# if the second frag is shorter than frag size then corruption is
# detected when the reader is closed
corrupted_frag_body = (df_data + b' ' * corruption_length +
df_data[corruption_length:-10])
do_test(corrupted_frag_body, len(df_data), len(corrupted_frag_body))
def test_reader_ec_exception_causes_quarantine(self):
policy = POLICIES.default
def do_test(exception):
df, df_data = self._create_test_file(b'x' * policy.ec_segment_size,
timestamp=self.ts())
df.manager.logger.clear()
with mock.patch.object(df.policy.pyeclib_driver, 'get_metadata',
side_effect=exception):
df.open()
with self.assertRaises(DiskFileQuarantined) as cm:
for chunk in df.reader():
pass
with self.assertRaises(DiskFileNotExist):
df.open()
self.assertEqual('Invalid EC metadata at offset 0x0',
cm.exception.args[0])
log_lines = df.manager.logger.get_lines_for_level('warning')
self.assertIn('Quarantined object', log_lines[0])
self.assertIn('Invalid EC metadata at offset 0x0', log_lines[0])
do_test(pyeclib.ec_iface.ECInvalidFragmentMetadata('testing'))
do_test(pyeclib.ec_iface.ECBadFragmentChecksum('testing'))
do_test(pyeclib.ec_iface.ECInvalidParameter('testing'))
def test_reader_ec_exception_does_not_cause_quarantine(self):
# ECDriverError should not cause quarantine, only certain subclasses
policy = POLICIES.default
df, df_data = self._create_test_file(b'x' * policy.ec_segment_size,
timestamp=self.ts())
with mock.patch.object(
df.policy.pyeclib_driver, 'get_metadata',
side_effect=pyeclib.ec_iface.ECDriverError('testing')):
df.open()
read_data = b''.join([d for d in df.reader()])
self.assertEqual(df_data, read_data)
log_lines = df.manager.logger.get_lines_for_level('warning')
self.assertIn('Problem checking EC fragment', log_lines[0])
df.open() # not quarantined
def test_reader_frag_check_does_not_quarantine_if_its_not_binary(self):
# This may look weird, but for extra safety check that
# ECDiskFileReader._frag_check doesn't quarantine when a non-binary
# type chunk comes in (that would occur only from a coding bug)
policy = POLICIES.default
df, df_data = self._create_test_file(b'x' * policy.ec_segment_size,
timestamp=self.ts())
df.open()
for invalid_type_chunk in (None, [], [[]], 1):
reader = df.reader()
reader._check_frag(invalid_type_chunk)
# None and [] are just skipped and [[]] and 1 are detected as invalid
# chunks
log_lines = df.manager.logger.get_lines_for_level('warning')
self.assertEqual(2, len(log_lines))
for log_line in log_lines:
self.assertIn(
'Unexpected fragment data type (not quarantined)', log_line)
df.open() # not quarantined
def test_ondisk_data_info_has_durable_key(self):
# non-durable; use frag_prefs=[] to allow it to be opened
df = self._simple_get_diskfile(obj='o1', frag_prefs=[])
self._create_ondisk_file(df, b'', ext='.data', timestamp=10,
metadata={'name': '/a/c/o1'}, commit=False)
with df.open():
self.assertIn('durable', df._ondisk_info['data_info'])
self.assertFalse(df._ondisk_info['data_info']['durable'])
# durable
df = self._simple_get_diskfile(obj='o2')
self._create_ondisk_file(df, b'', ext='.data', timestamp=10,
metadata={'name': '/a/c/o2'})
with df.open():
self.assertIn('durable', df._ondisk_info['data_info'])
self.assertTrue(df._ondisk_info['data_info']['durable'])
# legacy durable
df = self._simple_get_diskfile(obj='o3')
self._create_ondisk_file(df, b'', ext='.data', timestamp=10,
metadata={'name': '/a/c/o3'},
legacy_durable=True)
with df.open():
data_info = df._ondisk_info['data_info']
# sanity check it is legacy with no #d part in filename
self.assertEqual(data_info['filename'], '0000000010.00000#2.data')
self.assertIn('durable', data_info)
self.assertTrue(data_info['durable'])
@patch_policies(with_ec_default=True)
class TestSuffixHashes(unittest.TestCase):
"""
This tests all things related to hashing suffixes, and therefore
there are also a few test methods for cleanup_ondisk_files
(because it's used by hash_suffix).
The public interface to suffix hashing is on the Manager::
* cleanup_ondisk_files(hsh_path)
* get_hashes(device, partition, suffixes, policy)
* invalidate_hash(suffix_dir)
The Manager.get_hashes method (used by the REPLICATE verb)
calls Manager._get_hashes (which may be an alias to the module
method get_hashes), which calls hash_suffix, which calls
cleanup_ondisk_files.
Outside of that, cleanup_ondisk_files and invalidate_hash are
used mostly after writing new files via PUT or DELETE.
Test methods are organized by::
* cleanup_ondisk_files tests - behaviors
* cleanup_ondisk_files tests - error handling
* invalidate_hash tests - behavior
* invalidate_hash tests - error handling
* get_hashes tests - hash_suffix behaviors
* get_hashes tests - hash_suffix error handling
* get_hashes tests - behaviors
* get_hashes tests - error handling
"""
def setUp(self):
skip_if_no_xattrs()
self.testdir = tempfile.mkdtemp()
self.logger = debug_logger('suffix-hash-test')
self.devices = os.path.join(self.testdir, 'node')
os.mkdir(self.devices)
self.existing_device = 'sda1'
os.mkdir(os.path.join(self.devices, self.existing_device))
self.conf = {
'swift_dir': self.testdir,
'devices': self.devices,
'mount_check': False,
}
self.df_router = diskfile.DiskFileRouter(self.conf, self.logger)
self._ts_iter = (Timestamp(t) for t in
itertools.count(int(time())))
self.policy = None
def ts(self):
"""
Timestamps - forever.
"""
return next(self._ts_iter)
def fname_to_ts_hash(self, fname):
"""
EC datafiles are only hashed by their timestamp
"""
return md5(fname.split('#', 1)[0]).hexdigest()
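# A quick illustration of the helper above, using a hypothetical durable
# EC data filename:
#
#   fname_to_ts_hash('0000000010.00000#3#d.data')
#   == md5('0000000010.00000').hexdigest()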
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
def iter_policies(self):
for policy in POLICIES:
self.policy = policy
yield policy
@contextmanager
def policy_in_message(self):
try:
yield
except AssertionError as err:
if not self.policy:
raise
policy_trailer = '\n\n... for policy %r' % self.policy
raise AssertionError(str(err) + policy_trailer)
def assertEqual(self, *args):
with self.policy_in_message():
unittest.TestCase.assertEqual(self, *args)
def get_different_suffix_df(self, df, **kwargs):
# returns a diskfile in the same partition with a different suffix
suffix_dir = os.path.dirname(df._datadir)
for i in itertools.count():
df2 = df._manager.get_diskfile(
os.path.basename(df._device_path),
df._datadir.split('/')[-3],
df._account,
df._container,
'o%d' % i,
policy=df.policy,
**kwargs)
suffix_dir2 = os.path.dirname(df2._datadir)
if suffix_dir != suffix_dir2:
return df2
def test_valid_suffix(self):
self.assertTrue(diskfile.valid_suffix(u'000'))
self.assertTrue(diskfile.valid_suffix('000'))
self.assertTrue(diskfile.valid_suffix('123'))
self.assertTrue(diskfile.valid_suffix('fff'))
self.assertFalse(diskfile.valid_suffix(list('123')))
self.assertFalse(diskfile.valid_suffix(123))
self.assertFalse(diskfile.valid_suffix(' 12'))
self.assertFalse(diskfile.valid_suffix('-00'))
self.assertFalse(diskfile.valid_suffix(u'-00'))
self.assertFalse(diskfile.valid_suffix('1234'))
def check_cleanup_ondisk_files(self, policy, input_files, output_files):
orig_unlink = os.unlink
file_list = list(input_files)
rmdirs = []
def mock_listdir(path):
return list(file_list)
def mock_unlink(path):
# timestamp 1 is a special tag to pretend a file disappeared
# between the listdir and unlink.
if '/0000000001.00000.' in path:
# Using actual os.unlink for a non-existent name to reproduce
# exactly what OSError it raises in order to prove that
# common.utils.remove_file is squelching the error - but any
# OSError would do.
orig_unlink(uuid.uuid4().hex)
file_list.remove(os.path.basename(path))
df_mgr = self.df_router[policy]
with unit_mock({'os.listdir': mock_listdir, 'os.unlink': mock_unlink,
'os.rmdir': rmdirs.append}):
if isinstance(output_files, Exception):
path = os.path.join(self.testdir, 'does-not-matter')
self.assertRaises(output_files.__class__,
df_mgr.cleanup_ondisk_files, path)
return
files = df_mgr.cleanup_ondisk_files('/whatever')['files']
self.assertEqual(files, output_files)
if files:
self.assertEqual(rmdirs, [])
else:
self.assertEqual(rmdirs, ['/whatever'])
# cleanup_ondisk_files tests - behaviors
def test_cleanup_ondisk_files_purge_data_newer_ts(self):
for policy in self.iter_policies():
# purge .data if there's a newer .ts
file1 = _make_datafilename(self.ts(), policy)
file2 = self.ts().internal + '.ts'
file_list = [file1, file2]
self.check_cleanup_ondisk_files(policy, file_list, [file2])
def test_cleanup_ondisk_files_purge_expired_ts(self):
for policy in self.iter_policies():
# purge older .ts files if there's a newer .data
file1 = self.ts().internal + '.ts'
file2 = self.ts().internal + '.ts'
timestamp = self.ts()
file3 = _make_datafilename(timestamp, policy, durable=False)
file_list = [file1, file2, file3]
expected = {
# no durable datafile means you can't get rid of the
# latest tombstone even if datafile is newer
EC_POLICY: [file3, file2],
REPL_POLICY: [file3],
}[policy.policy_type]
self.check_cleanup_ondisk_files(policy, file_list, expected)
def _do_test_cleanup_ondisk_files_purge_ts_newer_data(
self, policy, legacy_durable=False):
# purge .ts if there's a newer .data
file1 = self.ts().internal + '.ts'
timestamp = self.ts()
file2 = _make_datafilename(
timestamp, policy, durable=not legacy_durable)
file_list = [file1, file2]
expected = [file2]
if policy.policy_type == EC_POLICY and legacy_durable:
durable_file = timestamp.internal + '.durable'
file_list.append(durable_file)
expected.insert(0, durable_file)
self.check_cleanup_ondisk_files(policy, file_list, expected)
def test_cleanup_ondisk_files_purge_ts_newer_data(self):
for policy in self.iter_policies():
self._do_test_cleanup_ondisk_files_purge_ts_newer_data(policy)
def test_cleanup_ondisk_files_purge_ts_newer_data_and_legacy_durable(self):
for policy in self.iter_policies():
if policy.policy_type == EC_POLICY:
self._do_test_cleanup_ondisk_files_purge_ts_newer_data(
policy, legacy_durable=True)
def test_cleanup_ondisk_files_purge_older_ts(self):
for policy in self.iter_policies():
file1 = self.ts().internal + '.ts'
file2 = self.ts().internal + '.ts'
file3 = _make_datafilename(self.ts(), policy, durable=False)
file4 = self.ts().internal + '.meta'
expected = {
# no durable means we can only throw out things before
# the latest tombstone
EC_POLICY: [file4, file3, file2],
# keep .meta and .data and purge all .ts files
REPL_POLICY: [file4, file3],
}[policy.policy_type]
file_list = [file1, file2, file3, file4]
self.check_cleanup_ondisk_files(policy, file_list, expected)
def _do_test_cleanup_ondisk_files_keep_meta_data_purge_ts(
self, policy, legacy_durable=False):
file1 = self.ts().internal + '.ts'
file2 = self.ts().internal + '.ts'
timestamp = self.ts()
file3 = _make_datafilename(
timestamp, policy, durable=not legacy_durable)
file_list = [file1, file2, file3]
expected = [file3]
if policy.policy_type == EC_POLICY and legacy_durable:
durable_filename = timestamp.internal + '.durable'
file_list.append(durable_filename)
expected.insert(0, durable_filename)
file4 = self.ts().internal + '.meta'
file_list.append(file4)
expected.insert(0, file4)
# keep .meta and .data if meta newer than data and purge .ts
self.check_cleanup_ondisk_files(policy, file_list, expected)
def test_cleanup_ondisk_files_keep_meta_data_purge_ts(self):
for policy in self.iter_policies():
self._do_test_cleanup_ondisk_files_keep_meta_data_purge_ts(policy)
def test_cleanup_ondisk_files_keep_meta_data_purge_ts_legacy_durable(self):
for policy in self.iter_policies():
if policy.policy_type == EC_POLICY:
self._do_test_cleanup_ondisk_files_keep_meta_data_purge_ts(
policy, legacy_durable=True)
def test_cleanup_ondisk_files_keep_one_ts(self):
for policy in self.iter_policies():
file1, file2, file3 = [self.ts().internal + '.ts'
for i in range(3)]
file_list = [file1, file2, file3]
# keep only latest of multiple .ts files
self.check_cleanup_ondisk_files(policy, file_list, [file3])
def test_cleanup_ondisk_files_multi_data_file(self):
for policy in self.iter_policies():
file1 = _make_datafilename(self.ts(), policy, 1, durable=False)
file2 = _make_datafilename(self.ts(), policy, 2, durable=False)
file3 = _make_datafilename(self.ts(), policy, 3, durable=False)
expected = {
# keep all non-durable datafiles
EC_POLICY: [file3, file2, file1],
# keep only latest of multiple .data files
REPL_POLICY: [file3]
}[policy.policy_type]
file_list = [file1, file2, file3]
self.check_cleanup_ondisk_files(policy, file_list, expected)
def _do_test_cleanup_ondisk_files_keeps_one_datafile(self, policy,
legacy_durable=False):
timestamps = [self.ts() for i in range(3)]
file1 = _make_datafilename(timestamps[0], policy, 1,
durable=not legacy_durable)
file2 = _make_datafilename(timestamps[1], policy, 2,
durable=not legacy_durable)
file3 = _make_datafilename(timestamps[2], policy, 3,
durable=not legacy_durable)
file_list = [file1, file2, file3]
expected = [file3]
if policy.policy_type == EC_POLICY and legacy_durable:
for t in timestamps:
file_list.append(t.internal + '.durable')
expected.insert(0, file_list[-1])
self.check_cleanup_ondisk_files(policy, file_list, expected)
def test_cleanup_ondisk_files_keeps_one_datafile(self):
for policy in self.iter_policies():
self._do_test_cleanup_ondisk_files_keeps_one_datafile(policy)
def test_cleanup_ondisk_files_keeps_one_datafile_and_legacy_durable(self):
for policy in self.iter_policies():
if policy.policy_type == EC_POLICY:
self._do_test_cleanup_ondisk_files_keeps_one_datafile(
policy, legacy_durable=True)
def _do_test_cleanup_ondisk_files_keep_one_meta(self, policy,
legacy_durable=False):
# keep only latest of multiple .meta files
t_data = self.ts()
file1 = _make_datafilename(t_data, policy, durable=not legacy_durable)
file2, file3 = [self.ts().internal + '.meta' for i in range(2)]
file_list = [file1, file2, file3]
expected = [file3, file1]
if policy.policy_type == EC_POLICY and legacy_durable:
durable_file = t_data.internal + '.durable'
file_list.append(durable_file)
expected.insert(1, durable_file)
self.check_cleanup_ondisk_files(policy, file_list, expected)
def test_cleanup_ondisk_files_keep_one_meta(self):
for policy in self.iter_policies():
self._do_test_cleanup_ondisk_files_keep_one_meta(policy)
def test_cleanup_ondisk_files_keep_one_meta_legacy_durable(self):
for policy in self.iter_policies():
if policy.policy_type == EC_POLICY:
self._do_test_cleanup_ondisk_files_keep_one_meta(
policy, legacy_durable=True)
def test_cleanup_ondisk_files_only_meta(self):
for policy in self.iter_policies():
file1, file2 = [self.ts().internal + '.meta' for i in range(2)]
file_list = [file1, file2]
self.check_cleanup_ondisk_files(policy, file_list, [file2])
def test_cleanup_ondisk_files_ignore_orphaned_ts(self):
for policy in self.iter_policies():
# An orphaned .meta file that is more recent than the .ts files must not
# prevent the older .ts file from being cleaned up
file1, file2 = [self.ts().internal + '.ts' for i in range(2)]
file3 = self.ts().internal + '.meta'
file_list = [file1, file2, file3]
self.check_cleanup_ondisk_files(policy, file_list, [file3, file2])
def test_cleanup_ondisk_files_purge_old_data_only(self):
for policy in self.iter_policies():
# the oldest .data will be purged; .meta and .ts won't be touched
file1 = _make_datafilename(self.ts(), policy)
file2 = self.ts().internal + '.ts'
file3 = self.ts().internal + '.meta'
file_list = [file1, file2, file3]
self.check_cleanup_ondisk_files(policy, file_list, [file3, file2])
def test_cleanup_ondisk_files_purge_old_ts(self):
for policy in self.iter_policies():
# A single old .ts file will be removed
old_float = time() - (diskfile.DEFAULT_RECLAIM_AGE + 1)
file1 = Timestamp(old_float).internal + '.ts'
file_list = [file1]
self.check_cleanup_ondisk_files(policy, file_list, [])
def test_cleanup_ondisk_files_keep_isolated_meta_purge_old_ts(self):
for policy in self.iter_policies():
# A single old .ts file will be removed despite the presence of a .meta
old_float = time() - (diskfile.DEFAULT_RECLAIM_AGE + 1)
file1 = Timestamp(old_float).internal + '.ts'
file2 = Timestamp(time() + 2).internal + '.meta'
file_list = [file1, file2]
self.check_cleanup_ondisk_files(policy, file_list, [file2])
def test_cleanup_ondisk_files_keep_single_old_data(self):
for policy in self.iter_policies():
old_float = time() - (diskfile.DEFAULT_RECLAIM_AGE + 1)
file1 = _make_datafilename(
Timestamp(old_float), policy, durable=True)
file_list = [file1]
self.check_cleanup_ondisk_files(policy, file_list, file_list)
def test_cleanup_ondisk_drops_old_non_durable_data(self):
for policy in self.iter_policies():
if policy.policy_type == EC_POLICY:
old_float = time() - (diskfile.DEFAULT_RECLAIM_AGE + 1)
file1 = _make_datafilename(
Timestamp(old_float), policy, durable=False)
file_list = [file1]
# for EC an isolated old non-durable .data file is removed
expected = []
self.check_cleanup_ondisk_files(policy, file_list, expected)
def test_cleanup_ondisk_files_drops_isolated_durable(self):
# check behaviour for legacy durable files
for policy in self.iter_policies():
if policy.policy_type == EC_POLICY:
file1 = Timestamp.now().internal + '.durable'
file_list = [file1]
self.check_cleanup_ondisk_files(policy, file_list, [])
def test_cleanup_ondisk_files_purges_single_old_meta(self):
for policy in self.iter_policies():
# A single old .meta file will be removed
old_float = time() - (diskfile.DEFAULT_RECLAIM_AGE + 1)
file1 = Timestamp(old_float).internal + '.meta'
file_list = [file1]
self.check_cleanup_ondisk_files(policy, file_list, [])
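# To summarise the reclaim behaviour covered above: files older than
# diskfile.DEFAULT_RECLAIM_AGE are removed when nothing newer depends on
# them - an old isolated .ts, .meta or (for EC) non-durable .data is
# reclaimed, while an old durable .data is kept.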
# cleanup_ondisk_files tests - error handling
def test_cleanup_ondisk_files_hsh_path_enoent(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
# common.utils.listdir *completely* mutes ENOENT
path = os.path.join(self.testdir, 'does-not-exist')
self.assertEqual(df_mgr.cleanup_ondisk_files(path)['files'], [])
def test_cleanup_ondisk_files_hsh_path_other_oserror(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
with mock.patch('os.listdir') as mock_listdir:
mock_listdir.side_effect = OSError('kaboom!')
# but it will raise other OSErrors
path = os.path.join(self.testdir, 'does-not-matter')
self.assertRaises(OSError, df_mgr.cleanup_ondisk_files,
path)
def test_cleanup_ondisk_files_reclaim_tombstone_remove_file_error(self):
for policy in self.iter_policies():
# Timestamp 1 makes the check routine pretend the file
# disappeared between the listdir and the unlink.
file1 = '0000000001.00000.ts'
file_list = [file1]
self.check_cleanup_ondisk_files(policy, file_list, [])
def test_cleanup_ondisk_files_older_remove_file_error(self):
for policy in self.iter_policies():
# Timestamp 1 makes the check routine pretend the file
# disappeared between the listdir and the unlink.
file1 = _make_datafilename(Timestamp(1), policy)
file2 = '0000000002.00000.ts'
file_list = [file1, file2]
self.check_cleanup_ondisk_files(policy, file_list, [])
# invalidate_hash tests - behavior
def test_invalidate_hash_file_does_not_exist(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy)
suffix_dir = os.path.dirname(df._datadir)
suffix = os.path.basename(suffix_dir)
part_path = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(policy), '0')
hashes_file = os.path.join(part_path, diskfile.HASH_FILE)
inv_file = os.path.join(
part_path, diskfile.HASH_INVALIDATIONS_FILE)
# sanity, new partition has no suffix hashing artifacts
self.assertFalse(os.path.exists(hashes_file))
self.assertFalse(os.path.exists(inv_file))
# invalidating a hash does not create the hashes_file
with mock.patch(
'swift.obj.diskfile.BaseDiskFileManager.invalidate_hash',
side_effect=diskfile.invalidate_hash) \
as mock_invalidate_hash:
df.delete(self.ts())
self.assertFalse(os.path.exists(hashes_file))
# ... but does invalidate the suffix
self.assertEqual([mock.call(suffix_dir)],
mock_invalidate_hash.call_args_list)
with open(inv_file) as f:
self.assertEqual(suffix, f.read().strip('\n'))
# ... and hashing suffixes finds (and hashes) the new suffix
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertIn(suffix, hashes)
self.assertTrue(os.path.exists(hashes_file))
self.assertIn(os.path.basename(suffix_dir), hashes)
with open(hashes_file, 'rb') as f:
found_hashes = pickle.load(f)
found_hashes.pop('updated')
self.assertTrue(found_hashes.pop('valid'))
self.assertEqual(hashes, found_hashes)
# ... and truncates the invalidations file
with open(inv_file) as f:
self.assertEqual('', f.read().strip('\n'))
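# The flow verified above, in short: df.delete() appends the suffix to
# hashes.invalid via invalidate_hash(); hashes.pkl is only written by
# get_hashes(), which consolidates and then truncates the invalidations
# file once the suffix has been hashed.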
def test_invalidate_hash_empty_file_exists(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
part_path = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(policy), '0')
mkdirs(part_path)
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
pkl_path = os.path.join(part_path, diskfile.HASH_FILE)
self.assertTrue(os.path.exists(pkl_path))
self.assertEqual(hashes, {})
# create something to hash
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy)
df.delete(self.ts())
suffix_dir = os.path.dirname(df._datadir)
suffix = os.path.basename(suffix_dir)
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertIn(suffix, hashes) # sanity
def test_invalidate_hash_file_not_truncated_when_empty(self):
orig_open = open
def watch_open(*args, **kargs):
name = os.path.basename(args[0])
open_log[name].append(args[1])
return orig_open(*args, **kargs)
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
part_path = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(policy), '0')
mkdirs(part_path)
inv_file = os.path.join(
part_path, diskfile.HASH_INVALIDATIONS_FILE)
hash_file = os.path.join(
part_path, diskfile.HASH_FILE)
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertEqual(hashes, {})
self.assertTrue(os.path.exists(hash_file))
# create something to hash
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy)
df.delete(self.ts())
self.assertTrue(os.path.exists(inv_file))
# invalidation file created, let's consolidate it
df_mgr.get_hashes('sda1', '0', [], policy)
open_log = defaultdict(list)
open_loc = '__builtin__.open' if six.PY2 else 'builtins.open'
with mock.patch(open_loc, watch_open):
self.assertTrue(os.path.exists(inv_file))
# no new suffixes get invalidated... so no write iop
df_mgr.get_hashes('sda1', '0', [], policy)
# each file is opened once to read
expected = {
'hashes.pkl': ['rb'],
'hashes.invalid': ['r'],
}
self.assertEqual(open_log, expected)
def _test_invalidate_hash_racing_get_hashes_diff_suffix(self, existing):
# a suffix can be changed or created by a second process while the new
# pkl is being calculated - verify that the suffix is correct after the
# next get_hashes call
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
part_path = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(policy), '0')
if existing:
mkdirs(part_path)
# force hashes.pkl to exist
df_mgr.get_hashes('sda1', '0', [], policy)
self.assertTrue(os.path.exists(os.path.join(
part_path, diskfile.HASH_FILE)))
orig_listdir = os.listdir
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy)
suffix = os.path.basename(os.path.dirname(df._datadir))
df2 = self.get_different_suffix_df(df)
suffix2 = os.path.basename(os.path.dirname(df2._datadir))
non_local = {'df2touched': False}
df.delete(self.ts())
def mock_listdir(*args, **kwargs):
# simulating an invalidation occurring in another process while
# get_hashes is executing
result = orig_listdir(*args, **kwargs)
if not non_local['df2touched']:
non_local['df2touched'] = True
# other process creates new suffix
df2.delete(self.ts())
return result
if not existing:
self.assertFalse(os.path.exists(os.path.join(
part_path, diskfile.HASH_FILE)))
with mock.patch('swift.obj.diskfile.os.listdir',
mock_listdir):
# creates pkl file if not already there
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertTrue(os.path.exists(os.path.join(
part_path, diskfile.HASH_FILE)))
# the second suffix was created after the directory listing, so it only
# appears after a later get_hashes call
self.assertIn(suffix, hashes)
self.assertNotIn(suffix2, hashes)
# updates pkl file
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertIn(suffix, hashes)
self.assertIn(suffix2, hashes)
def test_invalidate_hash_racing_get_hashes_diff_suffix_new_part(self):
self._test_invalidate_hash_racing_get_hashes_diff_suffix(False)
def test_invalidate_hash_racing_get_hashes_diff_suffix_existing_part(self):
self._test_invalidate_hash_racing_get_hashes_diff_suffix(True)
def _check_hash_invalidations_race_get_hashes_same_suffix(self, existing):
# verify that when two processes concurrently call get_hashes, then any
# concurrent hash invalidation will survive and be consolidated on a
# subsequent call to get_hashes (i.e. ensure the first get_hashes process
# does not ignore the concurrent hash invalidation that the second
# get_hashes might have consolidated to hashes.pkl)
non_local = {}
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
orig_hash_suffix = df_mgr._hash_suffix
if existing:
# create hashes.pkl
part_path = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(policy), '0')
mkdirs(part_path)
df_mgr.get_hashes('sda1', '0', [], policy)
self.assertTrue(os.path.exists(os.path.join(
part_path, diskfile.HASH_FILE)))
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy)
suffix_dir = os.path.dirname(df._datadir)
suffix = os.path.basename(suffix_dir)
part_dir = os.path.dirname(suffix_dir)
invalidations_file = os.path.join(
part_dir, diskfile.HASH_INVALIDATIONS_FILE)
non_local['hash'] = None
non_local['called'] = False
# delete will append suffix to hashes.invalid
df.delete(self.ts())
with open(invalidations_file) as f:
self.assertEqual(suffix, f.read().strip('\n')) # sanity
hash1 = df_mgr._hash_suffix(suffix_dir)
def mock_hash_suffix(*args, **kwargs):
# after first get_hashes has called _hash_suffix, simulate a
# second process invalidating the same suffix, followed by a
# third process calling get_hashes and failing (or yielding)
# after consolidate_hashes has completed
result = orig_hash_suffix(*args, **kwargs)
if not non_local['called']:
non_local['called'] = True
# appends suffix to hashes.invalid
df.delete(self.ts())
# simulate another process calling get_hashes but failing
# after hash invalidations have been consolidated
hashes = df_mgr.consolidate_hashes(part_dir)
if existing:
self.assertTrue(hashes['valid'])
else:
self.assertFalse(hashes['valid'])
# get the updated suffix hash...
non_local['hash'] = orig_hash_suffix(suffix_dir)
return result
with mock.patch.object(df_mgr, '_hash_suffix', mock_hash_suffix):
# repeats listing when pkl modified
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
# first get_hashes should complete with suffix1 state
self.assertIn(suffix, hashes)
# sanity check - the suffix hash has changed...
self.assertNotEqual(hash1, non_local['hash'])
# the invalidation file has been truncated...
with open(invalidations_file, 'r') as f:
self.assertEqual('', f.read())
# so hashes should have the latest suffix hash...
self.assertEqual(hashes[suffix], non_local['hash'])
non_local['called'] = False
with mock.patch.object(df_mgr, '_hash_suffix', mock_hash_suffix):
df_mgr.get_hashes('sda1', '0', [suffix], policy,
skip_rehash=True)
self.assertFalse(non_local['called'])
with open(invalidations_file) as f:
self.assertEqual(suffix, f.read().strip('\n')) # sanity
def test_hash_invalidations_race_get_hashes_same_suffix_new(self):
self._check_hash_invalidations_race_get_hashes_same_suffix(False)
def test_hash_invalidations_race_get_hashes_same_suffix_existing(self):
self._check_hash_invalidations_race_get_hashes_same_suffix(True)
def _check_unpickle_error_and_get_hashes_failure(self, existing):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy)
suffix = os.path.basename(os.path.dirname(df._datadir))
# avoid getting O_TMPFILE warning in logs
if not utils.o_tmpfile_in_tmpdir_supported():
df.manager.use_linkat = False
if existing:
df.delete(self.ts())
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
df.delete(self.ts())
part_path = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(policy), '0')
hashes_file = os.path.join(part_path, diskfile.HASH_FILE)
# write a corrupt hashes.pkl
open(hashes_file, 'w')
# simulate first call to get_hashes failing after attempting to
# consolidate hashes
with mock.patch('swift.obj.diskfile.os.listdir',
side_effect=Exception()):
self.assertRaises(
Exception, df_mgr.get_hashes, 'sda1', '0', [], policy)
# sanity on-disk state is invalid
with open(hashes_file, 'rb') as f:
found_hashes = pickle.load(f)
found_hashes.pop('updated')
self.assertEqual(False, found_hashes.pop('valid'))
# verify subsequent call to get_hashes reaches correct outcome
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertIn(suffix, hashes)
self.assertEqual([], df_mgr.logger.get_lines_for_level('warning'))
def test_unpickle_error_and_get_hashes_failure_new_part(self):
self._check_unpickle_error_and_get_hashes_failure(False)
def test_unpickle_error_and_get_hashes_failure_existing_part(self):
self._check_unpickle_error_and_get_hashes_failure(True)
def test_invalidate_hash_consolidation(self):
def assert_consolidation(suffixes):
# verify that suffixes are invalidated after consolidation
with mock.patch('swift.obj.diskfile.lock_path') as mock_lock:
hashes = df_mgr.consolidate_hashes(part_path)
self.assertTrue(mock_lock.called)
for suffix in suffixes:
self.assertIn(suffix, hashes)
self.assertIsNone(hashes[suffix])
with open(hashes_file, 'rb') as f:
found_hashes = pickle.load(f)
self.assertTrue(hashes['valid'])
self.assertEqual(hashes, found_hashes)
with open(invalidations_file, 'r') as f:
self.assertEqual("", f.read())
return hashes
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
# create something to hash
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy)
df.delete(self.ts())
suffix_dir = os.path.dirname(df._datadir)
suffix = os.path.basename(suffix_dir)
original_hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertIn(suffix, original_hashes) # sanity
self.assertIsNotNone(original_hashes[suffix])
# sanity check hashes file
part_path = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(policy), '0')
hashes_file = os.path.join(part_path, diskfile.HASH_FILE)
invalidations_file = os.path.join(
part_path, diskfile.HASH_INVALIDATIONS_FILE)
with open(hashes_file, 'rb') as f:
found_hashes = pickle.load(f)
found_hashes.pop('updated')
self.assertTrue(found_hashes.pop('valid'))
self.assertEqual(original_hashes, found_hashes)
# invalidate the hash
with mock.patch('swift.obj.diskfile.lock_path') as mock_lock:
df_mgr.invalidate_hash(suffix_dir)
self.assertTrue(mock_lock.called)
# suffix should be in invalidations file
with open(invalidations_file, 'r') as f:
self.assertEqual(suffix + "\n", f.read())
# hashes file is unchanged
with open(hashes_file, 'rb') as f:
found_hashes = pickle.load(f)
found_hashes.pop('updated')
self.assertTrue(found_hashes.pop('valid'))
self.assertEqual(original_hashes, found_hashes)
# consolidate the hash and the invalidations
hashes = assert_consolidation([suffix])
# invalidate a different suffix hash in same partition but not in
# existing hashes.pkl
df2 = self.get_different_suffix_df(df)
df2.delete(self.ts())
suffix_dir2 = os.path.dirname(df2._datadir)
suffix2 = os.path.basename(suffix_dir2)
# suffix2 should be in invalidations file
with open(invalidations_file, 'r') as f:
self.assertEqual(suffix2 + "\n", f.read())
# hashes file is not yet changed
with open(hashes_file, 'rb') as f:
found_hashes = pickle.load(f)
self.assertTrue(hashes['valid'])
self.assertEqual(hashes, found_hashes)
# consolidate hashes
hashes = assert_consolidation([suffix, suffix2])
# invalidating suffix2 multiple times is ok
df2.delete(self.ts())
df2.delete(self.ts())
# suffix2 should be in invalidations file
with open(invalidations_file, 'r') as f:
invalids = f.read().splitlines()
self.assertEqual(sorted((suffix2, suffix2)),
sorted(invalids)) # sanity
# hashes file is not yet changed
with open(hashes_file, 'rb') as f:
found_hashes = pickle.load(f)
self.assertTrue(hashes['valid'])
self.assertEqual(hashes, found_hashes)
# consolidate hashes
assert_consolidation([suffix, suffix2])
def test_get_hashes_consolidates_suffix_rehash_once(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy)
df.delete(self.ts())
suffix_dir = os.path.dirname(df._datadir)
with mock.patch.object(df_mgr, 'consolidate_hashes',
side_effect=df_mgr.consolidate_hashes
) as mock_consolidate_hashes, \
mock.patch.object(df_mgr, '_hash_suffix',
side_effect=df_mgr._hash_suffix
) as mock_hash_suffix:
# creates pkl file
df_mgr.get_hashes('sda1', '0', [], policy)
mock_consolidate_hashes.assert_called_once()
self.assertEqual([mock.call(suffix_dir, policy=policy)],
mock_hash_suffix.call_args_list)
# second object, in a different suffix of the same partition
df2 = self.get_different_suffix_df(df)
df2.delete(self.ts())
suffix_dir2 = os.path.dirname(df2._datadir)
mock_consolidate_hashes.reset_mock()
mock_hash_suffix.reset_mock()
# updates pkl file
df_mgr.get_hashes('sda1', '0', [], policy)
mock_consolidate_hashes.assert_called_once()
self.assertEqual([mock.call(suffix_dir2, policy=policy)],
mock_hash_suffix.call_args_list)
def test_consolidate_hashes_raises_exception(self):
# verify that if consolidate_hashes raises an exception then suffixes
# are rehashed and a hashes.pkl is written
for policy in self.iter_policies():
part_path = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(policy), '0')
hashes_file = os.path.join(part_path, diskfile.HASH_FILE)
invalidations_file = os.path.join(
part_path, diskfile.HASH_INVALIDATIONS_FILE)
self.logger.clear()
df_mgr = self.df_router[policy]
# create something to hash
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy)
# avoid getting O_TMPFILE warning in logs
if not utils.o_tmpfile_in_tmpdir_supported():
df.manager.use_linkat = False
self.assertFalse(os.path.exists(part_path))
df.delete(self.ts())
self.assertTrue(os.path.exists(invalidations_file))
suffix_dir = os.path.dirname(df._datadir)
suffix = os.path.basename(suffix_dir)
# no pre-existing hashes.pkl
self.assertFalse(os.path.exists(hashes_file))
with mock.patch.object(df_mgr, '_hash_suffix',
return_value='fake hash'):
with mock.patch.object(df_mgr, 'consolidate_hashes',
side_effect=Exception()):
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertEqual({suffix: 'fake hash'}, hashes)
# sanity check hashes file
with open(hashes_file, 'rb') as f:
found_hashes = pickle.load(f)
found_hashes.pop('updated')
self.assertTrue(found_hashes.pop('valid'))
self.assertEqual(hashes, found_hashes)
# sanity check log warning
warnings = self.logger.get_lines_for_level('warning')
self.assertEqual(warnings, ["Unable to read %r" % hashes_file])
# repeat with pre-existing hashes.pkl
self.logger.clear()
with mock.patch.object(df_mgr, '_hash_suffix',
return_value='new fake hash'):
with mock.patch.object(df_mgr, 'consolidate_hashes',
side_effect=Exception()):
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertEqual({suffix: 'new fake hash'}, hashes)
# sanity check hashes file
with open(hashes_file, 'rb') as f:
found_hashes = pickle.load(f)
found_hashes.pop('updated')
self.assertTrue(found_hashes.pop('valid'))
self.assertEqual(hashes, found_hashes)
# sanity check log warning
warnings = self.logger.get_lines_for_level('warning')
self.assertEqual(warnings, ["Unable to read %r" % hashes_file])
# invalidate_hash tests - error handling
def test_invalidate_hash_bad_pickle(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
# make some valid data
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy)
suffix_dir = os.path.dirname(df._datadir)
suffix = os.path.basename(suffix_dir)
df.delete(self.ts())
# sanity check hashes file
part_path = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(policy), '0')
hashes_file = os.path.join(part_path, diskfile.HASH_FILE)
self.assertFalse(os.path.exists(hashes_file))
# write some garbage in hashes file
with open(hashes_file, 'w') as f:
f.write('asdf')
# invalidate_hash silently does *NOT* repair invalid data
df_mgr.invalidate_hash(suffix_dir)
with open(hashes_file) as f:
self.assertEqual(f.read(), 'asdf')
# ... but get_hashes will
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertIn(suffix, hashes)
# get_hashes tests - hash_suffix behaviors
def test_hash_suffix_one_tombstone(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(
'sda1', '0', 'a', 'c', 'o', policy=policy)
suffix = os.path.basename(os.path.dirname(df._datadir))
# write a tombstone
timestamp = self.ts()
df.delete(timestamp)
tombstone_hash = md5(timestamp.internal + '.ts').hexdigest()
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
expected = {
REPL_POLICY: {suffix: tombstone_hash},
EC_POLICY: {suffix: {
# fi is None here because we have a tombstone
None: tombstone_hash}},
}[policy.policy_type]
self.assertEqual(hashes, expected)
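# Shape of the get_hashes result, as asserted above: replication policies
# map suffix -> hash of the suffix contents, while EC policies map
# suffix -> {frag_index: hash}, with a None key covering files (such as
# tombstones) that carry no fragment index.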
def test_hash_suffix_one_tombstone_and_one_meta(self):
# A tombstone plus a newer meta file can happen if a tombstone is
# replicated to a node with a newer meta file but older data file. The
# meta file will be ignored when the diskfile is opened so the
# effective state of the disk files is equivalent to only having the
# tombstone. Replication cannot remove the meta file, and the meta file
# cannot be ssync replicated to a node with only the tombstone, so
# we want the get_hashes result to be the same as if the meta file was
# not there.
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(
'sda1', '0', 'a', 'c', 'o', policy=policy)
suffix = os.path.basename(os.path.dirname(df._datadir))
# write a tombstone
timestamp = self.ts()
df.delete(timestamp)
# write a meta file
df.write_metadata({'X-Timestamp': self.ts().internal})
# sanity check
self.assertEqual(2, len(os.listdir(df._datadir)))
tombstone_hash = md5(timestamp.internal + '.ts').hexdigest()
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
expected = {
REPL_POLICY: {suffix: tombstone_hash},
EC_POLICY: {suffix: {
# fi is None here because we have a tombstone
None: tombstone_hash}},
}[policy.policy_type]
self.assertEqual(hashes, expected)
def test_hash_suffix_one_reclaim_tombstone_and_one_meta(self):
# An isolated meta file can happen if a tombstone is replicated to a
# node with a newer meta file but older data file, and the tombstone is
# subsequently reclaimed. The meta file will be ignored when the
# diskfile is opened so the effective state of the disk files is
# equivalent to having no files.
for policy in self.iter_policies():
if policy.policy_type == EC_POLICY:
continue
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(
'sda1', '0', 'a', 'c', 'o', policy=policy)
suffix = os.path.basename(os.path.dirname(df._datadir))
now = time()
# write a tombstone that's just a *little* older than reclaim time
df.delete(Timestamp(now - 1001))
# write a meta file that's not quite so old
ts_meta = Timestamp(now - 501)
df.write_metadata({'X-Timestamp': ts_meta.internal})
# sanity check
self.assertEqual(2, len(os.listdir(df._datadir)))
# scale back the df manager's reclaim age a bit to make the
# tombstone reclaimable
df_mgr.reclaim_age = 1000
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
# the tombstone is reclaimed, the meta file remains, the suffix
# hash is not updated BUT the suffix dir cannot be deleted so
# a suffix hash equal to hash of empty string is reported.
# TODO: this is not the same result as if the meta file did not exist!
self.assertEqual([ts_meta.internal + '.meta'],
os.listdir(df._datadir))
self.assertEqual(hashes, {suffix: MD5_OF_EMPTY_STRING})
# scale back the df manager's reclaim age even more - call to
# get_hashes does not trigger reclaim because the suffix has
# MD5_OF_EMPTY_STRING in hashes.pkl
df_mgr.reclaim_age = 500
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertEqual([ts_meta.internal + '.meta'],
os.listdir(df._datadir))
self.assertEqual(hashes, {suffix: MD5_OF_EMPTY_STRING})
# call get_hashes with recalculate = [suffix] and the suffix dir
# gets re-hashed so the .meta is finally reclaimed.
hashes = df_mgr.get_hashes('sda1', '0', [suffix], policy)
self.assertFalse(os.path.exists(os.path.dirname(df._datadir)))
self.assertEqual(hashes, {})
def test_hash_suffix_one_reclaim_tombstone(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(
'sda1', '0', 'a', 'c', 'o', policy=policy)
# scale back this test's manager's reclaim age a bit
df_mgr.reclaim_age = 1000
# write a tombstone that's just a *little* older
old_time = time() - 1001
timestamp = Timestamp(old_time)
df.delete(timestamp.internal)
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertEqual(hashes, {})
def test_hash_suffix_ts_cleanup_after_recalc(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(
'sda1', '0', 'a', 'c', 'o', policy=policy)
suffix_dir = os.path.dirname(df._datadir)
suffix = os.path.basename(suffix_dir)
# scale back reclaim age a bit
df_mgr.reclaim_age = 1000
# write a valid tombstone
old_time = time() - 500
timestamp = Timestamp(old_time)
df.delete(timestamp.internal)
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertIn(suffix, hashes)
self.assertIsNotNone(hashes[suffix])
# we have tombstone entry
tombstone = '%s.ts' % timestamp.internal
self.assertTrue(os.path.exists(df._datadir))
self.assertIn(tombstone, os.listdir(df._datadir))
# lower reclaim age to force tombstone reclaiming
df_mgr.reclaim_age = 200
# not cleaning up because suffix not invalidated
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertTrue(os.path.exists(df._datadir))
self.assertIn(tombstone, os.listdir(df._datadir))
self.assertIn(suffix, hashes)
self.assertIsNotNone(hashes[suffix])
# recalculating the suffix hash causes cleanup
hashes = df_mgr.get_hashes('sda1', '0', [suffix], policy)
self.assertEqual(hashes, {})
self.assertFalse(os.path.exists(df._datadir))
def test_hash_suffix_ts_cleanup_after_invalidate_hash(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(
'sda1', '0', 'a', 'c', 'o', policy=policy)
suffix_dir = os.path.dirname(df._datadir)
suffix = os.path.basename(suffix_dir)
# scale back reclaim age a bit
df_mgr.reclaim_age = 1000
# write a valid tombstone
old_time = time() - 500
timestamp = Timestamp(old_time)
df.delete(timestamp.internal)
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertIn(suffix, hashes)
self.assertIsNotNone(hashes[suffix])
# we have tombstone entry
tombstone = '%s.ts' % timestamp.internal
self.assertTrue(os.path.exists(df._datadir))
self.assertIn(tombstone, os.listdir(df._datadir))
# lower reclaim age to force tombstone reclaiming
df_mgr.reclaim_age = 200
# not cleaning up because suffix not invalidated
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertTrue(os.path.exists(df._datadir))
self.assertIn(tombstone, os.listdir(df._datadir))
self.assertIn(suffix, hashes)
self.assertIsNotNone(hashes[suffix])
# However if we call invalidate_hash for the suffix dir,
# get_hashes can reclaim the tombstone
with mock.patch('swift.obj.diskfile.lock_path'):
df_mgr.invalidate_hash(suffix_dir)
# updating invalidated hashes causes cleanup
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertEqual(hashes, {})
self.assertFalse(os.path.exists(df._datadir))
def test_hash_suffix_one_reclaim_and_one_valid_tombstone(self):
paths, suffix = find_paths_with_matching_suffixes(2, 1)
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
a, c, o = paths[suffix][0]
df1 = df_mgr.get_diskfile(
'sda1', '0', a, c, o, policy=policy)
# scale back this test's manager's reclaim age a bit
df_mgr.reclaim_age = 1000
# write one tombstone that's just a *little* older
df1.delete(Timestamp(time() - 1001))
# create another tombstone in same suffix dir that's newer
a, c, o = paths[suffix][1]
df2 = df_mgr.get_diskfile(
'sda1', '0', a, c, o, policy=policy)
t_df2 = Timestamp(time() - 900)
df2.delete(t_df2)
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
suffix = os.path.basename(os.path.dirname(df1._datadir))
df2_tombstone_hash = md5(t_df2.internal + '.ts').hexdigest()
expected = {
REPL_POLICY: {suffix: df2_tombstone_hash},
EC_POLICY: {suffix: {
# fi is None here because we have a tombstone
None: df2_tombstone_hash}},
}[policy.policy_type]
self.assertEqual(hashes, expected)
def test_hash_suffix_one_datafile(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(
'sda1', '0', 'a', 'c', 'o', policy=policy, frag_index=7)
suffix = os.path.basename(os.path.dirname(df._datadir))
# write a datafile
timestamp = self.ts()
with df.create() as writer:
test_data = b'test file'
writer.write(test_data)
metadata = {
'X-Timestamp': timestamp.internal,
'ETag': md5(test_data).hexdigest(),
'Content-Length': len(test_data),
}
writer.put(metadata)
# note - no commit so data is non-durable
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
datafile_hash = md5({
EC_POLICY: timestamp.internal,
REPL_POLICY: timestamp.internal + '.data',
}[policy.policy_type]).hexdigest()
expected = {
REPL_POLICY: {suffix: datafile_hash},
EC_POLICY: {suffix: {
# because there's no durable state, we have no hash for
# the None key - only the frag index for the data file
7: datafile_hash}},
}[policy.policy_type]
msg = 'expected %r != %r for policy %r' % (
expected, hashes, policy)
self.assertEqual(hashes, expected, msg)
def test_hash_suffix_multi_file_ends_in_tombstone(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o', policy=policy,
frag_index=4)
suffix = os.path.basename(os.path.dirname(df._datadir))
mkdirs(df._datadir)
now = time()
# go behind the scenes and set up a bunch of weird file names
for tdiff in [500, 100, 10, 1]:
for suff in ['.meta', '.data', '.ts']:
timestamp = Timestamp(now - tdiff)
filename = timestamp.internal
if policy.policy_type == EC_POLICY and suff == '.data':
filename += '#%s' % df._frag_index
filename += suff
open(os.path.join(df._datadir, filename), 'w').close()
tombstone_hash = md5(filename).hexdigest()
# call get_hashes and it should clean things up
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
expected = {
REPL_POLICY: {suffix: tombstone_hash},
EC_POLICY: {suffix: {
# fi is None here because we have a tombstone
None: tombstone_hash}},
}[policy.policy_type]
self.assertEqual(hashes, expected)
# only the tombstone should be left
found_files = os.listdir(df._datadir)
self.assertEqual(found_files, [filename])
def _do_hash_suffix_multi_file_ends_in_datafile(self, policy,
legacy_durable):
# if legacy_durable is True then synthesize legacy durable files
# instead of having a durable marker in the data file name
frag_index = 4
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o', policy=policy,
frag_index=frag_index)
suffix = os.path.basename(os.path.dirname(df._datadir))
mkdirs(df._datadir)
now = time()
timestamp = None
# go behind the scenes and set up a bunch of weird file names
for tdiff in [500, 100, 10, 1]:
suffs = ['.meta', '.data']
if tdiff > 50:
suffs.append('.ts')
if policy.policy_type == EC_POLICY and legacy_durable:
suffs.append('.durable')
for suff in suffs:
timestamp = Timestamp(now - tdiff)
if suff == '.data':
filename = _make_datafilename(
timestamp, policy, frag_index,
durable=not legacy_durable)
else:
filename = timestamp.internal + suff
open(os.path.join(df._datadir, filename), 'w').close()
meta_timestamp = Timestamp(now)
metadata_filename = meta_timestamp.internal + '.meta'
open(os.path.join(df._datadir, metadata_filename), 'w').close()
# call get_hashes and it should clean up all but the most recent files
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
# calculate expected outcome
data_filename = _make_datafilename(
timestamp, policy, frag_index, durable=not legacy_durable)
expected_files = [data_filename, metadata_filename]
if policy.policy_type == EC_POLICY:
# note: expected hashes are the same with or without a legacy durable file
hasher = md5()
hasher.update(metadata_filename)
hasher.update(timestamp.internal + '.durable')
expected = {
suffix: {
# metadata & durable updates are hashed separately
None: hasher.hexdigest(),
4: self.fname_to_ts_hash(data_filename),
}
}
if legacy_durable:
expected_files.append(timestamp.internal + '.durable')
elif policy.policy_type == REPL_POLICY:
hasher = md5()
hasher.update(metadata_filename)
hasher.update(data_filename)
expected = {suffix: hasher.hexdigest()}
else:
self.fail('unknown policy type %r' % policy.policy_type)
self.assertEqual(hashes, expected)
# only the meta and data should be left
self.assertEqual(sorted(os.listdir(df._datadir)),
sorted(expected_files))
def test_hash_suffix_multifile_ends_in_datafile(self):
for policy in self.iter_policies():
self._do_hash_suffix_multi_file_ends_in_datafile(
policy, legacy_durable=False)
def test_hash_suffix_multifile_ends_in_datafile_legacy_durable(self):
for policy in self.iter_policies():
if policy.policy_type == EC_POLICY:
self._do_hash_suffix_multi_file_ends_in_datafile(
policy, legacy_durable=True)
def _verify_get_hashes(self, filenames, ts_data, ts_meta, ts_ctype,
policy):
"""
Helper method to create a set of ondisk files and verify suffix_hashes.
:param filenames: list of filenames to create in an object hash dir
:param ts_data: newest data timestamp, used for expected result
:param ts_meta: newest meta timestamp, used for expected result
:param ts_ctype: newest content-type timestamp, used for expected
result
:param policy: storage policy to use for test
"""
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy, frag_index=4)
suffix = os.path.basename(os.path.dirname(df._datadir))
partition_dir = os.path.dirname(os.path.dirname(df._datadir))
rmtree(partition_dir, ignore_errors=True) # clean dir for each test
mkdirs(df._datadir)
# calculate expected result
hasher = md5()
if policy.policy_type == EC_POLICY:
hasher.update(ts_meta.internal + '.meta')
hasher.update(ts_data.internal + '.durable')
if ts_ctype:
hasher.update(ts_ctype.internal + '_ctype')
expected = {
suffix: {
None: hasher.hexdigest(),
4: md5(ts_data.internal).hexdigest(),
}
}
elif policy.policy_type == REPL_POLICY:
hasher.update(ts_meta.internal + '.meta')
hasher.update(ts_data.internal + '.data')
if ts_ctype:
hasher.update(ts_ctype.internal + '_ctype')
expected = {suffix: hasher.hexdigest()}
else:
self.fail('unknown policy type %r' % policy.policy_type)
for fname in filenames:
open(os.path.join(df._datadir, fname), 'w').close()
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
msg = 'expected %r != %r for policy %r' % (
expected, hashes, policy)
self.assertEqual(hashes, expected, msg)
def test_hash_suffix_with_older_content_type_in_meta(self):
# single meta file having older content-type
def do_test(legacy_durable):
for policy in self.iter_policies():
ts_data, ts_ctype, ts_meta = (
self.ts(), self.ts(), self.ts())
filenames = [_make_datafilename(ts_data, policy, frag_index=4,
durable=not legacy_durable),
_make_metafilename(ts_meta, ts_ctype)]
if policy.policy_type == EC_POLICY and legacy_durable:
filenames.append(ts_data.internal + '.durable')
self._verify_get_hashes(
filenames, ts_data, ts_meta, ts_ctype, policy)
do_test(False)
do_test(True)
def test_hash_suffix_with_same_age_content_type_in_meta(self):
# single meta file having same age content-type
def do_test(legacy_durable):
for policy in self.iter_policies():
ts_data, ts_meta = (self.ts(), self.ts())
filenames = [_make_datafilename(ts_data, policy, frag_index=4,
durable=not legacy_durable),
_make_metafilename(ts_meta, ts_meta)]
if policy.policy_type == EC_POLICY and legacy_durable:
filenames.append(ts_data.internal + '.durable')
self._verify_get_hashes(
filenames, ts_data, ts_meta, ts_meta, policy)
do_test(False)
do_test(True)
def test_hash_suffix_with_obsolete_content_type_in_meta(self):
# After rsync replication we could have a single meta file having
# content-type older than a replicated data file
def do_test(legacy_durable):
for policy in self.iter_policies():
ts_ctype, ts_data, ts_meta = (self.ts(), self.ts(), self.ts())
filenames = [_make_datafilename(ts_data, policy, frag_index=4,
durable=not legacy_durable),
_make_metafilename(ts_meta, ts_ctype)]
if policy.policy_type == EC_POLICY and legacy_durable:
filenames.append(ts_data.internal + '.durable')
self._verify_get_hashes(
filenames, ts_data, ts_meta, None, policy)
do_test(False)
do_test(True)
def test_hash_suffix_with_older_content_type_in_newer_meta(self):
# After rsync replication we could have two meta files: newest
# content-type is in newer meta file, older than newer meta file
def do_test(legacy_durable):
for policy in self.iter_policies():
ts_data, ts_older_meta, ts_ctype, ts_newer_meta = (
self.ts() for _ in range(4))
filenames = [_make_datafilename(ts_data, policy, frag_index=4,
durable=not legacy_durable),
_make_metafilename(ts_older_meta),
_make_metafilename(ts_newer_meta, ts_ctype)]
if policy.policy_type == EC_POLICY and legacy_durable:
filenames.append(ts_data.internal + '.durable')
self._verify_get_hashes(
filenames, ts_data, ts_newer_meta, ts_ctype, policy)
do_test(False)
do_test(True)
def test_hash_suffix_with_same_age_content_type_in_newer_meta(self):
# After rsync replication we could have two meta files: newest
# content-type is in newer meta file, at same age as newer meta file
def do_test(legacy_durable):
for policy in self.iter_policies():
ts_data, ts_older_meta, ts_newer_meta = (
self.ts() for _ in range(3))
filenames = [_make_datafilename(ts_data, policy, frag_index=4,
durable=not legacy_durable),
_make_metafilename(ts_newer_meta, ts_newer_meta)]
if policy.policy_type == EC_POLICY and legacy_durable:
filenames.append(ts_data.internal + '.durable')
self._verify_get_hashes(
filenames, ts_data, ts_newer_meta, ts_newer_meta, policy)
do_test(False)
do_test(True)
def test_hash_suffix_with_older_content_type_in_older_meta(self):
# After rsync replication we could have two meta files: newest
# content-type is in older meta file, older than older meta file
def do_test(legacy_durable):
for policy in self.iter_policies():
ts_data, ts_ctype, ts_older_meta, ts_newer_meta = (
self.ts() for _ in range(4))
filenames = [_make_datafilename(ts_data, policy, frag_index=4,
durable=not legacy_durable),
_make_metafilename(ts_newer_meta),
_make_metafilename(ts_older_meta, ts_ctype)]
if policy.policy_type == EC_POLICY and legacy_durable:
filenames.append(ts_data.internal + '.durable')
self._verify_get_hashes(
filenames, ts_data, ts_newer_meta, ts_ctype, policy)
do_test(False)
do_test(True)
def test_hash_suffix_with_same_age_content_type_in_older_meta(self):
# After rsync replication we could have two meta files: newest
# content-type is in older meta file, at same age as older meta file
def do_test(legacy_durable):
for policy in self.iter_policies():
ts_data, ts_older_meta, ts_newer_meta = (
self.ts() for _ in range(3))
filenames = [_make_datafilename(ts_data, policy, frag_index=4,
durable=not legacy_durable),
_make_metafilename(ts_newer_meta),
_make_metafilename(ts_older_meta, ts_older_meta)]
if policy.policy_type == EC_POLICY and legacy_durable:
filenames.append(ts_data.internal + '.durable')
self._verify_get_hashes(
filenames, ts_data, ts_newer_meta, ts_older_meta, policy)
do_test(False)
do_test(True)
def test_hash_suffix_with_obsolete_content_type_in_older_meta(self):
# After rsync replication we could have two meta files: newest
# content-type is in older meta file, but older than data file
def do_test(legacy_durable):
for policy in self.iter_policies():
ts_ctype, ts_data, ts_older_meta, ts_newer_meta = (
self.ts() for _ in range(4))
filenames = [_make_datafilename(ts_data, policy, frag_index=4,
durable=not legacy_durable),
_make_metafilename(ts_newer_meta),
_make_metafilename(ts_older_meta, ts_ctype)]
if policy.policy_type == EC_POLICY and legacy_durable:
filenames.append(ts_data.internal + '.durable')
self._verify_get_hashes(
filenames, ts_data, ts_newer_meta, None, policy)
do_test(False)
do_test(True)
def test_hash_suffix_removes_empty_hashdir_and_suffix(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy, frag_index=2)
os.makedirs(df._datadir)
self.assertTrue(os.path.exists(df._datadir)) # sanity
df_mgr.get_hashes('sda1', '0', [], policy)
suffix_dir = os.path.dirname(df._datadir)
self.assertFalse(os.path.exists(suffix_dir))
def test_hash_suffix_removes_empty_hashdirs_in_valid_suffix(self):
paths, suffix = find_paths_with_matching_suffixes(needed_matches=3,
needed_suffixes=0)
matching_paths = paths.pop(suffix)
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile('sda1', '0', *matching_paths[0],
policy=policy, frag_index=2)
# create a real, valid hsh_path
df.delete(Timestamp.now())
# and a couple of empty hsh_paths
empty_hsh_paths = []
for path in matching_paths[1:]:
fake_df = df_mgr.get_diskfile('sda1', '0', *path,
policy=policy)
os.makedirs(fake_df._datadir)
empty_hsh_paths.append(fake_df._datadir)
for hsh_path in empty_hsh_paths:
self.assertTrue(os.path.exists(hsh_path)) # sanity
# get_hashes will cleanup empty hsh_path and leave valid one
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertIn(suffix, hashes)
self.assertTrue(os.path.exists(df._datadir))
for hsh_path in empty_hsh_paths:
self.assertFalse(os.path.exists(hsh_path))
# get_hashes tests - hash_suffix error handling
def test_hash_suffix_listdir_enotdir(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
suffix = '123'
suffix_path = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(policy), '0',
suffix)
os.makedirs(suffix_path)
self.assertTrue(os.path.exists(suffix_path)) # sanity
hashes = df_mgr.get_hashes('sda1', '0', [suffix], policy)
# suffix dir cleaned up by get_hashes
self.assertFalse(os.path.exists(suffix_path))
expected = {}
msg = 'expected %r != %r for policy %r' % (
expected, hashes, policy)
self.assertEqual(hashes, expected, msg)
# now make the suffix path a file
open(suffix_path, 'w').close()
hashes = df_mgr.get_hashes('sda1', '0', [suffix], policy)
expected = {}
msg = 'expected %r != %r for policy %r' % (
expected, hashes, policy)
self.assertEqual(hashes, expected, msg)
def test_hash_suffix_listdir_enoent(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
part_path = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(policy), '0')
mkdirs(part_path) # ensure we'll bother writing a pkl at all
orig_listdir = os.listdir
listdir_calls = []
def mock_listdir(path):
success = False
try:
rv = orig_listdir(path)
success = True
return rv
finally:
listdir_calls.append((path, success))
with mock.patch('swift.obj.diskfile.os.listdir',
mock_listdir):
# recalc always forces hash_suffix even if the suffix
# does not exist!
df_mgr.get_hashes('sda1', '0', ['123'], policy)
self.assertEqual(listdir_calls, [
# part path gets created automatically
(part_path, True),
# this one blows up
(os.path.join(part_path, '123'), False),
])
def test_hash_suffix_cleanup_ondisk_files_enotdir_quarantined(self):
for policy in self.iter_policies():
df = self.df_router[policy].get_diskfile(
self.existing_device, '0', 'a', 'c', 'o', policy=policy)
# make the suffix directory
suffix_path = os.path.dirname(df._datadir)
os.makedirs(suffix_path)
suffix = os.path.basename(suffix_path)
# make the df hash path a file
open(df._datadir, 'wb').close()
df_mgr = self.df_router[policy]
hashes = df_mgr.get_hashes(self.existing_device, '0', [suffix],
policy)
self.assertEqual(hashes, {})
# and hash path is quarantined
self.assertFalse(os.path.exists(df._datadir))
# each device has a quarantined directory
quarantine_base = os.path.join(self.devices,
self.existing_device, 'quarantined')
# the quarantine path is...
quarantine_path = os.path.join(
quarantine_base, # quarantine root
diskfile.get_data_dir(policy), # per-policy data dir
os.path.basename(df._datadir) # name of quarantined file
)
self.assertTrue(os.path.exists(quarantine_path))
def test_hash_suffix_cleanup_ondisk_files_other_oserror(self):
for policy in self.iter_policies():
timestamp = self.ts()
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c',
'o', policy=policy,
frag_index=7)
suffix = os.path.basename(os.path.dirname(df._datadir))
with df.create() as writer:
test_data = b'test_data'
writer.write(test_data)
metadata = {
'X-Timestamp': timestamp.internal,
'ETag': md5(test_data).hexdigest(),
'Content-Length': len(test_data),
}
writer.put(metadata)
orig_os_listdir = os.listdir
listdir_calls = []
part_path = os.path.join(self.devices, self.existing_device,
diskfile.get_data_dir(policy), '0')
suffix_path = os.path.join(part_path, suffix)
datadir_path = os.path.join(suffix_path, hash_path('a', 'c', 'o'))
def mock_os_listdir(path):
listdir_calls.append(path)
if path == datadir_path:
# we want the part and suffix listdir calls to pass and
# make the cleanup_ondisk_files raise an exception
raise OSError(errno.EACCES, os.strerror(errno.EACCES))
return orig_os_listdir(path)
with mock.patch('os.listdir', mock_os_listdir):
hashes = df_mgr.get_hashes(self.existing_device, '0', [],
policy)
self.assertEqual(listdir_calls, [
part_path,
suffix_path,
datadir_path,
])
expected = {suffix: None}
msg = 'expected %r != %r for policy %r' % (
expected, hashes, policy)
self.assertEqual(hashes, expected, msg)
def test_hash_suffix_rmdir_hsh_path_oserror(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
# make an empty hsh_path to be removed
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c',
'o', policy=policy)
os.makedirs(df._datadir)
suffix = os.path.basename(os.path.dirname(df._datadir))
with mock.patch('os.rmdir', side_effect=OSError()):
hashes = df_mgr.get_hashes(self.existing_device, '0', [],
policy)
expected = {
EC_POLICY: {},
REPL_POLICY: md5().hexdigest(),
}[policy.policy_type]
self.assertEqual(hashes, {suffix: expected})
self.assertTrue(os.path.exists(df._datadir))
def test_hash_suffix_rmdir_suffix_oserror(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
# make an empty hsh_path to be removed
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c',
'o', policy=policy)
os.makedirs(df._datadir)
suffix_path = os.path.dirname(df._datadir)
suffix = os.path.basename(suffix_path)
captured_paths = []
def mock_rmdir(path):
captured_paths.append(path)
if path == suffix_path:
raise OSError('kaboom!')
with mock.patch('os.rmdir', mock_rmdir):
hashes = df_mgr.get_hashes(self.existing_device, '0', [],
policy)
expected = {
EC_POLICY: {},
REPL_POLICY: md5().hexdigest(),
}[policy.policy_type]
self.assertEqual(hashes, {suffix: expected})
self.assertTrue(os.path.exists(suffix_path))
self.assertEqual([
df._datadir,
suffix_path,
], captured_paths)
# get_hashes tests - behaviors
def test_get_hashes_does_not_create_partition(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
hashes = df_mgr.get_hashes(self.existing_device, '0', [],
policy)
self.assertEqual(hashes, {})
part_path = os.path.join(
self.devices, 'sda1', diskfile.get_data_dir(policy), '0')
self.assertFalse(os.path.exists(part_path))
def test_get_hashes_creates_pkl(self):
# like above, but -- if the partition already exists, make the pickle
for policy in self.iter_policies():
part_path = os.path.join(
self.devices, 'sda1', diskfile.get_data_dir(policy), '0')
mkdirs(part_path)
df_mgr = self.df_router[policy]
hashes = df_mgr.get_hashes(self.existing_device, '0', [],
policy)
self.assertEqual(hashes, {})
self.assertTrue(os.path.exists(part_path))
hashes_file = os.path.join(part_path,
diskfile.HASH_FILE)
self.assertTrue(os.path.exists(hashes_file))
# and double check the hashes
new_hashes = df_mgr.get_hashes(self.existing_device, '0', [],
policy)
self.assertEqual(hashes, new_hashes)
def _do_test_get_hashes_new_pkl_finds_new_suffix_dirs(self, device):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
part_path = os.path.join(
self.devices, self.existing_device,
diskfile.get_data_dir(policy), '0')
hashes_file = os.path.join(part_path,
diskfile.HASH_FILE)
# add something to find
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c',
'o', policy=policy, frag_index=4)
timestamp = self.ts()
df.delete(timestamp)
suffix_dir = os.path.dirname(df._datadir)
suffix = os.path.basename(suffix_dir)
# get_hashes will find the untracked suffix dir
self.assertFalse(os.path.exists(hashes_file)) # sanity
hashes = df_mgr.get_hashes(device, '0', [], policy)
self.assertIn(suffix, hashes)
# ... and create a hashes pickle for it
self.assertTrue(os.path.exists(hashes_file))
# repeat and check there is no rehashing
with mock.patch.object(df_mgr, '_hash_suffix',
return_value=hashes[suffix]) as mocked:
repeat_hashes = df_mgr.get_hashes(device, '0', [], policy)
self.assertEqual(hashes, repeat_hashes)
mocked.assert_not_called()
def test_get_hashes_new_pkl_finds_new_suffix_dirs_unicode(self):
self._do_test_get_hashes_new_pkl_finds_new_suffix_dirs(u'sda1')
def test_get_hashes_new_pkl_finds_new_suffix_dirs(self):
self._do_test_get_hashes_new_pkl_finds_new_suffix_dirs('sda1')
def test_get_hashes_new_pkl_missing_invalid_finds_new_suffix_dirs(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
part_path = os.path.join(
self.devices, self.existing_device,
diskfile.get_data_dir(policy), '0')
hashes_file = os.path.join(part_path,
diskfile.HASH_FILE)
invalidations_file = os.path.join(
part_path, diskfile.HASH_INVALIDATIONS_FILE)
# add something to find
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c',
'o', policy=policy, frag_index=4)
timestamp = self.ts()
df.delete(timestamp)
suffix = os.path.basename(os.path.dirname(df._datadir))
with open(invalidations_file) as f:
self.assertEqual('%s\n' % suffix, f.read())
# even if invalidations_file is missing ...
os.unlink(invalidations_file)
hashes = df_mgr.get_hashes(self.existing_device, '0', [], policy)
# get_hashes will *still* find the untracked suffix dir
self.assertIn(suffix, hashes)
# ... and create a hashes pickle for it
self.assertTrue(os.path.exists(hashes_file))
def test_get_hashes_new_pkl_lying_invalid_finds_new_suffix_dirs(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
part_path = os.path.join(
self.devices, self.existing_device,
diskfile.get_data_dir(policy), '0')
hashes_file = os.path.join(part_path,
diskfile.HASH_FILE)
invalidations_file = os.path.join(
part_path, diskfile.HASH_INVALIDATIONS_FILE)
# add something to find
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c',
'o', policy=policy, frag_index=4)
timestamp = self.ts()
df.delete(timestamp)
suffix = os.path.basename(os.path.dirname(df._datadir))
with open(invalidations_file) as f:
self.assertEqual('%s\n' % suffix, f.read())
# even if invalidations_file is lying ...
with open(invalidations_file, 'w') as f:
f.write('%x\n' % (int(suffix, 16) + 1))
hashes = df_mgr.get_hashes(self.existing_device, '0', [], policy)
# get_hashes will *still* find the untracked suffix dir
self.assertIn(suffix, hashes)
# ... and create a hashes pickle for it
self.assertTrue(os.path.exists(hashes_file))
def test_get_hashes_old_pickle_does_not_find_new_suffix_dirs(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
# create an empty stale pickle
part_path = os.path.join(
self.devices, 'sda1', diskfile.get_data_dir(policy), '0')
mkdirs(part_path)
hashes_file = os.path.join(part_path,
diskfile.HASH_FILE)
hashes = df_mgr.get_hashes(self.existing_device, '0', [], policy)
self.assertEqual(hashes, {})
self.assertTrue(os.path.exists(hashes_file)) # sanity
# add something to find
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy, frag_index=4)
os.makedirs(df._datadir)
filename = Timestamp.now().internal + '.ts'
open(os.path.join(df._datadir, filename), 'w').close()
suffix = os.path.basename(os.path.dirname(df._datadir))
# but get_hashes has no reason to find it (because we didn't
# call invalidate_hash)
new_hashes = df_mgr.get_hashes(self.existing_device, '0', [],
policy)
self.assertEqual(new_hashes, hashes)
# ... unless remote end asks for a recalc
hashes = df_mgr.get_hashes(self.existing_device, '0', [suffix],
policy)
self.assertIn(suffix, hashes)
def test_get_hashes_does_not_rehash_known_suffix_dirs(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c',
'o', policy=policy, frag_index=4)
suffix = os.path.basename(os.path.dirname(df._datadir))
timestamp = self.ts()
df.delete(timestamp)
# create the baseline hashes file
hashes = df_mgr.get_hashes(self.existing_device, '0', [], policy)
self.assertIn(suffix, hashes)
# now change the contents of the suffix w/o calling
# invalidate_hash
rmtree(df._datadir)
suffix_path = os.path.dirname(df._datadir)
self.assertTrue(os.path.exists(suffix_path)) # sanity
new_hashes = df_mgr.get_hashes(self.existing_device, '0', [],
policy)
# ... and get_hashes is none the wiser
self.assertEqual(new_hashes, hashes)
# ... unless remote end asks for a recalc
hashes = df_mgr.get_hashes(self.existing_device, '0', [suffix],
policy)
self.assertNotEqual(new_hashes, hashes)
# and the empty suffix path is removed
self.assertFalse(os.path.exists(suffix_path))
# ... and the suffix key is removed
expected = {}
self.assertEqual(expected, hashes)
def test_get_hashes_multi_file_multi_suffix(self):
paths, suffix = find_paths_with_matching_suffixes(needed_matches=2,
needed_suffixes=3)
matching_paths = paths.pop(suffix)
matching_paths.sort(key=lambda path: hash_path(*path))
other_paths = []
for suffix, paths in paths.items():
other_paths.append(paths[0])
if len(other_paths) >= 2:
break
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
# first we'll make a tombstone
df = df_mgr.get_diskfile(self.existing_device, '0',
*other_paths[0], policy=policy,
frag_index=4)
timestamp = self.ts()
df.delete(timestamp)
tombstone_hash = md5(timestamp.internal + '.ts').hexdigest()
tombstone_suffix = os.path.basename(os.path.dirname(df._datadir))
# second file in another suffix has a .datafile
df = df_mgr.get_diskfile(self.existing_device, '0',
*other_paths[1], policy=policy,
frag_index=5)
timestamp = self.ts()
with df.create() as writer:
test_data = b'test_file'
writer.write(test_data)
metadata = {
'X-Timestamp': timestamp.internal,
'ETag': md5(test_data).hexdigest(),
'Content-Length': len(test_data),
}
writer.put(metadata)
writer.commit(timestamp)
datafile_name = _make_datafilename(
timestamp, policy, frag_index=5)
durable_hash = md5(timestamp.internal + '.durable').hexdigest()
datafile_suffix = os.path.basename(os.path.dirname(df._datadir))
# in the *third* suffix - two datafiles for different hashes
df = df_mgr.get_diskfile(self.existing_device, '0',
*matching_paths[0], policy=policy,
frag_index=6)
matching_suffix = os.path.basename(os.path.dirname(df._datadir))
timestamp = self.ts()
with df.create() as writer:
test_data = b'test_file'
writer.write(test_data)
metadata = {
'X-Timestamp': timestamp.internal,
'ETag': md5(test_data).hexdigest(),
'Content-Length': len(test_data),
}
writer.put(metadata)
writer.commit(timestamp)
# we'll keep track of file names for hash calculations
filename = _make_datafilename(
timestamp, policy, frag_index=6)
data_filenames = {
6: filename
}
df = df_mgr.get_diskfile(self.existing_device, '0',
*matching_paths[1], policy=policy,
frag_index=7)
self.assertEqual(os.path.basename(os.path.dirname(df._datadir)),
matching_suffix) # sanity
timestamp = self.ts()
with df.create() as writer:
test_data = b'test_file'
writer.write(test_data)
metadata = {
'X-Timestamp': timestamp.internal,
'ETag': md5(test_data).hexdigest(),
'Content-Length': len(test_data),
}
writer.put(metadata)
writer.commit(timestamp)
filename = _make_datafilename(
timestamp, policy, frag_index=7)
data_filenames[7] = filename
# now make up the expected suffixes!
if policy.policy_type == EC_POLICY:
hasher = md5()
for filename in data_filenames.values():
# each data file updates the hasher with durable timestamp
hasher.update(filename.split('#', 1)[0] + '.durable')
expected = {
tombstone_suffix: {
None: tombstone_hash,
},
datafile_suffix: {
None: durable_hash,
5: self.fname_to_ts_hash(datafile_name),
},
matching_suffix: {
None: hasher.hexdigest(),
6: self.fname_to_ts_hash(data_filenames[6]),
7: self.fname_to_ts_hash(data_filenames[7]),
},
}
elif policy.policy_type == REPL_POLICY:
hasher = md5()
for filename in data_filenames.values():
hasher.update(filename)
expected = {
tombstone_suffix: tombstone_hash,
datafile_suffix: md5(datafile_name).hexdigest(),
matching_suffix: hasher.hexdigest(),
}
else:
self.fail('unknown policy type %r' % policy.policy_type)
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertEqual(hashes, expected)
# get_hashes tests - error handling
def test_get_hashes_bad_dev(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df_mgr.mount_check = True
with mock_check_drive(ismount=False):
self.assertRaises(
DiskFileDeviceUnavailable,
df_mgr.get_hashes, self.existing_device, '0', ['123'],
policy)
def test_get_hashes_zero_bytes_pickle(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
part_path = os.path.join(self.devices, self.existing_device,
diskfile.get_data_dir(policy), '0')
os.makedirs(part_path)
# create a pre-existing zero-byte file
open(os.path.join(part_path, diskfile.HASH_FILE), 'w').close()
hashes = df_mgr.get_hashes(self.existing_device, '0', [],
policy)
self.assertEqual(hashes, {})
def _test_get_hashes_race(self, hash_breaking_function):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c',
'o', policy=policy, frag_index=3)
suffix = os.path.basename(os.path.dirname(df._datadir))
df2 = self.get_different_suffix_df(df, frag_index=5)
suffix2 = os.path.basename(os.path.dirname(df2._datadir))
part_path = os.path.dirname(os.path.dirname(
os.path.join(df._datadir)))
mkdirs(part_path)
hashfile_path = os.path.join(part_path, diskfile.HASH_FILE)
# create hashes.pkl
hashes = df_mgr.get_hashes(self.existing_device, '0', [],
policy)
self.assertEqual(hashes, {}) # sanity
self.assertTrue(os.path.exists(hashfile_path))
# and optionally tamper with the hashes.pkl...
hash_breaking_function(hashfile_path)
non_local = {'called': False}
orig_hash_suffix = df_mgr._hash_suffix
# then create a suffix
df.delete(self.ts())
def mock_hash_suffix(*args, **kwargs):
# capture first call to mock_hash
if not non_local['called']:
non_local['called'] = True
df2.delete(self.ts())
non_local['other_hashes'] = df_mgr.get_hashes(
self.existing_device, '0', [], policy)
return orig_hash_suffix(*args, **kwargs)
with mock.patch.object(df_mgr, '_hash_suffix', mock_hash_suffix):
hashes = df_mgr.get_hashes(self.existing_device, '0', [],
policy)
self.assertTrue(non_local['called'])
self.assertIn(suffix, hashes)
self.assertIn(suffix2, hashes)
def test_get_hashes_race_invalid_pickle(self):
def hash_breaking_function(hashfile_path):
# create a garbage zero-byte file which cannot be unpickled
open(hashfile_path, 'w').close()
self._test_get_hashes_race(hash_breaking_function)
def test_get_hashes_race_new_partition(self):
def hash_breaking_function(hashfile_path):
# simulate rebalanced part doing post-rsync REPLICATE
os.unlink(hashfile_path)
part_dir = os.path.dirname(hashfile_path)
os.unlink(os.path.join(part_dir, '.lock'))
# sanity
self.assertEqual([], os.listdir(os.path.dirname(hashfile_path)))
self._test_get_hashes_race(hash_breaking_function)
def test_get_hashes_race_existing_partition(self):
def hash_breaking_function(hashfile_path):
# no-op - simulate ok existing partition
self.assertTrue(os.path.exists(hashfile_path))
self._test_get_hashes_race(hash_breaking_function)
def test_get_hashes_hash_suffix_enotdir(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
# create a real suffix dir
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c',
'o', policy=policy, frag_index=3)
df.delete(Timestamp.now())
suffix = os.path.basename(os.path.dirname(df._datadir))
# touch a bad suffix dir
part_dir = os.path.join(self.devices, self.existing_device,
diskfile.get_data_dir(policy), '0')
open(os.path.join(part_dir, 'bad'), 'w').close()
hashes = df_mgr.get_hashes(self.existing_device, '0', [], policy)
self.assertIn(suffix, hashes)
self.assertNotIn('bad', hashes)
def test_get_hashes_hash_suffix_other_oserror(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
suffix = '123'
suffix_path = os.path.join(self.devices, self.existing_device,
diskfile.get_data_dir(policy), '0',
suffix)
os.makedirs(suffix_path)
self.assertTrue(os.path.exists(suffix_path)) # sanity
hashes = df_mgr.get_hashes(self.existing_device, '0', [suffix],
policy)
expected = {}
msg = 'expected %r != %r for policy %r' % (expected, hashes,
policy)
self.assertEqual(hashes, expected, msg)
# this OSError does *not* raise PathNotDir, and is allowed to leak
# from hash_suffix into get_hashes
mocked_os_listdir = mock.Mock(
side_effect=OSError(errno.EACCES, os.strerror(errno.EACCES)))
with mock.patch("os.listdir", mocked_os_listdir):
with mock.patch('swift.obj.diskfile.logging') as mock_logging:
hashes = df_mgr.get_hashes('sda1', '0', [suffix], policy)
self.assertEqual(mock_logging.method_calls,
[mock.call.exception('Error hashing suffix')])
# recalc always causes a suffix to get reset to None; the listdir
# error prevents the suffix from being rehashed
expected = {'123': None}
msg = 'expected %r != %r for policy %r' % (expected, hashes,
policy)
self.assertEqual(hashes, expected, msg)
def test_get_hashes_modified_recursive_retry(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
part_path = os.path.join(self.devices, self.existing_device,
diskfile.get_data_dir(policy), '0')
mkdirs(part_path)
# first create an empty pickle
df_mgr.get_hashes(self.existing_device, '0', [], policy)
self.assertTrue(os.path.exists(os.path.join(
part_path, diskfile.HASH_FILE)))
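# Simulate hashes.pkl changing underneath get_hashes: for the first few
# calls each read returns one more fake suffix than the last, so
# get_hashes should detect the modification on re-read and retry (hence
# the alternating read / modified pattern asserted below).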
non_local = {'suffix_count': 1}
calls = []
def mock_read_hashes(filename):
rv = {'%03x' % i: 'fake'
for i in range(non_local['suffix_count'])}
if len(calls) <= 3:
# this will make the *next* call get slightly
# different content
non_local['suffix_count'] += 1
# track exactly the value for every return
calls.append(dict(rv))
rv['valid'] = True
return rv
with mock.patch('swift.obj.diskfile.read_hashes',
mock_read_hashes):
df_mgr.get_hashes(self.existing_device, '0', ['123'],
policy)
self.assertEqual(calls, [
{'000': 'fake'}, # read
{'000': 'fake', '001': 'fake'}, # modification
{'000': 'fake', '001': 'fake', '002': 'fake'}, # read
{'000': 'fake', '001': 'fake', '002': 'fake',
'003': 'fake'}, # modified
{'000': 'fake', '001': 'fake', '002': 'fake',
'003': 'fake', '004': 'fake'}, # read
{'000': 'fake', '001': 'fake', '002': 'fake',
'003': 'fake', '004': 'fake'}, # not modified
])
class TestHashesHelpers(unittest.TestCase):
def setUp(self):
self.testdir = tempfile.mkdtemp()
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
def test_read_legacy_hashes(self):
hashes = {'fff': 'fake'}
hashes_file = os.path.join(self.testdir, diskfile.HASH_FILE)
with open(hashes_file, 'wb') as f:
pickle.dump(hashes, f)
expected = {
'fff': 'fake',
'updated': -1,
'valid': True,
}
self.assertEqual(expected, diskfile.read_hashes(self.testdir))
def test_write_hashes_valid_updated(self):
hashes = {'888': 'fake', 'valid': True}
now = time()
with mock.patch('swift.obj.diskfile.time.time', return_value=now):
diskfile.write_hashes(self.testdir, hashes)
hashes_file = os.path.join(self.testdir, diskfile.HASH_FILE)
with open(hashes_file, 'rb') as f:
data = pickle.load(f)
expected = {
'888': 'fake',
'updated': now,
'valid': True,
}
self.assertEqual(expected, data)
def test_write_hashes_invalid_updated(self):
hashes = {'valid': False}
now = time()
with mock.patch('swift.obj.diskfile.time.time', return_value=now):
diskfile.write_hashes(self.testdir, hashes)
hashes_file = os.path.join(self.testdir, diskfile.HASH_FILE)
with open(hashes_file, 'rb') as f:
data = pickle.load(f)
expected = {
'updated': now,
'valid': False,
}
self.assertEqual(expected, data)
def test_write_hashes_safe_default(self):
hashes = {}
now = time()
with mock.patch('swift.obj.diskfile.time.time', return_value=now):
diskfile.write_hashes(self.testdir, hashes)
hashes_file = os.path.join(self.testdir, diskfile.HASH_FILE)
with open(hashes_file, 'rb') as f:
data = pickle.load(f)
expected = {
'updated': now,
'valid': False,
}
self.assertEqual(expected, data)
def test_read_write_valid_hashes_mutation_and_transative_equality(self):
hashes = {'000': 'fake', 'valid': True}
diskfile.write_hashes(self.testdir, hashes)
# write_hashes mutates the passed in hashes, it adds the updated key
self.assertIn('updated', hashes)
self.assertTrue(hashes['valid'])
result = diskfile.read_hashes(self.testdir)
# unpickling results in a new object
self.assertNotEqual(id(hashes), id(result))
# with exactly the same value mutation from write_hashes
self.assertEqual(hashes, result)
def test_read_write_invalid_hashes_mutation_and_transative_equality(self):
hashes = {'valid': False}
diskfile.write_hashes(self.testdir, hashes)
# write_hashes mutates the passed in hashes, it adds the updated key
self.assertIn('updated', hashes)
self.assertFalse(hashes['valid'])
result = diskfile.read_hashes(self.testdir)
# unpickling results in a new object
self.assertNotEqual(id(hashes), id(result))
# with exactly the same value mutation from write_hashes
self.assertEqual(hashes, result)
def test_ignore_corrupted_hashes(self):
corrupted_hashes = {u'\x00\x00\x00': False, 'valid': True}
diskfile.write_hashes(self.testdir, corrupted_hashes)
result = diskfile.read_hashes(self.testdir)
self.assertFalse(result['valid'])
if __name__ == '__main__':
unittest.main()
|
__init__.py
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from glob import glob
from subprocess import check_output
from tempfile import mkstemp
from threading import Thread
import os
from os.path import dirname, join
import mycroft
from mycroft import MycroftSkill, intent_file_handler
from mycroft.api import DeviceApi
class SupportSkill(MycroftSkill):
# TODO: Will need to read from config under KDE, etc.
log_locations = [
'/opt/mycroft/*.json',
'/var/log/mycroft-*.log',
'/etc/mycroft/*.conf',
join(dirname(dirname(mycroft.__file__)), 'scripts', 'logs', '*.log')
]
# Service used to temporarily hold the debugging data (linked to
# via email)
host = 'termbin.com'
def __init__(self):
MycroftSkill.__init__(self)
def upload_and_create_url(self, log_str):
# Send the various log and info files
# Upload to termbin.com using the nc (netcat) util
fd, path = mkstemp()
with open(path, 'w') as f:
f.write(log_str)
os.close(fd)
cmd = 'cat ' + path + ' | nc ' + self.host + ' 9999'
return check_output(cmd, shell=True).decode().strip('\n\x00')
def get_device_name(self):
try:
return DeviceApi().get()['name']
except Exception:
self.log.exception('API Error')
return ':error:'
def upload_debug_info(self):
all_lines = []
threads = []
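# Each log file is read and uploaded on its own thread; logs longer than
# 100 lines have their tail pasted to termbin and only the resulting URL
# is kept, while shorter logs are included inline.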
for log_file in sum([glob(pattern) for pattern in self.log_locations], []):
def do_thing(log_file=log_file):
with open(log_file) as f:
log_lines = f.read().split('\n')
lines = ['=== ' + log_file + ' ===']
if len(log_lines) > 100:
log_lines = '\n'.join(log_lines[-5000:])
print('Uploading ' + log_file + '...')
lines.append(self.upload_and_create_url(log_lines))
else:
lines.extend(log_lines)
lines.append('')
all_lines.extend(lines)
t = Thread(target=do_thing)
t.daemon = True
t.start()
threads.append(t)
for t in threads:
t.join()
return self.upload_and_create_url('\n'.join(all_lines))
# "Create a support ticket"
@intent_file_handler('contact.support.intent')
def troubleshoot(self):
# Get a problem description from the user
user_words = self.get_response('confirm.support', num_retries=0)
yes_words = self.translate_list('yes')
# TODO: .strip() shouldn't be needed, translate_list should remove
# the '\r' I'm seeing. Remove after bugfix.
if (not user_words or not any(
i.strip() in user_words for i in yes_words
)):
self.speak_dialog('cancelled')
return
description = self.get_response('ask.description', num_retries=0)
if description is None:
self.speak_dialog('cancelled')
return
self.speak_dialog('one.moment')
# Log so that the message will appear in the package of logs sent
self.log.debug("Troubleshooting Package Description: " +
str(description))
# Upload the logs to the web
url = self.upload_debug_info()
# Create the troubleshooting email and send to user
data = {'url': url, 'device_name': self.get_device_name(),
'description': description}
email = '\n'.join(self.translate_template('support.email', data))
title = self.translate('support.title')
self.send_email(title, email)
self.speak_dialog('complete')
def create_skill():
return SupportSkill()
|
manual_ctrl.py
|
#!/usr/bin/env python3
# set up wheel
import os, struct, array
from fcntl import ioctl
# Iterate over the joystick devices.
print('Available devices:')
for fn in os.listdir('/dev/input'):
if fn.startswith('js'):
print(' /dev/input/%s' % (fn))
# We'll store the states here.
axis_states = {}
button_states = {}
# These constants were borrowed from linux/input.h
axis_names = {
0x00 : 'x',
0x01 : 'y',
0x02 : 'z',
0x03 : 'rx',
0x04 : 'ry',
0x05 : 'rz',
0x06 : 'throttle',
0x07 : 'rudder',
0x08 : 'wheel',
0x09 : 'gas',
0x0a : 'brake',
0x10 : 'hat0x',
0x11 : 'hat0y',
0x12 : 'hat1x',
0x13 : 'hat1y',
0x14 : 'hat2x',
0x15 : 'hat2y',
0x16 : 'hat3x',
0x17 : 'hat3y',
0x18 : 'pressure',
0x19 : 'distance',
0x1a : 'tilt_x',
0x1b : 'tilt_y',
0x1c : 'tool_width',
0x20 : 'volume',
0x28 : 'misc',
}
button_names = {
0x120 : 'trigger',
0x121 : 'thumb',
0x122 : 'thumb2',
0x123 : 'top',
0x124 : 'top2',
0x125 : 'pinkie',
0x126 : 'base',
0x127 : 'base2',
0x128 : 'base3',
0x129 : 'base4',
0x12a : 'base5',
0x12b : 'base6',
0x12f : 'dead',
0x130 : 'a',
0x131 : 'b',
0x132 : 'c',
0x133 : 'x',
0x134 : 'y',
0x135 : 'z',
0x136 : 'tl',
0x137 : 'tr',
0x138 : 'tl2',
0x139 : 'tr2',
0x13a : 'select',
0x13b : 'start',
0x13c : 'mode',
0x13d : 'thumbl',
0x13e : 'thumbr',
0x220 : 'dpad_up',
0x221 : 'dpad_down',
0x222 : 'dpad_left',
0x223 : 'dpad_right',
# XBox 360 controller uses these codes.
0x2c0 : 'dpad_left',
0x2c1 : 'dpad_right',
0x2c2 : 'dpad_up',
0x2c3 : 'dpad_down',
}
axis_map = []
button_map = []
def wheel_poll_thread(q):
# Open the joystick device.
fn = '/dev/input/js0'
print('Opening %s...' % fn)
jsdev = open(fn, 'rb')
# Get the device name.
#buf = bytearray(63)
buf = array.array('B', [0] * 64)
ioctl(jsdev, 0x80006a13 + (0x10000 * len(buf)), buf) # JSIOCGNAME(len)
js_name = buf.tobytes().rstrip(b'\x00').decode('utf-8')
print('Device name: %s' % js_name)
# Get number of axes and buttons.
buf = array.array('B', [0])
ioctl(jsdev, 0x80016a11, buf) # JSIOCGAXES
num_axes = buf[0]
buf = array.array('B', [0])
ioctl(jsdev, 0x80016a12, buf) # JSIOCGBUTTONS
num_buttons = buf[0]
# Get the axis map.
buf = array.array('B', [0] * 0x40)
ioctl(jsdev, 0x80406a32, buf) # JSIOCGAXMAP
for axis in buf[:num_axes]:
axis_name = axis_names.get(axis, 'unknown(0x%02x)' % axis)
axis_map.append(axis_name)
axis_states[axis_name] = 0.0
# Get the button map.
buf = array.array('H', [0] * 200)
ioctl(jsdev, 0x80406a34, buf) # JSIOCGBTNMAP
for btn in buf[:num_buttons]:
btn_name = button_names.get(btn, 'unknown(0x%03x)' % btn)
button_map.append(btn_name)
button_states[btn_name] = 0
print('%d axes found: %s' % (num_axes, ', '.join(axis_map)))
print('%d buttons found: %s' % (num_buttons, ', '.join(button_map)))
# Enable FF
import evdev # pylint: disable=import-error
from evdev import ecodes, InputDevice # pylint: disable=import-error
device = evdev.list_devices()[0]
evtdev = InputDevice(device)
val = 24000
evtdev.write(ecodes.EV_FF, ecodes.FF_AUTOCENTER, val)
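# Each joystick event is an 8-byte struct js_event from linux/joystick.h:
# u32 time (ms), s16 value, u8 type (0x01 button, 0x02 axis), u8 number,
# which matches the struct.unpack('IhBB', ...) call below.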
while True:
evbuf = jsdev.read(8)
time, value, mtype, number = struct.unpack('IhBB', evbuf)
# print(mtype, number, value)
if mtype & 0x02: # wheel & paddles
axis = axis_map[number]
if axis == "z": # gas
fvalue = value / 32767.0
axis_states[axis] = fvalue
normalized = (1 - fvalue) * 50
q.put(str("throttle_%f" % normalized))
if axis == "rz": # brake
fvalue = value / 32767.0
axis_states[axis] = fvalue
normalized = (1 - fvalue) * 50
q.put(str("brake_%f" % normalized))
if axis == "x": # steer angle
fvalue = value / 32767.0
axis_states[axis] = fvalue
normalized = fvalue
q.put(str("steer_%f" % normalized))
if mtype & 0x01: # buttons
if number in [0,19]: # X
if value == 1: # press down
q.put(str("cruise_down"))
if number in [3,18]: # triangle
if value == 1: # press down
q.put(str("cruise_up"))
if number in [1,6]: # square
if value == 1: # press down
q.put(str("cruise_cancel"))
if number in [10,21]: # R3
if value == 1: # press down
q.put(str("reverse_switch"))
if __name__ == '__main__':
from multiprocessing import Process, Queue
# wheel_poll_thread expects a queue to publish its control strings on
q = Queue()
p = Process(target=wheel_poll_thread, args=(q,))
p.start()
|
Master.py
|
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import _thread
import threading
import cProfile, pstats, io
#from pstats import SortKey
from common import *
from test_images import image
from test_videos import video
from calibration import load_calib
class master(object):
def __init__(self):
self.mtx = 0
self.dist = 0
self.image = object()
if not os.path.exists(img_out_dir):
os.makedirs(img_out_dir)
if not os.path.exists(vid_out_dir):
os.makedirs(vid_out_dir)
def test_images(self, pipe="pipeline"):
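# Run the named pipeline method on every test image, either on worker
# threads (USE_THREADING) or sequentially, then plot the brightness
# values collected on the image class.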
images = find_files(main_dir + "/test_images/", "*.jpg")
def _threading(img_path, pipe):
img = image(self, image_path=img_path)
getattr(img, pipe)()
for img_path in images:
if USE_THREADING:
try:
t = threading.Thread(target=_threading, args=(img_path, pipe, ))
t.start()
except:
print("Error: unable to start thread")
else:
img = image(self, image_path=img_path)
getattr(img, pipe)()
fig0 = plt.figure(0)
fig0.clf()
print(image.brightness)
plt.plot(image.brightness, label='brightness')
plt.grid(True)
plt.legend()
arr = image.convert_figure_to_array(fig0)
store_image(arr, "brightness",img_out_dir + "/" + "brightness")
print("Avg brightness: " + str(np.average(image.brightness)))
min_ = np.min(image.brightness)
max_ = np.max(image.brightness)
print("Min brightness: " + str(min_), images[image.brightness.index(min_)])
print("MAX brightness: " + str(max_), images[image.brightness.index(max_)])
fig0.clf()
def test_videos(self):
videos = find_files(main_dir + "/test_videos", "*.mp4")
print(videos)
for vid_path in videos:
vid = video(self, vid_path)
vid.test_video()
def main(self):
self.mtx, self.dist = load_calib("camera_cal")
if TEST_MODE[0] == "IMAGE":
if TEST_MODE[1] == "S":
self.image = image(self, image_path="./test_images/test4.jpg")
self.image.pipeline()
else:
self.test_images(pipe="pipeline")
elif TEST_MODE[0] == "VIDEO":
if TEST_MODE[1] == "S":
vid = video(self)
vid.test_video("./test_videos/project_video.mp4")
#vid.test_video(["./test_videos/challenge_video.mp4", "./test_videos/harder_challenge_video.mp4"])
else:
self.test_videos()
if __name__ == "__main__":
#lane_color()
#lane_region()
#lane_region_color()
#canny_test1()
Master = master()
if not USE_PROFILER:
Master.main()
else:
pr = cProfile.Profile()
pr.enable()
Master.main()
pr.disable()
s = io.StringIO()
sortby = pstats.SortKey.CUMULATIVE
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
with open("profiler_Output.txt", "w") as text_file:
text_file.write(s.getvalue())
#cProfile.run('Master.main()',"profiler_out.txt")
|
application.py
|
"""Main execution body for program. Contains GUI interface and exporting class that creates files instead
of generating HTML Reports
Author: Alastair Chin
Last Updated: 28/02/2017
"""
import argparse
import webbrowser
import textwrap
import xlrd
from tkinter import *
from tkinter import filedialog, ttk
from threading import Thread
try:
from .data import *
from .report import *
from .template_reader import *
except ImportError:
from data import *
from report import *
from template_reader import *
terminal = False
"""
Global Variables:
terminal -- boolean value indicating whether the program is running through the terminal or through the GUI
progress -- Progress bar showing progress through program
"""
class DisplayWindow:
"""GUI for application allowing users to interact with program in simpler and more explanatory way
Methods:
dataaskopenfile -- Asks for files to process and displays them in the output window
dataaskopenfolder -- Asks for folder to process and displays the contained files in the output window
filetext -- Fills output box given a list of files
maketemplate -- Links to Create template web page of Data-oracle website
process_report -- Runs program and generates report for all files processed
process_export -- Runs program and creates a file containing analysis of all files processed
removefile -- Removes file from being processed after being selected in output window
reset -- Resets the program, removing all files from the process queue and setting the progress bar back to the start
templateaskopenfile -- Asks for a template to use during processing and displays it in the output window
Variables:
datafiles -- list of datafiles to be processed
display -- output window Frame object
template -- template to use in process if applicable
"""
def __init__(self):
root = Tk()
root.wm_title("UWA Data-oracle")
self.datafiles = []
self.template = None
# Main Window
mainwindow = Frame(root)
self.display = Frame(mainwindow)
Label(mainwindow, text="Select File(s) or Folder(s) to process: ").grid(row=0, sticky=E, pady=10)
Label(mainwindow, text="Select template file(optional): ").grid(row=1, sticky=E, pady=10)
label3 = Label(mainwindow, text="> Create Template", fg="blue")
label3.bind("<Button-1>", self.maketemplate)
label3.grid(row=2)
Button(mainwindow, text="Browse Files...", command= self.dataaskopenfile).grid(row=0, column=1, padx=5, sticky='ew')
Button(mainwindow, text='Browse Folders...', command= self.dataaskopenfolder).grid(row=0, column=2, padx=5)
Button(mainwindow, text="Browse Templates...", command=self.templateaskopenfile).grid(row=1, column=1, padx=5)
Button(mainwindow, text="View Report", command=self.process_report).grid(row=4, column=1,sticky='ew', padx=5)
Button(mainwindow, text="Export", command=self.process_export).grid(row=4, column=2, sticky='ew')
Button(mainwindow, text="Reset", command=self.reset).grid(row=6, column=1, sticky='ew')
Button(mainwindow, text="Exit", command=mainwindow.quit).grid(row=6, column=2, sticky='ew', pady=5)
self.progress = ttk.Progressbar(mainwindow, orient="horizontal", mode="determinate")
self.progress.grid(row=5, columnspan=3, sticky='ew', padx=10, pady=5)
mainwindow.pack()
# Output Window
self.display.grid(row=0, column=3, rowspan=7, sticky=N)
# Status Bar
self.statusText = StringVar()
self.statusText.set("Waiting for File...")
status = Label(root, textvariable=self.statusText, bd=1, relief=SUNKEN, anchor=W)
status.pack(side=BOTTOM, fill=X)
root.mainloop()
def dataaskopenfile(self):
""" Asks for files to process and displays them in the output window"""
self.reset()
if self.template:
Label(self.display, text=str("Template Selected: " + self.template[0]), anchor='w').pack(fill=X)
self.datafiles = filedialog.askopenfiles(mode='r', filetypes=[('All Files', '.*'),('Csv Files','*.csv'),
('Excel Workbook', '*.xlsx'), ('Excel 97-2003 Workbook', '.xls')],
defaultextension="*.csv")
if self.datafiles is not None:
self.datafiles = [file.name for file in self.datafiles]
Label(self.display, text="Selected Files: ", anchor='w').pack(fill=X)
self.filetext(self.datafiles)
self.statusText.set("Ready to Process Files...")
return self.datafiles
def dataaskopenfolder(self):
"""Asks for folder to process and displays the contained files in the output window"""
self.reset()
if self.template is not None:
Label(self.display, text=str("Template Selected: " + self.template.name), anchor='w').pack(fill=X)
folder = filedialog.askdirectory()
if folder != '':
self.datafiles = []
for file in os.listdir(folder):
self.datafiles.append(os.path.join(folder,file))
Label(self.display, text=str("Selected Folder: " + folder), anchor='w').pack(fill=X)
self.filetext(self.datafiles)
return folder
def filetext(self, files):
"""Provides text for output box given a list of files"""
remove_file = lambda x, m: (lambda p: self.removefile(x, m))
for file in files:
label = Label(self.display, text=str("\t" + file), anchor='w')
if os.name == 'posix':
label.bind("<Button-2>", remove_file(file, label))
else:
label.bind("<Button-3>", remove_file(file, label))
label.pack(fill=X)
def maketemplate(self, event):
"""Opens webbrowser to create template page on Data-oracle website"""
webbrowser.open_new("http://www.data-oracle.com/upload/createTemplate/")
def process_report(self):
"""Runs program and generates report at the end"""
self.progress["value"] = 0
self.setstatus("Processing Files...")
Thread(target=process_files, args=(self.datafiles, self.template), kwargs={'window':self}).start()
def process_export(self):
"""Runs program and exports results to file"""
self.progress["value"] = 0
self.setstatus("Processing Files...")
exportfile = ''
try:
exportfile = filedialog.asksaveasfile(mode='w', defaultextension='*.csv', filetypes=[('Csv Files', '*.csv'),
('All Files', '.*')])
exportfile.close()
Thread(target=process_files, args=(self.datafiles, self.template),
kwargs={'exportfile': exportfile.name, 'window': self}).start()
except PermissionError:
# Occurs if export file is open
self.setstatus("ERROR: Permission Denied, ensure export file is not open in another program")
def removefile(self, file, label):
"""Removes file from process list and removes label"""
print("Removing: ", file)
self.datafiles.remove(file)
label.destroy()
def reset(self):
"""Resets all files"""
mainwindow = self.display.winfo_parent()
mainwindow = self.display._nametowidget(mainwindow)
self.display.destroy()
self.display = Frame(mainwindow)
self.display.grid(row=0, column=3, rowspan=7, sticky=N)
self.setstatus("Waiting for File...")
self.progress["value"] = 0
def templateaskopenfile(self):
"""Asks for template to use in processing"""
self.template = []
template = filedialog.askopenfile(mode='r', filetypes=[('All Files', '.*'), ('Csv Files', '*.csv')],
defaultextension="*.csv")
if template is not None:
self.template.append(template.name)
if hasattr(self, 'templateLabel'):
self.templateLabel.destroy()
self.templateLabel = Label(self.display, text=str("Template Selected: " + self.template[0]), anchor='w')
self.templateLabel.pack(fill=X)
self.setstatus("Ready to Process Folder...")
return self.template
def setmaxprogress(self, max):
self.progress["maximum"] = max
def step_progress(self):
self.progress.step()
def setstatus(self, msg):
self.statusText.set(msg)
class Exporter(object):
"""Class that creates a file containing analysis of all files run in program
Methods:
write_stats -- writes summary of a single data object
write_summary -- writes summary of all files to be run after processing all files
Variables:
filename -- file name to save export file as
total_files -- total number of files processed
total_invalid -- total number of invalid rows
total_empty -- total number of empty columns
    total_errors -- total number of errors throughout files
"""
def __init__(self, filename, offline=True):
self.filename = filename
self.total_files = 0
self.total_invalid = 0
self.total_empty = 0
self.total_errors = 0
self.total_col = 0
if not offline:
with open(self.filename, 'w') as fp:
pass
def write_stats(self, data):
"""Writes statistics of a single data object"""
with open(self.filename, 'r+') as fp:
fp.seek(0,2)
fp.write("Analysis of " + os.path.split(data.filename)[1] + '\n')
self.total_files += 1
fp.write("Number of Invalid rows: " + str(len(data.invalid_rows)) + '\n')
self.total_invalid += len(data.invalid_rows)
empty_columns = [column.header for column in data.columns if column.empty]
fp.write("Number of Empty Columns: " + str(len(empty_columns)) + '\n')
            self.total_empty += len(empty_columns)
fp.write("Number of Error Cells: " + str(len(data.errors)) + '\n')
            self.total_errors += len(data.errors)
fp.write("Number of Valid Columns: " + str(len(data.columns)) + '\n')
            self.total_col += len(data.columns)
if data.delimiter_type == ',':
fp.write("Delimiter: comma\n")
else:
fp.write("Delimiter: " + data.delimiter_type + '\n')
fp.write("\n")
def write_summary(self):
"""Writes summary of all files processed"""
temp_file = os.path.join(os.path.split(self.filename)[0],"Tempfile")
with open( temp_file, 'w') as fp:
fp.write("Error Report " + os.path.split(self.filename)[1] + "\n\n")
fp.write("Total Files Analysed: " + str(self.total_files) + "\n")
fp.write("Total Invalid Rows: " + str(self.total_invalid) + "\n")
fp.write("Total Empty Columns: " + str(self.total_empty) + "\n")
fp.write("Total Valid Columns: " + str(self.total_col) + "\n")
fp.write("Total Errors: " + str(self.total_errors) + "\n\n")
with open(self.filename, 'r') as fd:
for line in fd:
fp.write(line)
os.remove(self.filename)
os.rename(temp_file, self.filename)
def write_error(self, data):
"""Writes error message for files not processed fully"""
with open(self.filename, 'r+') as fp:
fp.seek(0,2)
fp.write("Analysis of " + os.path.split(data.filename)[1] + '\n')
fp.write("ERROR: Unable to read file, no readable data detected.\n\n")
def main(*args, **kwargs):
"""
Create Data and Report objects, providing necessary information for them
to run analysis and create desired outputs (i.e. HTML report or writing to exported file).
Keyword Arguments:
args -- Arguments provided to the program at runtime.
exporter -- Exporter object if applicable
"""
exporter = kwargs.pop('exporter', None)
window = kwargs.pop('window', None)
filename = args[0]
print("[Step 1/7] Processing file: ",filename)
print("[Step 2/7] Reading data")
if window is not None:
window.step_progress()
window.setstatus("Processing " + filename + "...")
if len(args) > 1:
temp = Template(args[1])
data = Data(filename, temp)
else:
data = Data(filename)
if not data.raw_data:
print("ERROR: Unable to read file: " + filename)
window.setstatus("ERROR: Unable to read file: " + filename)
if exporter is not None:
exporter.write_error(data)
return None
data.remove_invalid()
data.create_columns()
data.clean()
print("[Step 3/7] Running pre-analysis")
if window is not None:
window.step_progress()
data.pre_analysis()
print("[Step 4/7] Finding Errors")
if window is not None:
window.step_progress()
data.find_errors()
print("[Step 5/7] Running Analysis")
if window is not None:
window.step_progress()
window.setstatus("Running Analysis on " + filename + "...")
data.analysis()
if exporter is None:
print("[Step 6/7] Generating report")
report = Report(data)
str_report = report.html_report()
html = report.gen_html(str_report)
# returns string of html, also generates html report for debugging purposes
print("[Step 7/7] Report Successfully Generated")
print("Completed analysis for: ",filename)
if window is not None:
window.step_progress()
webbrowser.open("file://"+html,new=2)
else:
print("[Step 6/7] Generating report")
exporter.write_stats(data)
print("[Step 7/7] Report Successfully Generated")
if window is not None:
window.step_progress()
print("Completed analysis for: ", filename)
if window is not None:
window.setstatus("Completed Analysis for " + filename)
def get_file_dir(location):
"""Returns the directory of the file with the file name
Keyword arguments:
location -- A file path.
"""
return location.rpartition('\\')
def process_files(files, templates, exportfile='', window=None):
"""Process files and templates and runs the program over them. Converts excel files
and applies template to each file
Keyword arguments:
files -- files to be processed
templates -- files to use as templates in processing
exportfile -- file to export analysis to if applicable
"""
filenames = []
excel = []
for file in files:
name_ext = os.path.splitext(file)
# TODO handle empty sheets
if name_ext[1] == '.xls' or name_ext[1] == '.xlsx':
print("[Step 0/7] Converting to csv file")
wb = xlrd.open_workbook(file)
sheet_names = wb.sheet_names()
if len(sheet_names) == 1:
sh = wb.sheet_by_name(sheet_names[0])
new_name = os.path.splitext(file)[0] + ".csv"
with open(new_name, 'w', newline='') as fp:
wr = csv.writer(fp)
for rownum in range(sh.nrows):
wr.writerow(sh.row_values(rownum))
filenames.append(new_name)
excel.append(new_name)
else:
for sheet in sheet_names:
sh = wb.sheet_by_name(sheet)
new_name = os.path.join(os.path.splitext(file)[0] + "_" + sheet + ".csv")
try:
with open(new_name, 'w', newline='') as fp:
wr = csv.writer(fp)
for rownum in range(sh.nrows):
wr.writerow(sh.row_values(rownum))
except PermissionError:
# If created csv file already exists and is open
window.setstatus("ERROR: Permission Denied, ensure " + new_name + " is not open in another program")
return None
filenames.append(new_name)
excel.append(new_name)
elif name_ext[1] == '.csv':
filenames.append(file)
else:
print("ERROR: Unsupported file type: " + file)
if window is not None:
window.setstatus("WARNING: Unsupported file type " + file)
if exportfile != '':
export = Exporter(exportfile)
else:
export = None
if window is not None:
window.setmaxprogress(len(filenames) * 5.0 + 0.01)
    if templates:
if len(templates) == 1:
for name in filenames:
main(name, templates[0], exporter=export, window=window)
else:
num_templates = len(templates)
print(num_templates)
num_files = len(filenames)
if num_templates == num_files:
for i in range(0, num_files):
main(filenames[i], templates[i], exporter=export, window=window)
else:
# TODO keep functionality when excel files have multiple sheets
print("Error, different number of files and templates")
else:
for name in filenames:
main(name, exporter=export, window=window)
    if export is not None:
export.write_summary()
if excel:
for file in excel:
os.remove(file)
if __name__ == '__main__':
"""If the program is run with application.py as the argument to the command line
execution begins here. This will process all the command line arguments before
proceeding.
"""
files = []
templates = []
if len(sys.argv) > 1:
terminal = True
pathname = os.path.dirname(sys.argv[0])
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,\
description=textwrap.dedent('''\
Processes Csv files.
----------------------------------
Can process one or more csv files. Can specify template to describe
data further. Templates can be used to describe one or more csv files.
If using multiple templates for multiple files list templates in the
same order as the files they correspond to.
'''))
parser.add_argument('filenames', nargs='+',\
help='one or more filenames for the processor to analyse')
parser.add_argument('-t', nargs='+', metavar='template', help='a template for the given files')
args = parser.parse_args()
process_files(args.filenames, args.t)
else:
DisplayWindow()
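# A minimal usage sketch for terminal mode (the file names below are hypothetical),
# based on the argparse options defined above: one or more data files are passed as
# positional arguments and an optional template follows -t.
#
#   python application.py sales.csv orders.xlsx -t sales_template.csv
#
# When no command-line arguments are given, the Tkinter DisplayWindow GUI is
# launched instead.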
|
Osu to code.py
|
import sys
import subprocess
import pygame
import time
import os
import shutil
import threading
import random
global codename
global name
global songsfolder
osufile = input("Osu file location? ")
osu = open(osufile, encoding="utf8")
osufolder = osufile.split("\\")[0:-1]
osufolder = "\\".join(osufolder)
name = input("What should it be called? ")
songsfolder = "\\".join(osufolder.split("\\")[0:-1]) + "\\"
try:
os.mkdir(name)
except FileExistsError:
shutil.rmtree(name)
os.mkdir(name)
print("Worked")
codename = name + "/" + name + ".py"
code = open(name + "/" + name + ".py", 'w+')
# afile = input("Audio file location? ")
audioout = name + "/" + "audio.mp3"
# subprocess.run(["ffmpeg", "-i", afile, audioout])
shutil.copy("lights.py", name + "/" + "lights.py")
shutil.copy("phue.py", name + "/" + "phue.py")
osu.readline()
osu.readline()
osu.readline()
afile = osu.readline().rstrip("\n").split(":")[1][1:]
afile = osufolder + "\\" + afile
shutil.copy(afile, name + "/" + "audio.mp3")
HitObjectFound = False
while not HitObjectFound:
line = osu.readline().rstrip("\n")
if line == '[HitObjects]':
HitObjectFound = True
code.write("import time\n")
code.write("import lights\n")
code.write("\n")
EofFound = False
comboStarters = [5, 6, 12]
prevtime = 0
while not EofFound:
hitobj = osu.readline().rstrip("\n").split(",")
try:
timee = int(hitobj[2])
combo = int(hitobj[3])
if combo in comboStarters:
if prevtime == 0:
code.write("print('3')\n")
code.write("time.sleep(" + str(2) + ")\n")
code.write("print('2')\n")
code.write("time.sleep(" + str(2) + ")\n")
code.write("print('1')\n")
code.write("time.sleep(" + str(2) + ")\n")
code.write("print('Go')\n")
code.write("time.sleep(" + str(((timee - prevtime)/1000) + .16) + ")\n")
code.write("lights.next()\n")
prevtime = timee
else:
if timee >= 50:
code.write("time.sleep(" + str(((timee - prevtime)/1000)-.0572) + ")\n")
code.write("lights.next()\n")
code.write("print('Bang')\n")
prevtime = timee
else:
code.write("time.sleep(" + str(((timee - prevtime)/1000)) + ")\n")
code.write("lights.next()\n")
code.write("print('Bang')\n")
prevtime = timee
except IndexError:
EofFound = True
osu.close()
code.close()
def thread1():
global codename
subprocess.run(["python", codename])
def thread2():
global name
time.sleep(5.89)
subprocess.call(["gst-play-1.0", name + '/audio.mp3'], shell=True)
t1 = threading.Thread(target=thread1)
t2 = threading.Thread(target=thread2)
def thread3():
listofsongs = os.listdir(songsfolder)
global codename
global name
global t1
global t2
actualist = []
for x in listofsongs:
try:
int(x.split(" ")[0])
actualist.append(x)
except BaseException:
pass
while True:
time.sleep(15)
next_target = random.choice(actualist)
currfolder = songsfolder + next_target + "\\"
newosu = random.choice([x for x in os.listdir(currfolder) if x[-1] == 'u'])
name = newosu[0:3] + currfolder[-4:-2]
nwfile = currfolder + newosu
makecode(osufile=nwfile, names=name)
t1.join()
t2.join()
t1 = threading.Thread(target=thread1)
t2 = threading.Thread(target=thread2)
t1.start()
t2.start()
t1.start()
t2.start()
|
client.py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009- Spyder Project Contributors
#
# Distributed under the terms of the MIT License
# (see spyder/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Client widget for the IPython Console.
This is the widget used on all its tabs.
"""
# Standard library imports
from __future__ import absolute_import # Fix for Issue 1356
import codecs
import os
import os.path as osp
from string import Template
from threading import Thread
import time
# Third party imports (qtpy)
from qtpy.QtCore import QUrl, QTimer, Signal, Slot
from qtpy.QtGui import QKeySequence
from qtpy.QtWidgets import (QHBoxLayout, QLabel, QMenu, QMessageBox,
QToolButton, QVBoxLayout, QWidget)
# Local imports
from spyder.config.base import (_, get_image_path, get_module_source_path,
running_under_pytest)
from spyder.config.gui import get_font, get_shortcut
from spyder.utils import icon_manager as ima
from spyder.utils import sourcecode
from spyder.utils.encoding import get_coding
from spyder.utils.environ import RemoteEnvDialog
from spyder.utils.programs import get_temp_dir
from spyder.utils.qthelpers import (add_actions, create_action,
create_toolbutton, DialogManager,
MENU_SEPARATOR)
from spyder.py3compat import to_text_string
from spyder.widgets.browser import WebView
from spyder.plugins.ipythonconsole.widgets import ShellWidget
from spyder.widgets.mixins import SaveHistoryMixin
from spyder.plugins.variableexplorer.widgets.collectionseditor import (
CollectionsEditor)
#-----------------------------------------------------------------------------
# Templates
#-----------------------------------------------------------------------------
# Using the same css file from the Help plugin for now. Maybe
# later it'll be a good idea to create a new one.
PLUGINS_PATH = get_module_source_path('spyder', 'plugins')
CSS_PATH = osp.join(PLUGINS_PATH, 'help', 'utils', 'static', 'css')
TEMPLATES_PATH = osp.join(PLUGINS_PATH, 'ipythonconsole', 'assets', 'templates')
BLANK = open(osp.join(TEMPLATES_PATH, 'blank.html')).read()
LOADING = open(osp.join(TEMPLATES_PATH, 'loading.html')).read()
KERNEL_ERROR = open(osp.join(TEMPLATES_PATH, 'kernel_error.html')).read()
try:
time.monotonic # time.monotonic new in 3.3
except AttributeError:
time.monotonic = time.time
#-----------------------------------------------------------------------------
# Auxiliary functions
#-----------------------------------------------------------------------------
def background(f):
"""
Call a function in a simple thread, to prevent blocking
Taken from the Jupyter Qtconsole project
"""
t = Thread(target=f)
t.start()
return t
#-----------------------------------------------------------------------------
# Client widget
#-----------------------------------------------------------------------------
class ClientWidget(QWidget, SaveHistoryMixin):
"""
Client widget for the IPython Console
This is a widget composed of a shell widget and a WebView info widget
to print different messages there.
"""
SEPARATOR = '{0}## ---({1})---'.format(os.linesep*2, time.ctime())
INITHISTORY = ['# -*- coding: utf-8 -*-',
'# *** Spyder Python Console History Log ***',]
append_to_history = Signal(str, str)
def __init__(self, plugin, id_,
history_filename, config_options,
additional_options, interpreter_versions,
connection_file=None, hostname=None,
menu_actions=None, slave=False,
external_kernel=False, given_name=None,
options_button=None,
show_elapsed_time=False,
reset_warning=True,
ask_before_restart=True):
super(ClientWidget, self).__init__(plugin)
SaveHistoryMixin.__init__(self, history_filename)
# --- Init attrs
self.id_ = id_
self.connection_file = connection_file
self.hostname = hostname
self.menu_actions = menu_actions
self.slave = slave
self.external_kernel = external_kernel
self.given_name = given_name
self.show_elapsed_time = show_elapsed_time
self.reset_warning = reset_warning
self.ask_before_restart = ask_before_restart
# --- Other attrs
self.options_button = options_button
self.stop_button = None
self.reset_button = None
self.stop_icon = ima.icon('stop')
self.history = []
self.allow_rename = True
self.stderr_dir = None
self.is_error_shown = False
# --- Widgets
self.shellwidget = ShellWidget(config=config_options,
ipyclient=self,
additional_options=additional_options,
interpreter_versions=interpreter_versions,
external_kernel=external_kernel,
local_kernel=True)
self.infowidget = WebView(self)
self.set_infowidget_font()
self.loading_page = self._create_loading_page()
self._show_loading_page()
# Elapsed time
self.time_label = None
self.t0 = time.monotonic()
self.timer = QTimer(self)
self.show_time_action = create_action(self, _("Show elapsed time"),
toggled=self.set_elapsed_time_visible)
# --- Layout
vlayout = QVBoxLayout()
toolbar_buttons = self.get_toolbar_buttons()
hlayout = QHBoxLayout()
hlayout.addWidget(self.create_time_label())
hlayout.addStretch(0)
for button in toolbar_buttons:
hlayout.addWidget(button)
vlayout.addLayout(hlayout)
vlayout.setContentsMargins(0, 0, 0, 0)
vlayout.addWidget(self.shellwidget)
vlayout.addWidget(self.infowidget)
self.setLayout(vlayout)
# --- Exit function
self.exit_callback = lambda: plugin.close_client(client=self)
# --- Dialog manager
self.dialog_manager = DialogManager()
# Show timer
self.update_time_label_visibility()
#------ Public API --------------------------------------------------------
@property
def kernel_id(self):
"""Get kernel id"""
if self.connection_file is not None:
json_file = osp.basename(self.connection_file)
return json_file.split('.json')[0]
@property
def stderr_file(self):
"""Filename to save kernel stderr output."""
stderr_file = None
if self.connection_file is not None:
stderr_file = self.kernel_id + '.stderr'
if self.stderr_dir is not None:
stderr_file = osp.join(self.stderr_dir, stderr_file)
else:
try:
stderr_file = osp.join(get_temp_dir(), stderr_file)
except (IOError, OSError):
stderr_file = None
return stderr_file
@property
def stderr_handle(self):
"""Get handle to stderr_file."""
if self.stderr_file is not None:
# Needed to prevent any error that could appear.
# See issue 6267
try:
handle = codecs.open(self.stderr_file, 'w', encoding='utf-8')
except Exception:
handle = None
else:
handle = None
return handle
def remove_stderr_file(self):
"""Remove stderr_file associated with the client."""
try:
# Defer closing the stderr_handle until the client
# is closed because jupyter_client needs it open
# while it tries to restart the kernel
self.stderr_handle.close()
os.remove(self.stderr_file)
except Exception:
pass
def configure_shellwidget(self, give_focus=True):
"""Configure shellwidget after kernel is started"""
if give_focus:
self.get_control().setFocus()
# Set exit callback
self.shellwidget.set_exit_callback()
# To save history
self.shellwidget.executing.connect(self.add_to_history)
# For Mayavi to run correctly
self.shellwidget.executing.connect(
self.shellwidget.set_backend_for_mayavi)
# To update history after execution
self.shellwidget.executed.connect(self.update_history)
# To update the Variable Explorer after execution
self.shellwidget.executed.connect(
self.shellwidget.refresh_namespacebrowser)
# To enable the stop button when executing a process
self.shellwidget.executing.connect(self.enable_stop_button)
# To disable the stop button after execution stopped
self.shellwidget.executed.connect(self.disable_stop_button)
# To show kernel restarted/died messages
self.shellwidget.sig_kernel_restarted.connect(
self.kernel_restarted_message)
# To correctly change Matplotlib backend interactively
self.shellwidget.executing.connect(
self.shellwidget.change_mpl_backend)
# To show env and sys.path contents
self.shellwidget.sig_show_syspath.connect(self.show_syspath)
self.shellwidget.sig_show_env.connect(self.show_env)
# To sync with working directory toolbar
self.shellwidget.executed.connect(self.shellwidget.get_cwd)
# To apply style
self.set_color_scheme(self.shellwidget.syntax_style, reset=False)
# To hide the loading page
self.shellwidget.sig_prompt_ready.connect(self._hide_loading_page)
# Show possible errors when setting Matplotlib backend
self.shellwidget.sig_prompt_ready.connect(
self._show_mpl_backend_errors)
def enable_stop_button(self):
self.stop_button.setEnabled(True)
def disable_stop_button(self):
# This avoids disabling automatically the button when
# re-running files on dedicated consoles.
# See issue #5958
if not self.shellwidget._executing:
self.stop_button.setDisabled(True)
@Slot()
def stop_button_click_handler(self):
"""Method to handle what to do when the stop button is pressed"""
self.stop_button.setDisabled(True)
# Interrupt computations or stop debugging
if not self.shellwidget._reading:
self.interrupt_kernel()
else:
self.shellwidget.write_to_stdin('exit')
def show_kernel_error(self, error):
"""Show kernel initialization errors in infowidget."""
# Replace end of line chars with <br>
eol = sourcecode.get_eol_chars(error)
if eol:
error = error.replace(eol, '<br>')
# Don't break lines in hyphens
# From https://stackoverflow.com/q/7691569/438386
error = error.replace('-', '‑')
# Create error page
message = _("An error ocurred while starting the kernel")
kernel_error_template = Template(KERNEL_ERROR)
page = kernel_error_template.substitute(css_path=CSS_PATH,
message=message,
error=error)
# Show error
self.infowidget.setHtml(page, QUrl.fromLocalFile(CSS_PATH))
self.shellwidget.hide()
self.infowidget.show()
# Tell the client we're in error mode
self.is_error_shown = True
def get_name(self):
"""Return client name"""
if self.given_name is None:
# Name according to host
if self.hostname is None:
name = _("Console")
else:
name = self.hostname
# Adding id to name
client_id = self.id_['int_id'] + u'/' + self.id_['str_id']
name = name + u' ' + client_id
elif self.given_name in ["Pylab", "SymPy", "Cython"]:
client_id = self.id_['int_id'] + u'/' + self.id_['str_id']
name = self.given_name + u' ' + client_id
else:
name = self.given_name + u'/' + self.id_['str_id']
return name
def get_control(self):
"""Return the text widget (or similar) to give focus to"""
# page_control is the widget used for paging
page_control = self.shellwidget._page_control
if page_control and page_control.isVisible():
return page_control
else:
return self.shellwidget._control
def get_kernel(self):
"""Get kernel associated with this client"""
return self.shellwidget.kernel_manager
def get_options_menu(self):
"""Return options menu"""
reset_action = create_action(self, _("Remove all variables"),
icon=ima.icon('editdelete'),
triggered=self.reset_namespace)
env_action = create_action(
self,
_("Show environment variables"),
icon=ima.icon('environ'),
triggered=self.shellwidget.get_env
)
syspath_action = create_action(
self,
_("Show sys.path contents"),
icon=ima.icon('syspath'),
triggered=self.shellwidget.get_syspath
)
self.show_time_action.setChecked(self.show_elapsed_time)
additional_actions = [reset_action,
MENU_SEPARATOR,
env_action,
syspath_action,
self.show_time_action]
if self.menu_actions is not None:
return self.menu_actions + additional_actions
else:
return additional_actions
def get_toolbar_buttons(self):
"""Return toolbar buttons list."""
buttons = []
# Code to add the stop button
if self.stop_button is None:
self.stop_button = create_toolbutton(
self,
text=_("Stop"),
icon=self.stop_icon,
tip=_("Stop the current command"))
self.disable_stop_button()
# set click event handler
self.stop_button.clicked.connect(self.stop_button_click_handler)
if self.stop_button is not None:
buttons.append(self.stop_button)
# Reset namespace button
if self.reset_button is None:
self.reset_button = create_toolbutton(
self,
text=_("Remove"),
icon=ima.icon('editdelete'),
tip=_("Remove all variables"),
triggered=self.reset_namespace)
if self.reset_button is not None:
buttons.append(self.reset_button)
if self.options_button is None:
options = self.get_options_menu()
if options:
self.options_button = create_toolbutton(self,
text=_('Options'), icon=ima.icon('tooloptions'))
self.options_button.setPopupMode(QToolButton.InstantPopup)
menu = QMenu(self)
add_actions(menu, options)
self.options_button.setMenu(menu)
if self.options_button is not None:
buttons.append(self.options_button)
return buttons
def add_actions_to_context_menu(self, menu):
"""Add actions to IPython widget context menu"""
inspect_action = create_action(self, _("Inspect current object"),
QKeySequence(get_shortcut('console',
'inspect current object')),
icon=ima.icon('MessageBoxInformation'),
triggered=self.inspect_object)
clear_line_action = create_action(self, _("Clear line or block"),
QKeySequence(get_shortcut(
'console',
'clear line')),
triggered=self.clear_line)
reset_namespace_action = create_action(self, _("Remove all variables"),
QKeySequence(get_shortcut(
'ipython_console',
'reset namespace')),
icon=ima.icon('editdelete'),
triggered=self.reset_namespace)
clear_console_action = create_action(self, _("Clear console"),
QKeySequence(get_shortcut('console',
'clear shell')),
triggered=self.clear_console)
quit_action = create_action(self, _("&Quit"), icon=ima.icon('exit'),
triggered=self.exit_callback)
add_actions(menu, (None, inspect_action, clear_line_action,
clear_console_action, reset_namespace_action,
None, quit_action))
return menu
def set_font(self, font):
"""Set IPython widget's font"""
self.shellwidget._control.setFont(font)
self.shellwidget.font = font
def set_infowidget_font(self):
"""Set font for infowidget"""
font = get_font(option='rich_font')
self.infowidget.set_font(font)
def set_color_scheme(self, color_scheme, reset=True):
"""Set IPython color scheme."""
# Needed to handle not initialized kernel_client
# See issue 6996
try:
self.shellwidget.set_color_scheme(color_scheme, reset)
except AttributeError:
pass
def shutdown(self):
"""Shutdown kernel"""
if self.get_kernel() is not None and not self.slave:
self.shellwidget.kernel_manager.shutdown_kernel()
if self.shellwidget.kernel_client is not None:
background(self.shellwidget.kernel_client.stop_channels)
def interrupt_kernel(self):
"""Interrupt the associanted Spyder kernel if it's running"""
# Needed to prevent a crash when a kernel is not running.
# See issue 6299
try:
self.shellwidget.request_interrupt_kernel()
except RuntimeError:
pass
@Slot()
def restart_kernel(self):
"""
Restart the associated kernel.
Took this code from the qtconsole project
Licensed under the BSD license
"""
sw = self.shellwidget
if not running_under_pytest() and self.ask_before_restart:
message = _('Are you sure you want to restart the kernel?')
buttons = QMessageBox.Yes | QMessageBox.No
result = QMessageBox.question(self, _('Restart kernel?'),
message, buttons)
else:
result = None
if (result == QMessageBox.Yes or
running_under_pytest() or
not self.ask_before_restart):
if sw.kernel_manager:
if self.infowidget.isVisible():
self.infowidget.hide()
sw.show()
try:
sw.kernel_manager.restart_kernel(
stderr=self.stderr_handle)
except RuntimeError as e:
sw._append_plain_text(
_('Error restarting kernel: %s\n') % e,
before_prompt=True
)
else:
# For issue 6235. IPython was changing the setting of
# %colors on windows by assuming it was using a dark
# background. This corrects it based on the scheme.
self.set_color_scheme(sw.syntax_style)
sw._append_html(_("<br>Restarting kernel...\n<hr><br>"),
before_prompt=False)
else:
sw._append_plain_text(
_('Cannot restart a kernel not started by Spyder\n'),
before_prompt=True
)
@Slot(str)
def kernel_restarted_message(self, msg):
"""Show kernel restarted/died messages."""
if not self.is_error_shown:
# If there are kernel creation errors, jupyter_client will
# try to restart the kernel and qtconsole prints a
# message about it.
# So we read the kernel's stderr_file and display its
# contents in the client instead of the usual message shown
# by qtconsole.
try:
stderr = self._read_stderr()
except Exception:
stderr = None
if stderr:
self.show_kernel_error('<tt>%s</tt>' % stderr)
else:
self.shellwidget._append_html("<br>%s<hr><br>" % msg,
before_prompt=False)
@Slot()
def inspect_object(self):
"""Show how to inspect an object with our Help plugin"""
self.shellwidget._control.inspect_current_object()
@Slot()
def clear_line(self):
"""Clear a console line"""
self.shellwidget._keyboard_quit()
@Slot()
def clear_console(self):
"""Clear the whole console"""
self.shellwidget.clear_console()
@Slot()
def reset_namespace(self):
"""Resets the namespace by removing all names defined by the user"""
self.shellwidget.reset_namespace(warning=self.reset_warning,
message=True)
def update_history(self):
self.history = self.shellwidget._history
@Slot(object)
def show_syspath(self, syspath):
"""Show sys.path contents."""
if syspath is not None:
editor = CollectionsEditor()
editor.setup(syspath, title="sys.path contents", readonly=True,
width=600, icon=ima.icon('syspath'))
self.dialog_manager.show(editor)
else:
return
@Slot(object)
def show_env(self, env):
"""Show environment variables."""
self.dialog_manager.show(RemoteEnvDialog(env))
def create_time_label(self):
"""Create elapsed time label widget (if necessary) and return it"""
if self.time_label is None:
self.time_label = QLabel()
return self.time_label
def show_time(self, end=False):
"""Text to show in time_label."""
if self.time_label is None:
return
elapsed_time = time.monotonic() - self.t0
# System time changed to past date, so reset start.
if elapsed_time < 0:
self.t0 = time.monotonic()
elapsed_time = 0
if elapsed_time > 24 * 3600: # More than a day...!
fmt = "%d %H:%M:%S"
else:
fmt = "%H:%M:%S"
if end:
color = "#AAAAAA"
else:
color = "#AA6655"
text = "<span style=\'color: %s\'><b>%s" \
"</b></span>" % (color,
time.strftime(fmt, time.gmtime(elapsed_time)))
self.time_label.setText(text)
def update_time_label_visibility(self):
"""Update elapsed time visibility."""
self.time_label.setVisible(self.show_elapsed_time)
@Slot(bool)
def set_elapsed_time_visible(self, state):
"""Slot to show/hide elapsed time label."""
self.show_elapsed_time = state
if self.time_label is not None:
self.time_label.setVisible(state)
#------ Private API -------------------------------------------------------
def _create_loading_page(self):
"""Create html page to show while the kernel is starting"""
loading_template = Template(LOADING)
loading_img = get_image_path('loading_sprites.png')
if os.name == 'nt':
loading_img = loading_img.replace('\\', '/')
message = _("Connecting to kernel...")
page = loading_template.substitute(css_path=CSS_PATH,
loading_img=loading_img,
message=message)
return page
def _show_loading_page(self):
"""Show animation while the kernel is loading."""
self.shellwidget.hide()
self.infowidget.show()
self.infowidget.setHtml(self.loading_page,
QUrl.fromLocalFile(CSS_PATH))
def _hide_loading_page(self):
"""Hide animation shown while the kernel is loading."""
self.infowidget.hide()
self.shellwidget.show()
self.infowidget.setHtml(BLANK)
self.shellwidget.sig_prompt_ready.disconnect(self._hide_loading_page)
def _read_stderr(self):
"""Read the stderr file of the kernel."""
f = open(self.stderr_file, 'rb')
try:
stderr_text = f.read()
# This is needed since the stderr file could be encoded
# in something different to utf-8.
# See issue 4191
encoding = get_coding(stderr_text)
stderr_text = to_text_string(stderr_text, encoding)
return stderr_text
finally:
f.close()
def _show_mpl_backend_errors(self):
"""
Show possible errors when setting the selected Matplotlib backend.
"""
if not self.external_kernel:
self.shellwidget.silent_execute(
"get_ipython().kernel._show_mpl_backend_errors()")
self.shellwidget.sig_prompt_ready.disconnect(
self._show_mpl_backend_errors)
|
smsSender.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import random
import json
import os
import re
import time
import traceback
from messaging.sms import SmsDeliver
from messaging.sms import SmsSubmit
from Queue import Queue
import flask
app = flask.Flask(__name__)
if not app.debug:
import logging
handler = logging.FileHandler('sms.log')
handler.setLevel(logging.WARNING)
app.logger.addHandler(handler)
FROM_KEY = "from"
TO_KEY = "to"
MSG_KEY = "msg"
STATUS_KEY = "status"
WHEN_KEY = "when"
SMS_COUNT = "smsCount"
Q_SMSINBOX = "smsInbox"
DAILY_MAX = 2
STORAGE_SELECT = 'AT+CPMS="SM","SM","SM"'
def __textResponse(data):
return (data, 200, {'Content-Type': 'text/plain; charset=utf-8'})
@app.route("/sms/outbox", methods=["POST"])
@app.route("/sms/send", methods=["POST"])
def send():
to = flask.request.args.get(TO_KEY, '')
msg = flask.request.args.get(MSG_KEY, '')
# also allow "text" param
msg = flask.request.args.get("text", '') if msg == '' else msg
if to == '' or msg == '':
return __textResponse('"to" or "msg" argument missing or invalid!')
cfg = flask.current_app.config
count = cfg.get(SMS_COUNT, 0)
if count >= DAILY_MAX:
return __textResponse("ERROR: max daily sms reached")
cfg[SMS_COUNT] = (count + 1)
serialOut = cfg.get('serialOut', None)
if serialOut != None:
serialOut.put(json.dumps({TO_KEY: to, MSG_KEY: msg}))
return __textResponse("sent")
else:
return __textResponse('no serial port queue')
@app.route("/sms/reset")
def reset():
cfg = flask.current_app.config
cfg[SMS_COUNT] = 0
return __textResponse("reset done")
@app.route("/sms/in")
@app.route("/sms/inbox")
@app.route("/sms/received")
def received():
pretty = flask.request.args.get('pretty', '') != ''
try:
serialIn = flask.current_app.config.get(Q_SMSINBOX, None)
data = None
nextData = serialIn.get(False) if not serialIn.empty() else None
# get the newest data that's in the queue
while nextData != None:
data = nextData
nextData = serialIn.get(False) if not serialIn.empty() else None
if data != None:
flask.current_app.config['lastSms'] = data
else:
data = flask.current_app.config.get('lastSms', '{}')
if pretty:
return __textResponse(json.dumps(json.loads(data), indent=2))
else:
return __textResponse(data)
except:
reason = traceback.format_exc()
print(u"error occured:\n%s"%reason)
return __textResponse("error")
def __openSerial(SERIALPORT):
import serial
ser = serial.Serial(port=SERIALPORT, baudrate=9600, timeout=0.5, writeTimeout=5)
time.sleep(1)
return ser
def __constructMsg(to, msg):
pdu = SmsSubmit(to, msg).to_pdu()[0]
result = 'ATZ\r' + \
'AT+CMGF=0\r' + \
STORAGE_SELECT + '\r' + \
('AT+CMGS=%s\r'%pdu.length) + \
pdu.pdu + chr(26)
return result
def __readNextAck(ser, altAck=None):
output = ''
done = False
print(u"waiting for ack")
started = time.time()
while not done:
data = ser.readline()
if data:
output += data
lines = re.split("\r[\n]", output)
count = sum(l == 'OK' or l == 'ERROR' or l.startswith('+CMS ERROR') for l in lines)
if altAck != None:
count += altAck(lines)
done = count > 0
else:
time.sleep(0.1)
if time.time() - started > 10:
raise Exception('could not read ack in time')
print(u"acked by: '%s'"%output)
def __serSend(ser, data):
print(u"writing: %s"%data)
ser.write(bytearray(data, 'ASCII'))
def __readSms(ser):
ser.flushInput()
msgs = ['ATZ',
'AT+CMGF=0',
STORAGE_SELECT, # select sim-card storage (required on some modems)
'AT+CMGL=4'] # 4 is "ALL" (get read & unread)
for idx, msg in enumerate(msgs):
__serSend(ser, "%s\r"%msg)
time.sleep(0.1)
if idx==2:
ser.flushInput()
time.sleep(2)
output = ""
done = False
print(u"reading sms from device")
started = time.time()
while not done:
output += ser.readline()
print(u"read: %s"%output)
lines = re.split("\r[\n]", output)
done = lines.count("OK") + lines.count("ERROR") >= 1
if time.time() - started > 20:
raise Exception('could not read sms in time')
ser.flushInput()
lines = re.split("\r[\n]", output)
msgs = []
nextIsPdu = False
smsIdx = 0
for line in lines:
if nextIsPdu:
# print(u"parse pdu: %s"%line)
sms = SmsDeliver(line)
sms.smsIdx = smsIdx
msgs.append(sms)
if line.startswith("+CMGL"):
nextIsPdu = True
match = re.search("\+CMGL: (\d+),", line)
smsIdx = match.group(1)
else:
nextIsPdu = False
smsIdx = 0
msgs = sorted(msgs, key=lambda x:x.date)
for msg in msgs:
print(u"received from %s msg:\n%s\n\n"%(unicode(msg.number), unicode(msg.text)))
return msgs
def __deleteSms(ser, smsList):
__serSend(ser, "ATZ\r")
time.sleep(1)
__serSend(ser, STORAGE_SELECT + '\r')
__readNextAck(ser)
for sms in smsList:
__serSend(ser, "AT+CMGD=%s\r"%sms.smsIdx)
__readNextAck(ser)
ser.flushInput()
def __trimSmsInbox(ser, msgs, maxSize):
deleteMe = []
if len(msgs) > maxSize:
for i in xrange(len(msgs) - maxSize):
deleteMe.append(msgs[i])
if len(deleteMe) > 0:
print(u"deleting %s sms"%len(deleteMe))
__deleteSms(ser, deleteMe)
else:
print(u"no sms to delete, only %s sms"%len(msgs))
def __toDict(msg):
return {'idx':msg.smsIdx, FROM_KEY: msg.number, MSG_KEY: msg.text,
WHEN_KEY: str(msg.date)}
def __serialLoop(smsInbox, serialOut, idleIn):
ser = None
serialLastClosed = time.time()
serialLastReopened = time.time() - 120
while True:
time.sleep(0.2)
while ser == None or not ser.isOpen():
SERIALPORT = '/dev/ttyUsbModem'
print("opening serial connection %s"%(ser == None))
if time.time() - serialLastReopened < 60:
print(u"reopening serial port for the second time in 1 minute, resetting modem")
import resetModem
resetModem.resetModem()
print(u"modem reset done, giving time for modem reboot")
time.sleep(30)
serialLastReopened = time.time()
try:
ser = None
ser = __openSerial(SERIALPORT)
ser.flushInput()
except:
print("error writing, try reopening")
time.sleep(0.5)
try:
if ser != None:
ser.close()
ser = None
except:
pass
sendMe = None
try:
sendMe = serialOut.get(False)
except:
pass
if sendMe != None:
try:
msg = json.loads(sendMe)
encodedMsg = __constructMsg(msg.get(TO_KEY), msg.get(MSG_KEY))
print(u"sending sms to %s"%msg.get(TO_KEY))
ser.flushInput()
parts = encodedMsg.split("\r")
for idx, part in enumerate(parts):
__serSend(ser, part+"\r")
#if idx != len(parts):
__readNextAck(ser, lambda x: len([a for a in x if a.startswith(">")]))
ser.flush()
except:
print(u"error while writing: '%s' error: %s"%(sendMe, traceback.format_exc()))
                smsInbox.put(json.dumps({STATUS_KEY:'error writing to serial %s'%(sys.exc_info()[0])}))
ser = None
else:
# nothing to send
if time.time() - serialLastClosed > 5 * 60:
# more than 5 minutes since last serial re-open, force reopen serial
# this is done to give modem a chance to reset
serialLastClosed = time.time()
try:
ser.close()
ser = None
time.sleep(5)
except:
pass
else:
# check for incoming messages
try:
time.sleep(4)
msgs = []
msgs = __readSms(ser)
if len(msgs) > 0:
all = []
for msg in msgs:
msgDict = __toDict(msg)
all.append(msgDict)
idleIn.put(json.dumps(msgDict))
smsInbox.put(json.dumps(all))
                        # delete after successfully broadcasting
# TODO: it's not certain that they have been broadcasted, they are just in the queue
# __deleteSms(ser, msgs)
__trimSmsInbox(ser, msgs, 5)
except:
reason = traceback.format_exc()
print(u"error reading sms inbox:\n%s"%reason)
ser = None
def idleBroadcaster(idleMsgs, serialOut):
from MqHelper import MqHelper
mq = MqHelper('sms')
def callback(topic, msg):
print('mq callback')
serialOut.put(msg)
mq.subscribe('/sms/outbox', callback)
while True:
try:
mq.loop()
except:
print(u"error in mq loop")
time.sleep(2)
time.sleep(0.2)
msg = None
try:
msg = idleMsgs.get(False)
except:
pass
if msg != None:
mq.send('/sms/inbox', msg)
if __name__ == "__main__":
import threading
serialOut = Queue()
smsInbox = Queue()
serialIdleIn = Queue()
def serialWorker():
while True:
try:
__serialLoop(smsInbox, serialOut, serialIdleIn)
except:
reason = traceback.format_exc()
print(u"fatal exception in serialLoop reason:\n%s"%reason)
time.sleep(10)
t = threading.Thread(target=serialWorker)
t.daemon = True
t.start()
broadcaster = threading.Thread(target=idleBroadcaster, args=[serialIdleIn, serialOut])
broadcaster.daemon = True
broadcaster.start()
if len(sys.argv) > 1:
print(u"test mode, not starting http server")
time.sleep(2)
print(u"sending")
# send balance-check SMS (for austrian provider 'HOT')
serialOut.put(json.dumps({TO_KEY:"6700", MSG_KEY:"GUT"}))
for i in range(100):
try:
msg = serialIdleIn.get(False)
if msg != None:
print(u"%s"%msg)
except:
pass
time.sleep(2)
sys.exit(0)
app.config[SMS_COUNT] = 0
app.config['serialOut'] = serialOut
app.config[Q_SMSINBOX] = smsInbox
app.run(host="0.0.0.0", port=5353) # 5353 somehow looks like smsm
# example commandline how to send sms via mosquitto:
# mosquitto_pub -t '/sms/outbox' -m '{"to":"6700","msg":"GUT"}'
# example of how to send sms via curl
# curl --data "" "http://127.0.0.1:5353/sms/send?to=6700&msg=GUT"
|
rbssh.py
|
#!/usr/bin/env python
#
# rbssh.py -- A custom SSH client for use in Review Board.
#
# This is used as an ssh replacement that can be used across platforms with
# a custom .ssh directory. OpenSSH doesn't respect $HOME, instead reading
# /etc/passwd directly, which causes problems for us. Using rbssh, we can
# work around this.
#
#
# Copyright (c) 2010-2011 Beanbag, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import getpass
import logging
import os
import select
import socket
import sys
import tempfile
import time
from optparse import OptionParser
import paramiko
from reviewboard import get_version_string
from reviewboard.scmtools import sshutils
from reviewboard.scmtools.core import SCMTool
DEBUG = os.getenv('DEBUG_RBSSH')
options = None
class PlatformHandler(object):
def __init__(self, channel):
self.channel = channel
def shell(self):
        raise NotImplementedError
def transfer(self):
        raise NotImplementedError
def process_channel(self, channel):
if channel.closed:
return False
logging.debug('!! process_channel\n')
if channel.recv_ready():
data = channel.recv(4096)
if not data:
logging.debug('!! stdout empty\n')
return False
sys.stdout.write(data)
sys.stdout.flush()
if channel.recv_stderr_ready():
data = channel.recv_stderr(4096)
if not data:
logging.debug('!! stderr empty\n')
return False
sys.stderr.write(data)
sys.stderr.flush()
if channel.exit_status_ready():
logging.debug('!!! exit_status_ready\n')
return False
return True
def process_stdin(self, channel):
logging.debug('!! process_stdin\n')
try:
buf = os.read(sys.stdin.fileno(), 1)
except OSError:
buf = None
if not buf:
logging.debug('!! stdin empty\n')
return False
result = channel.send(buf)
return True
class PosixHandler(PlatformHandler):
def shell(self):
import termios
import tty
oldtty = termios.tcgetattr(sys.stdin)
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
self.handle_communications()
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
def transfer(self):
import fcntl
fd = sys.stdin.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
self.handle_communications()
def handle_communications(self):
while True:
rl, wl, el = select.select([self.channel, sys.stdin], [], [])
if self.channel in rl:
if not self.process_channel(self.channel):
break
if sys.stdin in rl:
if not self.process_stdin(self.channel):
self.channel.shutdown_write()
break
class WindowsHandler(PlatformHandler):
def shell(self):
self.handle_communications()
def transfer(self):
self.handle_communications()
def handle_communications(self):
import threading
logging.debug('!! begin_windows_transfer\n')
self.channel.setblocking(0)
def writeall(channel):
while self.process_channel(channel):
pass
logging.debug('!! Shutting down reading\n')
channel.shutdown_read()
writer = threading.Thread(target=writeall, args=(self.channel,))
writer.start()
try:
while self.process_stdin(self.channel):
pass
except EOFError:
pass
logging.debug('!! Shutting down writing\n')
self.channel.shutdown_write()
def print_version(option, opt, value, parser):
parser.print_version()
sys.exit(0)
def parse_options(args):
global options
hostname = None
parser = OptionParser(usage='%prog [options] [user@]hostname [command]',
version='%prog ' + get_version_string())
parser.disable_interspersed_args()
parser.add_option('-l',
dest='username', metavar='USERNAME', default=None,
help='the user to log in as on the remote machine')
parser.add_option('-p', '--port',
type='int', dest='port', metavar='PORT', default=None,
help='the port to connect to')
parser.add_option('-q', '--quiet',
action='store_true', dest='quiet', default=False,
help='suppress any unnecessary output')
parser.add_option('-s',
dest='subsystem', metavar='SUBSYSTEM', default=None,
nargs=2,
help='the subsystem to use (ssh or sftp)')
parser.add_option('-V',
action='callback', callback=print_version,
help='display the version information and exit')
(options, args) = parser.parse_args(args)
if options.subsystem:
if len(options.subsystem) != 2:
parser.error('-s requires a hostname and a valid subsystem')
elif options.subsystem[1] not in ('sftp', 'ssh'):
parser.error('Invalid subsystem %s' % options.subsystem[1])
hostname, options.subsystem = options.subsystem
if len(args) == 0 and not hostname:
parser.print_help()
sys.exit(1)
if not hostname:
hostname = args[0]
args = args[1:]
return hostname, args
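# Illustrative invocations only (the host names and repository path below are made
# up), showing how the options defined in parse_options() are used:
#
#   rbssh.py -p 2222 user@code.example.com git-upload-pack '/repo.git'
#   rbssh.py -s code.example.com sftp
#
# The first form runs a remote command on the given host; the second requests the
# sftp subsystem, where -s takes the hostname and the subsystem name as a pair.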
def main():
if DEBUG:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-18s %(levelname)-8s '
'%(message)s',
datefmt='%m-%d %H:%M',
filename='rbssh.log',
filemode='w')
logging.debug('%s' % sys.argv)
logging.debug('PID %s' % os.getpid())
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(logging.Formatter('%(message)s'))
ch.addFilter(logging.Filter('root'))
logging.getLogger('').addHandler(ch)
path, command = parse_options(sys.argv[1:])
if '://' not in path:
path = 'ssh://' + path
username, hostname = SCMTool.get_auth_from_uri(path, options.username)
if username is None:
username = getpass.getuser()
logging.debug('!!! %s, %s, %s' % (hostname, username, command))
client = sshutils.get_ssh_client()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
attempts = 0
password = None
success = False
while True:
try:
client.connect(hostname, username=username, password=password)
break
except paramiko.AuthenticationException, e:
if attempts == 3 or not sys.stdin.isatty():
logging.error('Too many authentication failures for %s' %
username)
sys.exit(1)
attempts += 1
password = getpass.getpass("%s@%s's password: " %
(username, hostname))
except paramiko.SSHException, e:
logging.error('Error connecting to server: %s' % e)
sys.exit(1)
except Exception, e:
logging.error('Unknown exception during connect: %s (%s)' %
(e, type(e)))
sys.exit(1)
transport = client.get_transport()
channel = transport.open_session()
if sys.platform in ('cygwin', 'win32'):
logging.debug('!!! Using WindowsHandler')
handler = WindowsHandler(channel)
else:
logging.debug('!!! Using PosixHandler')
handler = PosixHandler(channel)
if options.subsystem == 'sftp':
logging.debug('!!! Invoking sftp subsystem')
channel.invoke_subsystem('sftp')
handler.transfer()
elif command:
logging.debug('!!! Sending command %s' % command)
channel.exec_command(' '.join(command))
handler.transfer()
else:
logging.debug('!!! Opening shell')
channel.get_pty()
channel.invoke_shell()
handler.shell()
logging.debug('!!! Done')
status = channel.recv_exit_status()
client.close()
return status
if __name__ == '__main__':
main()
# ... with blackjack, and hookers.
|
sparmap.py
|
"""This module contains a simple parallel map implementation based on
the multiprocessing package. It allows the use of bounded functions
as processing functions, and maintains memory usage under control by
using bounded queues internally.
"""
from multiprocessing import Queue, Process
from collections import namedtuple
import sys
import traceback
TOMBSTONE = '7PE&YeDu5#ybgTf0rJgk9u!'
_MAX_QUEUE_SIZE = 100
_Signal = namedtuple('Signal', ['termination', 'exceptions'])
class Signal(_Signal):
"""Controls the signaling behavior of sparmap.
If termination is set to True, workers will receive sparmap.TOMBSTONE
when there is no more work to be done. Otherwise they will be simply
shut down.
    If exceptions is set to True, sparmap will place a tuple containing the
    (input element, exception type, exception value) in the results whenever some input
    causes a worker to crash with an exception. Otherwise, such exceptions will simply
cause the worker to die and generate no other apparent behavior to clients.
"""
pass
SIGNAL_ALL = Signal(True, True)
SIGNAL_NONE = Signal(False, False)
def parmap(source, fun, workers, max_queue_size=_MAX_QUEUE_SIZE, signal=SIGNAL_NONE):
"""Runs a parallel map operation over a source sequence.
:param source: a sequence over which to run the parallel map.
:param fun: a function which will be applied once for each
element. The function takes a single parameter.
:param workers: number of parallel workers to use.
    :param max_queue_size: the maximum size of the two internal queues (input and output).
:return: an iterator which gets lazily populated with results as they become
available.
"""
return parflatmap(source, _mapper(fun), workers, max_queue_size, signal)
def parflatmap(source, fun, workers, max_queue_size=_MAX_QUEUE_SIZE, signal=SIGNAL_NONE):
"""Runs a flat parallel map over a source sequence. The main
difference of flatmap with respect to a 'regular' parallel map is
that in flatmap the function 'fun' is supplied an extra parameter
-- an emitter -- which 'fun' can call to output as many records as
it wants in response to a single record being input.
:param source: a sequence over which to run the parallel flat map.
:param fun: a function which will be applied once for each
element. The function takes two parameters - one for the
element, another for an emitter function.
:param workers: number of parallel workers to use.
    :param max_queue_size: the maximum size of the two internal
    queues (input and output).
:return: an iterator which gets lazily populated with results as
they become available.
"""
input_queue = Queue(max_queue_size)
output_queue = Queue(max_queue_size)
# Workers (process data).
processes = [_started(
Process(target=_worker, args=(input_queue, output_queue, fun, signal))
) for i in range(0, workers)]
# Pusher (pushes data items into work queue).
pusher = _started(Process(target=_pusher, args=(source, input_queue, workers)))
return _result(output_queue, processes, pusher)
def _started(process):
    process.daemon = True
process.start()
return process
def _pusher(input, input_queue, n_workers):
try:
for task in input:
input_queue.put(task)
finally:
for i in range(0, n_workers):
input_queue.put(TOMBSTONE)
def _worker(input_queue, output_queue, fun, signal):
emit = lambda x: output_queue.put(x)
record = 'if you see this, it is a bug in sparmap'
try:
record = input_queue.get()
while record != TOMBSTONE:
fun(record, emit)
sys.stderr.flush()
record = input_queue.get()
except Exception:
ex_type, value = sys.exc_info()[:2]
if signal.exceptions:
output_queue.put((record, ex_type, value))
finally:
if signal.termination:
try:
# tries to dispatch tombstone...
fun(TOMBSTONE, emit)
except:
# ... and if it fails we don't care. Just
# log the thing.
traceback.print_exc()
        # Whatever happened above, death must still be reported here or
        # _result will never terminate.
sys.stderr.flush()
output_queue.put(TOMBSTONE)
sys.stderr.flush()
def _mapper(fun):
return lambda record, emit: emit(fun(record))
def _result(output_queue, workers, pusher):
tombstones = 0
n_workers = len(workers)
while tombstones != n_workers:
result = output_queue.get()
if result == TOMBSTONE:
tombstones += 1
else:
yield result
# There are no more workers alive, no sense
# in having a pusher.
pusher.terminate()
pusher.join()
# Waits for children to die.
for worker in workers:
worker.join()
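# A minimal usage sketch, not part of the original module: squares a range of
# integers with parflatmap using a module-level function (kept at module level so
# it stays picklable under spawn-based multiprocessing start methods). Results
# arrive in completion order, not input order, because the workers drain the
# shared queues independently.
def _demo_square(record, emit):
    # Emit one squared value for each input record.
    emit(record * record)

if __name__ == '__main__':
    for value in parflatmap(range(10), _demo_square, workers=4):
        print(value)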
|
reducer.py
|
import os
import threading
from fedn.clients.reducer.control import ReducerControl
from fedn.clients.reducer.interfaces import ReducerInferenceInterface
from fedn.clients.reducer.restservice import ReducerRestService
from fedn.clients.reducer.state import ReducerStateToString
from fedn.common.security.certificatemanager import CertificateManager
from fedn.clients.reducer.statestore.mongoreducerstatestore import MongoReducerStateStore
class InvalidReducerConfiguration(Exception):
pass
class MissingReducerConfiguration(Exception):
pass
class Reducer:
def __init__(self, statestore):
""" """
self.statestore = statestore
config = self.statestore.get_reducer()
if not config:
print("REDUCER: Failed to retrive Reducer config, exiting.")
raise MissingReducerConfiguration()
self.name = config['name']
self.token = config['token']
try:
path = config['path']
except KeyError:
path = os.getcwd()
self.certificate_manager = CertificateManager(os.getcwd() + "/certs/")
self.control = ReducerControl(self.statestore)
self.inference = ReducerInferenceInterface()
rest_certificate = self.certificate_manager.get_or_create("reducer")
self.rest = ReducerRestService(config['name'], self.control, self.certificate_manager, certificate=rest_certificate)
def run(self):
threading.Thread(target=self.rest.run, daemon=True).start()
import time
from datetime import datetime
try:
old_state = self.control.state()
t1 = datetime.now()
while True:
time.sleep(1)
if old_state != self.control.state():
delta = datetime.now() - t1
print("Reducer in state {} for {} seconds. Entering {} state".format(ReducerStateToString(old_state),delta.seconds,ReducerStateToString(self.control.state())), flush=True)
t1 = datetime.now()
old_state = self.control.state()
self.control.monitor()
except (KeyboardInterrupt, SystemExit):
print("Exiting..", flush=True)
|
cbtogen.py
|
"""
cbtogen
~~~~~~~
Simple thread-based code which can convert a call-back style data generation
pattern to a generator style pattern.
Sample use case is to consume data from `xml.sax.parse`. This function takes
a callback object to which all the parsed XML data is sent. By using the
classes in this module, we can automatically spin up a separate thread which
calls the `parse` function and which writes the data to a queue. The consumer
code can consume this data from a Python generator without worrying about the
threading code.
"""
import threading, queue
class EarlyTerminate(Exception):
"""Raised to indicate that we do not require any further data and that,
if possible, the provider of data should cleanup and exit."""
pass
class CallbackToGenerator():
"""Gives a Context Manager which returns a generator which yields the data
sent to the callback.
Start with a data provider which sends data to a callback object. You
should write your own callback object which forwards on the data to the
:method:`send` method of this class. Then call :method:`set_handler` with
the data provider and your callback. Hence:
class HandlerInterface():
def one(self, data):
pass
def two(self, data):
pass
class OurHandler(HandlerInterface):
def __init__(self, delegate):
self._delegate = delegate
def one(self, data):
self._delegate.send("one", data)
def two(self, data):
self._delegate.send("two", data)
# Normal usage of the data provider:
handler = HandlerInterface()
provider(handler)
# The `handler` gets pushed the data as a callback
    # Usage of this class to provide a generator
generator = cbtogen.CallbackToGenerator()
handler = OurHandler(generator)
generator.set_handler(provider, handler)
with generator:
for x in generator:
# x is of type Wrapper
print("Got {} / {}".format(x.name, x.data))
# Use of "with" ensures that if an exception is thrown, the thread
# is automatically closed.
"""
def __init__(self, queuesize=65536):
self._queue = queue.Queue(maxsize=queuesize)
self._terminate = False
def notify(self, data):
"""Notify of some data. Your callback handler should, after possible
processing, push data to this method. Can accept any data, but if
you notify of an :class:`Exception` then the exception will be raised
by the iterator; if you notify with the `StopIteration` type then the
iterator will stop (but the *strongly* preferred way to end iteration
is to let the callback thread end.)
Will raise an exception of type :class:`EarlyTerminate` to signal that
data generation should be stopped.
:param data: The data object to add to the internal queue.
"""
if self._terminate:
self._queue.put(StopIteration)
raise EarlyTerminate()
self._queue.put(data)
def send(self, name, data):
"""Standardised way to send data. The iterator will yield an instance
of :class:`Wrapper` with the `name`/`data` pair.
:param name: Name of the callback event which generated this data.
:param data: Tuple of data, or `None`.
"""
self.notify(Wrapper(name, data))
def __enter__(self):
def ourtask():
try:
self._func()
except Exception as ex:
self._queue.put(ex)
self._queue.put(StopIteration)
self._thread = threading.Thread(target=ourtask)
self._thread.start()
return iter(self)
def __exit__(self, type, value, traceback):
self._terminate = True
while self._thread.is_alive():
try:
self._queue.get(timeout=0.1)
except queue.Empty:
pass
def __iter__(self):
while True:
try:
datum = self._queue.get(timeout=1)
if datum is StopIteration:
break
if isinstance(datum, Exception):
raise datum
yield datum
except queue.Empty:
if not self._thread.is_alive():
break
def set_callback_function(self, func):
"""Set the function to invoke on a seperate thread to generate data.
See also :method:`set_handler` which can be more useful.
:param func: A callable object (i.e. a function object which can be
invoked with no parameters).
"""
self._func = func
def set_handler(self, func, handler):
"""Set the function to invoke on a separate thread to generate data.
:param func: A callable object with signature `func(handler)`.
:param handler: The handler object of the type expected by `func`.
"""
def routine():
func(handler)
self.set_callback_function(routine)
class Wrapper():
"""Standard way to wrapping the result of a callback into a "name" and a
tuple of "data".
:param name: String name to identify which callback method was invoked.
Typically just the name of the callback method.
:param data: The wrapped data, or `None`.
"""
def __init__(self, name, data=None):
self._name = name
self._data = data
@property
def name(self):
"""The name of the callback event which received the data."""
return self._name
@property
def data(self):
"""The wrapped data, typically an object, a tuple, or `None`."""
return self._data
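# Hedged usage sketch (editorial addition): wiring `xml.sax.parse`, the use case named in
# the module docstring, to CallbackToGenerator. `_SaxForwarder` and `_iter_elements` are
# illustrative helpers, not part of this module.
import xml.sax
class _SaxForwarder(xml.sax.ContentHandler):
    """Forwards selected SAX callbacks to the generator via `send`."""
    def __init__(self, delegate):
        xml.sax.ContentHandler.__init__(self)
        self._delegate = delegate
    def startElement(self, name, attrs):
        self._delegate.send("start", (name, dict(attrs)))
    def endElement(self, name):
        self._delegate.send("end", (name,))
def _iter_elements(xml_path):
    gen = CallbackToGenerator()
    handler = _SaxForwarder(gen)
    # `xml.sax.parse` blocks, so it runs on the generator's worker thread.
    gen.set_handler(lambda h: xml.sax.parse(xml_path, h), handler)
    with gen:
        for wrapped in gen:
            yield wrapped.name, wrapped.data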
|
usb_counter_fpga.py
|
#!/usr/bin/env python3
"""
USB mini counter based on FPGA
Collection of functions to simplify the integration of the USB counter in
Python scripts.
"""
import multiprocessing
import os
import time
from os.path import expanduser
from typing import Tuple
import numpy as np
import serial
import serial.tools.list_ports
from ..g2lib import g2lib
from . import serial_connection
READEVENTS_PROG = expanduser("~") + "/programs/usbcntfpga/apps/readevents4a"
TTL = "TTL"
NIM = "NIM"
def pattern_to_channel(pattern):
if pattern == 4:
return 3
elif pattern == 8:
return 4
elif pattern == 1 or pattern == 2 or pattern == 0:
return pattern
def channel_to_pattern(channel):
return int(2 ** (channel - 1))
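# Worked example (editorial addition): channels are encoded as one-hot bit patterns, so
# channel_to_pattern(1) == 1, channel_to_pattern(3) == 4 and channel_to_pattern(4) == 8,
# while pattern_to_channel inverts the mapping for single-channel patterns
# (pattern_to_channel(8) == 4). Coincidence patterns such as 0b1100 (channels 3 and 4)
# are handled by masking, e.g. (pattern & channel_to_pattern(3)) != 0.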
# class Threading(object):
# """ Threading example class
# The run() method will be started and it will run in the background
# until the application exits.
# ref: http://sebastiandahlgren.se/2014/06/27/running-a-method-as-a-background-thread-in-python/ # noqa
# """
# def __init__(self, interval=1):
# """ Constructor
# :type interval: int
# :param interval: Check interval, in seconds
# """
# self.interval = interval
# thread = threading.Thread(target=self.run, args=())
# thread.daemon = True # Daemonize thread
# thread.start() # Start the execution
# def run(self):
# """ Method that runs forever """
# while True:
# # Do something
# print('Doing something important in the background')
# time.sleep(self.interval)
class TimeStampTDC1(object):
"""
    The USB counter is represented by this class; communication with the device
    runs over a generic serial connection.
"""
DEVICE_IDENTIFIER = "TDC1"
TTL_LEVELS = "TTL"
NIM_LEVELS = "NIM"
def __init__(
self, device_path=None, integration_time=1, mode="singles", level="NIM"
):
"""
        Initializes the counter device.
        It takes the full path to the serial device as an argument;
        otherwise it initializes the first counter found on the system.
"""
if device_path is None:
device_path = (
serial_connection.search_for_serial_devices(self.DEVICE_IDENTIFIER)
)[0]
print("Connected to", device_path)
self._device_path = device_path
# self._com = serial_connection.SerialConnection(device_path)
self._com = serial.Serial(device_path, timeout=0.1)
self._com.write(b"\r\n")
self._com.readlines()
self.mode = mode
self.level = level
self.int_time = integration_time
self.accumulate_timestamps = False # flag for timestamp accumulation service
self.accumulated_timestamps_filename = (
"timestamps.raw" # binary file where timestamps are stored
)
time.sleep(0.2)
@property
def int_time(self):
"""
Controls the integration time set in the counter
:getter: returns integration time in seconds
        :setter: sets the integration time in seconds
:param value: integration time in seconds
:type value: int
:returns: integration time in seconds
:rtype: int
"""
self._com.write(b"time?\r\n")
return int(self._com.readline())
@int_time.setter
def int_time(self, value: float):
value *= 1000
if value < 1:
print("Invalid integration time.")
else:
self._com.write("time {:d};".format(int(value)).encode())
self._com.readlines()
def get_counts(self, duration_seconds: int = None) -> Tuple:
"""[summary]
Args:
duration_seconds (int, optional): [description]. Defaults to None.
Returns:
List: [description]
"""
self._com.timeout = 0.05
if duration_seconds is None:
duration_seconds = self.int_time
else:
self.int_time = duration_seconds
self._com.timeout = duration_seconds
self._com.write(b"singles;counts?\r\n")
t_start = time.time()
while True:
if self._com.inWaiting() > 0:
break
if time.time() > (t_start + duration_seconds + 0.1):
print(time.time() - t_start)
raise serial.SerialTimeoutException("Command timeout")
counts = self._com.readline()
self._com.timeout = 0.05
return tuple([int(i) for i in counts.split()])
@property
def mode(self):
# mode = int(self._com.getresponse('MODE?'))
self._com.write(b"mode?\r\n")
mode = int(self._com.readline())
if mode == 0:
return "singles"
if mode == 1:
return "pairs"
if mode == 3:
return "timestamp"
@mode.setter
def mode(self, value):
if value.lower() == "singles":
self.write_only("singles")
if value.lower() == "pairs":
self.write_only("pairs")
if value.lower() == "timestamp":
self.write_only("timestamp")
def write_only(self, cmd):
self._com.write((cmd + "\r\n").encode())
self._com.readlines()
time.sleep(0.1)
@property
def level(self):
"""Set type of incoming pulses"""
self._com.write(b"level?\r\n")
return self._com.readline()
# return self._com.getresponse('LEVEL?')
@level.setter
def level(self, value: str):
if value.lower() == "nim":
self.write_only("NIM")
elif value.lower() == "ttl":
self.write_only("TTL")
else:
print("Accepted input is a string and either 'TTL' or 'NIM'")
# time.sleep(0.1)
@property
def threshold(self):
"""Returns the threshold level"""
return self.level
@threshold.setter
def threshold(self, value: float):
"""Sets the the threshold the input pulse needs to exceed to trigger an event.
Args:
value (float): threshold value in volts can be negative or positive
"""
if value < 0:
self.write_only("NEG {}".format(value))
else:
self.write_only("POS {}".format(value))
@property
def clock(self) -> str:
"""Choice of clock"""
self._com.write("REFCLK?\r\n")
return self._com.readline()
@clock.setter
def clock(self, value: str):
"""Set the clock source internel or external
Args:
value (str): 0 autoselect clock, 1 force external clock,
2 force internal clock reference
"""
self.write_only("REFCLK {}".format(value).encode())
def _stream_response_into_buffer(self, cmd: str, acq_time: float) -> bytes:
"""Streams data from the timestamp unit into a buffer.
Args:
cmd (str): Executes the given command to start the stream.
acq_time (float): Reads data for acq_time seconds.
Returns:
bytes: Returns the raw data.
"""
# this function bypass the termination character
# (since there is none for timestamp mode),
# streams data from device for the integration time.
# Stream data for acq_time seconds into a buffer
ts_list = []
time0 = time.time()
self._com.write((cmd + "\r\n").encode())
while (time.time() - time0) <= acq_time:
ts_list.append(self._com.read((1 << 20) * 4))
self._com.write(b"abort\r\n")
self._com.readlines()
return b"".join(ts_list)
def get_counts_and_coincidences(self, t_acq: float = 1) -> Tuple[int, ...]:
"""Counts single events and coinciding events in channel pairs.
Args:
            t_acq (float, optional): Time duration to count events in separate
channels and coinciding events in 2 channels. Defaults to 1.
Returns:
            Tuple[int, int, int, int, int, int, int, int]: Events ch1, ch2, ch3, ch4;
Coincidences: ch1-ch3, ch1-ch4, ch2-ch3, ch2-ch4
"""
self.mode = "pairs"
self._com.readlines() # empties buffer
if t_acq is None:
t_acq = self.int_time
else:
self.int_time = t_acq
self._com.timeout = t_acq
self._com.write(b"pairs;counts?\r\n")
t_start = time.time()
while True:
if self._com.inWaiting() > 0:
break
if time.time() > (t_start + t_acq + 0.1):
print(time.time() - t_start)
raise serial.SerialTimeoutException("Command timeout")
singlesAndPairs = self._com.readline()
self._com.timeout = 1
return tuple([int(i) for i in singlesAndPairs.split()])
def get_timestamps(self, t_acq: float = 1):
"""Acquires timestamps and returns 2 lists. The first one containing
the time and the second the event channel.
Args:
t_acq (float, optional):
                Duration of the timestamp acquisition in seconds. Defaults to 1.
Returns:
Tuple[List[int], List[str]]:
Returns the event times in ns and the corresponding event channel.
                The channels are returned as strings where a 1 indicates the
trigger channel.
For example an event in channel 2 would correspond to "0010".
Two coinciding events in channel 3 and 4 correspond to "1100"
"""
self.mode = "singles"
level = float(self.level.split()[0])
level_str = "NEG" if level < 0 else "POS"
self._com.readlines() # empties buffer
# t_acq_for_cmd = t_acq if t_acq < 65 else 0
cmd_str = "INPKT;{} {};time {};timestamp;counts?;".format(
level_str, level, (t_acq if t_acq < 65 else 0) * 1000
)
buffer = self._stream_response_into_buffer(cmd_str, t_acq + 0.1)
# '*RST;INPKT;'+level+';time '+str(t_acq * 1000)+';timestamp;counts?',t_acq+0.1) # noqa
# buffer contains the timestamp information in binary.
# Now convert them into time and identify the event channel.
# Each timestamp is 32 bits long.
bytes_hex = buffer[::-1].hex()
ts_word_list = [
int(bytes_hex[i : i + 8], 16) for i in range(0, len(bytes_hex), 8)
][::-1]
ts_list = []
event_channel_list = []
periode_count = 0
periode_duration = 1 << 27
prev_ts = -1
for ts_word in ts_word_list:
time_stamp = ts_word >> 5
pattern = ts_word & 0x1F
if prev_ts != -1 and time_stamp < prev_ts:
periode_count += 1
# print(periode_count)
prev_ts = time_stamp
if (pattern & 0x10) == 0:
ts_list.append(time_stamp + periode_duration * periode_count)
event_channel_list.append("{0:04b}".format(pattern & 0xF))
ts_list = np.array(ts_list) * 2
event_channel_list = event_channel_list
return ts_list, event_channel_list
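    # Worked example (editorial addition) of the 32-bit timestamp word format decoded
    # above: the upper 27 bits hold the coarse time in units of 2 ns, the lower 5 bits
    # hold the event pattern. A word of value (100 << 5) | 0b00010 therefore decodes to
    # an event at 100 * 2 = 200 ns on channel 2 ("0010"); words with bit 0x10 set are
    # skipped, and a decrease in the coarse time marks a rollover of the 2**27 counter.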
def count_g2(
self,
t_acq: float,
bin_width: int = 2,
bins: int = 500,
ch_start: int = 1,
ch_stop: int = 2,
ch_stop_delay: float = 0,
):
"""
Returns pairs and singles counts from usbcounter timestamp data.
Computes g2 between channels 1 and 2 of timestamp
and sum the coincidences within specified window
:param t_acq: acquisition time in seconds
:type t_acq: float
        :returns: dict containing the ch_start counts, ch_stop counts, actual
            acquisition time, time bin array and histogram, under the keys
            'channel1', 'channel2', 'total_time', 'time_bins' and 'histogram'
        :rtype: dict
Notes
-----
Actual acquisition time is obtained from the returned timestamps.
This might differ slightly from the acquisition time passed to the timestamp
device in the arguments of this function. If there are no counts in a given
timespan, no timestamps are obtained. In this case, t_acq is taken to be the
actual acquisition time.
"""
t, channel = self.get_timestamps(t_acq)
"""
OLDER CODE:
"""
# channel = np.array([pattern_to_channel(int(i, 2)) for i in channel])
# t_ch1 = t[channel == ch_start]
# t_ch2 = t[channel == ch_stop]
"""
NEWER CODE:
convert string expression of channel elements to a number, and mask it against
desired channels the mask ensures that timestamp events that arrive at the
channels within one time resolution is still registered.
"""
t_ch1 = t[[int(ch, 2) & channel_to_pattern(ch_start) != 0 for ch in channel]]
t_ch2 = t[[int(ch, 2) & channel_to_pattern(ch_stop) != 0 for ch in channel]]
histo = g2lib.delta_loop(
t_ch1, t_ch2 + ch_stop_delay, bins=bins, bin_width_ns=bin_width
)
total_time = t[-1] if len(t) > 0 else t_acq
return {
"channel1": len(t_ch1),
"channel2": len(t_ch2),
"total_time": total_time,
"time_bins": np.arange(0, bins * bin_width, bin_width),
"histogram": histo,
}
def help(self):
"""
Prints device help text
"""
self._com.write(b"help\r\n")
        for line in self._com.readlines():
            print(line)
def _continuous_stream_timestamps_to_file(self, filename: str):
"""
Indefinitely streams timestamps to a file
WARNING: ensure there is sufficient disk space: 32 bits x total events required
"""
self.mode = "singles"
level = float(self.level.split()[0])
level_str = "NEG" if level < 0 else "POS"
self._com.readlines() # empties buffer
# t_acq_for_cmd = t_acq if t_acq < 65 else 0
cmd_str = "INPKT;{} {};time {};timestamp;counts?;".format(level_str, level, 0)
self._com.write((cmd_str + "\r\n").encode())
while True:
buffer = self._com.read((1 << 20) * 4)
with open(filename, "ab+") as f:
f.write(buffer)
f.close()
def start_continuous_stream_timestamps_to_file(self):
"""
        Starts the timestamp streaming service to file in the background
"""
        if os.path.exists(self.accumulated_timestamps_filename):
            # remove previous accumulation file for a fresh start
            os.remove(self.accumulated_timestamps_filename)
self.accumulate_timestamps = True
self.proc = multiprocessing.Process(
target=self._continuous_stream_timestamps_to_file,
args=(self.accumulated_timestamps_filename,),
)
self.proc.daemon = True # Daemonize thread
self.proc.start() # Start the execution
def stop_continuous_stream_timestamps_to_file(self):
"""
        Stops the timestamp streaming service to file in the background
"""
self.accumulate_timestamps = False
self.proc.terminate()
time.sleep(0.5)
self.proc.close()
self._com.write(b"abort\r\n")
self._com.readlines()
def read_timestamps_bin(self, binary_stream):
"""
Reads the timestamps accumulated in a binary sequence
Returns:
Tuple[List[float], List[str]]:
Returns the event times in ns and the corresponding event channel.
                The channels are returned as strings where a 1 indicates the
trigger channel.
For example an event in channel 2 would correspond to "0010".
Two coinciding events in channel 3 and 4 correspond to "1100"
"""
bytes_hex = binary_stream[::-1].hex()
ts_word_list = [
int(bytes_hex[i : i + 8], 16) for i in range(0, len(bytes_hex), 8)
][::-1]
ts_list = []
event_channel_list = []
periode_count = 0
periode_duration = 1 << 27
prev_ts = -1
for ts_word in ts_word_list:
time_stamp = ts_word >> 5
pattern = ts_word & 0x1F
if prev_ts != -1 and time_stamp < prev_ts:
periode_count += 1
# print(periode_count)
prev_ts = time_stamp
if (pattern & 0x10) == 0:
ts_list.append(time_stamp + periode_duration * periode_count)
event_channel_list.append("{0:04b}".format(pattern & 0xF))
ts_list = np.array(ts_list) * 2
event_channel_list = event_channel_list
return ts_list, event_channel_list
def read_timestamps_from_file(self):
"""
Reads the timestamps accumulated in a binary file
"""
with open(self.accumulated_timestamps_filename, "rb") as f:
lines = f.read()
f.close()
return self.read_timestamps_bin(lines)
def read_timestamps_from_file_as_dict(self):
"""
Reads the timestamps accumulated in a binary file
Returns dictionary where timestamps['channel i'] is the timestamp array
in nsec for the ith channel
"""
timestamps = {}
        # channels may involve coincidence signatures such as '0101'
        times, channels = self.read_timestamps_from_file()
for channel in range(1, 5, 1): # iterate through channel numbers 1, 2, 3, 4
timestamps["channel {}".format(channel)] = times[
[int(ch, 2) & channel_to_pattern(channel) != 0 for ch in channels]
]
return timestamps
def real_time_processing(self):
"""
Real-time processes the timestamps that are saved in the background.
Grabs a number of lines of timestamps to process (defined as a section):
since reading from a file is time-consuming, we grab a couple at a go.
"""
raise NotImplementedError()
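# Hedged usage sketch (editorial addition): a minimal session with the counter, assuming
# a TDC1 device is attached and auto-detected; all values are illustrative only.
#
#     tdc = TimeStampTDC1(integration_time=1, mode="singles", level="NIM")
#     print(tdc.get_counts())                   # singles counts per channel
#     g2 = tdc.count_g2(t_acq=5, bin_width=2)   # coincidence histogram, ch1 vs ch2
#     print(g2["time_bins"], g2["histogram"])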
|
test_fetcher.py
|
from SimpleHTTPServer import SimpleHTTPRequestHandler
import BaseHTTPServer
import base64
import os
import SocketServer
import threading
import urllib2
from nose.tools import ok_, eq_
from django.test import TestCase
from tardis.tardis_portal.fetcher import get_privileged_opener
class TestWebServer:
'''
Utility class for running a test web server with a given handler.
'''
class QuietSimpleHTTPRequestHandler(SimpleHTTPRequestHandler):
'''
Simple subclass that only prints output to STDOUT, not STDERR
'''
def log_message(self, msg, *args):
print msg % args
def _isAuthorized(self):
if self.headers.getheader('Authorization') is None:
return False
t, creds = self.headers.getheader('Authorization').split(" ")
if t != "Basic":
return False
if base64.b64decode(creds) != "username:password":
return False
return True
def do_GET(self):
if not self._isAuthorized():
self.send_response(401, 'Unauthorized')
self.send_header('WWW-Authenticate', 'Basic realm="Test"')
self.end_headers()
return
SimpleHTTPRequestHandler.do_GET(self)
class ThreadedTCPServer(SocketServer.ThreadingMixIn, \
BaseHTTPServer.HTTPServer):
pass
def __init__(self):
self.handler = self.QuietSimpleHTTPRequestHandler
def start(self):
server = self.ThreadedTCPServer(('127.0.0.1', self.getPort()),
self.handler)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
self.server = server
return server.socket.getsockname()
def getUrl(self):
return 'http://%s:%d/' % self.server.socket.getsockname()
@classmethod
def getPort(cls):
return 4272
def stop(self):
self.server.shutdown()
self.server.socket.close()
class PrivilegedOpenerTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.priorcwd = os.getcwd()
os.chdir(os.path.dirname(__file__))
cls.server = TestWebServer()
cls.server.start()
@classmethod
def tearDownClass(cls):
os.chdir(cls.priorcwd)
cls.server.stop()
def testCredentials(self):
'''
Test that the walker manages credentials.
'''
address = 'http://localhost:%d/test.jpg' % \
(TestWebServer.getPort())
# We shouldn't be able to get the file without credentials
try:
urllib2.urlopen(address)
ok_(False, 'Should have thrown error')
except urllib2.HTTPError:
pass
# We should be able to get it with the provided credentials
try:
f = get_privileged_opener().open(address)
eq_(f.getcode(), 200, 'Should have been: "200 OK"')
except urllib2.HTTPError:
ok_(False, 'Should not have thrown error')
|
demo2.py
|
"""
2019/12/08 14:55
141.【Python多任务编程】join阻塞方法
"""
"""
父进程会等待子进程执行完毕后在退出:
如果在父进程中执行完所有代码后,还有子进程在执行,那么父进程会等待子进程执行完所有代码后再退出。
Process对象的join方法:
使用Process创建子进程,调用start方法后,父子进程会在各自的进程中不断的执行代码。
有时候如果想等待子进程执行完毕后再执行下面的代码,那么这时候调用join方法。
"""
from multiprocessing import Process
import time
def zhiliao():
    print('=== child process started ===')
    for x in range(0, 5):
        print('child process: %s' % x)
        time.sleep(1)
if __name__ == '__main__':
    p = Process(target=zhiliao)
    p.start()
    print('main process...')
    p.join()
    # print('executing main process code...')
    # for x in range(0, 6):
    #     print('main process: %s' % x)
    #     time.sleep(1)
    print('all child process code has been executed')
|
main.py
|
import os
os.system('pip3 install -r requirements.txt')
from os import name,system
from random import choice
from colorama import init,Fore,Style
from threading import Thread,Lock,active_count
from sys import stdout
from time import sleep
from datetime import datetime
import requests
import json
import time
import string
import random
print("""
╔═══╗ ╔╗ ╔╗ ╔═══╗
║╔═╗║ ║║ ╔╝║ ║╔═╗║
║║ ╚╝╔══╗ ╔╗╔╗╔══╗╔══╗╔══╗╔══╗║║ ╔══╗╔═╗╚╗║ ║╚═╝║
║║╔═╗╚ ╗║ ║╚╝║║╔╗║║╔═╝║╔╗║║╔╗║║║ ║╔╗║║╔╝ ║║ ╚══╗║
║╚╩═║║╚╝╚╗║║║║║║═╣║╚═╗║╚╝║║╚╝║║╚╗║║═╣║║ ╔╝╚╗╔══╝║
╚═══╝╚═══╝╚╩╩╝╚══╝╚══╝╚══╝╚══╝╚═╝╚══╝╚╝ ╚══╝╚═══╝
""")
menu = input("""
1. Token Gen
2. Token Checker
""")
if menu == '1':
print('How many: ')
x = int(input())
    tokentxt = open('Data/tokens.txt', 'w+')  # truncate any previous token list
    tokentxt.close()
    while x > 0:
        tokens = ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(30))
        print(tokens)
        x -= 1
        with open("Data/tokens.txt", "a") as a_file:
            a_file.write(tokens + '\n')
print('\n \nGenned All Tokens')
time.sleep(0.5)
print('\n \nSupport Server :- https://discord.gg/Rsg2AsjqKx')
time.sleep(2)
quit()
class Main:
def clear(self):
if name == 'posix':
system('clear')
elif name in ('ce', 'nt', 'dos'):
system('cls')
else:
print("\n") * 120
def SetTitle(self,title:str):
if name == 'posix':
stdout.write(f"\x1b]2;{title}\x07")
elif name in ('ce', 'nt', 'dos'):
system(f'title {title}')
else:
stdout.write(f"\x1b]2;{title}\x07")
def ReadFile(self,filename,method):
with open(filename,method,encoding='utf8') as f:
content = [line.strip('\n') for line in f]
return content
def ReadJson(self,filename,method):
with open(filename,method) as f:
return json.load(f)
def __init__(self):
self.SetTitle('[Twitch Token Checker]')
self.clear()
self.title = Style.BRIGHT+Fore.MAGENTA+"""
╔═══╗ ╔╗ ╔╗ ╔═══╗
║╔═╗║ ║║ ╔╝║ ║╔═╗║
║║ ╚╝╔══╗ ╔╗╔╗╔══╗╔══╗╔══╗╔══╗║║ ╔══╗╔═╗╚╗║ ║╚═╝║
║║╔═╗╚ ╗║ ║╚╝║║╔╗║║╔═╝║╔╗║║╔╗║║║ ║╔╗║║╔╝ ║║ ╚══╗║
║╚╩═║║╚╝╚╗║║║║║║═╣║╚═╗║╚╝║║╚╝║║╚╗║║═╣║║ ╔╝╚╗╔══╝║
╚═══╝╚═══╝╚╩╩╝╚══╝╚══╝╚══╝╚══╝╚═╝╚══╝╚╝ ╚══╝╚═══╝
"""
print(self.title)
config = self.ReadJson('configs.json','r')
self.use_proxy = config['use_proxy']
self.proxy_type = config['proxy_type']
self.threads_num = config['threads']
self.webhook_enable = config['webhook_enable']
self.webhook_url = config['webhook_url']
print('')
self.hits = 0
self.bads = 0
self.retries = 0
self.webhook_retries = 0
self.lock = Lock()
def SendWebhook(self,title,message,icon_url,thumbnail_url,proxy,useragent):
try:
timestamp = str(datetime.utcnow())
message_to_send = {"embeds": [{"title": title,"description": message,"color": 65362,"author": {"name": " DISCORD SERVER [CLICK HERE]","url": "https://discord.gg/FFqXjZgp","icon_url": icon_url},"footer": {"text": "MADE BY LOOT AIO TEAM","icon_url": icon_url},"thumbnail": {"url": thumbnail_url},"timestamp": timestamp}]}
headers = {
'User-Agent':useragent,
'Pragma':'no-cache',
'Accept':'*/*',
'Content-Type':'application/json'
}
payload = json.dumps(message_to_send)
if self.use_proxy == 1:
response = requests.post(self.webhook_url,data=payload,headers=headers,proxies=proxy)
else:
response = requests.post(self.webhook_url,data=payload,headers=headers)
if response.text == "":
pass
elif "You are being rate limited." in response.text:
self.webhook_retries += 1
self.SendWebhook(title,message,icon_url,thumbnail_url,proxy,useragent)
else:
self.webhook_retries += 1
self.SendWebhook(title,message,icon_url,thumbnail_url,proxy,useragent)
except:
self.webhook_retries += 1
self.SendWebhook(title,message,icon_url,thumbnail_url,proxy,useragent)
def GetRandomUserAgent(self):
useragents = self.ReadFile('Data/useragents.txt','r')
return choice(useragents)
def PrintText(self,bracket_color:Fore,text_in_bracket_color:Fore,text_in_bracket,text):
self.lock.acquire()
stdout.flush()
text = text.encode('ascii','replace').decode()
stdout.write(Style.BRIGHT+bracket_color+'['+text_in_bracket_color+text_in_bracket+bracket_color+'] '+bracket_color+text+'\n')
self.lock.release()
def GetRandomProxy(self):
proxies_file = self.ReadFile('Data/proxies.txt','r')
proxies = {}
if self.proxy_type == 1:
proxies = {
"http":"http://{0}".format(choice(proxies_file)),
"https":"https://{0}".format(choice(proxies_file))
}
elif self.proxy_type == 2:
proxies = {
"http":"socks4://{0}".format(choice(proxies_file)),
"https":"socks4://{0}".format(choice(proxies_file))
}
else:
proxies = {
"http":"socks5://{0}".format(choice(proxies_file)),
"https":"socks5://{0}".format(choice(proxies_file))
}
return proxies
def TitleUpdate(self):
while True:
self.SetTitle(f'[Twitch Token Checker] ^| HITS: {self.hits} ^| BADS: {self.bads} ^| RETRIES: {self.retries} ^| WEBHOOK RETRIES: {self.webhook_retries} ^| THREADS: {active_count()-1}')
sleep(0.1)
def TokenCheck(self,token):
try:
useragent = self.GetRandomUserAgent()
headers = {
'User-Agent':useragent,
'Authorization':f'OAuth {token}'
}
response = ''
proxy = ''
link = 'https://id.twitch.tv/oauth2/validate'
if self.use_proxy == 1:
proxy = self.GetRandomProxy()
response = requests.get(link,headers=headers,proxies=proxy)
else:
response = requests.get(link,headers=headers)
if 'client_id' in response.text:
self.PrintText(Fore.MAGENTA,Fore.WHITE,'HIT',token)
with open('Results/hits.txt','a',encoding='utf8') as f:
f.write(token+'\n')
response_data = response.text.replace('\n','')
with open('Results/detailed_hits.txt','a',encoding='utf8') as f:
f.write(f'{token} | {response_data}\n')
self.hits += 1
elif 'invalid access token' in response.text:
self.PrintText(Fore.RED,Fore.WHITE,'BAD',token)
with open('Results/bads.txt','a',encoding='utf8') as f:
f.write(token+'\n')
self.bads += 1
            if self.webhook_enable == 1:
                # SendWebhook expects both an icon URL and a thumbnail URL; the original
                # call was missing one argument, so the same image is used for both here.
                image_url = 'https://cdn.discordapp.com/attachments/860086329059442699/860207876592893982/R2338f5ea90e75416983f8239e212cff4.png'
                self.SendWebhook('Twitch Token', token, image_url, image_url, proxy, useragent)
else:
self.retries += 1
self.TokenCheck(token)
except:
self.retries += 1
self.TokenCheck(token)
def Start(self):
Thread(target=self.TitleUpdate).start()
tokens = self.ReadFile('Data/tokens.txt','r')
for token in tokens:
Run = True
while Run:
if active_count() <= self.threads_num:
Thread(target=self.TokenCheck,args=(token,)).start()
Run = False
if __name__ == "__main__":
main = Main()
main.Start()
|
network_heartbeat.py
|
"""Network server heartbeat wrapper
Perl might be better for efficiency.
But we will use python for now.
Non-zero status means *this* failed, not the wrapped command.
"""
import argparse
import os
import shlex
import socket
import subprocess
import sys
import threading
import time
DESCRIPTION = """
We wrap a system call to produce a heartbeat.
"""
EPILOG = """
We log to the status server, and forward command stdout/stderr as well.
"""
class _Formatter(argparse.RawDescriptionHelpFormatter, argparse.ArgumentDefaultsHelpFormatter):
pass
_FORMATTER_CLASS = _Formatter
def parse_args(args):
parser = argparse.ArgumentParser(
description=DESCRIPTION,
epilog=EPILOG,
formatter_class=_FORMATTER_CLASS,
)
parser.add_argument('--rate',
help='Heartbeat rate, in seconds',
type=int,
default=600,
)
parser.add_argument('--heartbeat-server',
help='Address of the heartbeat server',
required=True,
)
parser.add_argument('--heartbeat-port',
help='Port of the heartbeat server',
type=int,
required=True,
)
parser.add_argument('--jobid',
help='Our jobid',
required=True,
)
parser.add_argument('--exit-dir',
help='Path to emergency exit sentinel directory',
required=True,
)
parser.add_argument('--directory',
help='Directory in which to run COMMAND.',
default='.',
)
parser.add_argument('command',
help='System call (to be joined by " "). We will block on this and return its result.',
nargs='+',
#required=True,
)
return parser.parse_args(args)
# send message delimited with a \0
def socket_send(socket, message):
    socket.sendall('{}\0'.format(message).encode())
def log(heartbeat_server, jobid, msg):
hsocket = socket.socket()
try:
hsocket.connect(heartbeat_server)
socket_send(hsocket, 's {} {}\n'.format(jobid, msg))
hsocket.close()
except IOError: # better to miss a line than terminate
pass
def thread_heartbeat(heartbeat_server, jobid, sleep_s):
pid = os.getpid()
pgid = os.getpgid(0)
hsocket = socket.socket()
try:
hsocket.connect(heartbeat_server)
socket_send(hsocket, 'i {} {} {}'.format(jobid, pid, pgid))
hsocket.close()
except IOError: # we hope it's a temporary error
pass
while True:
time.sleep(sleep_s)
hsocket = socket.socket()
try:
hsocket.connect(heartbeat_server)
socket_send(hsocket, 'h {}'.format(jobid))
hsocket.close()
except IOError: # we hope it's a temporary error
pass
def start_heartbeat(heartbeat_server, jobid, sleep_s):
hb = threading.Thread(target=thread_heartbeat, args=(heartbeat_server, jobid, sleep_s))
log(heartbeat_server, jobid, 'alive? {}'.format(
bool(hb.is_alive())))
hb.daemon = True
hb.start()
return hb
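# Hedged sketch (editorial addition): a minimal status server that understands the
# '\0'-delimited messages sent above ('i' init, 'h' heartbeat, 's' status/stdout line,
# 'e' exit code). It is illustrative only; the real heartbeat server is not part of
# this file.
def _example_heartbeat_server(host='127.0.0.1', port=0):
    server = socket.socket()
    server.bind((host, port))
    server.listen(5)
    sys.stderr.write('listening on {}\n'.format(server.getsockname()))
    while True:
        conn, _ = server.accept()
        data = b''
        chunk = conn.recv(4096)
        while chunk:
            data += chunk
            chunk = conn.recv(4096)
        conn.close()
        for message in data.split(b'\0'):
            if message:
                sys.stderr.write('received: {!r}\n'.format(message))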
def run(args):
heartbeat_server = (args.heartbeat_server, args.heartbeat_port)
jobid = args.jobid
log(heartbeat_server, jobid, repr(args))
os.chdir(args.directory)
exit_dir = args.exit_dir
exit_fn = os.path.join(os.path.abspath(exit_dir), jobid)
cwd = os.getcwd()
hostname = socket.getfqdn()
sleep_s = args.rate
log(heartbeat_server, jobid, """
cwd:{cwd!r}
hostname={hostname}
heartbeat_server={heartbeat_server!r}
jobid={jobid}
exit_dir={exit_dir!r}
sleep_s={sleep_s!r}""".format(
**locals()))
log(heartbeat_server, jobid, "before setpgid: pid={} pgid={}".format(os.getpid(), os.getpgid(0)))
try:
os.setpgid(0, 0) # This allows the entire tree of procs to be killed.
log(heartbeat_server, jobid, " after setpgid: pid={} pgid={}".format(
os.getpid(), os.getpgid(0)))
except OSError as e:
log(heartbeat_server, jobid, ' Unable to set pgid. Possibly a grid job? Hopefully there will be no dangling processes when killed: {}'.format(
repr(e)))
thread = start_heartbeat(heartbeat_server, jobid, sleep_s)
log(heartbeat_server, jobid, 'alive? {} pid={} pgid={}'.format(
bool(thread.is_alive()), os.getpid(), os.getpgid(0)))
call = ' '.join(args.command)
log(heartbeat_server, jobid, 'In cwd: {}, Blocking call: {!r}'.format(
os.getcwd(), call))
sp = subprocess.Popen(shlex.split(call), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# forward all output to server until job ends, then get exit value
with sp.stdout as f:
for line in iter(f.readline, b''):
# can't use log() for this because it appends a \n
hsocket = socket.socket()
try:
hsocket.connect(heartbeat_server)
socket_send(hsocket, 's {} {}'.format(jobid, line))
hsocket.close()
except IOError: # better to miss a line than terminate
pass
rc = sp.wait()
log(heartbeat_server, jobid, ' returned: {!r}'.format(
rc))
hsocket = socket.socket()
try:
hsocket.connect(heartbeat_server)
socket_send(hsocket, 'e {} {}'.format(jobid, rc))
hsocket.close()
except IOError as e:
log(heartbeat_server, jobid, 'could not update heartbeat server with exit status: {} {}: {!r}'.format(jobid, rc, e))
with open(exit_fn, 'w') as f:
f.write(str(rc))
# sys.exit(rc) # No-one would see this anyway.
def main():
args = parse_args(sys.argv[1:])
run(args)
if __name__ == "__main__":
main()
|
ilustrado.py
|
""" This file implements the GA algorithm and acts as main(). """
# standard library
import multiprocessing as mp
import subprocess as sp
import logging
import glob
import shutil
import os
import time
import sys
from traceback import print_exc
from json import dumps, dump
from copy import deepcopy, copy
# external libraries
import numpy as np
from pkg_resources import require
# matador modules
import matador.compute
import matador.compute.slurm
from matador.scrapers.castep_scrapers import (
res2dict,
castep2dict,
cell2dict,
param2dict,
)
from matador.export import doc2res
from matador.export.utils import generate_hash
from matador.fingerprints.similarity import get_uniq_cursor
from matador.fingerprints.pdf import PDFFactory
from matador.utils.chem_utils import get_formula_from_stoich, get_root_source
from matador.hull import QueryConvexHull
# ilustrado modules
from .adapt import adapt
from .generation import Generation
from .fitness import FitnessCalculator
from .util import strip_useless, LOG, NewbornProcess
__version__ = require("ilustrado")[0].version
# As this class has many settings that are hacked directly into __dict__, disable these warnings.
# pylint: disable=access-member-before-definition
# pylint: disable=attribute-defined-outside-init
# pylint: disable=bad-continuation
class ArtificialSelector:
""" ArtificialSelector takes an initial gene pool
and applies a genetic algorithm to optimise some
fitness function.
Keyword Arguments:
gene_pool (list(dict)) : initial cursor to use as "Generation 0",
seed (str) : seed name of cell and param files for CASTEP,
seed_prefix (str) : if not specifying a seed, this name will prefix all runs
fitness_metric (str) : currently either 'hull' or 'test',
hull (QueryConvexHull) : matador QueryConvexHull object to calculate distances,
res_path (str) : path to folder of res files to create hull, if no hull object passed
mutation_rate (float) : rate at which to perform single-parent mutations (DEFAULT: 0.5)
crossover_rate (float) : rate at which to perform crossovers (DEFAULT: 0.5)
num_generations (int) : number of generations to breed before quitting (DEFAULT: 5)
num_survivors (int) : number of structures to survive to next generation for breeding
(DEFAULT: 10)
population (int) : number of structures to breed in any given generation
(DEFAULT: 25)
failure_ratio (int) : maximum number of attempts per success (DEFAULT: 5)
elitism (float) : fraction of next generation to be comprised of elite
structures from previous generation (DEFAULT: 0.2)
best_from_stoich (bool) : whether to always include the best structure from a
            stoichiometry in the next generation,
mutations (list(str)) : list of mutation names to use,
structure_filter (fn(doc)) : any function that takes a matador doc and returns True
or False,
check_dupes (bool) : if True, filter relaxed structures for uniqueness on-the-fly (DEFAULT: True)
check_dupes_hull (bool) : compare pdf with all hull structures (DEFAULT: True)
sandbagging (bool) : whether or not to disfavour nearby compositions (DEFAULT: False)
minsep_dict (dict) : dictionary containing element-specific minimum separations, e.g.
{('K', 'K'): 2.5, ('K', 'P'): 2.0}. These should only be set such that
atoms do not overlap; let the DFT deal with bond lengths. No effort is made
            to push apart atoms that are too close; the trial will simply be discarded. (DEFAULT: None)
max_num_mutations (int) : maximum number of mutations to perform on a single structure,
max_num_atoms (int) : most atoms allowed in a structure post-mutation/crossover,
nodes (list(str)) : list of node names to run on,
ncores (int or list(int)) : specifies the number of cores used by listed `nodes` per thread,
nprocs (int) : total number of processes,
recover_from (str) : recover from previous run_hash, by default ilustrado will recover
if it finds only one run hash in the folder
load_only (bool) : only load structures, do not continue breeding (DEFAULT: False)
executable (str) : path to DFT binary (DEFAULT: castep)
compute_mode (str) : either `direct`, `slurm`, `manual` (DEFAULT: direct)
        max_num_nodes (int) : number of array jobs to run per generation in `slurm` mode,
walltime_hrs (int) : maximum walltime for a SLURM array job,
slurm_template (str) : path to template slurm script that includes module loads etc,
entrypoint (str) : path to script that initialised this object, such that it can
be called by SLURM
debug (bool) : maximum printing level
testing (bool) : run test code only if true
verbosity (int) : extra printing level,
loglevel (str) : follows std library logging levels.
"""
def __init__(self, **kwargs):
""" This is the main entrypoint. Initialises parameters,
gene pool and begins the GA.
"""
prop_defaults = {
# important, required parameters
"gene_pool": None,
"seed": None,
"seed_prefix": None,
"fitness_metric": "hull",
"hull": None,
"res_path": None,
# recovery and loading parameters
"recover_from": None,
"load_only": False,
# GA numerical parameters
"mutation_rate": 1.0,
"crossover_rate": 0.0,
"num_generations": 5,
"num_survivors": 10,
"population": 25,
"elitism": 0.2,
"max_num_mutations": 3,
"max_num_atoms": 30,
# other GA options
"best_from_stoich": True,
"mutations": None,
"structure_filter": None,
"check_dupes": True,
"check_dupes_hull": True,
"failure_ratio": 5,
"sandbagging": False,
"minsep_dict": None,
# logistical and compute parameters
"compute_mode": "direct",
"ase_calculator": None,
"nodes": None,
"ncores": None,
"nprocs": 1,
"relaxer_params": None,
"executable": "castep",
"max_num_nodes": None,
"walltime_hrs": None,
"slurm_template": None,
"entrypoint": None,
# debug and logging parameters
"debug": False,
"testing": False,
"emt": False,
"verbosity": 0,
"loglevel": "info",
}
# cache current params to reload again later
self.current_params = deepcopy(prop_defaults)
self.current_params.update(kwargs)
self.__dict__.update(prop_defaults)
self.__dict__.update(kwargs)
splash_screen = (
r" _ _ _ _" + "\n"
r" (_)| | | | | |" + "\n"
r" _ | | _ _ ___ | |_ _ __ __ _ __| | ___" + "\n"
r" | || || | | |/ __|| __|| '__| / _` | / _` | / _ \ " + "\n"
r" | || || |_| |\__ \| |_ | | | (_| || (_| || (_) |" + "\n"
r" |_||_| \__,_||___/ \__||_| \__,_| \__,_| \___/" + "\n\n"
"****************************************************\n"
)
print("\033[92m\033[1m")
print("\n" + splash_screen)
print("\033[0m")
print("Loading harsh realities of life...", end="")
# post-load checks
if self.relaxer_params is None:
self.relaxer_params = dict()
self.next_gen = None
if isinstance(self.ncores, list):
if len(self.ncores) != len(self.nodes):
raise RuntimeError(
"Length mismatch between ncores and nodes list: {} vs {}".format(
self.ncores, self.nodes
)
)
# set up computing resource
if self.compute_mode not in ("slurm", "direct", "manual"):
raise RuntimeError("`compute_mode` must be one of `slurm`, `direct`, `manual`.")
if self.compute_mode == "slurm":
errors = []
if not isinstance(self.walltime_hrs, int):
errors.append(
"`walltime_hrs` specified incorrectly {}".format(self.walltime_hrs)
)
elif not self.walltime_hrs > 0:
errors.append(
"`walltime_hrs` specified incorrectly {}".format(self.walltime_hrs)
)
if not isinstance(self.max_num_nodes, int):
errors.append(
"`max_num_nodes` specified incorrectly {}".format(
self.max_num_nodes
)
)
elif not self.max_num_nodes > 0:
errors.append(
"`max_num_nodes` specified incorrectly {}".format(
self.max_num_nodes
)
)
if not isinstance(self.slurm_template, str):
errors.append(
"`slurm_template` must be a valid path, not {}".format(
self.slurm_template
)
)
elif not os.path.isfile(self.slurm_template):
errors.append(
"`slurm_template` file {} does not exist".format(
self.slurm_template
)
)
if errors:
raise RuntimeError(
"Invalid specification for `compute_mode='slurm'`, errors: \n{}".format(
"\n".join(errors)
)
)
self.slurm_dict = matador.compute.slurm.get_slurm_env()
if self.compute_mode == "direct":
if self.nodes is not None:
if self.nprocs != len(self.nodes):
logging.warning(
"Specified procs {} being replaced by number of nodes {}".format(
self.nprocs, len(self.nodes)
)
)
self.nprocs = len(self.nodes)
# set up GA logistics
self.run_hash = generate_hash()
self.generations = [] # list to store all generations
self.num_elite = int(self.elitism * self.num_survivors)
self.num_accepted = self.num_survivors - self.num_elite
self.max_attempts = self.failure_ratio * self.population
if self.num_survivors > self.population + self.num_elite:
raise RuntimeError(
"More survivors than total population: {} vs {}".format(
self.num_survivors, self.population + self.num_elite
)
)
if self.num_accepted > self.population:
raise RuntimeError(
"More accepted than total population: {} vs {}".format(
self.num_accepted, self.population + self.num_elite
)
)
        if self.mutations is not None and isinstance(self.mutations, str):
            self.mutations = [self.mutations]
        elif self.mutations is None:
            self.mutations = ["permute_atoms", "random_strain", "nudge_positions", "vacancy", "transmute_atoms"]
try:
from VoronoiNetwork import Vornetclass
self.mutations.append("voronoi")
except ImportError:
LOG.warning("Disabling Voronoi mutation.")
pass
        if not isinstance(self.max_num_mutations, int) or self.max_num_mutations < 0:
raise RuntimeError(
"`max_num_mutations` must be >= 0, not {}".format(
self.max_num_mutations
)
)
        if not isinstance(self.max_num_atoms, int) or self.max_num_atoms < 1:
raise RuntimeError(
"`max_num_atoms` must be >= 1, not {}".format(self.max_num_atoms)
)
# recover from specified run
if self.recover_from is not None:
if isinstance(self.recover_from, str):
self.run_hash = self.recover_from.split("/")[-1]
# try to look for gen0 files, if multiple are found, safely exit
else:
gen0_files = glob.glob("*gen0.json")
if len(gen0_files) > 1:
raise SystemExit(
"Several incomplete runs found in this folder, please tidy up before re-running."
)
if len(gen0_files) == 1:
self.run_hash = gen0_files[0].split("/")[-1].replace("-gen0.json", "")
self.recover_from = self.run_hash
else:
print("No recovery possible, starting fresh run.")
# set up logging
numeric_loglevel = getattr(logging, self.loglevel.upper(), None)
if not isinstance(numeric_loglevel, int):
raise SystemExit(
self.loglevel,
"is an invalid log level, please use either `info`, `debug` or `warning`.",
)
file_handler = logging.FileHandler(self.run_hash + ".log", mode="a")
file_handler.setLevel(numeric_loglevel)
file_handler.setFormatter(
logging.Formatter("%(asctime)s - %(name)s | %(levelname)8s: %(message)s")
)
LOG.addHandler(file_handler)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(numeric_loglevel)
stream_handler.setFormatter(
logging.Formatter("%(asctime)s - %(name)s | %(levelname)8s: %(message)s")
)
LOG.addHandler(stream_handler)
LOG.info("Starting up ilustrado {}".format(__version__))
# initialise fitness calculator
if self.fitness_metric == "hull" and self.hull is None:
            if self.res_path is not None and os.path.isdir(self.res_path):
                res_files = glob.glob("{}/*.res".format(self.res_path))
                if not res_files:
                    raise SystemExit("No structures found in {}".format(self.res_path))
                self.cursor = []
                for res in res_files:
                    self.cursor.append(res2dict(res))
                self.hull = QueryConvexHull(cursor=self.cursor)
            else:
                raise SystemExit(
                    "Need to pass a QueryConvexHull object to use hull distance metric."
                )
if self.fitness_metric in ["dummy", "hull_test"]:
self.testing = True
if self.testing and self.compute_mode == "slurm":
raise SystemExit("Please use `compute_mode=direct` for testing.")
print("Done!")
self.fitness_calculator = FitnessCalculator(
fitness_metric=self.fitness_metric,
hull=self.hull,
sandbagging=self.sandbagging,
debug=self.debug,
)
LOG.debug("Successfully initialised fitness calculator.")
# if we're checking hull pdfs too, make this list now
if self.check_dupes_hull:
print("Computing extra PDFs from hull...")
PDFFactory(self.hull.cursor)
self.extra_pdfs = deepcopy(self.hull.cursor)
# remove pdf object from cursor so generation can be serialized
for ind, _ in enumerate(self.hull.cursor):
del self.hull.cursor[ind]["pdf"]
else:
self.extra_pdfs = None
LOG.info("Successfully initialised similarity lists.")
if self.recover_from is not None:
print("Attempting to recover from run {}".format(self.run_hash))
if isinstance(self.recover_from, str):
LOG.info(
"Attempting to recover from previous run {}".format(self.run_hash)
)
self.recover()
if not self.load_only:
self.start()
def start(self):
""" Start running GA. """
print("Initialising quantum mechanics...", end=" ")
# read parameters for relaxation from seed files
if self.seed is not None:
seed = self.seed
errors = []
self.cell_dict, success_cell = cell2dict(seed, db=False)
self.param_dict, success_param = param2dict(seed, db=False)
if not success_cell:
errors.append("Failed to read cell file: {}".format(self.cell_dict))
if not success_param:
errors.append("Failed to read param file: {}".format(self.param_dict))
if errors:
raise RuntimeError("{}".format(errors.join("\n")))
else:
self.seed = "ilustrado"
if self.seed_prefix is not None:
self.seed = self.seed_prefix
self.cell_dict = {}
self.param_dict = {}
print("Done!\n")
LOG.debug("Successfully initialised cell and param files.")
if self.recover_from is None:
self.seed_generation_0(self.gene_pool)
if self.debug:
print(self.nodes)
if self.nodes is not None:
LOG.info("Running on nodes: {}".format(" ".join(self.nodes)))
elif self.compute_mode == "slurm":
LOG.info("Running through SLURM queue")
else:
LOG.info("Running on localhost only")
if self.debug:
print(
"Current number of generations: {}. Target number: {}".format(
len(self.generations), self.num_generations
)
)
# run GA self.num_generations
while len(self.generations) < self.num_generations:
self.breed_generation()
LOG.info("Successfully bred generation {}".format(len(self.generations)))
assert len(self.generations) == self.num_generations
self.finalise_files_for_export()
print("Reached target number of generations!")
print("Completed GA!")
LOG.info("Reached target number of generations!")
LOG.info("Completed GA!")
def breed_generation(self):
""" Build next generation from mutations/crossover of current and
perform relaxations if necessary.
"""
# initialise next_gen
if self.next_gen is None:
self.next_gen = Generation(
self.run_hash,
len(self.generations),
self.num_survivors,
self.num_accepted,
fitness_calculator=self.fitness_calculator,
)
# newborns is a list of structures, initially raw then relaxed
if self.compute_mode == "direct":
self.continuous_birth()
elif self.compute_mode in ("slurm", "manual"):
self.batch_birth()
if len(self.next_gen) < self.population:
LOG.warning("Next gen is smaller than desired population.")
# assert len(self.next_gen) >= self.population
self.next_gen.rank()
LOG.info("Ranked structures in generation {}".format(len(self.generations)))
if not self.testing:
cleaned = self.next_gen.clean()
LOG.info(
"Cleaned structures in generation {}, removed {}".format(
len(self.generations), cleaned
)
)
self.enforce_elitism()
self.reset_and_dump()
print(self.generations[-1])
def write_unrelaxed_generation(self):
""" Perform mutations and write res files for the resulting
structures. Additionally, dump an unrelaxed json file.
"""
while len(self.next_gen) < self.max_attempts:
newborn = self.birth_new_structure()
self.next_gen.birth(newborn)
for newborn in self.next_gen:
newborn = strip_useless(newborn)
doc2res(newborn, newborn["source"][0], info=False)
self.next_gen.dump("unrelaxed")
def batch_birth(self):
""" Assess whether a generation has been relaxed already. This is done by
checking for the existence of a file called <run_hash>-genunrelaxed.json.
If so, match the relaxations up with the cached unrelaxed structures
and rank them ready for the next generation.
If not, create a new generation of structures, dump the unrelaxed structures to file,
create the jobscripts to relax them, submit them and the job to check up on the relaxations,
then exit.
"""
print("Beginning birthing of generation {}...".format(len(self.generations)))
fname = "{}-genunrelaxed.json".format(self.run_hash)
if os.path.isfile(fname):
LOG.info("Found existing generation to be relaxed...")
# load the unrelaxed structures into a dummy generation
assert os.path.isfile(fname)
unrelaxed_gen = Generation(
self.run_hash,
len(self.generations),
self.num_survivors,
self.num_accepted,
dumpfile=fname,
fitness_calculator=None,
)
# check to see which unrelaxed structures completed successfully
LOG.info("Scanning for completed relaxations...")
for _, newborn in enumerate(unrelaxed_gen):
completed_castep_filename = "completed/{}.castep".format(newborn["source"][0])
completed_res_filename = "completed/{}.res".format(newborn["source"][0])
doc = None
s = None
if os.path.isfile(completed_castep_filename):
doc, s = castep2dict(completed_res_filename, db=True)
elif os.path.isfile(completed_res_filename):
doc, s = res2dict(completed_res_filename, db=True)
                    # if we find a res file in a completed folder, assume it was relaxed
doc["optimised"] = True
# if all was a success, then "birth" the structure, after checking for uniqueness
if s and isinstance(doc, dict):
newborn = strip_useless(newborn)
doc = strip_useless(doc)
newborn.update(doc)
assert newborn.get("parents") is not None
LOG.info("Scraping result for {}".format(newborn["source"][0]))
self.scrape_result(newborn)
else:
LOG.warning(
"Failed to add {}, data found: {}".format(newborn["source"][0], doc)
)
# if there are not enough unrelaxed structures after that run, clean up then resubmit
LOG.info(
"Found {} structures out of target {}".format(
len(self.next_gen), self.population
)
)
if len(self.next_gen) < self.population:
LOG.info("Initialising new relaxation jobs...")
num_remaining = matador.compute.reset_job_folder()
# check if we can even finish this generation
if num_remaining < self.population - len(self.next_gen):
LOG.warning(
"There were too many failures, not enough remaining calculations to reach target."
)
LOG.warning(
"Consider restarting with a larger allowed failure_ratio."
)
raise SystemExit(
"Failed to return enough successful structures to continue, exiting..."
)
if self.compute_mode == "slurm":
# adjust number of nodes so we don't get stuck in the queue
                    if self.max_num_nodes > num_remaining:
                        self.max_num_nodes = self.population - len(self.next_gen)
                        LOG.info("Adjusted max num nodes to {}".format(self.max_num_nodes))
self.slurm_submit_relaxations_and_monitor()
LOG.info("Exiting monitor...")
exit(0)
# otherwise, remove unfinished structures from job file and release control of this generation
else:
LOG.info("Found enough structures to continue!".format())
count = 0
for doc in unrelaxed_gen:
structure = doc["source"][0] + ".res"
if os.path.isfile(structure):
os.remove(structure)
count += 1
LOG.info("Removed {} structures from job folder.".format(count))
return
# otherwise, generate a new unrelaxed generation and submit
else:
LOG.info("Initialising new generation...")
self.write_unrelaxed_generation()
if self.compute_mode == "slurm":
self.slurm_submit_relaxations_and_monitor()
LOG.info("Exiting monitor...")
exit(0)
def slurm_submit_relaxations_and_monitor(self):
""" Prepare and submit the appropriate slurm files.
"""
LOG.info("Preparing to submit slurm scripts...")
relax_fname = "{}_relax.job".format(self.run_hash)
# override jobname with this run's hash to allow for selective job killing
self.slurm_dict["SLURM_JOB_NAME"] = self.run_hash
compute_string = "run3 {}".format(self.seed)
matador.compute.slurm.write_slurm_submission_script(
relax_fname,
self.slurm_dict,
compute_string,
self.walltime_hrs,
template=self.slurm_template,
)
if self.max_num_nodes > self.max_attempts:
self.max_num_nodes = self.max_attempts
LOG.info("Adjusted max num nodes to {}".format(self.max_num_nodes))
# prepare script to read in results
monitor_fname = "{}_monitor.job".format(self.run_hash)
compute_string = "python {} >> ilustrado.out 2>> ilustrado.err".format(
self.entrypoint
)
matador.compute.slurm.write_slurm_submission_script(
monitor_fname,
self.slurm_dict,
compute_string,
1,
template=self.slurm_template,
)
# submit jobs, if any exceptions, cancel all jobs
try:
array_job_id = matador.compute.slurm.submit_slurm_script(
relax_fname, num_array_tasks=self.max_num_nodes
)
LOG.info("Submitted job array: {}".format(array_job_id))
monitor_job_id = matador.compute.slurm.submit_slurm_script(
monitor_fname, depend_on_job=array_job_id
)
LOG.info("Submitted monitor job: {}".format(monitor_job_id))
except Exception as exc:
LOG.error("Something went wrong, trying to cancel all jobs: {}".format(exc))
output = matador.compute.slurm.scancel_all_matching_jobs(name=self.run_hash)
LOG.error("scancel output: {}".format(output))
raise SystemExit("Something went wrong, please check the log file.")
def continuous_birth(self):
""" Create new generation and relax "as they come", filling the compute
resources allocated.
"""
newborns = []
procs = []
# queues is a list of mp.Queues where return values will end up
queues = []
if self.nodes is None:
free_nodes = self.nprocs * [None]
if isinstance(self.ncores, list):
free_cores = self.nprocs * [None]
else:
free_cores = self.nprocs * [self.ncores]
else:
free_nodes = deepcopy(self.nodes)
if isinstance(self.ncores, list):
free_cores = deepcopy(self.ncores)
else:
free_cores = len(self.nodes) * [self.ncores]
attempts = 0
print("Computing generation {}:".format(len(self.generations)))
print(89 * "─")
print(
"{:^25} {:^10} {:^10} {:^10} {:^30}".format(
"ID", "Formula", "# atoms", "Status", "Mutations"
)
)
print(89 * "─")
# print any recovered structures that already exist
if self.next_gen:
for _, structure in enumerate(self.next_gen):
print(
"{:^25} {:^10} {:^10} {:^10} {:^30}".format(
structure["source"][0],
get_formula_from_stoich(structure["stoichiometry"]),
structure["num_atoms"],
"Recovered",
", ".join(structure["mutations"]),
)
)
self.used_sources = [doc["source"][0] for doc in self.next_gen]
else:
self.used_sources = []
try:
finished = False
while attempts < self.max_attempts and not finished:
# if we've reached the target popn, try to kill remaining processes nicely
if len(self.next_gen) >= self.population:
finished = True
# while there are still processes running, try to kill them with kill files
# that should end the job at the completion of the next CASTEP run
self._kill_all_gently(procs, newborns, queues)
# are we using all nodes? if not, start some processes
elif len(procs) < self.nprocs and len(self.next_gen) < self.population:
# generate structure
newborn = self.birth_new_structure()
newborn_id = len(newborns)
newborns.append(newborn)
# clear up and assess CPU resources
node = free_nodes.pop()
ncores = free_cores.pop()
# actually relax structure (or not, if testing is turned on)
# TODO: refactor to be more general
if self.ase_calculator:
from ilustrado.util import AseRelaxation
queues.append(mp.Queue())
relaxer = AseRelaxation(newborns[-1], queues[-1], calculator=self.ase_calculator)
else:
if self.testing:
from ilustrado.util import FakeComputeTask as ComputeTask
else:
from matador.compute import ComputeTask
queues.append(mp.Queue())
relaxer = ComputeTask(
ncores=ncores,
nnodes=None,
node=node,
res=newborns[-1],
param_dict=self.param_dict,
cell_dict=self.cell_dict,
verbosity=1,
killcheck=True,
reopt=False,
executable=self.executable,
output_queue=queues[-1],
start=False,
**self.relaxer_params
)
# store proc object with structure ID, node name, output queue and number of cores
procs.append(
NewbornProcess(
newborn_id,
node,
mp.Process(target=relaxer.relax),
ncores=ncores,
)
)
procs[-1].process.start()
LOG.info(
"Initialised relaxation for newborn {} on node {} with {} cores.".format(
", ".join(newborns[-1]["source"]), node, ncores
)
)
# are we using all nodes? if so, are they all still running?
elif (
all([proc.process.is_alive() for proc in procs])
and len(procs) == self.nprocs
):
# poll processes every second
time.sleep(1)
# so we were using all nodes, but some have died...
else:
LOG.debug("Suspected at least one dead node")
# then find the dead ones, collect their results and
# delete them so we're no longer using all nodes
found_node = False
for ind, proc in enumerate(procs):
if not proc.process.is_alive():
LOG.debug("Found dead node {}".format(proc.node))
try:
result = queues[ind].get(timeout=60)
except Exception:
result = False
LOG.warning(
"Node {} failed to write to queue for newborn {}".format(
proc.node,
", ".join(newborns[proc.newborn_id]["source"]),
)
)
if isinstance(result, dict):
self.scrape_result(result, proc=proc, newborns=newborns)
try:
procs[ind].process.join(timeout=10)
LOG.debug(
"Process {proc.newborn_id} on node {proc.node} died gracefully.".format(
proc=proc
)
)
except Exception:
LOG.warning(
"Process {proc.newborn_id} on node {proc.node} has not died gracefully.".format(
proc=proc
)
)
procs[ind].process.terminate()
LOG.warning(
"Process {proc.newborn_id} on node {proc.node} terminated forcefully.".format(
proc=proc
)
)
if result is not False:
free_nodes.append(proc.node)
free_cores.append(proc.ncores)
del procs[ind]
del queues[ind]
attempts += 1
found_node = True
break
# new_free_nodes, new_free_cores, found_node, extra_attempts = self._collect_from_nodes(
# procs, newborns, queues
# )
# attempts += extra_attempts
# if new_free_nodes:
# free_nodes.append(new_free_nodes)
# free_cores.append(new_free_cores)
if not found_node:
time.sleep(10)
break
except Exception as exc:
LOG.warning("Something has gone terribly wrong...")
LOG.error("Exception caught:", exc_info=True)
print_exc()
# clean up on error/interrupt
if len(procs) > 1:
self.kill_all(procs)
raise exc
LOG.info("No longer breeding structures in this generation.")
# clean up at end either way
if len(procs) > 1:
LOG.info(
"Trying to kill {} on {} processes.".format(self.executable, len(procs))
)
self.kill_all(procs)
if attempts >= self.max_attempts:
LOG.warning("Failed to return enough successful structures to continue...")
print(
"Failed to return enough successful structures to continue, exiting..."
)
exit()
def enforce_elitism(self):
""" Add elite structures from previous generations
        to the bourgeoisie of the current generation, through the merit
of their ancestors alone.
"""
# add random elite structures from previous gen
if self.num_elite <= len(self.generations[-1].bourgeoisie):
probabilities = (
np.asarray([doc["fitness"] for doc in self.generations[-1].bourgeoisie])
+ 0.0001
)
probabilities /= np.sum(probabilities)
elites = deepcopy(
np.random.choice(
self.generations[-1].bourgeoisie,
self.num_elite,
replace=False,
p=probabilities,
)
)
else:
elites = deepcopy(self.generations[-1].bourgeoisie)
if self.debug:
for doc in elites:
print(
"Adding doc {} at {} eV/atom".format(
" ".join(doc["text_id"]), doc["hull_distance"]
)
)
self.next_gen.set_bourgeoisie(
elites=elites, best_from_stoich=self.best_from_stoich
)
LOG.info("Added elite structures from previous generation to next gen.")
LOG.info("New length of next gen: {}.".format(len(self.next_gen)))
LOG.info(
"New length of bourgeoisie: {}.".format(len(self.next_gen.bourgeoisie))
)
def reset_and_dump(self):
""" Add now complete generation to generation list, reset
the next_gen variable and write dump files.
"""
# copy next generation to list of generations
self.generations.append(copy(self.next_gen))
# reset next_gen ready for, well, the next gen
self.next_gen = None
assert self.generations[-1] is not None
LOG.info(
"Added current generation {} to generation list.".format(
len(self.generations) - 1
)
)
# remove interim dump file and create new ones for populace and bourgeoisie
self.generations[-1].dump(len(self.generations) - 1)
self.generations[-1].dump_bourgeoisie(len(self.generations) - 1)
if os.path.isfile("{}-gencurrent.json".format(self.run_hash)):
os.remove("{}-gencurrent.json".format(self.run_hash))
if os.path.isfile("{}-genunrelaxed.json".format(self.run_hash)):
os.remove("{}-genunrelaxed.json".format(self.run_hash))
LOG.info(
"Dumped generation file for generation {}".format(len(self.generations) - 1)
)
def birth_new_structure(self):
""" Generate a new structure from current settings.
Returns:
dict: newborn structure to be optimised
"""
possible_parents = (
self.generations[-1].populace
if len(self.generations) == 1
else self.generations[-1].bourgeoisie
)
newborn = adapt(
possible_parents,
self.mutation_rate,
self.crossover_rate,
mutations=self.mutations,
max_num_mutations=self.max_num_mutations,
max_num_atoms=self.max_num_atoms,
structure_filter=self.structure_filter,
minsep_dict=self.minsep_dict,
debug=self.debug,
)
newborn_source_id = len(self.next_gen)
if self.compute_mode == "direct":
while (
"{}-GA-{}-{}x{}".format(
self.seed, self.run_hash, len(self.generations), newborn_source_id
)
in self.used_sources
):
newborn_source_id += 1
self.used_sources.append(
"{}-GA-{}-{}x{}".format(
self.seed, self.run_hash, len(self.generations), newborn_source_id
)
)
newborn["source"] = [
"{}-GA-{}-{}x{}".format(
self.seed, self.run_hash, len(self.generations), newborn_source_id
)
]
LOG.info(
"Initialised newborn {} with mutations ({})".format(
", ".join(newborn["source"]), ", ".join(newborn["mutations"])
)
)
return newborn
def scrape_result(self, result, proc=None, newborns=None):
""" Check process for result and scrape into self.next_gen if successful,
with duplicate detection if desired. If the optional arguments are provided,
extra logging info will be found when running in `direct` mode.
Parameters:
result (dict): containing output from process
Keyword Arguments:
            proc (:obj:`NewbornProcess`): process object as documented above,
newborns (list): of new structures to append result to.
"""
if self.debug:
if proc is not None:
print(proc)
print(dumps(result, sort_keys=True))
if result.get("optimised"):
status = "Relaxed"
if proc is not None:
LOG.debug(
"Newborn {} successfully optimised".format(
", ".join(newborns[proc.newborn_id]["source"])
)
)
if result.get("parents") is None:
LOG.warning(
"Failed to get parents for newborn {}.".format(
", ".join(newborns[proc.newborn_id]["source"])
)
)
result["parents"] = newborns[proc.newborn_id]["parents"]
result["mutations"] = newborns[proc.newborn_id]["mutations"]
result = strip_useless(result)
dupe = False
if self.check_dupes:
dupe = self.is_newborn_dupe(result, extra_pdfs=self.extra_pdfs)
if dupe:
status = "Duplicate"
if proc is not None:
LOG.debug(
"Newborn {} is a duplicate and will not be included.".format(
", ".join(newborns[proc.newborn_id]["source"])
)
)
else:
LOG.debug(
"Newborn {} is a duplicate and will not be included.".format(
result["source"][0]
)
)
with open(self.run_hash + "-dupe.json", "a") as f:
dump(result, f, sort_keys=False, indent=2)
if not dupe:
self.next_gen.birth(result)
if proc is not None:
LOG.info(
"Newborn {} added to next generation.".format(
", ".join(newborns[proc.newborn_id]["source"])
)
)
else:
LOG.info(
"Newborn {} added to next generation.".format(
result["source"][0]
)
)
LOG.info("Current generation size: {}".format(len(self.next_gen)))
self.next_gen.dump("current")
LOG.debug("Dumping json file for interim generation...")
else:
status = "Failed"
result = strip_useless(result)
with open(self.run_hash + "-failed.json", "a") as f:
dump(result, f, sort_keys=False, indent=2)
print(
"{:^25} {:^10} {:^10} {:^10} {:^30}".format(
result["source"][0],
get_formula_from_stoich(result["stoichiometry"]),
result["num_atoms"],
status,
", ".join(result["mutations"]),
)
)
def kill_all(self, procs):
""" Loop over processes and kill them all.
Parameters:
procs (list): list of :obj:`NewbornProcess` in form documented above.
"""
for proc in procs:
if self.nodes is not None:
sp.run(
["ssh", proc.node, "pkill {}".format(self.executable)],
timeout=15,
stdout=sp.DEVNULL,
shell=False,
)
proc.process.terminate()
def recover(self):
""" Attempt to recover previous generations from files in cwd
        named '<run_hash>-gen{}.json'.format(gen_idx).
"""
if not os.path.isfile(("{}-gen0.json").format(self.run_hash)):
exit("Failed to load run, files missing for {}".format(self.run_hash))
if (
os.path.isfile(("{}-gencurrent.json").format(self.run_hash))
and self.compute_mode != "slurm"
):
incomplete = True
LOG.info("Found incomplete generation for {}".format(self.run_hash))
else:
incomplete = False
try:
i = 0
while os.path.isfile("{}-gen{}.json".format(self.run_hash, i)):
LOG.info(
"Trying to load generation {} from run {}.".format(i, self.run_hash)
)
fname = "{}-gen{}.json".format(self.run_hash, i)
self.generations.append(
Generation(
self.run_hash,
i,
self.num_survivors,
self.num_accepted,
dumpfile=fname,
fitness_calculator=None,
)
)
LOG.info(
"Successfully loaded {} structures into generation {} from run {}.".format(
len(self.generations[-1]), i, self.run_hash
)
)
i += 1
print("Recovered from run {}".format(self.run_hash))
LOG.info("Successfully loaded run {}.".format(self.run_hash))
except Exception:
print_exc()
LOG.error(
"Something went wrong when reloading run {}".format(self.run_hash)
)
exit("Something went wrong when reloading run {}".format(self.run_hash))
if not self.generations:
raise SystemExit("No generations found!")
for i, _ in enumerate(self.generations):
if not self.testing:
if i != 0:
removed = self.generations[i].clean()
LOG.info(
"Removed {} structures from generation {}".format(removed, i)
)
if i == len(self.generations) - 1 and len(self.generations) > 1:
if self.num_elite <= len(self.generations[-2].bourgeoisie):
# generate elites with probability proportional to their fitness, but ensure every p is non-zero
probabilities = (
np.asarray(
[doc["fitness"] for doc in self.generations[-2].bourgeoisie]
)
+ 0.0001
)
probabilities /= np.sum(probabilities)
elites = deepcopy(
np.random.choice(
self.generations[-2].bourgeoisie,
self.num_elite,
replace=False,
p=probabilities,
)
)
else:
elites = deepcopy(self.generations[-2].bourgeoisie)
self.generations[i].set_bourgeoisie(
best_from_stoich=self.best_from_stoich, elites=elites
)
else:
bourge_fname = "{}-gen{}-bourgeoisie.json".format(self.run_hash, i)
if os.path.isfile(bourge_fname):
self.generations[i].load_bourgeoisie(bourge_fname)
else:
self.generations[i].set_bourgeoisie(
best_from_stoich=self.best_from_stoich
)
LOG.info(
"Bourgeoisie contains {} structures: generation {}".format(
len(self.generations[i].bourgeoisie), i
)
)
assert len(self.generations[i]) >= 1
assert len(self.generations[i].bourgeoisie) >= 1
if incomplete:
LOG.info(
"Trying to load incomplete generation from run {}.".format(
self.run_hash
)
)
fname = "{}-gen{}.json".format(self.run_hash, "current")
self.next_gen = Generation(
self.run_hash,
len(self.generations),
self.num_survivors,
self.num_accepted,
dumpfile=fname,
fitness_calculator=self.fitness_calculator,
)
LOG.info(
"Successfully loaded {} structures into current generation ({}) from run {}.".format(
len(self.next_gen), len(self.generations), self.run_hash
)
)
assert len(self.next_gen) >= 1
def seed_generation_0(self, gene_pool):
""" Set up first generation from gene pool.
Parameters:
            gene_pool (list(dict)): list of structures with which to seed the generation.
"""
self.gene_pool = gene_pool
for ind, parent in enumerate(self.gene_pool):
if "_id" in parent:
del self.gene_pool[ind]["_id"]
# check gene pool is sensible
errors = []
if not isinstance(self.gene_pool, list):
errors.append("Initial gene pool not a list: {}".format(self.gene_pool))
if not len(self.gene_pool) >= 1:
errors.append(
"Initial gene pool not long enough: {}".format(self.gene_pool)
)
if errors:
raise SystemExit("Initial genee pool is not sensible: \n".join(errors))
generation = Generation(
self.run_hash,
0,
len(gene_pool),
len(gene_pool),
fitness_calculator=self.fitness_calculator,
populace=self.gene_pool,
)
generation.rank()
generation.set_bourgeoisie(best_from_stoich=False)
LOG.info(
"Successfully initialised generation 0 with {} members".format(
len(generation)
)
)
generation.dump(0)
generation.dump_bourgeoisie(0)
print(generation)
self.generations.append(generation)
def is_newborn_dupe(self, newborn, extra_pdfs=None):
""" Check each generation for a duplicate structure to the current newborn,
using PDF calculator from matador.
Parameters:
newborn (dict): new structure to screen against the existing,
Keyword Arguments:
extra_pdfs (list(dict)): any extra PDFs to compare to, e.g. other hull structures
not used to seed any generation
Returns:
bool: True if duplicate, else False.
"""
for ind, gen in enumerate(self.generations):
if ind == 0:
if gen.is_dupe(newborn, extra_pdfs=extra_pdfs):
return True
else:
if gen.is_dupe(newborn):
return True
return False
def finalise_files_for_export(self):
""" Move unique structures from gen1 onwards to folder "<run_hash>-results". """
path = "{}-results".format(self.run_hash)
        os.makedirs(path, exist_ok=True)
LOG.info("Moving unique files to {}-results/...".format(self.run_hash))
cursor = [struc for gen in self.generations[1:] for struc in gen]
        uniq_inds, _, _, _ = get_uniq_cursor(cursor, projected=True)
cursor = [cursor[ind] for ind in uniq_inds]
for doc in cursor:
source = get_root_source(doc)
if not source:
LOG.warning("Issue writing {}".format(doc["source"]))
continue
else:
doc2res(
doc, "{}/{}".format(path, source), overwrite=False, hash_dupe=False
)
if os.path.isfile("completed/{}".format(source.replace(".res", ".castep"))):
shutil.copy(
"completed/{}".format(source.replace(".res", ".castep")),
"{}/{}".format(path, source.replace(".res", ".castep")),
)
def _kill_all_gently(self, procs, newborns, queues):
""" Kill all running processes.
Parameters:
procs (list): list of `:obj:NewbornProcess` objects.
newborns (list): list of corresponding structures.
queues (list): list of queues that were collecting results.
"""
kill_attempts = 0
while procs and kill_attempts < 5:
for ind, proc in enumerate(procs):
# create kill file so that matador will stop next finished CASTEP
filename = "{}.kill".format(newborns[proc.newborn_id]["source"][0])
with open(filename, "w"):
pass
                # wait up to 1 minute for the current CASTEP run to finish
                # (Process.join() always returns None, so check is_alive() instead)
                proc.process.join(timeout=60)
                if not proc.process.is_alive():
                    result = queues[ind].get(timeout=60)
if isinstance(result, dict):
self.scrape_result(result, proc=proc, newborns=newborns)
del procs[ind]
kill_attempts += 1
if kill_attempts >= 5:
for ind, proc in enumerate(procs):
proc.process.terminate()
del procs[ind]
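# --- Illustrative sketch (not part of ilustrado) -------------------------------
# continuous_birth() above farms each relaxation out to an mp.Process that
# writes its result dict to a dedicated mp.Queue, while the parent polls
# process.is_alive() and frees the slot once the result has been collected.
# The stripped-down, stdlib-only loop below reproduces just that worker/queue
# pattern with a fake "relaxation" so the control flow can be followed in
# isolation; names prefixed with an underscore are hypothetical helpers, not
# part of the package.
def _fake_relax(queue, ident):
    # Stand-in for ComputeTask.relax(): push a result dict onto the output queue.
    queue.put({"source": ["newborn-{}".format(ident)], "optimised": True})


def _worker_queue_pattern_demo(n_workers=2):
    import multiprocessing as mp
    import time

    procs, queues, results = [], [], []
    for ident in range(n_workers):
        queues.append(mp.Queue())
        procs.append(mp.Process(target=_fake_relax, args=(queues[-1], ident)))
        procs[-1].start()

    while procs:
        for ind, proc in enumerate(procs):
            if not proc.is_alive():
                # Collect the result the way continuous_birth does, then free the slot.
                results.append(queues[ind].get(timeout=60))
                proc.join(timeout=10)
                del procs[ind]
                del queues[ind]
                break
        else:
            time.sleep(0.1)
    return results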
|
client.py
|
import asyncio
import logging
import sys
import time
from threading import Thread, Event
from typing import Union, List, Tuple
from asyncio import Transport, Protocol
from bs4 import BeautifulSoup
import kik_unofficial.callbacks as callbacks
import kik_unofficial.datatypes.exceptions as exceptions
import kik_unofficial.datatypes.xmpp.chatting as chatting
import kik_unofficial.datatypes.xmpp.group_adminship as group_adminship
import kik_unofficial.datatypes.xmpp.login as login
import kik_unofficial.datatypes.xmpp.roster as roster
import kik_unofficial.datatypes.xmpp.sign_up as sign_up
import kik_unofficial.xmlns_handlers as xmlns_handlers
from kik_unofficial.datatypes.xmpp.auth_stanza import AuthStanza
from kik_unofficial.datatypes.xmpp import account, xiphias
from kik_unofficial.utilities.threading_utils import run_in_new_thread
from kik_unofficial.datatypes.xmpp.base_elements import XMPPElement
from kik_unofficial.http import profile_pictures, content
HOST, PORT = "talk1110an.kik.com", 5223
log = logging.getLogger('kik_unofficial')
class KikClient:
"""
    The main Kik class, used to manage a Kik connection and send commands
"""
def __init__(self, callback: callbacks.KikClientCallback, kik_username, kik_password,
kik_node=None, device_id_override=None, android_id_override=None):
"""
Initializes a connection to Kik servers.
If you want to automatically login too, use the username and password parameters.
:param callback: a callback instance containing your callbacks implementation.
        This way you'll get notified whenever certain events happen.
Look at the KikClientCallback class for more details
:param kik_username: the kik username or email to log in with.
:param kik_password: the kik password to log in with.
:param kik_node: the username plus 3 letters after the "_" and before the "@" in the JID. If you know it,
authentication will happen faster and without a login. otherwise supply None.
"""
self.username = kik_username
self.password = kik_password
self.kik_node = kik_node
self.kik_email = None
self.device_id_override = device_id_override
self.android_id_override = android_id_override
self.callback = callback
self.authenticator = AuthStanza(self)
self.connected = False
self.authenticated = False
self.connection = None
self.is_expecting_connection_reset = False
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self._known_users_information = set()
self._new_user_added_event = Event()
self.should_login_on_connection = kik_username is not None and kik_password is not None
self._connect()
def _connect(self):
"""
Runs the kik connection thread, which creates an encrypted (SSL based) TCP connection
to the kik servers.
"""
self.kik_connection_thread = Thread(target=self._kik_connection_thread_function, name="Kik Connection")
self.kik_connection_thread.start()
def _on_connection_made(self):
"""
Gets called when the TCP connection to kik's servers is done and we are connected.
Now we might initiate a login request or an auth request.
"""
if self.username is not None and self.password is not None and self.kik_node is not None:
# we have all required credentials, we can authenticate
log.info("[+] Establishing authenticated connection using kik node '{}'...".format(self.kik_node))
message = login.EstablishAuthenticatedSessionRequest(self.kik_node, self.username, self.password, self.device_id_override)
self.initial_connection_payload = message.serialize()
else:
            message = login.MakeAnonymousStreamInitTag(self.device_id_override, n=1)
self.initial_connection_payload = message.serialize()
self.connection.send_raw_data(self.initial_connection_payload)
def _establish_authenticated_session(self, kik_node):
"""
Updates the kik node and creates a new connection to kik servers.
This new connection will be initiated with another payload which proves
we have the credentials for a specific user. This is how authentication is done.
:param kik_node: The user's kik node (everything before '@' in JID).
"""
self.kik_node = kik_node
log.info("[+] Closing current connection and creating a new authenticated one.")
self.disconnect()
self._connect()
def login(self, username, password, captcha_result=None):
"""
Sends a login request with the given kik username and password
:param username: Your kik username or email
:param password: Your kik password
:param captcha_result: If this parameter is provided, it is the answer to the captcha given in the previous
login attempt.
"""
self.username = username
self.password = password
login_request = login.LoginRequest(username, password, captcha_result, self.device_id_override, self.android_id_override)
login_type = "email" if '@' in self.username else "username"
log.info("[+] Logging in with {} '{}' and a given password {}..."
.format(login_type, username, '*' * len(password)))
return self._send_xmpp_element(login_request)
def register(self, email, username, password, first_name, last_name, birthday="1974-11-20", captcha_result=None):
"""
Sends a register request to sign up a new user to kik with the given details.
"""
self.username = username
self.password = password
register_message = sign_up.RegisterRequest(email, username, password, first_name, last_name, birthday, captcha_result,
self.device_id_override, self.android_id_override)
log.info("[+] Sending sign up request (name: {} {}, email: {})...".format(first_name, last_name, email))
return self._send_xmpp_element(register_message)
def request_roster(self, is_big=True, timestamp=None):
"""
Requests the list of chat partners (people and groups). This is called roster in XMPP terms.
"""
log.info("[+] Requesting roster (list of chat partners)...")
return self._send_xmpp_element(roster.FetchRosterRequest(is_big=is_big, timestamp=timestamp))
# -------------------------------
# Common Messaging Operations
# -------------------------------
def send_chat_message(self, peer_jid: str, message: str, bot_mention_jid=None):
"""
Sends a text chat message to another person or a group with the given JID/username.
:param peer_jid: The Jabber ID for which to send the message (looks like username_ejs@talk.kik.com)
If you don't know the JID of someone, you can also specify a kik username here.
:param message: The actual message body
:param bot_mention_jid: If an official bot is referenced, their jid must be embedded as mention for them
to respond.
"""
peer_jid = self.get_jid(peer_jid)
if self.is_group_jid(peer_jid):
log.info("[+] Sending chat message '{}' to group '{}'...".format(message, peer_jid))
return self._send_xmpp_element(chatting.OutgoingGroupChatMessage(peer_jid, message, bot_mention_jid))
else:
log.info("[+] Sending chat message '{}' to user '{}'...".format(message, peer_jid))
return self._send_xmpp_element(chatting.OutgoingChatMessage(peer_jid, message, False, bot_mention_jid))
def send_chat_image(self, peer_jid: str, file, forward=True):
"""
Sends an image chat message to another person or a group with the given JID/username.
:param peer_jid: The Jabber ID for which to send the message (looks like username_ejs@talk.kik.com)
If you don't know the JID of someone, you can also specify a kik username here.
:param file: The path to the image file OR its bytes OR an IOBase object to send.
"""
peer_jid = self.get_jid(peer_jid)
if self.is_group_jid(peer_jid):
log.info("[+] Sending chat image to group '{}'...".format(peer_jid))
imageRequest = chatting.OutgoingGroupChatImage(peer_jid, file, forward)
else:
log.info("[+] Sending chat image to user '{}'...".format(peer_jid))
imageRequest = chatting.OutgoingChatImage(peer_jid, file, False, forward)
content.upload_gallery_image(imageRequest, self.kik_node + '@talk.kik.com', self.username, self.password)
return self._send_xmpp_element(imageRequest)
def send_read_receipt(self, peer_jid: str, receipt_message_id: str, group_jid=None):
"""
Sends a read receipt for a previously sent message, to a specific user or group.
:param peer_jid: The JID of the user to which to send the receipt.
:param receipt_message_id: The message ID that the receipt is sent for
        :param group_jid: If the receipt is sent for a message that was sent in a group,
this parameter should contain the group's JID
"""
log.info("[+] Sending read receipt to JID {} for message ID {}".format(peer_jid, receipt_message_id))
return self._send_xmpp_element(chatting.OutgoingReadReceipt(peer_jid, receipt_message_id, group_jid))
def send_delivered_receipt(self, peer_jid: str, receipt_message_id: str, group_jid: str = None):
"""
Sends a receipt indicating that a specific message was received, to another person.
:param peer_jid: The other peer's JID to send to receipt to
:param receipt_message_id: The message ID for which to generate the receipt
:param group_jid: The group's JID, in case the receipt is sent in a group (None otherwise)
"""
log.info("[+] Sending delivered receipt to JID {} for message ID {}".format(peer_jid, receipt_message_id))
return self._send_xmpp_element(chatting.OutgoingDeliveredReceipt(peer_jid, receipt_message_id, group_jid))
def send_is_typing(self, peer_jid: str, is_typing: bool):
"""
Updates the 'is typing' status of the bot during a conversation.
:param peer_jid: The JID that the notification will be sent to
:param is_typing: If true, indicates that we're currently typing, or False otherwise.
"""
if self.is_group_jid(peer_jid):
return self._send_xmpp_element(chatting.OutgoingGroupIsTypingEvent(peer_jid, is_typing))
else:
return self._send_xmpp_element(chatting.OutgoingIsTypingEvent(peer_jid, is_typing))
def send_gif_image(self, peer_jid: str, search_term):
"""
Sends a GIF image to another person or a group with the given JID/username.
        The GIF is taken from tenor.com, based on search keywords.
        :param peer_jid: The Jabber ID for which to send the message (looks like username_ejs@talk.kik.com)
        :param search_term: The search term to use when searching GIF images on tenor.com
"""
if self.is_group_jid(peer_jid):
log.info("[+] Sending a GIF message to group '{}'...".format(peer_jid))
return self._send_xmpp_element(chatting.OutgoingGIFMessage(peer_jid, search_term, True))
else:
log.info("[+] Sending a GIF message to user '{}'...".format(peer_jid))
return self._send_xmpp_element(chatting.OutgoingGIFMessage(peer_jid, search_term, False))
def request_info_of_users(self, peer_jids: Union[str, List[str]]):
"""
Requests basic information (username, JID, display name, picture) of some users.
When the information arrives, the callback on_peer_info_received() will fire.
:param peer_jids: The JID(s) or the username(s) for which to request the information.
If you want to request information for more than one user, supply a list of strings.
Otherwise, supply a string
"""
return self._send_xmpp_element(roster.QueryUsersInfoRequest(peer_jids))
def add_friend(self, peer_jid):
return self._send_xmpp_element(roster.AddFriendRequest(peer_jid))
def remove_friend(self, peer_jid):
return self._send_xmpp_element(roster.RemoveFriendRequest(peer_jid))
def send_link(self, peer_jid, link, title, text='', app_name='Webpage'):
return self._send_xmpp_element(chatting.OutgoingLinkShareEvent(peer_jid, link, title, text, app_name))
def xiphias_get_users(self, peer_jids: Union[str, List[str]]):
"""
Calls the new format xiphias message to request user data such as profile creation date
and background picture URL.
:param peer_jids: one jid, or a list of jids
"""
return self._send_xmpp_element(xiphias.UsersRequest(peer_jids))
def xiphias_get_users_by_alias(self, alias_jids: Union[str, List[str]]):
"""
Like xiphias_get_users, but for aliases instead of jids.
:param alias_jids: one jid, or a list of jids
"""
return self._send_xmpp_element(xiphias.UsersByAliasRequest(alias_jids))
# --------------------------
# Group Admin Operations
# -------------------------
def change_group_name(self, group_jid: str, new_name: str):
"""
        Changes a group's name to something new
:param group_jid: The JID of the group whose name should be changed
:param new_name: The new name to give to the group
"""
log.info("[+] Requesting a group name change for JID {} to '{}'".format(group_jid, new_name))
return self._send_xmpp_element(group_adminship.ChangeGroupNameRequest(group_jid, new_name))
def add_peer_to_group(self, group_jid, peer_jid):
"""
Adds someone to a group
:param group_jid: The JID of the group into which to add a user
:param peer_jid: The JID of the user to add
"""
log.info("[+] Requesting to add user {} into the group {}".format(peer_jid, group_jid))
return self._send_xmpp_element(group_adminship.AddToGroupRequest(group_jid, peer_jid))
def remove_peer_from_group(self, group_jid, peer_jid):
"""
Kicks someone out of a group
:param group_jid: The group JID from which to remove the user
:param peer_jid: The JID of the user to remove
"""
log.info("[+] Requesting removal of user {} from group {}".format(peer_jid, group_jid))
return self._send_xmpp_element(group_adminship.RemoveFromGroupRequest(group_jid, peer_jid))
def ban_member_from_group(self, group_jid, peer_jid):
"""
Bans a member from the group
:param group_jid: The JID of the relevant group
:param peer_jid: The JID of the user to ban
"""
log.info("[+] Requesting ban of user {} from group {}".format(peer_jid, group_jid))
return self._send_xmpp_element(group_adminship.BanMemberRequest(group_jid, peer_jid))
def unban_member_from_group(self, group_jid, peer_jid):
"""
        Undoes a ban of someone from a group
        :param group_jid: The JID of the relevant group
        :param peer_jid: The JID of the user to un-ban from the group
"""
log.info("[+] Requesting un-banning of user {} from the group {}".format(peer_jid, group_jid))
return self._send_xmpp_element(group_adminship.UnbanRequest(group_jid, peer_jid))
def join_group_with_token(self, group_hashtag, group_jid, join_token):
"""
Tries to join into a specific group, using a cryptographic token that was received earlier from a search
:param group_hashtag: The public hashtag of the group into which to join (like '#Music')
:param group_jid: The JID of the same group
:param join_token: a token that can be extracted in the callback on_group_search_response, after calling
search_group()
"""
log.info("[+] Trying to join the group '{}' with JID {}".format(group_hashtag, group_jid))
return self._send_xmpp_element(roster.GroupJoinRequest(group_hashtag, join_token, group_jid))
def leave_group(self, group_jid):
"""
Leaves a specific group
:param group_jid: The JID of the group to leave
"""
log.info("[+] Leaving group {}".format(group_jid))
return self._send_xmpp_element(group_adminship.LeaveGroupRequest(group_jid))
def promote_to_admin(self, group_jid, peer_jid):
"""
Turns some group member into an admin
:param group_jid: The group JID for which the member will become an admin
:param peer_jid: The JID of user to turn into an admin
"""
log.info("[+] Promoting user {} to admin in group {}".format(peer_jid, group_jid))
return self._send_xmpp_element(group_adminship.PromoteToAdminRequest(group_jid, peer_jid))
def demote_admin(self, group_jid, peer_jid):
"""
        Turns an admin of a group into a regular user with no adminship capabilities.
:param group_jid: The group JID in which the rights apply
:param peer_jid: The admin user to demote
:return:
"""
log.info("[+] Demoting user {} to a regular member in group {}".format(peer_jid, group_jid))
return self._send_xmpp_element(group_adminship.DemoteAdminRequest(group_jid, peer_jid))
def add_members(self, group_jid, peer_jids: Union[str, List[str]]):
"""
Adds multiple users to a specific group at once
:param group_jid: The group into which to join the users
:param peer_jids: a list (or a single string) of JIDs to add to the group
"""
log.info("[+] Adding some members to the group {}".format(group_jid))
return self._send_xmpp_element(group_adminship.AddMembersRequest(group_jid, peer_jids))
# ----------------------
# Other Operations
# ----------------------
def search_group(self, search_query):
"""
Searches for public groups using a query
Results will be returned using the on_group_search_response() callback
:param search_query: The query that contains some of the desired groups' name.
"""
log.info("[+] Initiating a search for groups using the query '{}'".format(search_query))
return self._send_xmpp_element(roster.GroupSearchRequest(search_query))
def check_username_uniqueness(self, username):
"""
Checks if the given username is available for registration.
Results are returned in the on_username_uniqueness_received() callback
:param username: The username to check for its existence
"""
log.info("[+] Checking for Uniqueness of username '{}'".format(username))
return self._send_xmpp_element(sign_up.CheckUsernameUniquenessRequest(username))
def set_profile_picture(self, filename):
"""
Sets the profile picture of the current user
:param filename: The path to the file OR its bytes OR an IOBase object to set
"""
log.info("[+] Setting the profile picture to file '{}'".format(filename))
profile_pictures.set_profile_picture(filename, self.kik_node + '@talk.kik.com', self.username, self.password)
def set_background_picture(self, filename):
"""
Sets the background picture of the current user
:param filename: The path to the image file OR its bytes OR an IOBase object to set
"""
log.info("[+] Setting the background picture to filename '{}'".format(filename))
profile_pictures.set_background_picture(filename, self.kik_node + '@talk.kik.com', self.username, self.password)
def send_captcha_result(self, stc_id, captcha_result):
"""
In case a captcha was encountered, solves it using an element ID and a response parameter.
The stc_id can be extracted from a CaptchaElement, and the captcha result needs to be extracted manually with
a browser. Please see solve_captcha_wizard() for the steps needed to solve the captcha
:param stc_id: The stc_id from the CaptchaElement that was encountered
:param captcha_result: The answer to the captcha (which was generated after solved by a human)
"""
log.info("[+] Trying to solve a captcha with result: '{}'".format(captcha_result))
return self._send_xmpp_element(login.CaptchaSolveRequest(stc_id, captcha_result))
def change_display_name(self, first_name, last_name):
"""
Changes the display name
:param first_name: The first name
:param last_name: The last name
"""
log.info("[+] Changing the display name to '{} {}'".format(first_name, last_name))
return self._send_xmpp_element(account.ChangeNameRequest(first_name, last_name))
def change_password(self, new_password, email):
"""
Changes the login password
:param new_password: The new login password to set for the account
:param email: The current email of the account
"""
log.info("[+] Changing the password of the account")
return self._send_xmpp_element(account.ChangePasswordRequest(self.password, new_password, email, self.username))
def change_email(self, old_email, new_email):
"""
Changes the email of the current account
:param old_email: The current email
:param new_email: The new email to set
"""
log.info("[+] Changing account email to '{}'".format(new_email))
return self._send_xmpp_element(account.ChangeEmailRequest(self.password, old_email, new_email))
def disconnect(self):
"""
Closes the connection to kik's servers.
"""
log.info("[!] Disconnecting.")
self.connection.close()
self.is_expecting_connection_reset = True
# self.loop.call_soon(self.loop.stop)
# -----------------
# Internal methods
# -----------------
def _send_xmpp_element(self, message: XMPPElement):
"""
Serializes and sends the given XMPP element to kik servers
        :param message: The XMPP element to send
:return: The UUID of the element that was sent
"""
while not self.connected:
log.debug("[!] Waiting for connection.")
time.sleep(0.1)
if type(message.serialize()) is list:
log.debug("[!] Sending multi packet data.")
packets = message.serialize()
for p in packets:
self.loop.call_soon_threadsafe(self.connection.send_raw_data, p)
return message.message_id
else:
self.loop.call_soon_threadsafe(self.connection.send_raw_data, message.serialize())
return message.message_id
@run_in_new_thread
def _on_new_data_received(self, data: bytes):
"""
Gets called whenever we get a whole new XML element from kik's servers.
:param data: The data received (bytes)
"""
if data == b' ':
# Happens every half hour. Disconnect after 10th time. Some kind of keep-alive? Let's send it back.
self.loop.call_soon_threadsafe(self.connection.send_raw_data, b' ')
return
xml_element = BeautifulSoup(data.decode('utf-8'), features='xml')
xml_element = next(iter(xml_element)) if len(xml_element) > 0 else xml_element
# choose the handler based on the XML tag name
if xml_element.name == "k":
self._handle_received_k_element(xml_element)
if xml_element.name == "iq":
self._handle_received_iq_element(xml_element)
elif xml_element.name == "message":
self._handle_xmpp_message(xml_element)
elif xml_element.name == 'stc':
self.callback.on_captcha_received(login.CaptchaElement(xml_element))
def _handle_received_k_element(self, k_element: BeautifulSoup):
"""
The 'k' element appears to be kik's connection-related stanza.
It lets us know if a connection or a login was successful or not.
:param k_element: The XML element we just received from kik.
"""
if k_element['ok'] == "1":
self.connected = True
if 'ts' in k_element.attrs:
# authenticated!
log.info("[+] Authenticated successfully.")
self.authenticated = True
self.authenticator.send_stanza()
self.callback.on_authenticated()
elif self.should_login_on_connection:
self.login(self.username, self.password)
self.should_login_on_connection = False
else:
self.callback.on_connection_failed(login.ConnectionFailedResponse(k_element))
def _handle_received_iq_element(self, iq_element: BeautifulSoup):
"""
        The 'iq' (info/query) stanzas in XMPP represent the request/response elements.
We send an iq stanza to request for information, and we receive an iq stanza in response to this request,
with the same ID attached to it.
For a great explanation of this stanza: http://slixmpp.readthedocs.io/api/stanza/iq.html
:param iq_element: The iq XML element we just received from kik.
"""
if iq_element.error and "bad-request" in dir(iq_element.error):
raise Exception("Received a Bad Request error for stanza with ID {}".format(iq_element.attrs['id']))
query = iq_element.query
xml_namespace = query['xmlns'] if 'xmlns' in query.attrs else query['xmlns:']
self._handle_response(xml_namespace, iq_element)
def _handle_response(self, xmlns, iq_element):
"""
Handles a response that we receive from kik after our initiated request.
Examples: response to a group search, response to fetching roster, etc.
:param xmlns: The XML namespace that helps us understand what type of response this is
:param iq_element: The actual XML element that contains the response
"""
if xmlns == 'kik:iq:check-unique':
xmlns_handlers.CheckUsernameUniqueResponseHandler(self.callback, self).handle(iq_element)
elif xmlns == 'jabber:iq:register':
xmlns_handlers.RegisterOrLoginResponseHandler(self.callback, self).handle(iq_element)
elif xmlns == 'jabber:iq:roster':
xmlns_handlers.RosterResponseHandler(self.callback, self).handle(iq_element)
elif xmlns == 'kik:iq:friend' or xmlns == 'kik:iq:friend:batch':
xmlns_handlers.PeersInfoResponseHandler(self.callback, self).handle(iq_element)
elif xmlns == 'kik:iq:xiphias:bridge':
xmlns_handlers.XiphiasHandler(self.callback, self).handle(iq_element)
elif xmlns == 'kik:auth:cert':
self.authenticator.handle(iq_element)
def _handle_xmpp_message(self, xmpp_message: BeautifulSoup):
"""
        An XMPP 'message', in the case of Kik, is the actual stanza we receive when someone sends us a message
        (whether groupchat or not), starts typing, stops typing, reads our message, etc.
Examples: http://slixmpp.readthedocs.io/api/stanza/message.html
:param xmpp_message: The XMPP 'message' element we received
"""
self._handle_kik_event(xmpp_message)
def _handle_kik_event(self, xmpp_element):
"""
Handles kik "push" events, like a new message that arrives.
:param xmpp_element: The XML element that we received with the information about the event
"""
if "xmlns" in xmpp_element.attrs:
# The XML namespace is different for iOS and Android, handle the messages with their actual type
if xmpp_element['type'] == "chat":
xmlns_handlers.XMPPMessageHandler(self.callback, self).handle(xmpp_element)
elif xmpp_element['type'] == "groupchat":
xmlns_handlers.GroupXMPPMessageHandler(self.callback, self).handle(xmpp_element)
elif xmpp_element['type'] == "receipt":
if xmpp_element.g:
self.callback.on_group_receipts_received(chatting.IncomingGroupReceiptsEvent(xmpp_element))
else:
xmlns_handlers.XMPPMessageHandler(self.callback, self).handle(xmpp_element)
else:
# iPads send messages without xmlns, try to handle it as jabber:client
xmlns_handlers.XMPPMessageHandler(self.callback, self).handle(xmpp_element)
def _on_connection_lost(self):
"""
Gets called when the connection to kik's servers is (unexpectedly) lost.
It could be that we received a connection reset packet for example.
:return:
"""
self.connected = False
if not self.is_expecting_connection_reset:
log.info("[-] The connection was unexpectedly lost")
self.is_expecting_connection_reset = False
def _kik_connection_thread_function(self):
"""
The Kik Connection thread main function.
Initiates the asyncio loop and actually connects.
"""
        # If there is already a connection going, then wait for it to stop
if self.loop and self.loop.is_running():
self.loop.call_soon_threadsafe(self.connection.close)
log.debug("[!] Waiting for the previous connection to stop.")
while self.loop.is_running():
log.debug("[!] Still Waiting for the previous connection to stop.")
time.sleep(1)
log.info("[+] Initiating the Kik Connection thread and connecting to kik server...")
# create the connection and launch the asyncio loop
self.connection = KikConnection(self.loop, self)
connection_coroutine = self.loop.create_connection(lambda: self.connection, HOST, PORT, ssl=True)
self.loop.run_until_complete(connection_coroutine)
log.debug("[!] Running main loop")
self.loop.run_forever()
log.debug("[!] Main loop ended.")
self.callback.on_disconnected()
def get_jid(self, username_or_jid):
if '@' in username_or_jid:
# this is already a JID.
return username_or_jid
else:
username = username_or_jid
# first search if we already have it
if self.get_jid_from_cache(username) is None:
# go request for it
self._new_user_added_event.clear()
self.request_info_of_users(username)
if not self._new_user_added_event.wait(5.0):
raise TimeoutError("Could not get the JID for username {} in time".format(username))
return self.get_jid_from_cache(username)
def get_jid_from_cache(self, username):
for user in self._known_users_information:
if user.username.lower() == username.lower():
return user.jid
return None
@staticmethod
def log_format():
return '[%(asctime)-15s] %(levelname)-6s (thread %(threadName)-10s): %(message)s'
@staticmethod
def is_group_jid(jid):
if '@talk.kik.com' in jid:
return False
elif '@groups.kik.com' in jid:
return True
else:
raise exceptions.KikApiException('Not a valid jid')
class KikConnection(Protocol):
def __init__(self, loop, api: KikClient):
self.api = api
self.loop = loop
self.partial_data = None # type: bytes
self.partial_data_start_tag = None # type: str
self.transport = None # type: Transport
def connection_made(self, transport: Transport):
self.transport = transport
log.info("[!] Connected.")
self.api._on_connection_made()
def data_received(self, data: bytes):
log.debug("[+] Received raw data: %s", data)
if self.partial_data is None:
if len(data) < 16384:
self.loop.call_soon_threadsafe(self.api._on_new_data_received, data)
else:
log.debug("Multi-packet data, waiting for next packet.")
start_tag, is_closing = self.parse_start_tag(data)
self.partial_data_start_tag = start_tag
self.partial_data = data
else:
if self.ends_with_tag(self.partial_data_start_tag, data):
self.loop.call_soon_threadsafe(self.api._on_new_data_received, self.partial_data + data)
self.partial_data = None
self.partial_data_start_tag = None
else:
log.debug("[!] Waiting for another packet, size={}".format(len(self.partial_data)))
self.partial_data += data
@staticmethod
def parse_start_tag(data: bytes) -> Tuple[bytes, bool]:
tag = data.lstrip(b'<')
tag = tag.split(b'>')[0]
tag = tag.split(b' ')[0]
is_closing = tag.endswith(b'/')
if is_closing:
tag = tag[:-1]
return tag, is_closing
@staticmethod
def ends_with_tag(expected_end_tag: bytes, data: bytes):
return data.endswith(b'</' + expected_end_tag + b'>')
def connection_lost(self, exception):
self.loop.call_soon_threadsafe(self.api._on_connection_lost)
self.loop.stop()
def send_raw_data(self, data: bytes):
log.debug("[+] Sending raw data: %s", data)
self.transport.write(data)
def close(self):
if self.transport:
self.transport.write(b'</k>')
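# --- Illustrative usage sketch (not part of the library) -----------------------
# KikClient is driven through a KikClientCallback implementation: subclass it,
# override the callbacks you care about, and pass an instance to the
# constructor together with your credentials. The subclass below only overrides
# on_authenticated (which the client invokes above once the session is up); the
# credentials and peer username are placeholders and the function is never
# called here -- treat this as a sketch of the intended wiring, not a canonical
# walkthrough.
class _SketchCallback(callbacks.KikClientCallback):
    def on_authenticated(self):
        log.info("Sketch: authenticated with Kik")


def _run_client_sketch():
    # Hypothetical credentials; constructing the client connects and logs in.
    client = KikClient(callback=_SketchCallback(), kik_username="example_user", kik_password="example_password")
    # Once connected, the high-level helpers defined above can be used, e.g.:
    client.request_roster()
    client.send_chat_message("some_username", "Hello from the sketch!")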
|
main.py
|
from tkinter import Tk, Button, filedialog, messagebox, font
from threading import Thread
from utils import Recorder, BarWindow, display_img, ICON
# Animation for when rec_btn is pressed
def press_rec(event):
if recorder.is_recording:
rec_btn.configure(image=stop_pressed)
else:
rec_btn.configure(image=mic_pressed)
# Starts/stops recording and creates the video file
def release_rec(event):
if recorder.is_recording:
rec_btn.configure(image=mic)
progress_bar = BarWindow()
Thread(target=recorder.stop_rec, args=(progress_bar,)).start()
progress_bar.pg_bar()
else:
recorder.start_rec()
rec_btn.configure(image=stop)
# Opens the selected file and converts it into a video
def create_from_audio():
root.filename = filedialog.askopenfilename(
title="Select a file",
filetypes=(
("MP3", "*.mp3"),
("All", "*.*"),
),
)
if root.filename != "":
progress_bar = BarWindow()
try:
Thread(
target=recorder.create_from_audio, args=(root.filename, progress_bar)
).start()
progress_bar.pg_bar()
except Exception as error:
messagebox.showerror("Error", error)
root = Tk()
root.configure(width=800, height=550, bg="black")
root.minsize(width=200, height=200)
root.title("Audio Converter")
root.iconbitmap(ICON)
recorder = Recorder()
# rec_btn images
mic = display_img("images/mic.png")
mic_pressed = display_img("images/mic_pressed.png")
stop = display_img("images/stop.png")
stop_pressed = display_img("images/stop_pressed.png")
rec_btn = Button(
image=mic, borderwidth=0, relief="sunken", bg="black", activebackground="black"
)
rec_btn.bind("<ButtonPress>", press_rec)
rec_btn.bind("<ButtonRelease>", release_rec)
convert_btn = Button(
text="♫ → 📷",
command=create_from_audio,
relief="flat",
fg="white",
bg="black",
activebackground="gray",
)
convert_btn["font"] = font.Font(size=15)
if __name__ == "__main__":
rec_btn.place(relx=0.5, rely=0.5, anchor="center")
convert_btn.place(relx=1.0, y=0, anchor="ne")
root.mainloop()
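# --- Illustrative sketch (not wired into the app above) ------------------------
# The record button is driven by binding <ButtonPress> and <ButtonRelease>
# separately: press swaps in the "pressed" image, release starts/stops the
# recorder and hands the slow work to a Thread so the Tk mainloop stays
# responsive. The standalone demo below (hypothetical text labels instead of
# images, never called by the app) shows that same press/release plus
# worker-thread pattern in isolation.
def _press_release_demo():
    import time

    demo_root = Tk()
    demo_btn = Button(demo_root, text="hold me")

    def slow_work():
        # Stand-in for Recorder.stop_rec()/create_from_audio(): pretend to work.
        time.sleep(1)
        print("slow work finished")

    def on_press(event):
        demo_btn.configure(text="pressed...")

    def on_release(event):
        demo_btn.configure(text="hold me")
        Thread(target=slow_work, daemon=True).start()

    demo_btn.bind("<ButtonPress>", on_press)
    demo_btn.bind("<ButtonRelease>", on_release)
    demo_btn.pack()
    demo_root.mainloop()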
|
test_writer.py
|
import os
import socket
import tempfile
import threading
import time
import mock
import msgpack
import pytest
from six.moves import BaseHTTPServer
from six.moves import socketserver
from ddtrace.constants import KEEP_SPANS_RATE_KEY
from ddtrace.internal.compat import PY3
from ddtrace.internal.compat import get_connection_response
from ddtrace.internal.compat import httplib
from ddtrace.internal.uds import UDSHTTPConnection
from ddtrace.internal.writer import AgentWriter
from ddtrace.internal.writer import LogWriter
from ddtrace.internal.writer import Response
from ddtrace.internal.writer import _human_size
from ddtrace.span import Span
from tests.utils import AnyInt
from tests.utils import BaseTestCase
from tests.utils import override_env
class DummyOutput:
def __init__(self):
self.entries = []
def write(self, message):
self.entries.append(message)
def flush(self):
pass
class AgentWriterTests(BaseTestCase):
N_TRACES = 11
def test_metrics_disabled(self):
statsd = mock.Mock()
writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=False)
for i in range(10):
writer.write(
[Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
)
writer.stop()
writer.join()
statsd.increment.assert_not_called()
statsd.distribution.assert_not_called()
def test_metrics_bad_endpoint(self):
statsd = mock.Mock()
writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=True)
for i in range(10):
writer.write(
[Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
)
writer.stop()
writer.join()
statsd.distribution.assert_has_calls(
[
mock.call("datadog.tracer.buffer.accepted.traces", 10, tags=[]),
mock.call("datadog.tracer.buffer.accepted.spans", 50, tags=[]),
mock.call("datadog.tracer.http.requests", writer.RETRY_ATTEMPTS, tags=[]),
mock.call("datadog.tracer.http.errors", 1, tags=["type:err"]),
mock.call("datadog.tracer.http.dropped.bytes", AnyInt(), tags=[]),
],
any_order=True,
)
def test_metrics_trace_too_big(self):
statsd = mock.Mock()
writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=True)
for i in range(10):
writer.write(
[Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
)
writer.write(
[Span(tracer=None, name="a" * 5000, trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(2 ** 10)]
)
writer.stop()
writer.join()
statsd.distribution.assert_has_calls(
[
mock.call("datadog.tracer.buffer.accepted.traces", 10, tags=[]),
mock.call("datadog.tracer.buffer.accepted.spans", 50, tags=[]),
mock.call("datadog.tracer.buffer.dropped.traces", 1, tags=["reason:t_too_big"]),
mock.call("datadog.tracer.buffer.dropped.bytes", AnyInt(), tags=["reason:t_too_big"]),
mock.call("datadog.tracer.http.requests", writer.RETRY_ATTEMPTS, tags=[]),
mock.call("datadog.tracer.http.errors", 1, tags=["type:err"]),
mock.call("datadog.tracer.http.dropped.bytes", AnyInt(), tags=[]),
],
any_order=True,
)
def test_metrics_multi(self):
statsd = mock.Mock()
writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=True)
for i in range(10):
writer.write(
[Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
)
writer.flush_queue()
statsd.distribution.assert_has_calls(
[
mock.call("datadog.tracer.buffer.accepted.traces", 10, tags=[]),
mock.call("datadog.tracer.buffer.accepted.spans", 50, tags=[]),
mock.call("datadog.tracer.http.requests", writer.RETRY_ATTEMPTS, tags=[]),
mock.call("datadog.tracer.http.errors", 1, tags=["type:err"]),
mock.call("datadog.tracer.http.dropped.bytes", AnyInt(), tags=[]),
],
any_order=True,
)
statsd.reset_mock()
for i in range(10):
writer.write(
[Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
)
writer.stop()
writer.join()
statsd.distribution.assert_has_calls(
[
mock.call("datadog.tracer.buffer.accepted.traces", 10, tags=[]),
mock.call("datadog.tracer.buffer.accepted.spans", 50, tags=[]),
mock.call("datadog.tracer.http.requests", writer.RETRY_ATTEMPTS, tags=[]),
mock.call("datadog.tracer.http.errors", 1, tags=["type:err"]),
mock.call("datadog.tracer.http.dropped.bytes", AnyInt(), tags=[]),
],
any_order=True,
)
def test_write_sync(self):
statsd = mock.Mock()
writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=True, sync_mode=True)
writer.write([Span(tracer=None, name="name", trace_id=1, span_id=j, parent_id=j - 1 or None) for j in range(5)])
statsd.distribution.assert_has_calls(
[
mock.call("datadog.tracer.buffer.accepted.traces", 1, tags=[]),
mock.call("datadog.tracer.buffer.accepted.spans", 5, tags=[]),
mock.call("datadog.tracer.http.requests", writer.RETRY_ATTEMPTS, tags=[]),
mock.call("datadog.tracer.http.errors", 1, tags=["type:err"]),
mock.call("datadog.tracer.http.dropped.bytes", AnyInt(), tags=[]),
],
any_order=True,
)
def test_drop_reason_bad_endpoint(self):
statsd = mock.Mock()
writer_metrics_reset = mock.Mock()
writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=False)
writer._metrics_reset = writer_metrics_reset
for i in range(10):
writer.write(
[Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
)
writer.stop()
writer.join()
writer_metrics_reset.assert_called_once()
assert 1 == writer._metrics["http.errors"]["count"]
assert 10 == writer._metrics["http.dropped.traces"]["count"]
def test_drop_reason_trace_too_big(self):
statsd = mock.Mock()
writer_metrics_reset = mock.Mock()
writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=False)
writer._metrics_reset = writer_metrics_reset
for i in range(10):
writer.write(
[Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
)
writer.write(
[Span(tracer=None, name="a" * 5000, trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(2 ** 10)]
)
writer.stop()
writer.join()
writer_metrics_reset.assert_called_once()
assert 1 == writer._metrics["buffer.dropped.traces"]["count"]
assert ["reason:t_too_big"] == writer._metrics["buffer.dropped.traces"]["tags"]
def test_drop_reason_buffer_full(self):
statsd = mock.Mock()
writer_metrics_reset = mock.Mock()
writer = AgentWriter(agent_url="http://asdf:1234", buffer_size=5300, dogstatsd=statsd, report_metrics=False)
writer._metrics_reset = writer_metrics_reset
for i in range(10):
writer.write(
[Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
)
writer.write([Span(tracer=None, name="a", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)])
writer.stop()
writer.join()
writer_metrics_reset.assert_called_once()
assert 1 == writer._metrics["buffer.dropped.traces"]["count"]
assert ["reason:full"] == writer._metrics["buffer.dropped.traces"]["tags"]
def test_drop_reason_encoding_error(self):
n_traces = 10
statsd = mock.Mock()
writer_encoder = mock.Mock()
writer_encoder.__len__ = (lambda *args: n_traces).__get__(writer_encoder)
writer_metrics_reset = mock.Mock()
writer_encoder.encode.side_effect = Exception
writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=False)
writer._encoder = writer_encoder
writer._metrics_reset = writer_metrics_reset
for i in range(n_traces):
writer.write(
[Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
)
writer.stop()
writer.join()
writer_metrics_reset.assert_called_once()
assert 10 == writer._metrics["encoder.dropped.traces"]["count"]
def test_keep_rate(self):
statsd = mock.Mock()
writer_run_periodic = mock.Mock()
writer_put = mock.Mock()
writer_put.return_value = Response(status=200)
writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=False)
writer.run_periodic = writer_run_periodic
writer._put = writer_put
traces = [
[Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
for i in range(4)
]
traces_too_big = [
[Span(tracer=None, name="a" * 5000, trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(2 ** 10)]
for i in range(4)
]
# 1. We write 4 traces successfully.
for trace in traces:
writer.write(trace)
writer.flush_queue()
payload = msgpack.unpackb(writer_put.call_args.args[0])
# No previous drops.
assert 0.0 == writer._drop_sma.get()
# 4 traces written.
assert 4 == len(payload)
# 100% of traces kept (refers to the past).
# No traces sent before now so 100% kept.
for trace in payload:
assert 1.0 == trace[0]["metrics"].get(KEEP_SPANS_RATE_KEY, -1)
# 2. We fail to write 4 traces because of size limitation.
for trace in traces_too_big:
writer.write(trace)
writer.flush_queue()
# 50% of traces were dropped historically.
# 4 successfully written before and 4 dropped now.
assert 0.5 == writer._drop_sma.get()
# put not called since no new traces are available.
writer_put.assert_called_once()
# 3. We write 2 traces successfully.
for trace in traces[:2]:
writer.write(trace)
writer.flush_queue()
payload = msgpack.unpackb(writer_put.call_args.args[0])
# 40% of traces were dropped historically.
assert 0.4 == writer._drop_sma.get()
# 2 traces written.
assert 2 == len(payload)
# 50% of traces kept (refers to the past).
# We had 4 successfully written and 4 dropped.
for trace in payload:
assert 0.5 == trace[0]["metrics"].get(KEEP_SPANS_RATE_KEY, -1)
# 4. We write 1 trace successfully and fail to write 3.
writer.write(traces[0])
for trace in traces_too_big[:3]:
writer.write(trace)
writer.flush_queue()
payload = msgpack.unpackb(writer_put.call_args.args[0])
# 50% of traces were dropped historically.
assert 0.5 == writer._drop_sma.get()
# 1 trace written.
assert 1 == len(payload)
# 60% of traces kept (refers to the past).
# We had 4 successfully written, then 4 dropped, then 2 written.
for trace in payload:
assert 0.6 == trace[0]["metrics"].get(KEEP_SPANS_RATE_KEY, -1)
class LogWriterTests(BaseTestCase):
N_TRACES = 11
def create_writer(self):
self.output = DummyOutput()
writer = LogWriter(out=self.output)
for i in range(self.N_TRACES):
writer.write(
[Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(7)]
)
return writer
def test_log_writer(self):
self.create_writer()
self.assertEqual(len(self.output.entries), self.N_TRACES)
def test_humansize():
assert _human_size(0) == "0B"
assert _human_size(999) == "999B"
assert _human_size(1000) == "1KB"
assert _human_size(10000) == "10KB"
assert _human_size(100000) == "100KB"
assert _human_size(1000000) == "1MB"
assert _human_size(10000000) == "10MB"
assert _human_size(1000000000) == "1GB"
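# --- Illustrative sketch (not a ddtrace API) ------------------------------------
# A formatting helper consistent with the assertions above: sizes are scaled by
# powers of 1000 and suffixed B/KB/MB/GB. This only documents the behaviour the
# test expects of _human_size; it is not the library's implementation.
def _human_size_sketch(nbytes):
    for unit in ("B", "KB", "MB"):
        if abs(nbytes) < 1000:
            # "{:g}" renders 1.0 as "1", so 1000 bytes formats as "1KB" as asserted above.
            return "{:g}{}".format(nbytes, unit)
        nbytes /= 1000.0
    return "{:g}GB".format(nbytes)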


class _BaseHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    error_message_format = "%(message)s\n"
    error_content_type = "text/plain"

    @staticmethod
    def log_message(format, *args):  # noqa: A002
        pass


class _APIEndpointRequestHandlerTest(_BaseHTTPRequestHandler):
    expected_path_prefix = None

    def do_PUT(self):
        if self.expected_path_prefix is not None:
            assert self.path.startswith(self.expected_path_prefix)
        self.send_error(200, "OK")


class _TimeoutAPIEndpointRequestHandlerTest(_BaseHTTPRequestHandler):
    def do_PUT(self):
        # This server sleeps longer than our timeout
        time.sleep(5)


class _ResetAPIEndpointRequestHandlerTest(_BaseHTTPRequestHandler):
    def do_PUT(self):
        return


_HOST = "0.0.0.0"
_PORT = 8743
_TIMEOUT_PORT = _PORT + 1
_RESET_PORT = _TIMEOUT_PORT + 1


class UDSHTTPServer(socketserver.UnixStreamServer, BaseHTTPServer.HTTPServer):
    def server_bind(self):
        BaseHTTPServer.HTTPServer.server_bind(self)


def _make_uds_server(path, request_handler):
    server = UDSHTTPServer(path, request_handler)
    t = threading.Thread(target=server.serve_forever)
    # Set daemon just in case something fails
    t.daemon = True
    t.start()

    # Wait for the server to start
    resp = None
    while resp != 200:
        conn = UDSHTTPConnection(server.server_address, _HOST, 2019)
        try:
            conn.request("PUT", "/")
            resp = get_connection_response(conn).status
        finally:
            conn.close()
        time.sleep(0.01)

    return server, t


@pytest.fixture
def endpoint_uds_server():
    socket_name = tempfile.mktemp()
    handler = _APIEndpointRequestHandlerTest
    server, thread = _make_uds_server(socket_name, handler)
    handler.expected_path_prefix = "/v0."
    try:
        yield server
    finally:
        handler.expected_path_prefix = None
        server.shutdown()
        thread.join()
        os.unlink(socket_name)


def _make_server(port, request_handler):
    server = BaseHTTPServer.HTTPServer((_HOST, port), request_handler)
    t = threading.Thread(target=server.serve_forever)
    # Set daemon just in case something fails
    t.daemon = True
    t.start()
    return server, t
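

# _make_uds_server above polls its socket until the handler answers, while
# _make_server returns as soon as the serving thread starts. The helper below
# is an illustrative sketch (not part of the original suite) of an equivalent
# readiness wait for the TCP servers, should one ever be needed.
def _wait_for_tcp_server(host, port, timeout=5.0):
    deadline = time.time() + timeout
    while True:
        try:
            # Succeeds once the server is accepting connections.
            socket.create_connection((host, port), timeout=0.1).close()
            return
        except socket.error:
            if time.time() >= deadline:
                raise
            time.sleep(0.01)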


@pytest.fixture(scope="module")
def endpoint_test_timeout_server():
    server, thread = _make_server(_TIMEOUT_PORT, _TimeoutAPIEndpointRequestHandlerTest)
    try:
        yield thread
    finally:
        server.shutdown()
        thread.join()


@pytest.fixture(scope="module")
def endpoint_test_reset_server():
    server, thread = _make_server(_RESET_PORT, _ResetAPIEndpointRequestHandlerTest)
    try:
        yield thread
    finally:
        server.shutdown()
        thread.join()


@pytest.fixture
def endpoint_assert_path():
    handler = _APIEndpointRequestHandlerTest
    server, thread = _make_server(_PORT, handler)

    def configure(expected_path_prefix=None):
        handler.expected_path_prefix = expected_path_prefix
        return thread

    try:
        yield configure
    finally:
        handler.expected_path_prefix = None
        server.shutdown()
        thread.join()


def test_agent_url_path(endpoint_assert_path):
    # test without base path
    endpoint_assert_path("/v0.")
    writer = AgentWriter(agent_url="http://%s:%s/" % (_HOST, _PORT))
    writer._encoder.put([Span(None, "foobar")])
    writer.flush_queue(raise_exc=True)

    # test without base path nor trailing slash
    writer = AgentWriter(agent_url="http://%s:%s" % (_HOST, _PORT))
    writer._encoder.put([Span(None, "foobar")])
    writer.flush_queue(raise_exc=True)

    # test with a base path
    endpoint_assert_path("/test/v0.")
    writer = AgentWriter(agent_url="http://%s:%s/test/" % (_HOST, _PORT))
    writer._encoder.put([Span(None, "foobar")])
    writer.flush_queue(raise_exc=True)


def test_flush_connection_timeout_connect():
    writer = AgentWriter(agent_url="http://%s:%s" % (_HOST, 2019))
    if PY3:
        exc_type = OSError
    else:
        exc_type = socket.error
    with pytest.raises(exc_type):
        writer._encoder.put([Span(None, "foobar")])
        writer.flush_queue(raise_exc=True)


def test_flush_connection_timeout(endpoint_test_timeout_server):
    writer = AgentWriter(agent_url="http://%s:%s" % (_HOST, _TIMEOUT_PORT))
    with pytest.raises(socket.timeout):
        writer._encoder.put([Span(None, "foobar")])
        writer.flush_queue(raise_exc=True)


def test_flush_connection_reset(endpoint_test_reset_server):
    writer = AgentWriter(agent_url="http://%s:%s" % (_HOST, _RESET_PORT))
    if PY3:
        exc_types = (httplib.BadStatusLine, ConnectionResetError)
    else:
        exc_types = (httplib.BadStatusLine,)
    with pytest.raises(exc_types):
        writer._encoder.put([Span(None, "foobar")])
        writer.flush_queue(raise_exc=True)


def test_flush_connection_uds(endpoint_uds_server):
    writer = AgentWriter(agent_url="unix://%s" % endpoint_uds_server.server_address)
    writer._encoder.put([Span(None, "foobar")])
    writer.flush_queue(raise_exc=True)


def test_flush_queue_raise():
    writer = AgentWriter(agent_url="http://dne:1234")

    # Should not raise
    writer.write([])
    writer.flush_queue(raise_exc=False)

    error = OSError if PY3 else IOError
    with pytest.raises(error):
        writer.write([])
        writer.flush_queue(raise_exc=True)


def test_racing_start():
    writer = AgentWriter(agent_url="http://dne:1234")

    def do_write(i):
        writer.write([Span(None, str(i))])

    ts = [threading.Thread(target=do_write, args=(i,)) for i in range(100)]
    for t in ts:
        t.start()

    for t in ts:
        t.join()

    assert len(writer._encoder) == 100


def test_additional_headers():
    with override_env(dict(_DD_TRACE_WRITER_ADDITIONAL_HEADERS="additional-header:additional-value,header2:value2")):
        writer = AgentWriter(agent_url="http://localhost:9126")
        assert writer._headers["additional-header"] == "additional-value"
        assert writer._headers["header2"] == "value2"


def test_bad_encoding(monkeypatch):
    monkeypatch.setenv("DD_TRACE_API_VERSION", "foo")

    with pytest.raises(ValueError):
        AgentWriter(agent_url="http://localhost:9126")